repo
stringlengths 7
55
| path
stringlengths 4
223
| func_name
stringlengths 1
134
| original_string
stringlengths 75
104k
| language
stringclasses 1
value | code
stringlengths 75
104k
| code_tokens
listlengths 19
28.4k
| docstring
stringlengths 1
46.9k
| docstring_tokens
listlengths 1
1.97k
| sha
stringlengths 40
40
| url
stringlengths 87
315
| partition
stringclasses 1
value |
|---|---|---|---|---|---|---|---|---|---|---|---|
googleapis/google-cloud-python
|
spanner/google/cloud/spanner_v1/client.py
|
Client.database_admin_api
|
def database_admin_api(self):
"""Helper for session-related API calls."""
if self._database_admin_api is None:
self._database_admin_api = DatabaseAdminClient(
credentials=self.credentials, client_info=_CLIENT_INFO
)
return self._database_admin_api
|
python
|
def database_admin_api(self):
"""Helper for session-related API calls."""
if self._database_admin_api is None:
self._database_admin_api = DatabaseAdminClient(
credentials=self.credentials, client_info=_CLIENT_INFO
)
return self._database_admin_api
|
[
"def",
"database_admin_api",
"(",
"self",
")",
":",
"if",
"self",
".",
"_database_admin_api",
"is",
"None",
":",
"self",
".",
"_database_admin_api",
"=",
"DatabaseAdminClient",
"(",
"credentials",
"=",
"self",
".",
"credentials",
",",
"client_info",
"=",
"_CLIENT_INFO",
")",
"return",
"self",
".",
"_database_admin_api"
] |
Helper for session-related API calls.
|
[
"Helper",
"for",
"session",
"-",
"related",
"API",
"calls",
"."
] |
85e80125a59cb10f8cb105f25ecc099e4b940b50
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/client.py#L161-L167
|
train
|
googleapis/google-cloud-python
|
spanner/google/cloud/spanner_v1/client.py
|
Client.copy
|
def copy(self):
"""Make a copy of this client.
Copies the local data stored as simple types but does not copy the
current state of any open connections with the Cloud Bigtable API.
:rtype: :class:`.Client`
:returns: A copy of the current client.
"""
return self.__class__(
project=self.project,
credentials=self._credentials,
user_agent=self.user_agent,
)
|
python
|
def copy(self):
"""Make a copy of this client.
Copies the local data stored as simple types but does not copy the
current state of any open connections with the Cloud Bigtable API.
:rtype: :class:`.Client`
:returns: A copy of the current client.
"""
return self.__class__(
project=self.project,
credentials=self._credentials,
user_agent=self.user_agent,
)
|
[
"def",
"copy",
"(",
"self",
")",
":",
"return",
"self",
".",
"__class__",
"(",
"project",
"=",
"self",
".",
"project",
",",
"credentials",
"=",
"self",
".",
"_credentials",
",",
"user_agent",
"=",
"self",
".",
"user_agent",
",",
")"
] |
Make a copy of this client.
Copies the local data stored as simple types but does not copy the
current state of any open connections with the Cloud Bigtable API.
:rtype: :class:`.Client`
:returns: A copy of the current client.
|
[
"Make",
"a",
"copy",
"of",
"this",
"client",
"."
] |
85e80125a59cb10f8cb105f25ecc099e4b940b50
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/client.py#L169-L182
|
train
|
googleapis/google-cloud-python
|
spanner/google/cloud/spanner_v1/client.py
|
Client.list_instance_configs
|
def list_instance_configs(self, page_size=None, page_token=None):
"""List available instance configurations for the client's project.
.. _RPC docs: https://cloud.google.com/spanner/docs/reference/rpc/\
google.spanner.admin.instance.v1#google.spanner.admin.\
instance.v1.InstanceAdmin.ListInstanceConfigs
See `RPC docs`_.
:type page_size: int
:param page_size:
Optional. The maximum number of configs in each page of results
from this request. Non-positive values are ignored. Defaults
to a sensible value set by the API.
:type page_token: str
:param page_token:
Optional. If present, return the next batch of configs, using
the value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing
the token.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns:
Iterator of
:class:`~google.cloud.spanner_v1.instance.InstanceConfig`
resources within the client's project.
"""
metadata = _metadata_with_prefix(self.project_name)
path = "projects/%s" % (self.project,)
page_iter = self.instance_admin_api.list_instance_configs(
path, page_size=page_size, metadata=metadata
)
page_iter.next_page_token = page_token
page_iter.item_to_value = _item_to_instance_config
return page_iter
|
python
|
def list_instance_configs(self, page_size=None, page_token=None):
"""List available instance configurations for the client's project.
.. _RPC docs: https://cloud.google.com/spanner/docs/reference/rpc/\
google.spanner.admin.instance.v1#google.spanner.admin.\
instance.v1.InstanceAdmin.ListInstanceConfigs
See `RPC docs`_.
:type page_size: int
:param page_size:
Optional. The maximum number of configs in each page of results
from this request. Non-positive values are ignored. Defaults
to a sensible value set by the API.
:type page_token: str
:param page_token:
Optional. If present, return the next batch of configs, using
the value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing
the token.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns:
Iterator of
:class:`~google.cloud.spanner_v1.instance.InstanceConfig`
resources within the client's project.
"""
metadata = _metadata_with_prefix(self.project_name)
path = "projects/%s" % (self.project,)
page_iter = self.instance_admin_api.list_instance_configs(
path, page_size=page_size, metadata=metadata
)
page_iter.next_page_token = page_token
page_iter.item_to_value = _item_to_instance_config
return page_iter
|
[
"def",
"list_instance_configs",
"(",
"self",
",",
"page_size",
"=",
"None",
",",
"page_token",
"=",
"None",
")",
":",
"metadata",
"=",
"_metadata_with_prefix",
"(",
"self",
".",
"project_name",
")",
"path",
"=",
"\"projects/%s\"",
"%",
"(",
"self",
".",
"project",
",",
")",
"page_iter",
"=",
"self",
".",
"instance_admin_api",
".",
"list_instance_configs",
"(",
"path",
",",
"page_size",
"=",
"page_size",
",",
"metadata",
"=",
"metadata",
")",
"page_iter",
".",
"next_page_token",
"=",
"page_token",
"page_iter",
".",
"item_to_value",
"=",
"_item_to_instance_config",
"return",
"page_iter"
] |
List available instance configurations for the client's project.
.. _RPC docs: https://cloud.google.com/spanner/docs/reference/rpc/\
google.spanner.admin.instance.v1#google.spanner.admin.\
instance.v1.InstanceAdmin.ListInstanceConfigs
See `RPC docs`_.
:type page_size: int
:param page_size:
Optional. The maximum number of configs in each page of results
from this request. Non-positive values are ignored. Defaults
to a sensible value set by the API.
:type page_token: str
:param page_token:
Optional. If present, return the next batch of configs, using
the value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing
the token.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns:
Iterator of
:class:`~google.cloud.spanner_v1.instance.InstanceConfig`
resources within the client's project.
|
[
"List",
"available",
"instance",
"configurations",
"for",
"the",
"client",
"s",
"project",
"."
] |
85e80125a59cb10f8cb105f25ecc099e4b940b50
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/client.py#L184-L220
|
train
|
googleapis/google-cloud-python
|
spanner/google/cloud/spanner_v1/client.py
|
Client.instance
|
def instance(
self,
instance_id,
configuration_name=None,
display_name=None,
node_count=DEFAULT_NODE_COUNT,
):
"""Factory to create a instance associated with this client.
:type instance_id: str
:param instance_id: The ID of the instance.
:type configuration_name: string
:param configuration_name:
(Optional) Name of the instance configuration used to set up the
instance's cluster, in the form:
``projects/<project>/instanceConfigs/<config>``.
**Required** for instances which do not yet exist.
:type display_name: str
:param display_name: (Optional) The display name for the instance in
the Cloud Console UI. (Must be between 4 and 30
characters.) If this value is not set in the
constructor, will fall back to the instance ID.
:type node_count: int
:param node_count: (Optional) The number of nodes in the instance's
cluster; used to set up the instance's cluster.
:rtype: :class:`~google.cloud.spanner_v1.instance.Instance`
:returns: an instance owned by this client.
"""
return Instance(instance_id, self, configuration_name, node_count, display_name)
|
python
|
def instance(
self,
instance_id,
configuration_name=None,
display_name=None,
node_count=DEFAULT_NODE_COUNT,
):
"""Factory to create a instance associated with this client.
:type instance_id: str
:param instance_id: The ID of the instance.
:type configuration_name: string
:param configuration_name:
(Optional) Name of the instance configuration used to set up the
instance's cluster, in the form:
``projects/<project>/instanceConfigs/<config>``.
**Required** for instances which do not yet exist.
:type display_name: str
:param display_name: (Optional) The display name for the instance in
the Cloud Console UI. (Must be between 4 and 30
characters.) If this value is not set in the
constructor, will fall back to the instance ID.
:type node_count: int
:param node_count: (Optional) The number of nodes in the instance's
cluster; used to set up the instance's cluster.
:rtype: :class:`~google.cloud.spanner_v1.instance.Instance`
:returns: an instance owned by this client.
"""
return Instance(instance_id, self, configuration_name, node_count, display_name)
|
[
"def",
"instance",
"(",
"self",
",",
"instance_id",
",",
"configuration_name",
"=",
"None",
",",
"display_name",
"=",
"None",
",",
"node_count",
"=",
"DEFAULT_NODE_COUNT",
",",
")",
":",
"return",
"Instance",
"(",
"instance_id",
",",
"self",
",",
"configuration_name",
",",
"node_count",
",",
"display_name",
")"
] |
Factory to create a instance associated with this client.
:type instance_id: str
:param instance_id: The ID of the instance.
:type configuration_name: string
:param configuration_name:
(Optional) Name of the instance configuration used to set up the
instance's cluster, in the form:
``projects/<project>/instanceConfigs/<config>``.
**Required** for instances which do not yet exist.
:type display_name: str
:param display_name: (Optional) The display name for the instance in
the Cloud Console UI. (Must be between 4 and 30
characters.) If this value is not set in the
constructor, will fall back to the instance ID.
:type node_count: int
:param node_count: (Optional) The number of nodes in the instance's
cluster; used to set up the instance's cluster.
:rtype: :class:`~google.cloud.spanner_v1.instance.Instance`
:returns: an instance owned by this client.
|
[
"Factory",
"to",
"create",
"a",
"instance",
"associated",
"with",
"this",
"client",
"."
] |
85e80125a59cb10f8cb105f25ecc099e4b940b50
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/client.py#L222-L254
|
train
|
googleapis/google-cloud-python
|
spanner/google/cloud/spanner_v1/client.py
|
Client.list_instances
|
def list_instances(self, filter_="", page_size=None, page_token=None):
"""List instances for the client's project.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.InstanceAdmin.ListInstances
:type filter_: string
:param filter_: (Optional) Filter to select instances listed. See
the ``ListInstancesRequest`` docs above for examples.
:type page_size: int
:param page_size:
Optional. The maximum number of instances in each page of results
from this request. Non-positive values are ignored. Defaults
to a sensible value set by the API.
:type page_token: str
:param page_token:
Optional. If present, return the next batch of instances, using
the value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing
the token.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns:
Iterator of :class:`~google.cloud.spanner_v1.instance.Instance`
resources within the client's project.
"""
metadata = _metadata_with_prefix(self.project_name)
path = "projects/%s" % (self.project,)
page_iter = self.instance_admin_api.list_instances(
path, page_size=page_size, metadata=metadata
)
page_iter.item_to_value = self._item_to_instance
page_iter.next_page_token = page_token
return page_iter
|
python
|
def list_instances(self, filter_="", page_size=None, page_token=None):
"""List instances for the client's project.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.InstanceAdmin.ListInstances
:type filter_: string
:param filter_: (Optional) Filter to select instances listed. See
the ``ListInstancesRequest`` docs above for examples.
:type page_size: int
:param page_size:
Optional. The maximum number of instances in each page of results
from this request. Non-positive values are ignored. Defaults
to a sensible value set by the API.
:type page_token: str
:param page_token:
Optional. If present, return the next batch of instances, using
the value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing
the token.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns:
Iterator of :class:`~google.cloud.spanner_v1.instance.Instance`
resources within the client's project.
"""
metadata = _metadata_with_prefix(self.project_name)
path = "projects/%s" % (self.project,)
page_iter = self.instance_admin_api.list_instances(
path, page_size=page_size, metadata=metadata
)
page_iter.item_to_value = self._item_to_instance
page_iter.next_page_token = page_token
return page_iter
|
[
"def",
"list_instances",
"(",
"self",
",",
"filter_",
"=",
"\"\"",
",",
"page_size",
"=",
"None",
",",
"page_token",
"=",
"None",
")",
":",
"metadata",
"=",
"_metadata_with_prefix",
"(",
"self",
".",
"project_name",
")",
"path",
"=",
"\"projects/%s\"",
"%",
"(",
"self",
".",
"project",
",",
")",
"page_iter",
"=",
"self",
".",
"instance_admin_api",
".",
"list_instances",
"(",
"path",
",",
"page_size",
"=",
"page_size",
",",
"metadata",
"=",
"metadata",
")",
"page_iter",
".",
"item_to_value",
"=",
"self",
".",
"_item_to_instance",
"page_iter",
".",
"next_page_token",
"=",
"page_token",
"return",
"page_iter"
] |
List instances for the client's project.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.InstanceAdmin.ListInstances
:type filter_: string
:param filter_: (Optional) Filter to select instances listed. See
the ``ListInstancesRequest`` docs above for examples.
:type page_size: int
:param page_size:
Optional. The maximum number of instances in each page of results
from this request. Non-positive values are ignored. Defaults
to a sensible value set by the API.
:type page_token: str
:param page_token:
Optional. If present, return the next batch of instances, using
the value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing
the token.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns:
Iterator of :class:`~google.cloud.spanner_v1.instance.Instance`
resources within the client's project.
|
[
"List",
"instances",
"for",
"the",
"client",
"s",
"project",
"."
] |
85e80125a59cb10f8cb105f25ecc099e4b940b50
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/client.py#L256-L292
|
train
|
googleapis/google-cloud-python
|
bigquery/google/cloud/bigquery/retry.py
|
_should_retry
|
def _should_retry(exc):
"""Predicate for determining when to retry.
We retry if and only if the 'reason' is 'backendError'
or 'rateLimitExceeded'.
"""
if not hasattr(exc, "errors"):
return False
if len(exc.errors) == 0:
# Check for unstructured error returns, e.g. from GFE
return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)
reason = exc.errors[0]["reason"]
return reason in _RETRYABLE_REASONS
|
python
|
def _should_retry(exc):
"""Predicate for determining when to retry.
We retry if and only if the 'reason' is 'backendError'
or 'rateLimitExceeded'.
"""
if not hasattr(exc, "errors"):
return False
if len(exc.errors) == 0:
# Check for unstructured error returns, e.g. from GFE
return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)
reason = exc.errors[0]["reason"]
return reason in _RETRYABLE_REASONS
|
[
"def",
"_should_retry",
"(",
"exc",
")",
":",
"if",
"not",
"hasattr",
"(",
"exc",
",",
"\"errors\"",
")",
":",
"return",
"False",
"if",
"len",
"(",
"exc",
".",
"errors",
")",
"==",
"0",
":",
"# Check for unstructured error returns, e.g. from GFE",
"return",
"isinstance",
"(",
"exc",
",",
"_UNSTRUCTURED_RETRYABLE_TYPES",
")",
"reason",
"=",
"exc",
".",
"errors",
"[",
"0",
"]",
"[",
"\"reason\"",
"]",
"return",
"reason",
"in",
"_RETRYABLE_REASONS"
] |
Predicate for determining when to retry.
We retry if and only if the 'reason' is 'backendError'
or 'rateLimitExceeded'.
|
[
"Predicate",
"for",
"determining",
"when",
"to",
"retry",
"."
] |
85e80125a59cb10f8cb105f25ecc099e4b940b50
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/retry.py#L30-L44
|
train
|
googleapis/google-cloud-python
|
logging/noxfile.py
|
default
|
def default(session, django_dep=('django',)):
"""Default unit test session.
"""
# Install all test dependencies, then install this package in-place.
deps = UNIT_TEST_DEPS
deps += django_dep
session.install(*deps)
for local_dep in LOCAL_DEPS:
session.install('-e', local_dep)
session.install('-e', '.')
# Run py.test against the unit tests.
session.run(
'py.test',
'--quiet',
'--cov=google.cloud.logging',
'--cov=tests.unit',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=97',
'tests/unit',
*session.posargs
)
|
python
|
def default(session, django_dep=('django',)):
"""Default unit test session.
"""
# Install all test dependencies, then install this package in-place.
deps = UNIT_TEST_DEPS
deps += django_dep
session.install(*deps)
for local_dep in LOCAL_DEPS:
session.install('-e', local_dep)
session.install('-e', '.')
# Run py.test against the unit tests.
session.run(
'py.test',
'--quiet',
'--cov=google.cloud.logging',
'--cov=tests.unit',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=97',
'tests/unit',
*session.posargs
)
|
[
"def",
"default",
"(",
"session",
",",
"django_dep",
"=",
"(",
"'django'",
",",
")",
")",
":",
"# Install all test dependencies, then install this package in-place.",
"deps",
"=",
"UNIT_TEST_DEPS",
"deps",
"+=",
"django_dep",
"session",
".",
"install",
"(",
"*",
"deps",
")",
"for",
"local_dep",
"in",
"LOCAL_DEPS",
":",
"session",
".",
"install",
"(",
"'-e'",
",",
"local_dep",
")",
"session",
".",
"install",
"(",
"'-e'",
",",
"'.'",
")",
"# Run py.test against the unit tests.",
"session",
".",
"run",
"(",
"'py.test'",
",",
"'--quiet'",
",",
"'--cov=google.cloud.logging'",
",",
"'--cov=tests.unit'",
",",
"'--cov-append'",
",",
"'--cov-config=.coveragerc'",
",",
"'--cov-report='",
",",
"'--cov-fail-under=97'",
",",
"'tests/unit'",
",",
"*",
"session",
".",
"posargs",
")"
] |
Default unit test session.
|
[
"Default",
"unit",
"test",
"session",
"."
] |
85e80125a59cb10f8cb105f25ecc099e4b940b50
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/noxfile.py#L77-L102
|
train
|
googleapis/google-cloud-python
|
logging/noxfile.py
|
unit
|
def unit(session):
"""Run the unit test suite."""
# Testing multiple version of django
# See https://www.djangoproject.com/download/ for supported version
django_deps_27 = [
('django==1.8.19',),
('django >= 1.11.0, < 2.0.0dev',),
]
if session.virtualenv.interpreter == '2.7':
[default(session, django_dep=django) for django in django_deps_27]
else:
default(session)
|
python
|
def unit(session):
"""Run the unit test suite."""
# Testing multiple version of django
# See https://www.djangoproject.com/download/ for supported version
django_deps_27 = [
('django==1.8.19',),
('django >= 1.11.0, < 2.0.0dev',),
]
if session.virtualenv.interpreter == '2.7':
[default(session, django_dep=django) for django in django_deps_27]
else:
default(session)
|
[
"def",
"unit",
"(",
"session",
")",
":",
"# Testing multiple version of django",
"# See https://www.djangoproject.com/download/ for supported version",
"django_deps_27",
"=",
"[",
"(",
"'django==1.8.19'",
",",
")",
",",
"(",
"'django >= 1.11.0, < 2.0.0dev'",
",",
")",
",",
"]",
"if",
"session",
".",
"virtualenv",
".",
"interpreter",
"==",
"'2.7'",
":",
"[",
"default",
"(",
"session",
",",
"django_dep",
"=",
"django",
")",
"for",
"django",
"in",
"django_deps_27",
"]",
"else",
":",
"default",
"(",
"session",
")"
] |
Run the unit test suite.
|
[
"Run",
"the",
"unit",
"test",
"suite",
"."
] |
85e80125a59cb10f8cb105f25ecc099e4b940b50
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/noxfile.py#L106-L119
|
train
|
googleapis/google-cloud-python
|
logging/noxfile.py
|
system
|
def system(session):
"""Run the system test suite."""
# Sanity check: Only run system tests if the environment variable is set.
if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
session.skip('Credentials must be set via environment variable.')
# Use pre-release gRPC for system tests.
session.install('--pre', 'grpcio')
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install('mock', 'pytest')
for local_dep in LOCAL_DEPS:
session.install('-e', local_dep)
systest_deps = [
'../bigquery/',
'../pubsub/',
'../storage/',
'../test_utils/',
]
for systest_dep in systest_deps:
session.install('-e', systest_dep)
session.install('-e', '.')
# Run py.test against the system tests.
session.run(
'py.test',
'-vvv',
'-s',
'tests/system',
*session.posargs)
|
python
|
def system(session):
"""Run the system test suite."""
# Sanity check: Only run system tests if the environment variable is set.
if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
session.skip('Credentials must be set via environment variable.')
# Use pre-release gRPC for system tests.
session.install('--pre', 'grpcio')
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install('mock', 'pytest')
for local_dep in LOCAL_DEPS:
session.install('-e', local_dep)
systest_deps = [
'../bigquery/',
'../pubsub/',
'../storage/',
'../test_utils/',
]
for systest_dep in systest_deps:
session.install('-e', systest_dep)
session.install('-e', '.')
# Run py.test against the system tests.
session.run(
'py.test',
'-vvv',
'-s',
'tests/system',
*session.posargs)
|
[
"def",
"system",
"(",
"session",
")",
":",
"# Sanity check: Only run system tests if the environment variable is set.",
"if",
"not",
"os",
".",
"environ",
".",
"get",
"(",
"'GOOGLE_APPLICATION_CREDENTIALS'",
",",
"''",
")",
":",
"session",
".",
"skip",
"(",
"'Credentials must be set via environment variable.'",
")",
"# Use pre-release gRPC for system tests.",
"session",
".",
"install",
"(",
"'--pre'",
",",
"'grpcio'",
")",
"# Install all test dependencies, then install this package into the",
"# virtualenv's dist-packages.",
"session",
".",
"install",
"(",
"'mock'",
",",
"'pytest'",
")",
"for",
"local_dep",
"in",
"LOCAL_DEPS",
":",
"session",
".",
"install",
"(",
"'-e'",
",",
"local_dep",
")",
"systest_deps",
"=",
"[",
"'../bigquery/'",
",",
"'../pubsub/'",
",",
"'../storage/'",
",",
"'../test_utils/'",
",",
"]",
"for",
"systest_dep",
"in",
"systest_deps",
":",
"session",
".",
"install",
"(",
"'-e'",
",",
"systest_dep",
")",
"session",
".",
"install",
"(",
"'-e'",
",",
"'.'",
")",
"# Run py.test against the system tests.",
"session",
".",
"run",
"(",
"'py.test'",
",",
"'-vvv'",
",",
"'-s'",
",",
"'tests/system'",
",",
"*",
"session",
".",
"posargs",
")"
] |
Run the system test suite.
|
[
"Run",
"the",
"system",
"test",
"suite",
"."
] |
85e80125a59cb10f8cb105f25ecc099e4b940b50
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/noxfile.py#L123-L154
|
train
|
googleapis/google-cloud-python
|
logging/google/cloud/logging/_helpers.py
|
entry_from_resource
|
def entry_from_resource(resource, client, loggers):
"""Detect correct entry type from resource and instantiate.
:type resource: dict
:param resource: One entry resource from API response.
:type client: :class:`~google.cloud.logging.client.Client`
:param client: Client that owns the log entry.
:type loggers: dict
:param loggers:
A mapping of logger fullnames -> loggers. If the logger
that owns the entry is not in ``loggers``, the entry
will have a newly-created logger.
:rtype: :class:`~google.cloud.logging.entries._BaseEntry`
:returns: The entry instance, constructed via the resource
"""
if "textPayload" in resource:
return TextEntry.from_api_repr(resource, client, loggers)
if "jsonPayload" in resource:
return StructEntry.from_api_repr(resource, client, loggers)
if "protoPayload" in resource:
return ProtobufEntry.from_api_repr(resource, client, loggers)
return LogEntry.from_api_repr(resource, client, loggers)
|
python
|
def entry_from_resource(resource, client, loggers):
"""Detect correct entry type from resource and instantiate.
:type resource: dict
:param resource: One entry resource from API response.
:type client: :class:`~google.cloud.logging.client.Client`
:param client: Client that owns the log entry.
:type loggers: dict
:param loggers:
A mapping of logger fullnames -> loggers. If the logger
that owns the entry is not in ``loggers``, the entry
will have a newly-created logger.
:rtype: :class:`~google.cloud.logging.entries._BaseEntry`
:returns: The entry instance, constructed via the resource
"""
if "textPayload" in resource:
return TextEntry.from_api_repr(resource, client, loggers)
if "jsonPayload" in resource:
return StructEntry.from_api_repr(resource, client, loggers)
if "protoPayload" in resource:
return ProtobufEntry.from_api_repr(resource, client, loggers)
return LogEntry.from_api_repr(resource, client, loggers)
|
[
"def",
"entry_from_resource",
"(",
"resource",
",",
"client",
",",
"loggers",
")",
":",
"if",
"\"textPayload\"",
"in",
"resource",
":",
"return",
"TextEntry",
".",
"from_api_repr",
"(",
"resource",
",",
"client",
",",
"loggers",
")",
"if",
"\"jsonPayload\"",
"in",
"resource",
":",
"return",
"StructEntry",
".",
"from_api_repr",
"(",
"resource",
",",
"client",
",",
"loggers",
")",
"if",
"\"protoPayload\"",
"in",
"resource",
":",
"return",
"ProtobufEntry",
".",
"from_api_repr",
"(",
"resource",
",",
"client",
",",
"loggers",
")",
"return",
"LogEntry",
".",
"from_api_repr",
"(",
"resource",
",",
"client",
",",
"loggers",
")"
] |
Detect correct entry type from resource and instantiate.
:type resource: dict
:param resource: One entry resource from API response.
:type client: :class:`~google.cloud.logging.client.Client`
:param client: Client that owns the log entry.
:type loggers: dict
:param loggers:
A mapping of logger fullnames -> loggers. If the logger
that owns the entry is not in ``loggers``, the entry
will have a newly-created logger.
:rtype: :class:`~google.cloud.logging.entries._BaseEntry`
:returns: The entry instance, constructed via the resource
|
[
"Detect",
"correct",
"entry",
"type",
"from",
"resource",
"and",
"instantiate",
"."
] |
85e80125a59cb10f8cb105f25ecc099e4b940b50
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging/_helpers.py#L28-L55
|
train
|
googleapis/google-cloud-python
|
logging/google/cloud/logging/_helpers.py
|
retrieve_metadata_server
|
def retrieve_metadata_server(metadata_key):
"""Retrieve the metadata key in the metadata server.
See: https://cloud.google.com/compute/docs/storing-retrieving-metadata
:type metadata_key: str
:param metadata_key: Key of the metadata which will form the url. You can
also supply query parameters after the metadata key.
e.g. "tags?alt=json"
:rtype: str
:returns: The value of the metadata key returned by the metadata server.
"""
url = METADATA_URL + metadata_key
try:
response = requests.get(url, headers=METADATA_HEADERS)
if response.status_code == requests.codes.ok:
return response.text
except requests.exceptions.RequestException:
# Ignore the exception, connection failed means the attribute does not
# exist in the metadata server.
pass
return None
|
python
|
def retrieve_metadata_server(metadata_key):
"""Retrieve the metadata key in the metadata server.
See: https://cloud.google.com/compute/docs/storing-retrieving-metadata
:type metadata_key: str
:param metadata_key: Key of the metadata which will form the url. You can
also supply query parameters after the metadata key.
e.g. "tags?alt=json"
:rtype: str
:returns: The value of the metadata key returned by the metadata server.
"""
url = METADATA_URL + metadata_key
try:
response = requests.get(url, headers=METADATA_HEADERS)
if response.status_code == requests.codes.ok:
return response.text
except requests.exceptions.RequestException:
# Ignore the exception, connection failed means the attribute does not
# exist in the metadata server.
pass
return None
|
[
"def",
"retrieve_metadata_server",
"(",
"metadata_key",
")",
":",
"url",
"=",
"METADATA_URL",
"+",
"metadata_key",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"METADATA_HEADERS",
")",
"if",
"response",
".",
"status_code",
"==",
"requests",
".",
"codes",
".",
"ok",
":",
"return",
"response",
".",
"text",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
":",
"# Ignore the exception, connection failed means the attribute does not",
"# exist in the metadata server.",
"pass",
"return",
"None"
] |
Retrieve the metadata key in the metadata server.
See: https://cloud.google.com/compute/docs/storing-retrieving-metadata
:type metadata_key: str
:param metadata_key: Key of the metadata which will form the url. You can
also supply query parameters after the metadata key.
e.g. "tags?alt=json"
:rtype: str
:returns: The value of the metadata key returned by the metadata server.
|
[
"Retrieve",
"the",
"metadata",
"key",
"in",
"the",
"metadata",
"server",
"."
] |
85e80125a59cb10f8cb105f25ecc099e4b940b50
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging/_helpers.py#L58-L84
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
model_loss
|
def model_loss(y, model, mean=True):
"""
Define loss of TF graph
:param y: correct labels
:param model: output of the model
:param mean: boolean indicating whether should return mean of loss
or vector of losses for each input of the batch
:return: return mean of loss if True, otherwise return vector with per
sample loss
"""
warnings.warn("This function is deprecated and will be removed on or after"
" 2019-04-05. Switch to cleverhans.train.train.")
op = model.op
if op.type == "Softmax":
logits, = op.inputs
else:
logits = model
out = softmax_cross_entropy_with_logits(logits=logits, labels=y)
if mean:
out = reduce_mean(out)
return out
|
python
|
def model_loss(y, model, mean=True):
"""
Define loss of TF graph
:param y: correct labels
:param model: output of the model
:param mean: boolean indicating whether should return mean of loss
or vector of losses for each input of the batch
:return: return mean of loss if True, otherwise return vector with per
sample loss
"""
warnings.warn("This function is deprecated and will be removed on or after"
" 2019-04-05. Switch to cleverhans.train.train.")
op = model.op
if op.type == "Softmax":
logits, = op.inputs
else:
logits = model
out = softmax_cross_entropy_with_logits(logits=logits, labels=y)
if mean:
out = reduce_mean(out)
return out
|
[
"def",
"model_loss",
"(",
"y",
",",
"model",
",",
"mean",
"=",
"True",
")",
":",
"warnings",
".",
"warn",
"(",
"\"This function is deprecated and will be removed on or after\"",
"\" 2019-04-05. Switch to cleverhans.train.train.\"",
")",
"op",
"=",
"model",
".",
"op",
"if",
"op",
".",
"type",
"==",
"\"Softmax\"",
":",
"logits",
",",
"=",
"op",
".",
"inputs",
"else",
":",
"logits",
"=",
"model",
"out",
"=",
"softmax_cross_entropy_with_logits",
"(",
"logits",
"=",
"logits",
",",
"labels",
"=",
"y",
")",
"if",
"mean",
":",
"out",
"=",
"reduce_mean",
"(",
"out",
")",
"return",
"out"
] |
Define loss of TF graph
:param y: correct labels
:param model: output of the model
:param mean: boolean indicating whether should return mean of loss
or vector of losses for each input of the batch
:return: return mean of loss if True, otherwise return vector with per
sample loss
|
[
"Define",
"loss",
"of",
"TF",
"graph",
":",
"param",
"y",
":",
"correct",
"labels",
":",
"param",
"model",
":",
"output",
"of",
"the",
"model",
":",
"param",
"mean",
":",
"boolean",
"indicating",
"whether",
"should",
"return",
"mean",
"of",
"loss",
"or",
"vector",
"of",
"losses",
"for",
"each",
"input",
"of",
"the",
"batch",
":",
"return",
":",
"return",
"mean",
"of",
"loss",
"if",
"True",
"otherwise",
"return",
"vector",
"with",
"per",
"sample",
"loss"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L28-L50
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
initialize_uninitialized_global_variables
|
def initialize_uninitialized_global_variables(sess):
"""
Only initializes the variables of a TensorFlow session that were not
already initialized.
:param sess: the TensorFlow session
:return:
"""
# List all global variables
global_vars = tf.global_variables()
# Find initialized status for all variables
is_var_init = [tf.is_variable_initialized(var) for var in global_vars]
is_initialized = sess.run(is_var_init)
# List all variables that were not initialized previously
not_initialized_vars = [var for (var, init) in
zip(global_vars, is_initialized) if not init]
# Initialize all uninitialized variables found, if any
if len(not_initialized_vars):
sess.run(tf.variables_initializer(not_initialized_vars))
|
python
|
def initialize_uninitialized_global_variables(sess):
"""
Only initializes the variables of a TensorFlow session that were not
already initialized.
:param sess: the TensorFlow session
:return:
"""
# List all global variables
global_vars = tf.global_variables()
# Find initialized status for all variables
is_var_init = [tf.is_variable_initialized(var) for var in global_vars]
is_initialized = sess.run(is_var_init)
# List all variables that were not initialized previously
not_initialized_vars = [var for (var, init) in
zip(global_vars, is_initialized) if not init]
# Initialize all uninitialized variables found, if any
if len(not_initialized_vars):
sess.run(tf.variables_initializer(not_initialized_vars))
|
[
"def",
"initialize_uninitialized_global_variables",
"(",
"sess",
")",
":",
"# List all global variables",
"global_vars",
"=",
"tf",
".",
"global_variables",
"(",
")",
"# Find initialized status for all variables",
"is_var_init",
"=",
"[",
"tf",
".",
"is_variable_initialized",
"(",
"var",
")",
"for",
"var",
"in",
"global_vars",
"]",
"is_initialized",
"=",
"sess",
".",
"run",
"(",
"is_var_init",
")",
"# List all variables that were not initialized previously",
"not_initialized_vars",
"=",
"[",
"var",
"for",
"(",
"var",
",",
"init",
")",
"in",
"zip",
"(",
"global_vars",
",",
"is_initialized",
")",
"if",
"not",
"init",
"]",
"# Initialize all uninitialized variables found, if any",
"if",
"len",
"(",
"not_initialized_vars",
")",
":",
"sess",
".",
"run",
"(",
"tf",
".",
"variables_initializer",
"(",
"not_initialized_vars",
")",
")"
] |
Only initializes the variables of a TensorFlow session that were not
already initialized.
:param sess: the TensorFlow session
:return:
|
[
"Only",
"initializes",
"the",
"variables",
"of",
"a",
"TensorFlow",
"session",
"that",
"were",
"not",
"already",
"initialized",
".",
":",
"param",
"sess",
":",
"the",
"TensorFlow",
"session",
":",
"return",
":"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L53-L73
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
train
|
def train(sess, loss, x, y, X_train, Y_train, save=False,
init_all=False, evaluate=None, feed=None, args=None,
rng=None, var_list=None, fprop_args=None, optimizer=None):
"""
Train a TF graph.
This function is deprecated. Prefer cleverhans.train.train when possible.
cleverhans.train.train supports multiple GPUs but this function is still
needed to support legacy models that do not support calling fprop more
than once.
:param sess: TF session to use when training the graph
:param loss: tensor, the model training loss.
:param x: input placeholder
:param y: output placeholder (for labels)
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
If save is True, should also contain 'train_dir'
and 'filename'
:param rng: Instance of numpy.random.RandomState
:param var_list: Optional list of parameters to train.
:param fprop_args: dict, extra arguments to pass to fprop (loss and model).
:param optimizer: Optimizer to be used for training
:return: True if model trained
"""
warnings.warn("This function is deprecated and will be removed on or after"
" 2019-04-05. Switch to cleverhans.train.train.")
args = _ArgsWrapper(args or {})
fprop_args = fprop_args or {}
# Check that necessary arguments were given (see doc above)
assert args.nb_epochs, "Number of epochs was not given in args dict"
if optimizer is None:
assert args.learning_rate is not None, ("Learning rate was not given "
"in args dict")
assert args.batch_size, "Batch size was not given in args dict"
if save:
assert args.train_dir, "Directory for save was not given in args dict"
assert args.filename, "Filename for save was not given in args dict"
if rng is None:
rng = np.random.RandomState()
# Define optimizer
loss_value = loss.fprop(x, y, **fprop_args)
if optimizer is None:
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
else:
if not isinstance(optimizer, tf.train.Optimizer):
raise ValueError("optimizer object must be from a child class of "
"tf.train.Optimizer")
# Trigger update operations within the default graph (such as batch_norm).
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = optimizer.minimize(loss_value, var_list=var_list)
with sess.as_default():
if hasattr(tf, "global_variables_initializer"):
if init_all:
tf.global_variables_initializer().run()
else:
initialize_uninitialized_global_variables(sess)
else:
warnings.warn("Update your copy of tensorflow; future versions of "
"CleverHans may drop support for this version.")
sess.run(tf.initialize_all_variables())
for epoch in xrange(args.nb_epochs):
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
# Compute batch start and end indices
start, end = batch_indices(
batch, len(X_train), args.batch_size)
# Perform one training step
feed_dict = {x: X_train[index_shuf[start:end]],
y: Y_train[index_shuf[start:end]]}
if feed is not None:
feed_dict.update(feed)
train_step.run(feed_dict=feed_dict)
assert end >= len(X_train) # Check that all examples were used
cur = time.time()
_logger.info("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
evaluate()
if save:
save_path = os.path.join(args.train_dir, args.filename)
saver = tf.train.Saver()
saver.save(sess, save_path)
_logger.info("Completed model training and saved at: " +
str(save_path))
else:
_logger.info("Completed model training.")
return True
|
python
|
def train(sess, loss, x, y, X_train, Y_train, save=False,
init_all=False, evaluate=None, feed=None, args=None,
rng=None, var_list=None, fprop_args=None, optimizer=None):
"""
Train a TF graph.
This function is deprecated. Prefer cleverhans.train.train when possible.
cleverhans.train.train supports multiple GPUs but this function is still
needed to support legacy models that do not support calling fprop more
than once.
:param sess: TF session to use when training the graph
:param loss: tensor, the model training loss.
:param x: input placeholder
:param y: output placeholder (for labels)
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
If save is True, should also contain 'train_dir'
and 'filename'
:param rng: Instance of numpy.random.RandomState
:param var_list: Optional list of parameters to train.
:param fprop_args: dict, extra arguments to pass to fprop (loss and model).
:param optimizer: Optimizer to be used for training
:return: True if model trained
"""
warnings.warn("This function is deprecated and will be removed on or after"
" 2019-04-05. Switch to cleverhans.train.train.")
args = _ArgsWrapper(args or {})
fprop_args = fprop_args or {}
# Check that necessary arguments were given (see doc above)
assert args.nb_epochs, "Number of epochs was not given in args dict"
if optimizer is None:
assert args.learning_rate is not None, ("Learning rate was not given "
"in args dict")
assert args.batch_size, "Batch size was not given in args dict"
if save:
assert args.train_dir, "Directory for save was not given in args dict"
assert args.filename, "Filename for save was not given in args dict"
if rng is None:
rng = np.random.RandomState()
# Define optimizer
loss_value = loss.fprop(x, y, **fprop_args)
if optimizer is None:
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
else:
if not isinstance(optimizer, tf.train.Optimizer):
raise ValueError("optimizer object must be from a child class of "
"tf.train.Optimizer")
# Trigger update operations within the default graph (such as batch_norm).
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = optimizer.minimize(loss_value, var_list=var_list)
with sess.as_default():
if hasattr(tf, "global_variables_initializer"):
if init_all:
tf.global_variables_initializer().run()
else:
initialize_uninitialized_global_variables(sess)
else:
warnings.warn("Update your copy of tensorflow; future versions of "
"CleverHans may drop support for this version.")
sess.run(tf.initialize_all_variables())
for epoch in xrange(args.nb_epochs):
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
# Compute batch start and end indices
start, end = batch_indices(
batch, len(X_train), args.batch_size)
# Perform one training step
feed_dict = {x: X_train[index_shuf[start:end]],
y: Y_train[index_shuf[start:end]]}
if feed is not None:
feed_dict.update(feed)
train_step.run(feed_dict=feed_dict)
assert end >= len(X_train) # Check that all examples were used
cur = time.time()
_logger.info("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
evaluate()
if save:
save_path = os.path.join(args.train_dir, args.filename)
saver = tf.train.Saver()
saver.save(sess, save_path)
_logger.info("Completed model training and saved at: " +
str(save_path))
else:
_logger.info("Completed model training.")
return True
|
[
"def",
"train",
"(",
"sess",
",",
"loss",
",",
"x",
",",
"y",
",",
"X_train",
",",
"Y_train",
",",
"save",
"=",
"False",
",",
"init_all",
"=",
"False",
",",
"evaluate",
"=",
"None",
",",
"feed",
"=",
"None",
",",
"args",
"=",
"None",
",",
"rng",
"=",
"None",
",",
"var_list",
"=",
"None",
",",
"fprop_args",
"=",
"None",
",",
"optimizer",
"=",
"None",
")",
":",
"warnings",
".",
"warn",
"(",
"\"This function is deprecated and will be removed on or after\"",
"\" 2019-04-05. Switch to cleverhans.train.train.\"",
")",
"args",
"=",
"_ArgsWrapper",
"(",
"args",
"or",
"{",
"}",
")",
"fprop_args",
"=",
"fprop_args",
"or",
"{",
"}",
"# Check that necessary arguments were given (see doc above)",
"assert",
"args",
".",
"nb_epochs",
",",
"\"Number of epochs was not given in args dict\"",
"if",
"optimizer",
"is",
"None",
":",
"assert",
"args",
".",
"learning_rate",
"is",
"not",
"None",
",",
"(",
"\"Learning rate was not given \"",
"\"in args dict\"",
")",
"assert",
"args",
".",
"batch_size",
",",
"\"Batch size was not given in args dict\"",
"if",
"save",
":",
"assert",
"args",
".",
"train_dir",
",",
"\"Directory for save was not given in args dict\"",
"assert",
"args",
".",
"filename",
",",
"\"Filename for save was not given in args dict\"",
"if",
"rng",
"is",
"None",
":",
"rng",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
")",
"# Define optimizer",
"loss_value",
"=",
"loss",
".",
"fprop",
"(",
"x",
",",
"y",
",",
"*",
"*",
"fprop_args",
")",
"if",
"optimizer",
"is",
"None",
":",
"optimizer",
"=",
"tf",
".",
"train",
".",
"AdamOptimizer",
"(",
"learning_rate",
"=",
"args",
".",
"learning_rate",
")",
"else",
":",
"if",
"not",
"isinstance",
"(",
"optimizer",
",",
"tf",
".",
"train",
".",
"Optimizer",
")",
":",
"raise",
"ValueError",
"(",
"\"optimizer object must be from a child class of \"",
"\"tf.train.Optimizer\"",
")",
"# Trigger update operations within the default graph (such as batch_norm).",
"with",
"tf",
".",
"control_dependencies",
"(",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"UPDATE_OPS",
")",
")",
":",
"train_step",
"=",
"optimizer",
".",
"minimize",
"(",
"loss_value",
",",
"var_list",
"=",
"var_list",
")",
"with",
"sess",
".",
"as_default",
"(",
")",
":",
"if",
"hasattr",
"(",
"tf",
",",
"\"global_variables_initializer\"",
")",
":",
"if",
"init_all",
":",
"tf",
".",
"global_variables_initializer",
"(",
")",
".",
"run",
"(",
")",
"else",
":",
"initialize_uninitialized_global_variables",
"(",
"sess",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"Update your copy of tensorflow; future versions of \"",
"\"CleverHans may drop support for this version.\"",
")",
"sess",
".",
"run",
"(",
"tf",
".",
"initialize_all_variables",
"(",
")",
")",
"for",
"epoch",
"in",
"xrange",
"(",
"args",
".",
"nb_epochs",
")",
":",
"# Compute number of batches",
"nb_batches",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"float",
"(",
"len",
"(",
"X_train",
")",
")",
"/",
"args",
".",
"batch_size",
")",
")",
"assert",
"nb_batches",
"*",
"args",
".",
"batch_size",
">=",
"len",
"(",
"X_train",
")",
"# Indices to shuffle training set",
"index_shuf",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"X_train",
")",
")",
")",
"rng",
".",
"shuffle",
"(",
"index_shuf",
")",
"prev",
"=",
"time",
".",
"time",
"(",
")",
"for",
"batch",
"in",
"range",
"(",
"nb_batches",
")",
":",
"# Compute batch start and end indices",
"start",
",",
"end",
"=",
"batch_indices",
"(",
"batch",
",",
"len",
"(",
"X_train",
")",
",",
"args",
".",
"batch_size",
")",
"# Perform one training step",
"feed_dict",
"=",
"{",
"x",
":",
"X_train",
"[",
"index_shuf",
"[",
"start",
":",
"end",
"]",
"]",
",",
"y",
":",
"Y_train",
"[",
"index_shuf",
"[",
"start",
":",
"end",
"]",
"]",
"}",
"if",
"feed",
"is",
"not",
"None",
":",
"feed_dict",
".",
"update",
"(",
"feed",
")",
"train_step",
".",
"run",
"(",
"feed_dict",
"=",
"feed_dict",
")",
"assert",
"end",
">=",
"len",
"(",
"X_train",
")",
"# Check that all examples were used",
"cur",
"=",
"time",
".",
"time",
"(",
")",
"_logger",
".",
"info",
"(",
"\"Epoch \"",
"+",
"str",
"(",
"epoch",
")",
"+",
"\" took \"",
"+",
"str",
"(",
"cur",
"-",
"prev",
")",
"+",
"\" seconds\"",
")",
"if",
"evaluate",
"is",
"not",
"None",
":",
"evaluate",
"(",
")",
"if",
"save",
":",
"save_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"train_dir",
",",
"args",
".",
"filename",
")",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
")",
"saver",
".",
"save",
"(",
"sess",
",",
"save_path",
")",
"_logger",
".",
"info",
"(",
"\"Completed model training and saved at: \"",
"+",
"str",
"(",
"save_path",
")",
")",
"else",
":",
"_logger",
".",
"info",
"(",
"\"Completed model training.\"",
")",
"return",
"True"
] |
Train a TF graph.
This function is deprecated. Prefer cleverhans.train.train when possible.
cleverhans.train.train supports multiple GPUs but this function is still
needed to support legacy models that do not support calling fprop more
than once.
:param sess: TF session to use when training the graph
:param loss: tensor, the model training loss.
:param x: input placeholder
:param y: output placeholder (for labels)
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
If save is True, should also contain 'train_dir'
and 'filename'
:param rng: Instance of numpy.random.RandomState
:param var_list: Optional list of parameters to train.
:param fprop_args: dict, extra arguments to pass to fprop (loss and model).
:param optimizer: Optimizer to be used for training
:return: True if model trained
|
[
"Train",
"a",
"TF",
"graph",
".",
"This",
"function",
"is",
"deprecated",
".",
"Prefer",
"cleverhans",
".",
"train",
".",
"train",
"when",
"possible",
".",
"cleverhans",
".",
"train",
".",
"train",
"supports",
"multiple",
"GPUs",
"but",
"this",
"function",
"is",
"still",
"needed",
"to",
"support",
"legacy",
"models",
"that",
"do",
"not",
"support",
"calling",
"fprop",
"more",
"than",
"once",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L76-L193
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
model_eval
|
def model_eval(sess, x, y, predictions, X_test=None, Y_test=None,
feed=None, args=None):
"""
Compute the accuracy of a TF model on some data
:param sess: TF session to use
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_test: numpy array with training inputs
:param Y_test: numpy array with training outputs
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:return: a float with the accuracy value
"""
global _model_eval_cache
args = _ArgsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument "
"must be supplied.")
# Define accuracy symbolically
key = (y, predictions)
if key in _model_eval_cache:
correct_preds = _model_eval_cache[key]
else:
correct_preds = tf.equal(tf.argmax(y, axis=-1),
tf.argmax(predictions, axis=-1))
_model_eval_cache[key] = correct_preds
# Init result var
accuracy = 0.0
with sess.as_default():
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_test)
X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
dtype=X_test.dtype)
Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
dtype=Y_test.dtype)
for batch in range(nb_batches):
if batch % 100 == 0 and batch > 0:
_logger.debug("Batch " + str(batch))
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * args.batch_size
end = min(len(X_test), start + args.batch_size)
# The last batch may be smaller than all others. This should not
# affect the accuarcy disproportionately.
cur_batch_size = end - start
X_cur[:cur_batch_size] = X_test[start:end]
Y_cur[:cur_batch_size] = Y_test[start:end]
feed_dict = {x: X_cur, y: Y_cur}
if feed is not None:
feed_dict.update(feed)
cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)
accuracy += cur_corr_preds[:cur_batch_size].sum()
assert end >= len(X_test)
# Divide by number of examples to get final value
accuracy /= len(X_test)
return accuracy
|
python
|
def model_eval(sess, x, y, predictions, X_test=None, Y_test=None,
feed=None, args=None):
"""
Compute the accuracy of a TF model on some data
:param sess: TF session to use
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_test: numpy array with training inputs
:param Y_test: numpy array with training outputs
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:return: a float with the accuracy value
"""
global _model_eval_cache
args = _ArgsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument "
"must be supplied.")
# Define accuracy symbolically
key = (y, predictions)
if key in _model_eval_cache:
correct_preds = _model_eval_cache[key]
else:
correct_preds = tf.equal(tf.argmax(y, axis=-1),
tf.argmax(predictions, axis=-1))
_model_eval_cache[key] = correct_preds
# Init result var
accuracy = 0.0
with sess.as_default():
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_test)
X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
dtype=X_test.dtype)
Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
dtype=Y_test.dtype)
for batch in range(nb_batches):
if batch % 100 == 0 and batch > 0:
_logger.debug("Batch " + str(batch))
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * args.batch_size
end = min(len(X_test), start + args.batch_size)
# The last batch may be smaller than all others. This should not
# affect the accuarcy disproportionately.
cur_batch_size = end - start
X_cur[:cur_batch_size] = X_test[start:end]
Y_cur[:cur_batch_size] = Y_test[start:end]
feed_dict = {x: X_cur, y: Y_cur}
if feed is not None:
feed_dict.update(feed)
cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)
accuracy += cur_corr_preds[:cur_batch_size].sum()
assert end >= len(X_test)
# Divide by number of examples to get final value
accuracy /= len(X_test)
return accuracy
|
[
"def",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"predictions",
",",
"X_test",
"=",
"None",
",",
"Y_test",
"=",
"None",
",",
"feed",
"=",
"None",
",",
"args",
"=",
"None",
")",
":",
"global",
"_model_eval_cache",
"args",
"=",
"_ArgsWrapper",
"(",
"args",
"or",
"{",
"}",
")",
"assert",
"args",
".",
"batch_size",
",",
"\"Batch size was not given in args dict\"",
"if",
"X_test",
"is",
"None",
"or",
"Y_test",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"X_test argument and Y_test argument \"",
"\"must be supplied.\"",
")",
"# Define accuracy symbolically",
"key",
"=",
"(",
"y",
",",
"predictions",
")",
"if",
"key",
"in",
"_model_eval_cache",
":",
"correct_preds",
"=",
"_model_eval_cache",
"[",
"key",
"]",
"else",
":",
"correct_preds",
"=",
"tf",
".",
"equal",
"(",
"tf",
".",
"argmax",
"(",
"y",
",",
"axis",
"=",
"-",
"1",
")",
",",
"tf",
".",
"argmax",
"(",
"predictions",
",",
"axis",
"=",
"-",
"1",
")",
")",
"_model_eval_cache",
"[",
"key",
"]",
"=",
"correct_preds",
"# Init result var",
"accuracy",
"=",
"0.0",
"with",
"sess",
".",
"as_default",
"(",
")",
":",
"# Compute number of batches",
"nb_batches",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"float",
"(",
"len",
"(",
"X_test",
")",
")",
"/",
"args",
".",
"batch_size",
")",
")",
"assert",
"nb_batches",
"*",
"args",
".",
"batch_size",
">=",
"len",
"(",
"X_test",
")",
"X_cur",
"=",
"np",
".",
"zeros",
"(",
"(",
"args",
".",
"batch_size",
",",
")",
"+",
"X_test",
".",
"shape",
"[",
"1",
":",
"]",
",",
"dtype",
"=",
"X_test",
".",
"dtype",
")",
"Y_cur",
"=",
"np",
".",
"zeros",
"(",
"(",
"args",
".",
"batch_size",
",",
")",
"+",
"Y_test",
".",
"shape",
"[",
"1",
":",
"]",
",",
"dtype",
"=",
"Y_test",
".",
"dtype",
")",
"for",
"batch",
"in",
"range",
"(",
"nb_batches",
")",
":",
"if",
"batch",
"%",
"100",
"==",
"0",
"and",
"batch",
">",
"0",
":",
"_logger",
".",
"debug",
"(",
"\"Batch \"",
"+",
"str",
"(",
"batch",
")",
")",
"# Must not use the `batch_indices` function here, because it",
"# repeats some examples.",
"# It's acceptable to repeat during training, but not eval.",
"start",
"=",
"batch",
"*",
"args",
".",
"batch_size",
"end",
"=",
"min",
"(",
"len",
"(",
"X_test",
")",
",",
"start",
"+",
"args",
".",
"batch_size",
")",
"# The last batch may be smaller than all others. This should not",
"# affect the accuarcy disproportionately.",
"cur_batch_size",
"=",
"end",
"-",
"start",
"X_cur",
"[",
":",
"cur_batch_size",
"]",
"=",
"X_test",
"[",
"start",
":",
"end",
"]",
"Y_cur",
"[",
":",
"cur_batch_size",
"]",
"=",
"Y_test",
"[",
"start",
":",
"end",
"]",
"feed_dict",
"=",
"{",
"x",
":",
"X_cur",
",",
"y",
":",
"Y_cur",
"}",
"if",
"feed",
"is",
"not",
"None",
":",
"feed_dict",
".",
"update",
"(",
"feed",
")",
"cur_corr_preds",
"=",
"correct_preds",
".",
"eval",
"(",
"feed_dict",
"=",
"feed_dict",
")",
"accuracy",
"+=",
"cur_corr_preds",
"[",
":",
"cur_batch_size",
"]",
".",
"sum",
"(",
")",
"assert",
"end",
">=",
"len",
"(",
"X_test",
")",
"# Divide by number of examples to get final value",
"accuracy",
"/=",
"len",
"(",
"X_test",
")",
"return",
"accuracy"
] |
Compute the accuracy of a TF model on some data
:param sess: TF session to use
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_test: numpy array with training inputs
:param Y_test: numpy array with training outputs
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:return: a float with the accuracy value
|
[
"Compute",
"the",
"accuracy",
"of",
"a",
"TF",
"model",
"on",
"some",
"data",
":",
"param",
"sess",
":",
"TF",
"session",
"to",
"use",
":",
"param",
"x",
":",
"input",
"placeholder",
":",
"param",
"y",
":",
"output",
"placeholder",
"(",
"for",
"labels",
")",
":",
"param",
"predictions",
":",
"model",
"output",
"predictions",
":",
"param",
"X_test",
":",
"numpy",
"array",
"with",
"training",
"inputs",
":",
"param",
"Y_test",
":",
"numpy",
"array",
"with",
"training",
"outputs",
":",
"param",
"feed",
":",
"An",
"optional",
"dictionary",
"that",
"is",
"appended",
"to",
"the",
"feeding",
"dictionary",
"before",
"the",
"session",
"runs",
".",
"Can",
"be",
"used",
"to",
"feed",
"the",
"learning",
"phase",
"of",
"a",
"Keras",
"model",
"for",
"instance",
".",
":",
"param",
"args",
":",
"dict",
"or",
"argparse",
"Namespace",
"object",
".",
"Should",
"contain",
"batch_size",
":",
"return",
":",
"a",
"float",
"with",
"the",
"accuracy",
"value"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L196-L269
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
batch_eval
|
def batch_eval(*args, **kwargs):
"""
Wrapper around deprecated function.
"""
# Inside function to avoid circular import
from cleverhans.evaluation import batch_eval as new_batch_eval
warnings.warn("batch_eval has moved to cleverhans.evaluation. "
"batch_eval will be removed from utils_tf on or after "
"2019-03-09.")
return new_batch_eval(*args, **kwargs)
|
python
|
def batch_eval(*args, **kwargs):
"""
Wrapper around deprecated function.
"""
# Inside function to avoid circular import
from cleverhans.evaluation import batch_eval as new_batch_eval
warnings.warn("batch_eval has moved to cleverhans.evaluation. "
"batch_eval will be removed from utils_tf on or after "
"2019-03-09.")
return new_batch_eval(*args, **kwargs)
|
[
"def",
"batch_eval",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Inside function to avoid circular import",
"from",
"cleverhans",
".",
"evaluation",
"import",
"batch_eval",
"as",
"new_batch_eval",
"warnings",
".",
"warn",
"(",
"\"batch_eval has moved to cleverhans.evaluation. \"",
"\"batch_eval will be removed from utils_tf on or after \"",
"\"2019-03-09.\"",
")",
"return",
"new_batch_eval",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Wrapper around deprecated function.
|
[
"Wrapper",
"around",
"deprecated",
"function",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L292-L301
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
model_argmax
|
def model_argmax(sess, x, predictions, samples, feed=None):
"""
Helper function that computes the current class prediction
:param sess: TF session
:param x: the input placeholder
:param predictions: the model's symbolic output
:param samples: numpy array with input samples (dims must match x)
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:return: the argmax output of predictions, i.e. the current predicted class
"""
feed_dict = {x: samples}
if feed is not None:
feed_dict.update(feed)
probabilities = sess.run(predictions, feed_dict)
if samples.shape[0] == 1:
return np.argmax(probabilities)
else:
return np.argmax(probabilities, axis=1)
|
python
|
def model_argmax(sess, x, predictions, samples, feed=None):
"""
Helper function that computes the current class prediction
:param sess: TF session
:param x: the input placeholder
:param predictions: the model's symbolic output
:param samples: numpy array with input samples (dims must match x)
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:return: the argmax output of predictions, i.e. the current predicted class
"""
feed_dict = {x: samples}
if feed is not None:
feed_dict.update(feed)
probabilities = sess.run(predictions, feed_dict)
if samples.shape[0] == 1:
return np.argmax(probabilities)
else:
return np.argmax(probabilities, axis=1)
|
[
"def",
"model_argmax",
"(",
"sess",
",",
"x",
",",
"predictions",
",",
"samples",
",",
"feed",
"=",
"None",
")",
":",
"feed_dict",
"=",
"{",
"x",
":",
"samples",
"}",
"if",
"feed",
"is",
"not",
"None",
":",
"feed_dict",
".",
"update",
"(",
"feed",
")",
"probabilities",
"=",
"sess",
".",
"run",
"(",
"predictions",
",",
"feed_dict",
")",
"if",
"samples",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
":",
"return",
"np",
".",
"argmax",
"(",
"probabilities",
")",
"else",
":",
"return",
"np",
".",
"argmax",
"(",
"probabilities",
",",
"axis",
"=",
"1",
")"
] |
Helper function that computes the current class prediction
:param sess: TF session
:param x: the input placeholder
:param predictions: the model's symbolic output
:param samples: numpy array with input samples (dims must match x)
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:return: the argmax output of predictions, i.e. the current predicted class
|
[
"Helper",
"function",
"that",
"computes",
"the",
"current",
"class",
"prediction",
":",
"param",
"sess",
":",
"TF",
"session",
":",
"param",
"x",
":",
"the",
"input",
"placeholder",
":",
"param",
"predictions",
":",
"the",
"model",
"s",
"symbolic",
"output",
":",
"param",
"samples",
":",
"numpy",
"array",
"with",
"input",
"samples",
"(",
"dims",
"must",
"match",
"x",
")",
":",
"param",
"feed",
":",
"An",
"optional",
"dictionary",
"that",
"is",
"appended",
"to",
"the",
"feeding",
"dictionary",
"before",
"the",
"session",
"runs",
".",
"Can",
"be",
"used",
"to",
"feed",
"the",
"learning",
"phase",
"of",
"a",
"Keras",
"model",
"for",
"instance",
".",
":",
"return",
":",
"the",
"argmax",
"output",
"of",
"predictions",
"i",
".",
"e",
".",
"the",
"current",
"predicted",
"class"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L304-L324
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
l2_batch_normalize
|
def l2_batch_normalize(x, epsilon=1e-12, scope=None):
"""
Helper function to normalize a batch of vectors.
:param x: the input placeholder
:param epsilon: stabilizes division
:return: the batch of l2 normalized vector
"""
with tf.name_scope(scope, "l2_batch_normalize") as name_scope:
x_shape = tf.shape(x)
x = tf.contrib.layers.flatten(x)
x /= (epsilon + reduce_max(tf.abs(x), 1, keepdims=True))
square_sum = reduce_sum(tf.square(x), 1, keepdims=True)
x_inv_norm = tf.rsqrt(np.sqrt(epsilon) + square_sum)
x_norm = tf.multiply(x, x_inv_norm)
return tf.reshape(x_norm, x_shape, name_scope)
|
python
|
def l2_batch_normalize(x, epsilon=1e-12, scope=None):
"""
Helper function to normalize a batch of vectors.
:param x: the input placeholder
:param epsilon: stabilizes division
:return: the batch of l2 normalized vector
"""
with tf.name_scope(scope, "l2_batch_normalize") as name_scope:
x_shape = tf.shape(x)
x = tf.contrib.layers.flatten(x)
x /= (epsilon + reduce_max(tf.abs(x), 1, keepdims=True))
square_sum = reduce_sum(tf.square(x), 1, keepdims=True)
x_inv_norm = tf.rsqrt(np.sqrt(epsilon) + square_sum)
x_norm = tf.multiply(x, x_inv_norm)
return tf.reshape(x_norm, x_shape, name_scope)
|
[
"def",
"l2_batch_normalize",
"(",
"x",
",",
"epsilon",
"=",
"1e-12",
",",
"scope",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"scope",
",",
"\"l2_batch_normalize\"",
")",
"as",
"name_scope",
":",
"x_shape",
"=",
"tf",
".",
"shape",
"(",
"x",
")",
"x",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"flatten",
"(",
"x",
")",
"x",
"/=",
"(",
"epsilon",
"+",
"reduce_max",
"(",
"tf",
".",
"abs",
"(",
"x",
")",
",",
"1",
",",
"keepdims",
"=",
"True",
")",
")",
"square_sum",
"=",
"reduce_sum",
"(",
"tf",
".",
"square",
"(",
"x",
")",
",",
"1",
",",
"keepdims",
"=",
"True",
")",
"x_inv_norm",
"=",
"tf",
".",
"rsqrt",
"(",
"np",
".",
"sqrt",
"(",
"epsilon",
")",
"+",
"square_sum",
")",
"x_norm",
"=",
"tf",
".",
"multiply",
"(",
"x",
",",
"x_inv_norm",
")",
"return",
"tf",
".",
"reshape",
"(",
"x_norm",
",",
"x_shape",
",",
"name_scope",
")"
] |
Helper function to normalize a batch of vectors.
:param x: the input placeholder
:param epsilon: stabilizes division
:return: the batch of l2 normalized vector
|
[
"Helper",
"function",
"to",
"normalize",
"a",
"batch",
"of",
"vectors",
".",
":",
"param",
"x",
":",
"the",
"input",
"placeholder",
":",
"param",
"epsilon",
":",
"stabilizes",
"division",
":",
"return",
":",
"the",
"batch",
"of",
"l2",
"normalized",
"vector"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L327-L341
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
kl_with_logits
|
def kl_with_logits(p_logits, q_logits, scope=None,
loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES):
"""Helper function to compute kl-divergence KL(p || q)
"""
with tf.name_scope(scope, "kl_divergence") as name:
p = tf.nn.softmax(p_logits)
p_log = tf.nn.log_softmax(p_logits)
q_log = tf.nn.log_softmax(q_logits)
loss = reduce_mean(reduce_sum(p * (p_log - q_log), axis=1),
name=name)
tf.losses.add_loss(loss, loss_collection)
return loss
|
python
|
def kl_with_logits(p_logits, q_logits, scope=None,
loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES):
"""Helper function to compute kl-divergence KL(p || q)
"""
with tf.name_scope(scope, "kl_divergence") as name:
p = tf.nn.softmax(p_logits)
p_log = tf.nn.log_softmax(p_logits)
q_log = tf.nn.log_softmax(q_logits)
loss = reduce_mean(reduce_sum(p * (p_log - q_log), axis=1),
name=name)
tf.losses.add_loss(loss, loss_collection)
return loss
|
[
"def",
"kl_with_logits",
"(",
"p_logits",
",",
"q_logits",
",",
"scope",
"=",
"None",
",",
"loss_collection",
"=",
"tf",
".",
"GraphKeys",
".",
"REGULARIZATION_LOSSES",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"scope",
",",
"\"kl_divergence\"",
")",
"as",
"name",
":",
"p",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"p_logits",
")",
"p_log",
"=",
"tf",
".",
"nn",
".",
"log_softmax",
"(",
"p_logits",
")",
"q_log",
"=",
"tf",
".",
"nn",
".",
"log_softmax",
"(",
"q_logits",
")",
"loss",
"=",
"reduce_mean",
"(",
"reduce_sum",
"(",
"p",
"*",
"(",
"p_log",
"-",
"q_log",
")",
",",
"axis",
"=",
"1",
")",
",",
"name",
"=",
"name",
")",
"tf",
".",
"losses",
".",
"add_loss",
"(",
"loss",
",",
"loss_collection",
")",
"return",
"loss"
] |
Helper function to compute kl-divergence KL(p || q)
|
[
"Helper",
"function",
"to",
"compute",
"kl",
"-",
"divergence",
"KL",
"(",
"p",
"||",
"q",
")"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L344-L355
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
clip_eta
|
def clip_eta(eta, ord, eps):
"""
Helper function to clip the perturbation to epsilon norm ball.
:param eta: A tensor with the current perturbation.
:param ord: Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param eps: Epsilon, bound of the perturbation.
"""
# Clipping perturbation eta to self.ord norm ball
if ord not in [np.inf, 1, 2]:
raise ValueError('ord must be np.inf, 1, or 2.')
reduc_ind = list(xrange(1, len(eta.get_shape())))
avoid_zero_div = 1e-12
if ord == np.inf:
eta = clip_by_value(eta, -eps, eps)
else:
if ord == 1:
raise NotImplementedError("The expression below is not the correct way"
" to project onto the L1 norm ball.")
norm = tf.maximum(avoid_zero_div,
reduce_sum(tf.abs(eta),
reduc_ind, keepdims=True))
elif ord == 2:
# avoid_zero_div must go inside sqrt to avoid a divide by zero
# in the gradient through this operation
norm = tf.sqrt(tf.maximum(avoid_zero_div,
reduce_sum(tf.square(eta),
reduc_ind,
keepdims=True)))
# We must *clip* to within the norm ball, not *normalize* onto the
# surface of the ball
factor = tf.minimum(1., div(eps, norm))
eta = eta * factor
return eta
|
python
|
def clip_eta(eta, ord, eps):
"""
Helper function to clip the perturbation to epsilon norm ball.
:param eta: A tensor with the current perturbation.
:param ord: Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param eps: Epsilon, bound of the perturbation.
"""
# Clipping perturbation eta to self.ord norm ball
if ord not in [np.inf, 1, 2]:
raise ValueError('ord must be np.inf, 1, or 2.')
reduc_ind = list(xrange(1, len(eta.get_shape())))
avoid_zero_div = 1e-12
if ord == np.inf:
eta = clip_by_value(eta, -eps, eps)
else:
if ord == 1:
raise NotImplementedError("The expression below is not the correct way"
" to project onto the L1 norm ball.")
norm = tf.maximum(avoid_zero_div,
reduce_sum(tf.abs(eta),
reduc_ind, keepdims=True))
elif ord == 2:
# avoid_zero_div must go inside sqrt to avoid a divide by zero
# in the gradient through this operation
norm = tf.sqrt(tf.maximum(avoid_zero_div,
reduce_sum(tf.square(eta),
reduc_ind,
keepdims=True)))
# We must *clip* to within the norm ball, not *normalize* onto the
# surface of the ball
factor = tf.minimum(1., div(eps, norm))
eta = eta * factor
return eta
|
[
"def",
"clip_eta",
"(",
"eta",
",",
"ord",
",",
"eps",
")",
":",
"# Clipping perturbation eta to self.ord norm ball",
"if",
"ord",
"not",
"in",
"[",
"np",
".",
"inf",
",",
"1",
",",
"2",
"]",
":",
"raise",
"ValueError",
"(",
"'ord must be np.inf, 1, or 2.'",
")",
"reduc_ind",
"=",
"list",
"(",
"xrange",
"(",
"1",
",",
"len",
"(",
"eta",
".",
"get_shape",
"(",
")",
")",
")",
")",
"avoid_zero_div",
"=",
"1e-12",
"if",
"ord",
"==",
"np",
".",
"inf",
":",
"eta",
"=",
"clip_by_value",
"(",
"eta",
",",
"-",
"eps",
",",
"eps",
")",
"else",
":",
"if",
"ord",
"==",
"1",
":",
"raise",
"NotImplementedError",
"(",
"\"The expression below is not the correct way\"",
"\" to project onto the L1 norm ball.\"",
")",
"norm",
"=",
"tf",
".",
"maximum",
"(",
"avoid_zero_div",
",",
"reduce_sum",
"(",
"tf",
".",
"abs",
"(",
"eta",
")",
",",
"reduc_ind",
",",
"keepdims",
"=",
"True",
")",
")",
"elif",
"ord",
"==",
"2",
":",
"# avoid_zero_div must go inside sqrt to avoid a divide by zero",
"# in the gradient through this operation",
"norm",
"=",
"tf",
".",
"sqrt",
"(",
"tf",
".",
"maximum",
"(",
"avoid_zero_div",
",",
"reduce_sum",
"(",
"tf",
".",
"square",
"(",
"eta",
")",
",",
"reduc_ind",
",",
"keepdims",
"=",
"True",
")",
")",
")",
"# We must *clip* to within the norm ball, not *normalize* onto the",
"# surface of the ball",
"factor",
"=",
"tf",
".",
"minimum",
"(",
"1.",
",",
"div",
"(",
"eps",
",",
"norm",
")",
")",
"eta",
"=",
"eta",
"*",
"factor",
"return",
"eta"
] |
Helper function to clip the perturbation to epsilon norm ball.
:param eta: A tensor with the current perturbation.
:param ord: Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param eps: Epsilon, bound of the perturbation.
|
[
"Helper",
"function",
"to",
"clip",
"the",
"perturbation",
"to",
"epsilon",
"norm",
"ball",
".",
":",
"param",
"eta",
":",
"A",
"tensor",
"with",
"the",
"current",
"perturbation",
".",
":",
"param",
"ord",
":",
"Order",
"of",
"the",
"norm",
"(",
"mimics",
"Numpy",
")",
".",
"Possible",
"values",
":",
"np",
".",
"inf",
"1",
"or",
"2",
".",
":",
"param",
"eps",
":",
"Epsilon",
"bound",
"of",
"the",
"perturbation",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L358-L392
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
infer_devices
|
def infer_devices(devices=None):
"""
Returns the list of devices that multi-replica code should use.
:param devices: list of string device names, e.g. ["/GPU:0"]
If the user specifies this, `infer_devices` checks that it is
valid, and then uses this user-specified list.
If the user does not specify this, infer_devices uses:
- All available GPUs, if there are any
- CPU otherwise
"""
if devices is None:
devices = get_available_gpus()
if len(devices) == 0:
warnings.warn("No GPUS, running on CPU")
# Set device to empy string, tf will figure out whether to use
# XLA or not, etc., automatically
devices = [""]
else:
assert len(devices) > 0
for device in devices:
assert isinstance(device, six.string_types), type(device)
return devices
|
python
|
def infer_devices(devices=None):
"""
Returns the list of devices that multi-replica code should use.
:param devices: list of string device names, e.g. ["/GPU:0"]
If the user specifies this, `infer_devices` checks that it is
valid, and then uses this user-specified list.
If the user does not specify this, infer_devices uses:
- All available GPUs, if there are any
- CPU otherwise
"""
if devices is None:
devices = get_available_gpus()
if len(devices) == 0:
warnings.warn("No GPUS, running on CPU")
# Set device to empy string, tf will figure out whether to use
# XLA or not, etc., automatically
devices = [""]
else:
assert len(devices) > 0
for device in devices:
assert isinstance(device, six.string_types), type(device)
return devices
|
[
"def",
"infer_devices",
"(",
"devices",
"=",
"None",
")",
":",
"if",
"devices",
"is",
"None",
":",
"devices",
"=",
"get_available_gpus",
"(",
")",
"if",
"len",
"(",
"devices",
")",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"No GPUS, running on CPU\"",
")",
"# Set device to empy string, tf will figure out whether to use",
"# XLA or not, etc., automatically",
"devices",
"=",
"[",
"\"\"",
"]",
"else",
":",
"assert",
"len",
"(",
"devices",
")",
">",
"0",
"for",
"device",
"in",
"devices",
":",
"assert",
"isinstance",
"(",
"device",
",",
"six",
".",
"string_types",
")",
",",
"type",
"(",
"device",
")",
"return",
"devices"
] |
Returns the list of devices that multi-replica code should use.
:param devices: list of string device names, e.g. ["/GPU:0"]
If the user specifies this, `infer_devices` checks that it is
valid, and then uses this user-specified list.
If the user does not specify this, infer_devices uses:
- All available GPUs, if there are any
- CPU otherwise
|
[
"Returns",
"the",
"list",
"of",
"devices",
"that",
"multi",
"-",
"replica",
"code",
"should",
"use",
".",
":",
"param",
"devices",
":",
"list",
"of",
"string",
"device",
"names",
"e",
".",
"g",
".",
"[",
"/",
"GPU",
":",
"0",
"]",
"If",
"the",
"user",
"specifies",
"this",
"infer_devices",
"checks",
"that",
"it",
"is",
"valid",
"and",
"then",
"uses",
"this",
"user",
"-",
"specified",
"list",
".",
"If",
"the",
"user",
"does",
"not",
"specify",
"this",
"infer_devices",
"uses",
":",
"-",
"All",
"available",
"GPUs",
"if",
"there",
"are",
"any",
"-",
"CPU",
"otherwise"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L502-L523
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
get_available_gpus
|
def get_available_gpus():
"""
Returns a list of string names of all available GPUs
"""
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
|
python
|
def get_available_gpus():
"""
Returns a list of string names of all available GPUs
"""
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
|
[
"def",
"get_available_gpus",
"(",
")",
":",
"local_device_protos",
"=",
"device_lib",
".",
"list_local_devices",
"(",
")",
"return",
"[",
"x",
".",
"name",
"for",
"x",
"in",
"local_device_protos",
"if",
"x",
".",
"device_type",
"==",
"'GPU'",
"]"
] |
Returns a list of string names of all available GPUs
|
[
"Returns",
"a",
"list",
"of",
"string",
"names",
"of",
"all",
"available",
"GPUs"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L526-L531
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
clip_by_value
|
def clip_by_value(t, clip_value_min, clip_value_max, name=None):
"""
A wrapper for clip_by_value that casts the clipping range if needed.
"""
def cast_clip(clip):
"""
Cast clipping range argument if needed.
"""
if t.dtype in (tf.float32, tf.float64):
if hasattr(clip, 'dtype'):
# Convert to tf dtype in case this is a numpy dtype
clip_dtype = tf.as_dtype(clip.dtype)
if clip_dtype != t.dtype:
return tf.cast(clip, t.dtype)
return clip
clip_value_min = cast_clip(clip_value_min)
clip_value_max = cast_clip(clip_value_max)
return tf.clip_by_value(t, clip_value_min, clip_value_max, name)
|
python
|
def clip_by_value(t, clip_value_min, clip_value_max, name=None):
"""
A wrapper for clip_by_value that casts the clipping range if needed.
"""
def cast_clip(clip):
"""
Cast clipping range argument if needed.
"""
if t.dtype in (tf.float32, tf.float64):
if hasattr(clip, 'dtype'):
# Convert to tf dtype in case this is a numpy dtype
clip_dtype = tf.as_dtype(clip.dtype)
if clip_dtype != t.dtype:
return tf.cast(clip, t.dtype)
return clip
clip_value_min = cast_clip(clip_value_min)
clip_value_max = cast_clip(clip_value_max)
return tf.clip_by_value(t, clip_value_min, clip_value_max, name)
|
[
"def",
"clip_by_value",
"(",
"t",
",",
"clip_value_min",
",",
"clip_value_max",
",",
"name",
"=",
"None",
")",
":",
"def",
"cast_clip",
"(",
"clip",
")",
":",
"\"\"\"\n Cast clipping range argument if needed.\n \"\"\"",
"if",
"t",
".",
"dtype",
"in",
"(",
"tf",
".",
"float32",
",",
"tf",
".",
"float64",
")",
":",
"if",
"hasattr",
"(",
"clip",
",",
"'dtype'",
")",
":",
"# Convert to tf dtype in case this is a numpy dtype",
"clip_dtype",
"=",
"tf",
".",
"as_dtype",
"(",
"clip",
".",
"dtype",
")",
"if",
"clip_dtype",
"!=",
"t",
".",
"dtype",
":",
"return",
"tf",
".",
"cast",
"(",
"clip",
",",
"t",
".",
"dtype",
")",
"return",
"clip",
"clip_value_min",
"=",
"cast_clip",
"(",
"clip_value_min",
")",
"clip_value_max",
"=",
"cast_clip",
"(",
"clip_value_max",
")",
"return",
"tf",
".",
"clip_by_value",
"(",
"t",
",",
"clip_value_min",
",",
"clip_value_max",
",",
"name",
")"
] |
A wrapper for clip_by_value that casts the clipping range if needed.
|
[
"A",
"wrapper",
"for",
"clip_by_value",
"that",
"casts",
"the",
"clipping",
"range",
"if",
"needed",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L540-L559
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
mul
|
def mul(a, b):
"""
A wrapper around tf multiplication that does more automatic casting of
the input.
"""
def multiply(a, b):
"""Multiplication"""
return a * b
return op_with_scalar_cast(a, b, multiply)
|
python
|
def mul(a, b):
"""
A wrapper around tf multiplication that does more automatic casting of
the input.
"""
def multiply(a, b):
"""Multiplication"""
return a * b
return op_with_scalar_cast(a, b, multiply)
|
[
"def",
"mul",
"(",
"a",
",",
"b",
")",
":",
"def",
"multiply",
"(",
"a",
",",
"b",
")",
":",
"\"\"\"Multiplication\"\"\"",
"return",
"a",
"*",
"b",
"return",
"op_with_scalar_cast",
"(",
"a",
",",
"b",
",",
"multiply",
")"
] |
A wrapper around tf multiplication that does more automatic casting of
the input.
|
[
"A",
"wrapper",
"around",
"tf",
"multiplication",
"that",
"does",
"more",
"automatic",
"casting",
"of",
"the",
"input",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L561-L569
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
div
|
def div(a, b):
"""
A wrapper around tf division that does more automatic casting of
the input.
"""
def divide(a, b):
"""Division"""
return a / b
return op_with_scalar_cast(a, b, divide)
|
python
|
def div(a, b):
"""
A wrapper around tf division that does more automatic casting of
the input.
"""
def divide(a, b):
"""Division"""
return a / b
return op_with_scalar_cast(a, b, divide)
|
[
"def",
"div",
"(",
"a",
",",
"b",
")",
":",
"def",
"divide",
"(",
"a",
",",
"b",
")",
":",
"\"\"\"Division\"\"\"",
"return",
"a",
"/",
"b",
"return",
"op_with_scalar_cast",
"(",
"a",
",",
"b",
",",
"divide",
")"
] |
A wrapper around tf division that does more automatic casting of
the input.
|
[
"A",
"wrapper",
"around",
"tf",
"division",
"that",
"does",
"more",
"automatic",
"casting",
"of",
"the",
"input",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L571-L579
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
op_with_scalar_cast
|
def op_with_scalar_cast(a, b, f):
"""
Builds the graph to compute f(a, b).
If only one of the two arguments is a scalar and the operation would
cause a type error without casting, casts the scalar to match the
tensor.
:param a: a tf-compatible array or scalar
:param b: a tf-compatible array or scalar
"""
try:
return f(a, b)
except (TypeError, ValueError):
pass
def is_scalar(x):
"""Return True if `x` is a scalar"""
if hasattr(x, "get_shape"):
shape = x.get_shape()
return shape.ndims == 0
if hasattr(x, "ndim"):
return x.ndim == 0
assert isinstance(x, (int, float))
return True
a_scalar = is_scalar(a)
b_scalar = is_scalar(b)
if a_scalar and b_scalar:
raise TypeError("Trying to apply " + str(f) + " with mixed types")
if a_scalar and not b_scalar:
a = tf.cast(a, b.dtype)
if b_scalar and not a_scalar:
b = tf.cast(b, a.dtype)
return f(a, b)
|
python
|
def op_with_scalar_cast(a, b, f):
"""
Builds the graph to compute f(a, b).
If only one of the two arguments is a scalar and the operation would
cause a type error without casting, casts the scalar to match the
tensor.
:param a: a tf-compatible array or scalar
:param b: a tf-compatible array or scalar
"""
try:
return f(a, b)
except (TypeError, ValueError):
pass
def is_scalar(x):
"""Return True if `x` is a scalar"""
if hasattr(x, "get_shape"):
shape = x.get_shape()
return shape.ndims == 0
if hasattr(x, "ndim"):
return x.ndim == 0
assert isinstance(x, (int, float))
return True
a_scalar = is_scalar(a)
b_scalar = is_scalar(b)
if a_scalar and b_scalar:
raise TypeError("Trying to apply " + str(f) + " with mixed types")
if a_scalar and not b_scalar:
a = tf.cast(a, b.dtype)
if b_scalar and not a_scalar:
b = tf.cast(b, a.dtype)
return f(a, b)
|
[
"def",
"op_with_scalar_cast",
"(",
"a",
",",
"b",
",",
"f",
")",
":",
"try",
":",
"return",
"f",
"(",
"a",
",",
"b",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"pass",
"def",
"is_scalar",
"(",
"x",
")",
":",
"\"\"\"Return True if `x` is a scalar\"\"\"",
"if",
"hasattr",
"(",
"x",
",",
"\"get_shape\"",
")",
":",
"shape",
"=",
"x",
".",
"get_shape",
"(",
")",
"return",
"shape",
".",
"ndims",
"==",
"0",
"if",
"hasattr",
"(",
"x",
",",
"\"ndim\"",
")",
":",
"return",
"x",
".",
"ndim",
"==",
"0",
"assert",
"isinstance",
"(",
"x",
",",
"(",
"int",
",",
"float",
")",
")",
"return",
"True",
"a_scalar",
"=",
"is_scalar",
"(",
"a",
")",
"b_scalar",
"=",
"is_scalar",
"(",
"b",
")",
"if",
"a_scalar",
"and",
"b_scalar",
":",
"raise",
"TypeError",
"(",
"\"Trying to apply \"",
"+",
"str",
"(",
"f",
")",
"+",
"\" with mixed types\"",
")",
"if",
"a_scalar",
"and",
"not",
"b_scalar",
":",
"a",
"=",
"tf",
".",
"cast",
"(",
"a",
",",
"b",
".",
"dtype",
")",
"if",
"b_scalar",
"and",
"not",
"a_scalar",
":",
"b",
"=",
"tf",
".",
"cast",
"(",
"b",
",",
"a",
".",
"dtype",
")",
"return",
"f",
"(",
"a",
",",
"b",
")"
] |
Builds the graph to compute f(a, b).
If only one of the two arguments is a scalar and the operation would
cause a type error without casting, casts the scalar to match the
tensor.
:param a: a tf-compatible array or scalar
:param b: a tf-compatible array or scalar
|
[
"Builds",
"the",
"graph",
"to",
"compute",
"f",
"(",
"a",
"b",
")",
".",
"If",
"only",
"one",
"of",
"the",
"two",
"arguments",
"is",
"a",
"scalar",
"and",
"the",
"operation",
"would",
"cause",
"a",
"type",
"error",
"without",
"casting",
"casts",
"the",
"scalar",
"to",
"match",
"the",
"tensor",
".",
":",
"param",
"a",
":",
"a",
"tf",
"-",
"compatible",
"array",
"or",
"scalar",
":",
"param",
"b",
":",
"a",
"tf",
"-",
"compatible",
"array",
"or",
"scalar"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L581-L618
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
jacobian_graph
|
def jacobian_graph(predictions, x, nb_classes):
"""
Create the Jacobian graph to be ran later in a TF session
:param predictions: the model's symbolic output (linear output,
pre-softmax)
:param x: the input placeholder
:param nb_classes: the number of classes the model has
:return:
"""
# This function will return a list of TF gradients
list_derivatives = []
# Define the TF graph elements to compute our derivatives for each class
for class_ind in xrange(nb_classes):
derivatives, = tf.gradients(predictions[:, class_ind], x)
list_derivatives.append(derivatives)
return list_derivatives
|
python
|
def jacobian_graph(predictions, x, nb_classes):
"""
Create the Jacobian graph to be ran later in a TF session
:param predictions: the model's symbolic output (linear output,
pre-softmax)
:param x: the input placeholder
:param nb_classes: the number of classes the model has
:return:
"""
# This function will return a list of TF gradients
list_derivatives = []
# Define the TF graph elements to compute our derivatives for each class
for class_ind in xrange(nb_classes):
derivatives, = tf.gradients(predictions[:, class_ind], x)
list_derivatives.append(derivatives)
return list_derivatives
|
[
"def",
"jacobian_graph",
"(",
"predictions",
",",
"x",
",",
"nb_classes",
")",
":",
"# This function will return a list of TF gradients",
"list_derivatives",
"=",
"[",
"]",
"# Define the TF graph elements to compute our derivatives for each class",
"for",
"class_ind",
"in",
"xrange",
"(",
"nb_classes",
")",
":",
"derivatives",
",",
"=",
"tf",
".",
"gradients",
"(",
"predictions",
"[",
":",
",",
"class_ind",
"]",
",",
"x",
")",
"list_derivatives",
".",
"append",
"(",
"derivatives",
")",
"return",
"list_derivatives"
] |
Create the Jacobian graph to be ran later in a TF session
:param predictions: the model's symbolic output (linear output,
pre-softmax)
:param x: the input placeholder
:param nb_classes: the number of classes the model has
:return:
|
[
"Create",
"the",
"Jacobian",
"graph",
"to",
"be",
"ran",
"later",
"in",
"a",
"TF",
"session",
":",
"param",
"predictions",
":",
"the",
"model",
"s",
"symbolic",
"output",
"(",
"linear",
"output",
"pre",
"-",
"softmax",
")",
":",
"param",
"x",
":",
"the",
"input",
"placeholder",
":",
"param",
"nb_classes",
":",
"the",
"number",
"of",
"classes",
"the",
"model",
"has",
":",
"return",
":"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L647-L665
|
train
|
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
jacobian_augmentation
|
def jacobian_augmentation(sess,
x,
X_sub_prev,
Y_sub,
grads,
lmbda,
aug_batch_size=512,
feed=None):
"""
Augment an adversary's substitute training set using the Jacobian
of a substitute model to generate new synthetic inputs.
See https://arxiv.org/abs/1602.02697 for more details.
See cleverhans_tutorials/mnist_blackbox.py for example use case
:param sess: TF session in which the substitute model is defined
:param x: input TF placeholder for the substitute model
:param X_sub_prev: substitute training data available to the adversary
at the previous iteration
:param Y_sub: substitute training labels available to the adversary
at the previous iteration
:param grads: Jacobian symbolic graph for the substitute
(should be generated using utils_tf.jacobian_graph)
:return: augmented substitute data (will need to be labeled by oracle)
"""
assert len(x.get_shape()) == len(np.shape(X_sub_prev))
assert len(grads) >= np.max(Y_sub) + 1
assert len(X_sub_prev) == len(Y_sub)
aug_batch_size = min(aug_batch_size, X_sub_prev.shape[0])
# Prepare input_shape (outside loop) for feeding dictionary below
input_shape = list(x.get_shape())
input_shape[0] = 1
# Create new numpy array for adversary training data
# with twice as many components on the first dimension.
X_sub = np.vstack([X_sub_prev, X_sub_prev])
num_samples = X_sub_prev.shape[0]
# Creating and processing as batch
for p_idxs in range(0, num_samples, aug_batch_size):
X_batch = X_sub_prev[p_idxs:p_idxs + aug_batch_size, ...]
feed_dict = {x: X_batch}
if feed is not None:
feed_dict.update(feed)
# Compute sign matrix
grad_val = sess.run([tf.sign(grads)], feed_dict=feed_dict)[0]
# Create new synthetic point in adversary substitute training set
for (indx, ind) in zip(range(p_idxs, p_idxs + X_batch.shape[0]),
range(X_batch.shape[0])):
X_sub[num_samples + indx] = (
X_batch[ind] + lmbda * grad_val[Y_sub[indx], ind, ...])
# Return augmented training data (needs to be labeled afterwards)
return X_sub
|
python
|
def jacobian_augmentation(sess,
                          x,
                          X_sub_prev,
                          Y_sub,
                          grads,
                          lmbda,
                          aug_batch_size=512,
                          feed=None):
  """
  Augment an adversary's substitute training set using the Jacobian
  of a substitute model to generate new synthetic inputs.
  See https://arxiv.org/abs/1602.02697 for more details.
  See cleverhans_tutorials/mnist_blackbox.py for example use case
  :param sess: TF session in which the substitute model is defined
  :param x: input TF placeholder for the substitute model
  :param X_sub_prev: substitute training data available to the adversary
                     at the previous iteration
  :param Y_sub: substitute training labels available to the adversary
                at the previous iteration
  :param grads: Jacobian symbolic graph for the substitute
                (should be generated using utils_tf.jacobian_graph)
  :param lmbda: perturbation step size applied along the Jacobian sign
  :param aug_batch_size: number of samples evaluated per sess.run call
                         (capped at the dataset size)
  :param feed: optional extra feed_dict entries for sess.run
  :return: augmented substitute data (will need to be labeled by oracle)
  """
  assert len(x.get_shape()) == len(np.shape(X_sub_prev))
  assert len(grads) >= np.max(Y_sub) + 1
  assert len(X_sub_prev) == len(Y_sub)
  aug_batch_size = min(aug_batch_size, X_sub_prev.shape[0])
  # Create new numpy array for adversary training data
  # with twice as many components on the first dimension.
  X_sub = np.vstack([X_sub_prev, X_sub_prev])
  num_samples = X_sub_prev.shape[0]
  # Creating and processing as batch
  for p_idxs in range(0, num_samples, aug_batch_size):
    X_batch = X_sub_prev[p_idxs:p_idxs + aug_batch_size, ...]
    feed_dict = {x: X_batch}
    if feed is not None:
      feed_dict.update(feed)
    # Compute sign matrix of the Jacobian for this batch; grad_val is
    # indexed as [class, sample-in-batch, ...].
    grad_val = sess.run([tf.sign(grads)], feed_dict=feed_dict)[0]
    # Create new synthetic point in adversary substitute training set:
    # perturb each sample along the sign of its own label's gradient.
    for (indx, ind) in zip(range(p_idxs, p_idxs + X_batch.shape[0]),
                           range(X_batch.shape[0])):
      X_sub[num_samples + indx] = (
          X_batch[ind] + lmbda * grad_val[Y_sub[indx], ind, ...])
  # Return augmented training data (needs to be labeled afterwards)
  return X_sub
|
[
"def",
"jacobian_augmentation",
"(",
"sess",
",",
"x",
",",
"X_sub_prev",
",",
"Y_sub",
",",
"grads",
",",
"lmbda",
",",
"aug_batch_size",
"=",
"512",
",",
"feed",
"=",
"None",
")",
":",
"assert",
"len",
"(",
"x",
".",
"get_shape",
"(",
")",
")",
"==",
"len",
"(",
"np",
".",
"shape",
"(",
"X_sub_prev",
")",
")",
"assert",
"len",
"(",
"grads",
")",
">=",
"np",
".",
"max",
"(",
"Y_sub",
")",
"+",
"1",
"assert",
"len",
"(",
"X_sub_prev",
")",
"==",
"len",
"(",
"Y_sub",
")",
"aug_batch_size",
"=",
"min",
"(",
"aug_batch_size",
",",
"X_sub_prev",
".",
"shape",
"[",
"0",
"]",
")",
"# Prepare input_shape (outside loop) for feeding dictionary below",
"input_shape",
"=",
"list",
"(",
"x",
".",
"get_shape",
"(",
")",
")",
"input_shape",
"[",
"0",
"]",
"=",
"1",
"# Create new numpy array for adversary training data",
"# with twice as many components on the first dimension.",
"X_sub",
"=",
"np",
".",
"vstack",
"(",
"[",
"X_sub_prev",
",",
"X_sub_prev",
"]",
")",
"num_samples",
"=",
"X_sub_prev",
".",
"shape",
"[",
"0",
"]",
"# Creating and processing as batch",
"for",
"p_idxs",
"in",
"range",
"(",
"0",
",",
"num_samples",
",",
"aug_batch_size",
")",
":",
"X_batch",
"=",
"X_sub_prev",
"[",
"p_idxs",
":",
"p_idxs",
"+",
"aug_batch_size",
",",
"...",
"]",
"feed_dict",
"=",
"{",
"x",
":",
"X_batch",
"}",
"if",
"feed",
"is",
"not",
"None",
":",
"feed_dict",
".",
"update",
"(",
"feed",
")",
"# Compute sign matrix",
"grad_val",
"=",
"sess",
".",
"run",
"(",
"[",
"tf",
".",
"sign",
"(",
"grads",
")",
"]",
",",
"feed_dict",
"=",
"feed_dict",
")",
"[",
"0",
"]",
"# Create new synthetic point in adversary substitute training set",
"for",
"(",
"indx",
",",
"ind",
")",
"in",
"zip",
"(",
"range",
"(",
"p_idxs",
",",
"p_idxs",
"+",
"X_batch",
".",
"shape",
"[",
"0",
"]",
")",
",",
"range",
"(",
"X_batch",
".",
"shape",
"[",
"0",
"]",
")",
")",
":",
"X_sub",
"[",
"num_samples",
"+",
"indx",
"]",
"=",
"(",
"X_batch",
"[",
"ind",
"]",
"+",
"lmbda",
"*",
"grad_val",
"[",
"Y_sub",
"[",
"indx",
"]",
",",
"ind",
",",
"...",
"]",
")",
"# Return augmented training data (needs to be labeled afterwards)",
"return",
"X_sub"
] |
Augment an adversary's substitute training set using the Jacobian
of a substitute model to generate new synthetic inputs.
See https://arxiv.org/abs/1602.02697 for more details.
See cleverhans_tutorials/mnist_blackbox.py for example use case
:param sess: TF session in which the substitute model is defined
:param x: input TF placeholder for the substitute model
:param X_sub_prev: substitute training data available to the adversary
at the previous iteration
:param Y_sub: substitute training labels available to the adversary
at the previous iteration
:param grads: Jacobian symbolic graph for the substitute
(should be generated using utils_tf.jacobian_graph)
:return: augmented substitute data (will need to be labeled by oracle)
|
[
"Augment",
"an",
"adversary",
"s",
"substitute",
"training",
"set",
"using",
"the",
"Jacobian",
"of",
"a",
"substitute",
"model",
"to",
"generate",
"new",
"synthetic",
"inputs",
".",
"See",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1602",
".",
"02697",
"for",
"more",
"details",
".",
"See",
"cleverhans_tutorials",
"/",
"mnist_blackbox",
".",
"py",
"for",
"example",
"use",
"case",
":",
"param",
"sess",
":",
"TF",
"session",
"in",
"which",
"the",
"substitute",
"model",
"is",
"defined",
":",
"param",
"x",
":",
"input",
"TF",
"placeholder",
"for",
"the",
"substitute",
"model",
":",
"param",
"X_sub_prev",
":",
"substitute",
"training",
"data",
"available",
"to",
"the",
"adversary",
"at",
"the",
"previous",
"iteration",
":",
"param",
"Y_sub",
":",
"substitute",
"training",
"labels",
"available",
"to",
"the",
"adversary",
"at",
"the",
"previous",
"iteration",
":",
"param",
"grads",
":",
"Jacobian",
"symbolic",
"graph",
"for",
"the",
"substitute",
"(",
"should",
"be",
"generated",
"using",
"utils_tf",
".",
"jacobian_graph",
")",
":",
"return",
":",
"augmented",
"substitute",
"data",
"(",
"will",
"need",
"to",
"be",
"labeled",
"by",
"oracle",
")"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L667-L722
|
train
|
tensorflow/cleverhans
|
cleverhans_tutorials/evaluate_pickled_model.py
|
evaluate_model
|
def evaluate_model(filepath,
                   train_start=0, train_end=60000, test_start=0,
                   test_end=10000, batch_size=128,
                   testing=False, num_threads=None):
  """
  Run evaluation on a saved model: prints accuracy on clean MNIST test
  data and on FGSM adversarial examples crafted from it.
  :param filepath: path to model to evaluate
  :param train_start: index of first training set example
  :param train_end: index of last training set example
  :param test_start: index of first test set example
  :param test_end: index of last test set example
  :param batch_size: size of evaluation batches
  :param testing: unused — the body never reads it (kept for signature
                  compatibility with related tutorial scripts)
  :param num_threads: if truthy, limit TF intra-op parallelism to 1 thread
  """
  # Set TF random seed to improve reproducibility
  tf.set_random_seed(1234)
  # Set logging level to see debug information
  set_log_level(logging.INFO)
  # Create TF session
  if num_threads:
    config_args = dict(intra_op_parallelism_threads=1)
  else:
    config_args = {}
  sess = tf.Session(config=tf.ConfigProto(**config_args))
  # Get MNIST test data
  mnist = MNIST(train_start=train_start, train_end=train_end,
                test_start=test_start, test_end=test_end)
  x_train, y_train = mnist.get_set('train')
  x_test, y_test = mnist.get_set('test')
  # Use Image Parameters
  img_rows, img_cols, nchannels = x_train.shape[1:4]
  nb_classes = y_train.shape[1]
  # Define input TF placeholder
  x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
                                        nchannels))
  y = tf.placeholder(tf.float32, shape=(None, nb_classes))
  eval_params = {'batch_size': batch_size}
  # FGSM configuration: L-inf perturbation of 0.3 on inputs clipped to [0, 1]
  fgsm_params = {
      'eps': 0.3,
      'clip_min': 0.,
      'clip_max': 1.
  }
  def do_eval(preds, x_set, y_set, report_key, is_adv=None):
    # Evaluate `preds` on (x_set, y_set) and print the accuracy.
    # `report_key` is accepted but not used in this function.
    acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
    if is_adv is None:
      report_text = None
    elif is_adv:
      report_text = 'adversarial'
    else:
      report_text = 'legitimate'
    if report_text:
      print('Test accuracy on %s examples: %0.4f' % (report_text, acc))
  with sess.as_default():
    # Restore the pickled model within this session's context.
    model = load(filepath)
  assert len(model.get_params()) > 0
  # Initialize the Fast Gradient Sign Method (FGSM) attack object and
  # graph
  fgsm = FastGradientMethod(model, sess=sess)
  adv_x = fgsm.generate(x, **fgsm_params)
  preds_adv = model.get_logits(adv_x)
  preds = model.get_logits(x)
  # Evaluate the accuracy of the MNIST model on adversarial examples
  do_eval(preds, x_test, y_test, 'train_clean_train_clean_eval', False)
  do_eval(preds_adv, x_test, y_test, 'clean_train_adv_eval', True)
|
python
|
def evaluate_model(filepath,
                   train_start=0, train_end=60000, test_start=0,
                   test_end=10000, batch_size=128,
                   testing=False, num_threads=None):
  """
  Run evaluation on a saved model: prints accuracy on clean MNIST test
  data and on FGSM adversarial examples crafted from it.
  :param filepath: path to model to evaluate
  :param train_start: index of first training set example
  :param train_end: index of last training set example
  :param test_start: index of first test set example
  :param test_end: index of last test set example
  :param batch_size: size of evaluation batches
  :param testing: unused — the body never reads it (kept for signature
                  compatibility with related tutorial scripts)
  :param num_threads: if truthy, limit TF intra-op parallelism to 1 thread
  """
  # Set TF random seed to improve reproducibility
  tf.set_random_seed(1234)
  # Set logging level to see debug information
  set_log_level(logging.INFO)
  # Create TF session
  if num_threads:
    config_args = dict(intra_op_parallelism_threads=1)
  else:
    config_args = {}
  sess = tf.Session(config=tf.ConfigProto(**config_args))
  # Get MNIST test data
  mnist = MNIST(train_start=train_start, train_end=train_end,
                test_start=test_start, test_end=test_end)
  x_train, y_train = mnist.get_set('train')
  x_test, y_test = mnist.get_set('test')
  # Use Image Parameters
  img_rows, img_cols, nchannels = x_train.shape[1:4]
  nb_classes = y_train.shape[1]
  # Define input TF placeholder
  x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
                                        nchannels))
  y = tf.placeholder(tf.float32, shape=(None, nb_classes))
  eval_params = {'batch_size': batch_size}
  # FGSM configuration: L-inf perturbation of 0.3 on inputs clipped to [0, 1]
  fgsm_params = {
      'eps': 0.3,
      'clip_min': 0.,
      'clip_max': 1.
  }
  def do_eval(preds, x_set, y_set, report_key, is_adv=None):
    # Evaluate `preds` on (x_set, y_set) and print the accuracy.
    # `report_key` is accepted but not used in this function.
    acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
    if is_adv is None:
      report_text = None
    elif is_adv:
      report_text = 'adversarial'
    else:
      report_text = 'legitimate'
    if report_text:
      print('Test accuracy on %s examples: %0.4f' % (report_text, acc))
  with sess.as_default():
    # Restore the pickled model within this session's context.
    model = load(filepath)
  assert len(model.get_params()) > 0
  # Initialize the Fast Gradient Sign Method (FGSM) attack object and
  # graph
  fgsm = FastGradientMethod(model, sess=sess)
  adv_x = fgsm.generate(x, **fgsm_params)
  preds_adv = model.get_logits(adv_x)
  preds = model.get_logits(x)
  # Evaluate the accuracy of the MNIST model on adversarial examples
  do_eval(preds, x_test, y_test, 'train_clean_train_clean_eval', False)
  do_eval(preds_adv, x_test, y_test, 'clean_train_adv_eval', True)
|
[
"def",
"evaluate_model",
"(",
"filepath",
",",
"train_start",
"=",
"0",
",",
"train_end",
"=",
"60000",
",",
"test_start",
"=",
"0",
",",
"test_end",
"=",
"10000",
",",
"batch_size",
"=",
"128",
",",
"testing",
"=",
"False",
",",
"num_threads",
"=",
"None",
")",
":",
"# Set TF random seed to improve reproducibility",
"tf",
".",
"set_random_seed",
"(",
"1234",
")",
"# Set logging level to see debug information",
"set_log_level",
"(",
"logging",
".",
"INFO",
")",
"# Create TF session",
"if",
"num_threads",
":",
"config_args",
"=",
"dict",
"(",
"intra_op_parallelism_threads",
"=",
"1",
")",
"else",
":",
"config_args",
"=",
"{",
"}",
"sess",
"=",
"tf",
".",
"Session",
"(",
"config",
"=",
"tf",
".",
"ConfigProto",
"(",
"*",
"*",
"config_args",
")",
")",
"# Get MNIST test data",
"mnist",
"=",
"MNIST",
"(",
"train_start",
"=",
"train_start",
",",
"train_end",
"=",
"train_end",
",",
"test_start",
"=",
"test_start",
",",
"test_end",
"=",
"test_end",
")",
"x_train",
",",
"y_train",
"=",
"mnist",
".",
"get_set",
"(",
"'train'",
")",
"x_test",
",",
"y_test",
"=",
"mnist",
".",
"get_set",
"(",
"'test'",
")",
"# Use Image Parameters",
"img_rows",
",",
"img_cols",
",",
"nchannels",
"=",
"x_train",
".",
"shape",
"[",
"1",
":",
"4",
"]",
"nb_classes",
"=",
"y_train",
".",
"shape",
"[",
"1",
"]",
"# Define input TF placeholder",
"x",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"shape",
"=",
"(",
"None",
",",
"img_rows",
",",
"img_cols",
",",
"nchannels",
")",
")",
"y",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"shape",
"=",
"(",
"None",
",",
"nb_classes",
")",
")",
"eval_params",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"fgsm_params",
"=",
"{",
"'eps'",
":",
"0.3",
",",
"'clip_min'",
":",
"0.",
",",
"'clip_max'",
":",
"1.",
"}",
"def",
"do_eval",
"(",
"preds",
",",
"x_set",
",",
"y_set",
",",
"report_key",
",",
"is_adv",
"=",
"None",
")",
":",
"acc",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds",
",",
"x_set",
",",
"y_set",
",",
"args",
"=",
"eval_params",
")",
"if",
"is_adv",
"is",
"None",
":",
"report_text",
"=",
"None",
"elif",
"is_adv",
":",
"report_text",
"=",
"'adversarial'",
"else",
":",
"report_text",
"=",
"'legitimate'",
"if",
"report_text",
":",
"print",
"(",
"'Test accuracy on %s examples: %0.4f'",
"%",
"(",
"report_text",
",",
"acc",
")",
")",
"with",
"sess",
".",
"as_default",
"(",
")",
":",
"model",
"=",
"load",
"(",
"filepath",
")",
"assert",
"len",
"(",
"model",
".",
"get_params",
"(",
")",
")",
">",
"0",
"# Initialize the Fast Gradient Sign Method (FGSM) attack object and",
"# graph",
"fgsm",
"=",
"FastGradientMethod",
"(",
"model",
",",
"sess",
"=",
"sess",
")",
"adv_x",
"=",
"fgsm",
".",
"generate",
"(",
"x",
",",
"*",
"*",
"fgsm_params",
")",
"preds_adv",
"=",
"model",
".",
"get_logits",
"(",
"adv_x",
")",
"preds",
"=",
"model",
".",
"get_logits",
"(",
"x",
")",
"# Evaluate the accuracy of the MNIST model on adversarial examples",
"do_eval",
"(",
"preds",
",",
"x_test",
",",
"y_test",
",",
"'train_clean_train_clean_eval'",
",",
"False",
")",
"do_eval",
"(",
"preds_adv",
",",
"x_test",
",",
"y_test",
",",
"'clean_train_adv_eval'",
",",
"True",
")"
] |
Run evaluation on a saved model
:param filepath: path to model to evaluate
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param batch_size: size of evaluation batches
|
[
"Run",
"evaluation",
"on",
"a",
"saved",
"model",
":",
"param",
"filepath",
":",
"path",
"to",
"model",
"to",
"evaluate",
":",
"param",
"train_start",
":",
"index",
"of",
"first",
"training",
"set",
"example",
":",
"param",
"train_end",
":",
"index",
"of",
"last",
"training",
"set",
"example",
":",
"param",
"test_start",
":",
"index",
"of",
"first",
"test",
"set",
"example",
":",
"param",
"test_end",
":",
"index",
"of",
"last",
"test",
"set",
"example",
":",
"param",
"batch_size",
":",
"size",
"of",
"evaluation",
"batches"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans_tutorials/evaluate_pickled_model.py#L26-L99
|
train
|
tensorflow/cleverhans
|
examples/multigpu_advtrain/runner.py
|
RunnerMultiGPU.set_input
|
def set_input(self, X_batch=None):
  """
  Preprocessing the inputs before calling session.run()
  :param X_batch: A dictionary of inputs to the first sub-graph
  :return: A tuple, `(fetches, fd)`, with `fetches` being a list of
           Tensors to be fetches and `fd` the feed dictionary.
  """
  inputs = self.inputs
  outputs = self.outputs
  # data for first gpu
  fd = {}
  if X_batch is not None:
    self.next_vals[0] = OrderedDict()
    for i, vname in enumerate(self.inputs[0]):
      if vname in X_batch:
        self.next_vals[0][vname] = X_batch[vname]
      else:
        self.next_vals[0][vname] = None
  else:
    self.next_vals[0] = None
  # Set `feed_dict` for each GPU. If there is something to run for that
  # GPU, collect outputs to be fetched.
  fetches = []
  self.active_gpus = []
  for i in range(len(outputs)):
    if self.next_vals[i] is None:
      self.active_gpus += [False]
      continue
    self.active_gpus += [True]
    for k in inputs[i]:
      if self.next_vals[i][k] is not None:
        fd[inputs[i][k]] = self.next_vals[i][k]
    # .items() works on Python 2 and 3; dict.iteritems() was removed in
    # Python 3 and would raise AttributeError there.
    for k, v in outputs[i].items():
      fetches += [v]
  fd.update(self.feed_dict)
  return fetches, fd
|
python
|
def set_input(self, X_batch=None):
  """
  Preprocessing the inputs before calling session.run()
  :param X_batch: A dictionary of inputs to the first sub-graph
  :return: A tuple, `(fetches, fd)`, with `fetches` being a list of
           Tensors to be fetches and `fd` the feed dictionary.
  """
  inputs = self.inputs
  outputs = self.outputs
  # data for first gpu
  fd = {}
  if X_batch is not None:
    self.next_vals[0] = OrderedDict()
    for i, vname in enumerate(self.inputs[0]):
      if vname in X_batch:
        self.next_vals[0][vname] = X_batch[vname]
      else:
        self.next_vals[0][vname] = None
  else:
    self.next_vals[0] = None
  # Set `feed_dict` for each GPU. If there is something to run for that
  # GPU, collect outputs to be fetched.
  fetches = []
  self.active_gpus = []
  for i in range(len(outputs)):
    if self.next_vals[i] is None:
      self.active_gpus += [False]
      continue
    self.active_gpus += [True]
    for k in inputs[i]:
      if self.next_vals[i][k] is not None:
        fd[inputs[i][k]] = self.next_vals[i][k]
    # .items() works on Python 2 and 3; dict.iteritems() was removed in
    # Python 3 and would raise AttributeError there.
    for k, v in outputs[i].items():
      fetches += [v]
  fd.update(self.feed_dict)
  return fetches, fd
|
[
"def",
"set_input",
"(",
"self",
",",
"X_batch",
"=",
"None",
")",
":",
"inputs",
"=",
"self",
".",
"inputs",
"outputs",
"=",
"self",
".",
"outputs",
"# data for first gpu",
"fd",
"=",
"{",
"}",
"if",
"X_batch",
"is",
"not",
"None",
":",
"self",
".",
"next_vals",
"[",
"0",
"]",
"=",
"OrderedDict",
"(",
")",
"for",
"i",
",",
"vname",
"in",
"enumerate",
"(",
"self",
".",
"inputs",
"[",
"0",
"]",
")",
":",
"if",
"vname",
"in",
"X_batch",
":",
"self",
".",
"next_vals",
"[",
"0",
"]",
"[",
"vname",
"]",
"=",
"X_batch",
"[",
"vname",
"]",
"else",
":",
"self",
".",
"next_vals",
"[",
"0",
"]",
"[",
"vname",
"]",
"=",
"None",
"else",
":",
"self",
".",
"next_vals",
"[",
"0",
"]",
"=",
"None",
"# Set `feed_dict` for each GPU. If there is something to run for that",
"# GPU, collect outputs to be fetched.",
"fetches",
"=",
"[",
"]",
"self",
".",
"active_gpus",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"outputs",
")",
")",
":",
"if",
"self",
".",
"next_vals",
"[",
"i",
"]",
"is",
"None",
":",
"self",
".",
"active_gpus",
"+=",
"[",
"False",
"]",
"continue",
"self",
".",
"active_gpus",
"+=",
"[",
"True",
"]",
"for",
"k",
"in",
"inputs",
"[",
"i",
"]",
":",
"if",
"self",
".",
"next_vals",
"[",
"i",
"]",
"[",
"k",
"]",
"is",
"not",
"None",
":",
"fd",
"[",
"inputs",
"[",
"i",
"]",
"[",
"k",
"]",
"]",
"=",
"self",
".",
"next_vals",
"[",
"i",
"]",
"[",
"k",
"]",
"for",
"k",
",",
"v",
"in",
"outputs",
"[",
"i",
"]",
".",
"iteritems",
"(",
")",
":",
"fetches",
"+=",
"[",
"v",
"]",
"fd",
".",
"update",
"(",
"self",
".",
"feed_dict",
")",
"return",
"fetches",
",",
"fd"
] |
Preprocessing the inputs before calling session.run()
:param X_batch: A dictionary of inputs to the first sub-graph
:return: A tuple, `(fetches, fd)`, with `fetches` being a list of
Tensors to be fetches and `fd` the feed dictionary.
|
[
"Preprocessing",
"the",
"inputs",
"before",
"calling",
"session",
".",
"run",
"()"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/runner.py#L66-L106
|
train
|
tensorflow/cleverhans
|
examples/multigpu_advtrain/runner.py
|
RunnerMultiGPU.proc_fvals
|
def proc_fvals(self, fvals):
  """
  Postprocess the outputs of the Session.run(). Move the outputs of
  sub-graphs to next ones and return the output of the last sub-graph.
  :param fvals: A list of fetched values returned by Session.run()
  :return: A dictionary of fetched values returned by the last sub-graph.
  """
  inputs = self.inputs
  outputs = self.outputs
  # Forward every active sub-graph's fetched values so they become the
  # inputs of the following sub-graph on the next step.
  pos = 0
  for gpu_idx in range(len(inputs) - 1):
    if not self.active_gpus[gpu_idx]:
      self.next_vals[gpu_idx + 1] = None
      continue
    forwarded = OrderedDict()
    for name in outputs[gpu_idx]:
      forwarded[name] = fvals[pos]
      pos += 1
    self.next_vals[gpu_idx + 1] = forwarded
    # The first slot has been consumed; clear it for the next batch.
    if gpu_idx == 0:
      self.next_vals[0] = None
  # Collect and return the output of the last sub-graph.
  last_fvals = OrderedDict()
  if self.active_gpus[-1]:
    assert pos + len(outputs[-1]) == len(fvals)
    for name in outputs[-1]:
      last_fvals[name] = fvals[pos]
      pos += 1
  return last_fvals
|
python
|
def proc_fvals(self, fvals):
  """
  Postprocess the outputs of the Session.run(). Move the outputs of
  sub-graphs to next ones and return the output of the last sub-graph.
  :param fvals: A list of fetched values returned by Session.run()
  :return: A dictionary of fetched values returned by the last sub-graph.
  """
  inputs = self.inputs
  outputs = self.outputs
  # Forward every active sub-graph's fetched values so they become the
  # inputs of the following sub-graph on the next step.
  pos = 0
  for gpu_idx in range(len(inputs) - 1):
    if not self.active_gpus[gpu_idx]:
      self.next_vals[gpu_idx + 1] = None
      continue
    forwarded = OrderedDict()
    for name in outputs[gpu_idx]:
      forwarded[name] = fvals[pos]
      pos += 1
    self.next_vals[gpu_idx + 1] = forwarded
    # The first slot has been consumed; clear it for the next batch.
    if gpu_idx == 0:
      self.next_vals[0] = None
  # Collect and return the output of the last sub-graph.
  last_fvals = OrderedDict()
  if self.active_gpus[-1]:
    assert pos + len(outputs[-1]) == len(fvals)
    for name in outputs[-1]:
      last_fvals[name] = fvals[pos]
      pos += 1
  return last_fvals
|
[
"def",
"proc_fvals",
"(",
"self",
",",
"fvals",
")",
":",
"inputs",
"=",
"self",
".",
"inputs",
"outputs",
"=",
"self",
".",
"outputs",
"# Move data to the next sub-graph for the next step",
"cur",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"inputs",
")",
"-",
"1",
")",
":",
"if",
"not",
"self",
".",
"active_gpus",
"[",
"i",
"]",
":",
"self",
".",
"next_vals",
"[",
"i",
"+",
"1",
"]",
"=",
"None",
"continue",
"self",
".",
"next_vals",
"[",
"i",
"+",
"1",
"]",
"=",
"OrderedDict",
"(",
")",
"for",
"k",
"in",
"outputs",
"[",
"i",
"]",
":",
"self",
".",
"next_vals",
"[",
"i",
"+",
"1",
"]",
"[",
"k",
"]",
"=",
"fvals",
"[",
"cur",
"]",
"cur",
"+=",
"1",
"if",
"i",
"==",
"0",
":",
"self",
".",
"next_vals",
"[",
"0",
"]",
"=",
"None",
"# Return the output of the last sub-graph",
"last_fvals",
"=",
"OrderedDict",
"(",
")",
"if",
"self",
".",
"active_gpus",
"[",
"-",
"1",
"]",
":",
"assert",
"cur",
"+",
"len",
"(",
"outputs",
"[",
"-",
"1",
"]",
")",
"==",
"len",
"(",
"fvals",
")",
"for",
"k",
"in",
"outputs",
"[",
"-",
"1",
"]",
":",
"last_fvals",
"[",
"k",
"]",
"=",
"fvals",
"[",
"cur",
"]",
"cur",
"+=",
"1",
"return",
"last_fvals"
] |
Postprocess the outputs of the Session.run(). Move the outputs of
sub-graphs to next ones and return the output of the last sub-graph.
:param fvals: A list of fetched values returned by Session.run()
:return: A dictionary of fetched values returned by the last sub-graph.
|
[
"Postprocess",
"the",
"outputs",
"of",
"the",
"Session",
".",
"run",
"()",
".",
"Move",
"the",
"outputs",
"of",
"sub",
"-",
"graphs",
"to",
"next",
"ones",
"and",
"return",
"the",
"output",
"of",
"the",
"last",
"sub",
"-",
"graph",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/runner.py#L108-L139
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py
|
ImageBatchesBase._write_single_batch_images_internal
|
def _write_single_batch_images_internal(self, batch_id, client_batch):
  """Helper method to write images from single batch into datastore."""
  client = self._datastore_client
  # Image entities are stored as children of their batch entity.
  parent_key = client.key(self._entity_kind_batches, batch_id)
  for img_id, img in iteritems(self._data[batch_id]['images']):
    entity = client.entity(
        client.key(self._entity_kind_images, img_id, parent=parent_key))
    for prop_name, prop_value in iteritems(img):
      entity[prop_name] = prop_value
    client_batch.put(entity)
|
python
|
def _write_single_batch_images_internal(self, batch_id, client_batch):
  """Helper method to write images from single batch into datastore."""
  client = self._datastore_client
  # Image entities are stored as children of their batch entity.
  parent_key = client.key(self._entity_kind_batches, batch_id)
  for img_id, img in iteritems(self._data[batch_id]['images']):
    entity = client.entity(
        client.key(self._entity_kind_images, img_id, parent=parent_key))
    for prop_name, prop_value in iteritems(img):
      entity[prop_name] = prop_value
    client_batch.put(entity)
|
[
"def",
"_write_single_batch_images_internal",
"(",
"self",
",",
"batch_id",
",",
"client_batch",
")",
":",
"client",
"=",
"self",
".",
"_datastore_client",
"batch_key",
"=",
"client",
".",
"key",
"(",
"self",
".",
"_entity_kind_batches",
",",
"batch_id",
")",
"for",
"img_id",
",",
"img",
"in",
"iteritems",
"(",
"self",
".",
"_data",
"[",
"batch_id",
"]",
"[",
"'images'",
"]",
")",
":",
"img_entity",
"=",
"client",
".",
"entity",
"(",
"client",
".",
"key",
"(",
"self",
".",
"_entity_kind_images",
",",
"img_id",
",",
"parent",
"=",
"batch_key",
")",
")",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"img",
")",
":",
"img_entity",
"[",
"k",
"]",
"=",
"v",
"client_batch",
".",
"put",
"(",
"img_entity",
")"
] |
Helper method to write images from single batch into datastore.
|
[
"Helper",
"method",
"to",
"write",
"images",
"from",
"single",
"batch",
"into",
"datastore",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py#L72-L81
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py
|
ImageBatchesBase.write_to_datastore
|
def write_to_datastore(self):
  """Writes all image batches to the datastore."""
  client = self._datastore_client
  with client.no_transact_batch() as client_batch:
    for batch_id, batch_data in iteritems(self._data):
      # Persist the batch entity itself; the 'images' entry is stored as
      # separate child entities by the helper below.
      batch_entity = client.entity(
          client.key(self._entity_kind_batches, batch_id))
      for prop_name, prop_value in iteritems(batch_data):
        if prop_name != 'images':
          batch_entity[prop_name] = prop_value
      client_batch.put(batch_entity)
      self._write_single_batch_images_internal(batch_id, client_batch)
|
python
|
def write_to_datastore(self):
  """Writes all image batches to the datastore."""
  client = self._datastore_client
  with client.no_transact_batch() as client_batch:
    for batch_id, batch_data in iteritems(self._data):
      # Persist the batch entity itself; the 'images' entry is stored as
      # separate child entities by the helper below.
      batch_entity = client.entity(
          client.key(self._entity_kind_batches, batch_id))
      for prop_name, prop_value in iteritems(batch_data):
        if prop_name != 'images':
          batch_entity[prop_name] = prop_value
      client_batch.put(batch_entity)
      self._write_single_batch_images_internal(batch_id, client_batch)
|
[
"def",
"write_to_datastore",
"(",
"self",
")",
":",
"client",
"=",
"self",
".",
"_datastore_client",
"with",
"client",
".",
"no_transact_batch",
"(",
")",
"as",
"client_batch",
":",
"for",
"batch_id",
",",
"batch_data",
"in",
"iteritems",
"(",
"self",
".",
"_data",
")",
":",
"batch_key",
"=",
"client",
".",
"key",
"(",
"self",
".",
"_entity_kind_batches",
",",
"batch_id",
")",
"batch_entity",
"=",
"client",
".",
"entity",
"(",
"batch_key",
")",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"batch_data",
")",
":",
"if",
"k",
"!=",
"'images'",
":",
"batch_entity",
"[",
"k",
"]",
"=",
"v",
"client_batch",
".",
"put",
"(",
"batch_entity",
")",
"self",
".",
"_write_single_batch_images_internal",
"(",
"batch_id",
",",
"client_batch",
")"
] |
Writes all image batches to the datastore.
|
[
"Writes",
"all",
"image",
"batches",
"to",
"the",
"datastore",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py#L83-L94
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py
|
ImageBatchesBase.write_single_batch_images_to_datastore
|
def write_single_batch_images_to_datastore(self, batch_id):
  """Writes only images from one batch to the datastore."""
  datastore_client = self._datastore_client
  # Delegate the actual writes to the shared helper inside one
  # non-transactional batch.
  with datastore_client.no_transact_batch() as client_batch:
    self._write_single_batch_images_internal(batch_id, client_batch)
|
python
|
def write_single_batch_images_to_datastore(self, batch_id):
  """Writes only images from one batch to the datastore."""
  datastore_client = self._datastore_client
  # Delegate the actual writes to the shared helper inside one
  # non-transactional batch.
  with datastore_client.no_transact_batch() as client_batch:
    self._write_single_batch_images_internal(batch_id, client_batch)
|
[
"def",
"write_single_batch_images_to_datastore",
"(",
"self",
",",
"batch_id",
")",
":",
"client",
"=",
"self",
".",
"_datastore_client",
"with",
"client",
".",
"no_transact_batch",
"(",
")",
"as",
"client_batch",
":",
"self",
".",
"_write_single_batch_images_internal",
"(",
"batch_id",
",",
"client_batch",
")"
] |
Writes only images from one batch to the datastore.
|
[
"Writes",
"only",
"images",
"from",
"one",
"batch",
"to",
"the",
"datastore",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py#L96-L100
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py
|
ImageBatchesBase.init_from_datastore
|
def init_from_datastore(self):
  """Initializes batches by reading from the datastore."""
  self._data = {}
  # First pass: one entity per batch; the batch ID is the last element
  # of the entity's key path.
  batch_entities = self._datastore_client.query_fetch(
      kind=self._entity_kind_batches)
  for entity in batch_entities:
    batch_id = entity.key.flat_path[-1]
    batch_dict = dict(entity)
    batch_dict['images'] = {}
    self._data[batch_id] = batch_dict
  # Second pass: image entities are children of batch entities, so the
  # batch ID sits two path elements above the image ID.
  image_entities = self._datastore_client.query_fetch(
      kind=self._entity_kind_images)
  for entity in image_entities:
    batch_id = entity.key.flat_path[-3]
    image_id = entity.key.flat_path[-1]
    self._data[batch_id]['images'][image_id] = dict(entity)
|
python
|
def init_from_datastore(self):
  """Initializes batches by reading from the datastore."""
  self._data = {}
  # First pass: one entity per batch; the batch ID is the last element
  # of the entity's key path.
  batch_entities = self._datastore_client.query_fetch(
      kind=self._entity_kind_batches)
  for entity in batch_entities:
    batch_id = entity.key.flat_path[-1]
    batch_dict = dict(entity)
    batch_dict['images'] = {}
    self._data[batch_id] = batch_dict
  # Second pass: image entities are children of batch entities, so the
  # batch ID sits two path elements above the image ID.
  image_entities = self._datastore_client.query_fetch(
      kind=self._entity_kind_images)
  for entity in image_entities:
    batch_id = entity.key.flat_path[-3]
    image_id = entity.key.flat_path[-1]
    self._data[batch_id]['images'][image_id] = dict(entity)
|
[
"def",
"init_from_datastore",
"(",
"self",
")",
":",
"self",
".",
"_data",
"=",
"{",
"}",
"for",
"entity",
"in",
"self",
".",
"_datastore_client",
".",
"query_fetch",
"(",
"kind",
"=",
"self",
".",
"_entity_kind_batches",
")",
":",
"batch_id",
"=",
"entity",
".",
"key",
".",
"flat_path",
"[",
"-",
"1",
"]",
"self",
".",
"_data",
"[",
"batch_id",
"]",
"=",
"dict",
"(",
"entity",
")",
"self",
".",
"_data",
"[",
"batch_id",
"]",
"[",
"'images'",
"]",
"=",
"{",
"}",
"for",
"entity",
"in",
"self",
".",
"_datastore_client",
".",
"query_fetch",
"(",
"kind",
"=",
"self",
".",
"_entity_kind_images",
")",
":",
"batch_id",
"=",
"entity",
".",
"key",
".",
"flat_path",
"[",
"-",
"3",
"]",
"image_id",
"=",
"entity",
".",
"key",
".",
"flat_path",
"[",
"-",
"1",
"]",
"self",
".",
"_data",
"[",
"batch_id",
"]",
"[",
"'images'",
"]",
"[",
"image_id",
"]",
"=",
"dict",
"(",
"entity",
")"
] |
Initializes batches by reading from the datastore.
|
[
"Initializes",
"batches",
"by",
"reading",
"from",
"the",
"datastore",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py#L102-L114
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py
|
ImageBatchesBase.add_batch
|
def add_batch(self, batch_id, batch_properties=None):
  """Adds a batch with the given ID and dictionary of properties."""
  if batch_properties is None:
    batch_properties = {}
  if not isinstance(batch_properties, dict):
    raise ValueError('batch_properties has to be dict, however it was: '
                     + str(type(batch_properties)))
  # Copy the caller's dict so later mutations don't leak in, then attach
  # an empty image map for this batch.
  new_batch = batch_properties.copy()
  new_batch['images'] = {}
  self._data[batch_id] = new_batch
|
python
|
def add_batch(self, batch_id, batch_properties=None):
"""Adds batch with give ID and list of properties."""
if batch_properties is None:
batch_properties = {}
if not isinstance(batch_properties, dict):
raise ValueError('batch_properties has to be dict, however it was: '
+ str(type(batch_properties)))
self._data[batch_id] = batch_properties.copy()
self._data[batch_id]['images'] = {}
|
[
"def",
"add_batch",
"(",
"self",
",",
"batch_id",
",",
"batch_properties",
"=",
"None",
")",
":",
"if",
"batch_properties",
"is",
"None",
":",
"batch_properties",
"=",
"{",
"}",
"if",
"not",
"isinstance",
"(",
"batch_properties",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"'batch_properties has to be dict, however it was: '",
"+",
"str",
"(",
"type",
"(",
"batch_properties",
")",
")",
")",
"self",
".",
"_data",
"[",
"batch_id",
"]",
"=",
"batch_properties",
".",
"copy",
"(",
")",
"self",
".",
"_data",
"[",
"batch_id",
"]",
"[",
"'images'",
"]",
"=",
"{",
"}"
] |
Adds batch with give ID and list of properties.
|
[
"Adds",
"batch",
"with",
"give",
"ID",
"and",
"list",
"of",
"properties",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py#L125-L133
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py
|
ImageBatchesBase.add_image
|
def add_image(self, batch_id, image_id, image_properties=None):
"""Adds image to given batch."""
if batch_id not in self._data:
raise KeyError('Batch with ID "{0}" does not exist'.format(batch_id))
if image_properties is None:
image_properties = {}
if not isinstance(image_properties, dict):
raise ValueError('image_properties has to be dict, however it was: '
+ str(type(image_properties)))
self._data[batch_id]['images'][image_id] = image_properties.copy()
|
python
|
def add_image(self, batch_id, image_id, image_properties=None):
"""Adds image to given batch."""
if batch_id not in self._data:
raise KeyError('Batch with ID "{0}" does not exist'.format(batch_id))
if image_properties is None:
image_properties = {}
if not isinstance(image_properties, dict):
raise ValueError('image_properties has to be dict, however it was: '
+ str(type(image_properties)))
self._data[batch_id]['images'][image_id] = image_properties.copy()
|
[
"def",
"add_image",
"(",
"self",
",",
"batch_id",
",",
"image_id",
",",
"image_properties",
"=",
"None",
")",
":",
"if",
"batch_id",
"not",
"in",
"self",
".",
"_data",
":",
"raise",
"KeyError",
"(",
"'Batch with ID \"{0}\" does not exist'",
".",
"format",
"(",
"batch_id",
")",
")",
"if",
"image_properties",
"is",
"None",
":",
"image_properties",
"=",
"{",
"}",
"if",
"not",
"isinstance",
"(",
"image_properties",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"'image_properties has to be dict, however it was: '",
"+",
"str",
"(",
"type",
"(",
"image_properties",
")",
")",
")",
"self",
".",
"_data",
"[",
"batch_id",
"]",
"[",
"'images'",
"]",
"[",
"image_id",
"]",
"=",
"image_properties",
".",
"copy",
"(",
")"
] |
Adds image to given batch.
|
[
"Adds",
"image",
"to",
"given",
"batch",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py#L135-L144
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py
|
DatasetBatches._read_image_list
|
def _read_image_list(self, skip_image_ids=None):
"""Reads list of dataset images from the datastore."""
if skip_image_ids is None:
skip_image_ids = []
images = self._storage_client.list_blobs(
prefix=os.path.join('dataset', self._dataset_name) + '/')
zip_files = [i for i in images if i.endswith('.zip')]
if len(zip_files) == 1:
# we have a zip archive with images
zip_name = zip_files[0]
logging.info('Reading list of images from zip file %s', zip_name)
blob = self._storage_client.get_blob(zip_name)
buf = BytesIO()
logging.info('Downloading zip')
blob.download_to_file(buf)
buf.seek(0)
logging.info('Reading content of the zip')
with zipfile.ZipFile(buf) as f:
images = [os.path.join(zip_name, os.path.basename(n))
for n in f.namelist() if n.endswith('.png')]
buf.close()
logging.info('Found %d images', len(images))
else:
# we have just a directory with images, filter non-PNG files
logging.info('Reading list of images from png files in storage')
images = [i for i in images if i.endswith('.png')]
logging.info('Found %d images', len(images))
# filter images which should be skipped
images = [i for i in images
if os.path.basename(i)[:-4] not in skip_image_ids]
# assign IDs to images
images = [(DATASET_IMAGE_ID_PATTERN.format(idx), i)
for idx, i in enumerate(sorted(images))]
return images
|
python
|
def _read_image_list(self, skip_image_ids=None):
"""Reads list of dataset images from the datastore."""
if skip_image_ids is None:
skip_image_ids = []
images = self._storage_client.list_blobs(
prefix=os.path.join('dataset', self._dataset_name) + '/')
zip_files = [i for i in images if i.endswith('.zip')]
if len(zip_files) == 1:
# we have a zip archive with images
zip_name = zip_files[0]
logging.info('Reading list of images from zip file %s', zip_name)
blob = self._storage_client.get_blob(zip_name)
buf = BytesIO()
logging.info('Downloading zip')
blob.download_to_file(buf)
buf.seek(0)
logging.info('Reading content of the zip')
with zipfile.ZipFile(buf) as f:
images = [os.path.join(zip_name, os.path.basename(n))
for n in f.namelist() if n.endswith('.png')]
buf.close()
logging.info('Found %d images', len(images))
else:
# we have just a directory with images, filter non-PNG files
logging.info('Reading list of images from png files in storage')
images = [i for i in images if i.endswith('.png')]
logging.info('Found %d images', len(images))
# filter images which should be skipped
images = [i for i in images
if os.path.basename(i)[:-4] not in skip_image_ids]
# assign IDs to images
images = [(DATASET_IMAGE_ID_PATTERN.format(idx), i)
for idx, i in enumerate(sorted(images))]
return images
|
[
"def",
"_read_image_list",
"(",
"self",
",",
"skip_image_ids",
"=",
"None",
")",
":",
"if",
"skip_image_ids",
"is",
"None",
":",
"skip_image_ids",
"=",
"[",
"]",
"images",
"=",
"self",
".",
"_storage_client",
".",
"list_blobs",
"(",
"prefix",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'dataset'",
",",
"self",
".",
"_dataset_name",
")",
"+",
"'/'",
")",
"zip_files",
"=",
"[",
"i",
"for",
"i",
"in",
"images",
"if",
"i",
".",
"endswith",
"(",
"'.zip'",
")",
"]",
"if",
"len",
"(",
"zip_files",
")",
"==",
"1",
":",
"# we have a zip archive with images",
"zip_name",
"=",
"zip_files",
"[",
"0",
"]",
"logging",
".",
"info",
"(",
"'Reading list of images from zip file %s'",
",",
"zip_name",
")",
"blob",
"=",
"self",
".",
"_storage_client",
".",
"get_blob",
"(",
"zip_name",
")",
"buf",
"=",
"BytesIO",
"(",
")",
"logging",
".",
"info",
"(",
"'Downloading zip'",
")",
"blob",
".",
"download_to_file",
"(",
"buf",
")",
"buf",
".",
"seek",
"(",
"0",
")",
"logging",
".",
"info",
"(",
"'Reading content of the zip'",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"buf",
")",
"as",
"f",
":",
"images",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"zip_name",
",",
"os",
".",
"path",
".",
"basename",
"(",
"n",
")",
")",
"for",
"n",
"in",
"f",
".",
"namelist",
"(",
")",
"if",
"n",
".",
"endswith",
"(",
"'.png'",
")",
"]",
"buf",
".",
"close",
"(",
")",
"logging",
".",
"info",
"(",
"'Found %d images'",
",",
"len",
"(",
"images",
")",
")",
"else",
":",
"# we have just a directory with images, filter non-PNG files",
"logging",
".",
"info",
"(",
"'Reading list of images from png files in storage'",
")",
"images",
"=",
"[",
"i",
"for",
"i",
"in",
"images",
"if",
"i",
".",
"endswith",
"(",
"'.png'",
")",
"]",
"logging",
".",
"info",
"(",
"'Found %d images'",
",",
"len",
"(",
"images",
")",
")",
"# filter images which should be skipped",
"images",
"=",
"[",
"i",
"for",
"i",
"in",
"images",
"if",
"os",
".",
"path",
".",
"basename",
"(",
"i",
")",
"[",
":",
"-",
"4",
"]",
"not",
"in",
"skip_image_ids",
"]",
"# assign IDs to images",
"images",
"=",
"[",
"(",
"DATASET_IMAGE_ID_PATTERN",
".",
"format",
"(",
"idx",
")",
",",
"i",
")",
"for",
"idx",
",",
"i",
"in",
"enumerate",
"(",
"sorted",
"(",
"images",
")",
")",
"]",
"return",
"images"
] |
Reads list of dataset images from the datastore.
|
[
"Reads",
"list",
"of",
"dataset",
"images",
"from",
"the",
"datastore",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py#L189-L222
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py
|
DatasetBatches.init_from_storage_write_to_datastore
|
def init_from_storage_write_to_datastore(self,
batch_size=100,
allowed_epsilon=None,
skip_image_ids=None,
max_num_images=None):
"""Initializes dataset batches from the list of images in the datastore.
Args:
batch_size: batch size
allowed_epsilon: list of allowed epsilon or None to use default
skip_image_ids: list of image ids to skip
max_num_images: maximum number of images to read
"""
if allowed_epsilon is None:
allowed_epsilon = copy.copy(DEFAULT_EPSILON)
# init dataset batches from data in storage
self._dataset_batches = {}
# read all blob names from storage
images = self._read_image_list(skip_image_ids)
if max_num_images:
images = images[:max_num_images]
for batch_idx, batch_start in enumerate(range(0, len(images), batch_size)):
batch = images[batch_start:batch_start+batch_size]
batch_id = DATASET_BATCH_ID_PATTERN.format(batch_idx)
batch_epsilon = allowed_epsilon[batch_idx % len(allowed_epsilon)]
self.add_batch(batch_id, {'epsilon': batch_epsilon})
for image_id, image_path in batch:
self.add_image(batch_id, image_id,
{'dataset_image_id': os.path.basename(image_path)[:-4],
'image_path': image_path})
# write data to datastore
self.write_to_datastore()
|
python
|
def init_from_storage_write_to_datastore(self,
batch_size=100,
allowed_epsilon=None,
skip_image_ids=None,
max_num_images=None):
"""Initializes dataset batches from the list of images in the datastore.
Args:
batch_size: batch size
allowed_epsilon: list of allowed epsilon or None to use default
skip_image_ids: list of image ids to skip
max_num_images: maximum number of images to read
"""
if allowed_epsilon is None:
allowed_epsilon = copy.copy(DEFAULT_EPSILON)
# init dataset batches from data in storage
self._dataset_batches = {}
# read all blob names from storage
images = self._read_image_list(skip_image_ids)
if max_num_images:
images = images[:max_num_images]
for batch_idx, batch_start in enumerate(range(0, len(images), batch_size)):
batch = images[batch_start:batch_start+batch_size]
batch_id = DATASET_BATCH_ID_PATTERN.format(batch_idx)
batch_epsilon = allowed_epsilon[batch_idx % len(allowed_epsilon)]
self.add_batch(batch_id, {'epsilon': batch_epsilon})
for image_id, image_path in batch:
self.add_image(batch_id, image_id,
{'dataset_image_id': os.path.basename(image_path)[:-4],
'image_path': image_path})
# write data to datastore
self.write_to_datastore()
|
[
"def",
"init_from_storage_write_to_datastore",
"(",
"self",
",",
"batch_size",
"=",
"100",
",",
"allowed_epsilon",
"=",
"None",
",",
"skip_image_ids",
"=",
"None",
",",
"max_num_images",
"=",
"None",
")",
":",
"if",
"allowed_epsilon",
"is",
"None",
":",
"allowed_epsilon",
"=",
"copy",
".",
"copy",
"(",
"DEFAULT_EPSILON",
")",
"# init dataset batches from data in storage",
"self",
".",
"_dataset_batches",
"=",
"{",
"}",
"# read all blob names from storage",
"images",
"=",
"self",
".",
"_read_image_list",
"(",
"skip_image_ids",
")",
"if",
"max_num_images",
":",
"images",
"=",
"images",
"[",
":",
"max_num_images",
"]",
"for",
"batch_idx",
",",
"batch_start",
"in",
"enumerate",
"(",
"range",
"(",
"0",
",",
"len",
"(",
"images",
")",
",",
"batch_size",
")",
")",
":",
"batch",
"=",
"images",
"[",
"batch_start",
":",
"batch_start",
"+",
"batch_size",
"]",
"batch_id",
"=",
"DATASET_BATCH_ID_PATTERN",
".",
"format",
"(",
"batch_idx",
")",
"batch_epsilon",
"=",
"allowed_epsilon",
"[",
"batch_idx",
"%",
"len",
"(",
"allowed_epsilon",
")",
"]",
"self",
".",
"add_batch",
"(",
"batch_id",
",",
"{",
"'epsilon'",
":",
"batch_epsilon",
"}",
")",
"for",
"image_id",
",",
"image_path",
"in",
"batch",
":",
"self",
".",
"add_image",
"(",
"batch_id",
",",
"image_id",
",",
"{",
"'dataset_image_id'",
":",
"os",
".",
"path",
".",
"basename",
"(",
"image_path",
")",
"[",
":",
"-",
"4",
"]",
",",
"'image_path'",
":",
"image_path",
"}",
")",
"# write data to datastore",
"self",
".",
"write_to_datastore",
"(",
")"
] |
Initializes dataset batches from the list of images in the datastore.
Args:
batch_size: batch size
allowed_epsilon: list of allowed epsilon or None to use default
skip_image_ids: list of image ids to skip
max_num_images: maximum number of images to read
|
[
"Initializes",
"dataset",
"batches",
"from",
"the",
"list",
"of",
"images",
"in",
"the",
"datastore",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py#L224-L255
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py
|
AversarialBatches.init_from_dataset_and_submissions_write_to_datastore
|
def init_from_dataset_and_submissions_write_to_datastore(
self, dataset_batches, attack_submission_ids):
"""Init list of adversarial batches from dataset batches and submissions.
Args:
dataset_batches: instances of DatasetBatches
attack_submission_ids: iterable with IDs of all (targeted and nontargeted)
attack submissions, could be obtains as
CompetitionSubmissions.get_all_attack_ids()
"""
batches_x_attacks = itertools.product(dataset_batches.data.keys(),
attack_submission_ids)
for idx, (dataset_batch_id, attack_id) in enumerate(batches_x_attacks):
adv_batch_id = ADVERSARIAL_BATCH_ID_PATTERN.format(idx)
self.add_batch(adv_batch_id,
{'dataset_batch_id': dataset_batch_id,
'submission_id': attack_id})
self.write_to_datastore()
|
python
|
def init_from_dataset_and_submissions_write_to_datastore(
self, dataset_batches, attack_submission_ids):
"""Init list of adversarial batches from dataset batches and submissions.
Args:
dataset_batches: instances of DatasetBatches
attack_submission_ids: iterable with IDs of all (targeted and nontargeted)
attack submissions, could be obtains as
CompetitionSubmissions.get_all_attack_ids()
"""
batches_x_attacks = itertools.product(dataset_batches.data.keys(),
attack_submission_ids)
for idx, (dataset_batch_id, attack_id) in enumerate(batches_x_attacks):
adv_batch_id = ADVERSARIAL_BATCH_ID_PATTERN.format(idx)
self.add_batch(adv_batch_id,
{'dataset_batch_id': dataset_batch_id,
'submission_id': attack_id})
self.write_to_datastore()
|
[
"def",
"init_from_dataset_and_submissions_write_to_datastore",
"(",
"self",
",",
"dataset_batches",
",",
"attack_submission_ids",
")",
":",
"batches_x_attacks",
"=",
"itertools",
".",
"product",
"(",
"dataset_batches",
".",
"data",
".",
"keys",
"(",
")",
",",
"attack_submission_ids",
")",
"for",
"idx",
",",
"(",
"dataset_batch_id",
",",
"attack_id",
")",
"in",
"enumerate",
"(",
"batches_x_attacks",
")",
":",
"adv_batch_id",
"=",
"ADVERSARIAL_BATCH_ID_PATTERN",
".",
"format",
"(",
"idx",
")",
"self",
".",
"add_batch",
"(",
"adv_batch_id",
",",
"{",
"'dataset_batch_id'",
":",
"dataset_batch_id",
",",
"'submission_id'",
":",
"attack_id",
"}",
")",
"self",
".",
"write_to_datastore",
"(",
")"
] |
Init list of adversarial batches from dataset batches and submissions.
Args:
dataset_batches: instances of DatasetBatches
attack_submission_ids: iterable with IDs of all (targeted and nontargeted)
attack submissions, could be obtains as
CompetitionSubmissions.get_all_attack_ids()
|
[
"Init",
"list",
"of",
"adversarial",
"batches",
"from",
"dataset",
"batches",
"and",
"submissions",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py#L272-L289
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py
|
AversarialBatches.count_generated_adv_examples
|
def count_generated_adv_examples(self):
"""Returns total number of all generated adversarial examples."""
result = {}
for v in itervalues(self.data):
s_id = v['submission_id']
result[s_id] = result.get(s_id, 0) + len(v['images'])
return result
|
python
|
def count_generated_adv_examples(self):
"""Returns total number of all generated adversarial examples."""
result = {}
for v in itervalues(self.data):
s_id = v['submission_id']
result[s_id] = result.get(s_id, 0) + len(v['images'])
return result
|
[
"def",
"count_generated_adv_examples",
"(",
"self",
")",
":",
"result",
"=",
"{",
"}",
"for",
"v",
"in",
"itervalues",
"(",
"self",
".",
"data",
")",
":",
"s_id",
"=",
"v",
"[",
"'submission_id'",
"]",
"result",
"[",
"s_id",
"]",
"=",
"result",
".",
"get",
"(",
"s_id",
",",
"0",
")",
"+",
"len",
"(",
"v",
"[",
"'images'",
"]",
")",
"return",
"result"
] |
Returns total number of all generated adversarial examples.
|
[
"Returns",
"total",
"number",
"of",
"all",
"generated",
"adversarial",
"examples",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py#L291-L297
|
train
|
tensorflow/cleverhans
|
cleverhans/confidence_report.py
|
make_confidence_report_bundled
|
def make_confidence_report_bundled(filepath, train_start=TRAIN_START,
train_end=TRAIN_END, test_start=TEST_START,
test_end=TEST_END, which_set=WHICH_SET,
recipe=RECIPE, report_path=REPORT_PATH,
nb_iter=NB_ITER, base_eps=None,
base_eps_iter=None, base_eps_iter_small=None,
batch_size=BATCH_SIZE):
"""
Load a saved model, gather its predictions, and save a confidence report.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param which_set: 'train' or 'test'
:param nb_iter: int, number of iterations of attack algorithm
(note that different recipes will use this differently,
for example many will run two attacks, one with nb_iter
iterations and one with 25X more)
:param base_eps: float, epsilon parameter for threat model, on a scale of [0, 1].
Inferred from the dataset if not specified.
:param base_eps_iter: float, a step size used in different ways by different recipes.
Typically the step size for a PGD attack.
Inferred from the dataset if not specified.
:param base_eps_iter_small: float, a second step size for a more fine-grained attack.
Inferred from the dataset if not specified.
:param batch_size: int, batch size
"""
# Avoid circular import
from cleverhans import attack_bundling
if callable(recipe):
run_recipe = recipe
else:
run_recipe = getattr(attack_bundling, recipe)
# Set logging level to see debug information
set_log_level(logging.INFO)
# Create TF session
sess = tf.Session()
assert filepath.endswith('.joblib')
if report_path is None:
report_path = filepath[:-len('.joblib')] + "_bundled_report.joblib"
with sess.as_default():
model = load(filepath)
assert len(model.get_params()) > 0
factory = model.dataset_factory
factory.kwargs['train_start'] = train_start
factory.kwargs['train_end'] = train_end
factory.kwargs['test_start'] = test_start
factory.kwargs['test_end'] = test_end
dataset = factory()
center = dataset.kwargs['center']
if 'max_val' in factory.kwargs:
max_value = factory.kwargs['max_val']
elif hasattr(dataset, 'max_val'):
max_value = dataset.max_val
else:
raise AttributeError("Can't find max_value specification")
min_value = 0. - center * max_value
value_range = max_value - min_value
if 'CIFAR' in str(factory.cls):
if base_eps is None:
base_eps = 8. / 255.
if base_eps_iter is None:
base_eps_iter = 2. / 255.
if base_eps_iter_small is None:
base_eps_iter_small = 1. / 255.
elif 'MNIST' in str(factory.cls):
if base_eps is None:
base_eps = .3
if base_eps_iter is None:
base_eps_iter = .1
base_eps_iter_small = None
else:
# Note that it is not required to specify base_eps_iter_small
if base_eps is None or base_eps_iter is None:
raise NotImplementedError("Not able to infer threat model from " + str(factory.cls))
eps = base_eps * value_range
eps_iter = base_eps_iter * value_range
if base_eps_iter_small is None:
eps_iter_small = None
else:
eps_iter_small = base_eps_iter_small * value_range
clip_min = min_value
clip_max = max_value
x_data, y_data = dataset.get_set(which_set)
assert x_data.max() <= max_value
assert x_data.min() >= min_value
assert eps_iter <= eps
assert eps_iter_small is None or eps_iter_small <= eps
# Different recipes take different arguments.
# For now I don't have an idea for a beautiful unifying framework, so
# we get an if statement.
if recipe == 'random_search_max_confidence_recipe':
# pylint always checks against the default recipe here
# pylint: disable=no-value-for-parameter
run_recipe(sess=sess, model=model, x=x_data, y=y_data, eps=eps,
clip_min=clip_min, clip_max=clip_max, report_path=report_path)
else:
run_recipe(sess=sess, model=model, x=x_data, y=y_data,
nb_classes=dataset.NB_CLASSES, eps=eps, clip_min=clip_min,
clip_max=clip_max, eps_iter=eps_iter, nb_iter=nb_iter,
report_path=report_path, eps_iter_small=eps_iter_small, batch_size=batch_size)
|
python
|
def make_confidence_report_bundled(filepath, train_start=TRAIN_START,
train_end=TRAIN_END, test_start=TEST_START,
test_end=TEST_END, which_set=WHICH_SET,
recipe=RECIPE, report_path=REPORT_PATH,
nb_iter=NB_ITER, base_eps=None,
base_eps_iter=None, base_eps_iter_small=None,
batch_size=BATCH_SIZE):
"""
Load a saved model, gather its predictions, and save a confidence report.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param which_set: 'train' or 'test'
:param nb_iter: int, number of iterations of attack algorithm
(note that different recipes will use this differently,
for example many will run two attacks, one with nb_iter
iterations and one with 25X more)
:param base_eps: float, epsilon parameter for threat model, on a scale of [0, 1].
Inferred from the dataset if not specified.
:param base_eps_iter: float, a step size used in different ways by different recipes.
Typically the step size for a PGD attack.
Inferred from the dataset if not specified.
:param base_eps_iter_small: float, a second step size for a more fine-grained attack.
Inferred from the dataset if not specified.
:param batch_size: int, batch size
"""
# Avoid circular import
from cleverhans import attack_bundling
if callable(recipe):
run_recipe = recipe
else:
run_recipe = getattr(attack_bundling, recipe)
# Set logging level to see debug information
set_log_level(logging.INFO)
# Create TF session
sess = tf.Session()
assert filepath.endswith('.joblib')
if report_path is None:
report_path = filepath[:-len('.joblib')] + "_bundled_report.joblib"
with sess.as_default():
model = load(filepath)
assert len(model.get_params()) > 0
factory = model.dataset_factory
factory.kwargs['train_start'] = train_start
factory.kwargs['train_end'] = train_end
factory.kwargs['test_start'] = test_start
factory.kwargs['test_end'] = test_end
dataset = factory()
center = dataset.kwargs['center']
if 'max_val' in factory.kwargs:
max_value = factory.kwargs['max_val']
elif hasattr(dataset, 'max_val'):
max_value = dataset.max_val
else:
raise AttributeError("Can't find max_value specification")
min_value = 0. - center * max_value
value_range = max_value - min_value
if 'CIFAR' in str(factory.cls):
if base_eps is None:
base_eps = 8. / 255.
if base_eps_iter is None:
base_eps_iter = 2. / 255.
if base_eps_iter_small is None:
base_eps_iter_small = 1. / 255.
elif 'MNIST' in str(factory.cls):
if base_eps is None:
base_eps = .3
if base_eps_iter is None:
base_eps_iter = .1
base_eps_iter_small = None
else:
# Note that it is not required to specify base_eps_iter_small
if base_eps is None or base_eps_iter is None:
raise NotImplementedError("Not able to infer threat model from " + str(factory.cls))
eps = base_eps * value_range
eps_iter = base_eps_iter * value_range
if base_eps_iter_small is None:
eps_iter_small = None
else:
eps_iter_small = base_eps_iter_small * value_range
clip_min = min_value
clip_max = max_value
x_data, y_data = dataset.get_set(which_set)
assert x_data.max() <= max_value
assert x_data.min() >= min_value
assert eps_iter <= eps
assert eps_iter_small is None or eps_iter_small <= eps
# Different recipes take different arguments.
# For now I don't have an idea for a beautiful unifying framework, so
# we get an if statement.
if recipe == 'random_search_max_confidence_recipe':
# pylint always checks against the default recipe here
# pylint: disable=no-value-for-parameter
run_recipe(sess=sess, model=model, x=x_data, y=y_data, eps=eps,
clip_min=clip_min, clip_max=clip_max, report_path=report_path)
else:
run_recipe(sess=sess, model=model, x=x_data, y=y_data,
nb_classes=dataset.NB_CLASSES, eps=eps, clip_min=clip_min,
clip_max=clip_max, eps_iter=eps_iter, nb_iter=nb_iter,
report_path=report_path, eps_iter_small=eps_iter_small, batch_size=batch_size)
|
[
"def",
"make_confidence_report_bundled",
"(",
"filepath",
",",
"train_start",
"=",
"TRAIN_START",
",",
"train_end",
"=",
"TRAIN_END",
",",
"test_start",
"=",
"TEST_START",
",",
"test_end",
"=",
"TEST_END",
",",
"which_set",
"=",
"WHICH_SET",
",",
"recipe",
"=",
"RECIPE",
",",
"report_path",
"=",
"REPORT_PATH",
",",
"nb_iter",
"=",
"NB_ITER",
",",
"base_eps",
"=",
"None",
",",
"base_eps_iter",
"=",
"None",
",",
"base_eps_iter_small",
"=",
"None",
",",
"batch_size",
"=",
"BATCH_SIZE",
")",
":",
"# Avoid circular import",
"from",
"cleverhans",
"import",
"attack_bundling",
"if",
"callable",
"(",
"recipe",
")",
":",
"run_recipe",
"=",
"recipe",
"else",
":",
"run_recipe",
"=",
"getattr",
"(",
"attack_bundling",
",",
"recipe",
")",
"# Set logging level to see debug information",
"set_log_level",
"(",
"logging",
".",
"INFO",
")",
"# Create TF session",
"sess",
"=",
"tf",
".",
"Session",
"(",
")",
"assert",
"filepath",
".",
"endswith",
"(",
"'.joblib'",
")",
"if",
"report_path",
"is",
"None",
":",
"report_path",
"=",
"filepath",
"[",
":",
"-",
"len",
"(",
"'.joblib'",
")",
"]",
"+",
"\"_bundled_report.joblib\"",
"with",
"sess",
".",
"as_default",
"(",
")",
":",
"model",
"=",
"load",
"(",
"filepath",
")",
"assert",
"len",
"(",
"model",
".",
"get_params",
"(",
")",
")",
">",
"0",
"factory",
"=",
"model",
".",
"dataset_factory",
"factory",
".",
"kwargs",
"[",
"'train_start'",
"]",
"=",
"train_start",
"factory",
".",
"kwargs",
"[",
"'train_end'",
"]",
"=",
"train_end",
"factory",
".",
"kwargs",
"[",
"'test_start'",
"]",
"=",
"test_start",
"factory",
".",
"kwargs",
"[",
"'test_end'",
"]",
"=",
"test_end",
"dataset",
"=",
"factory",
"(",
")",
"center",
"=",
"dataset",
".",
"kwargs",
"[",
"'center'",
"]",
"if",
"'max_val'",
"in",
"factory",
".",
"kwargs",
":",
"max_value",
"=",
"factory",
".",
"kwargs",
"[",
"'max_val'",
"]",
"elif",
"hasattr",
"(",
"dataset",
",",
"'max_val'",
")",
":",
"max_value",
"=",
"dataset",
".",
"max_val",
"else",
":",
"raise",
"AttributeError",
"(",
"\"Can't find max_value specification\"",
")",
"min_value",
"=",
"0.",
"-",
"center",
"*",
"max_value",
"value_range",
"=",
"max_value",
"-",
"min_value",
"if",
"'CIFAR'",
"in",
"str",
"(",
"factory",
".",
"cls",
")",
":",
"if",
"base_eps",
"is",
"None",
":",
"base_eps",
"=",
"8.",
"/",
"255.",
"if",
"base_eps_iter",
"is",
"None",
":",
"base_eps_iter",
"=",
"2.",
"/",
"255.",
"if",
"base_eps_iter_small",
"is",
"None",
":",
"base_eps_iter_small",
"=",
"1.",
"/",
"255.",
"elif",
"'MNIST'",
"in",
"str",
"(",
"factory",
".",
"cls",
")",
":",
"if",
"base_eps",
"is",
"None",
":",
"base_eps",
"=",
".3",
"if",
"base_eps_iter",
"is",
"None",
":",
"base_eps_iter",
"=",
".1",
"base_eps_iter_small",
"=",
"None",
"else",
":",
"# Note that it is not required to specify base_eps_iter_small",
"if",
"base_eps",
"is",
"None",
"or",
"base_eps_iter",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"Not able to infer threat model from \"",
"+",
"str",
"(",
"factory",
".",
"cls",
")",
")",
"eps",
"=",
"base_eps",
"*",
"value_range",
"eps_iter",
"=",
"base_eps_iter",
"*",
"value_range",
"if",
"base_eps_iter_small",
"is",
"None",
":",
"eps_iter_small",
"=",
"None",
"else",
":",
"eps_iter_small",
"=",
"base_eps_iter_small",
"*",
"value_range",
"clip_min",
"=",
"min_value",
"clip_max",
"=",
"max_value",
"x_data",
",",
"y_data",
"=",
"dataset",
".",
"get_set",
"(",
"which_set",
")",
"assert",
"x_data",
".",
"max",
"(",
")",
"<=",
"max_value",
"assert",
"x_data",
".",
"min",
"(",
")",
">=",
"min_value",
"assert",
"eps_iter",
"<=",
"eps",
"assert",
"eps_iter_small",
"is",
"None",
"or",
"eps_iter_small",
"<=",
"eps",
"# Different recipes take different arguments.",
"# For now I don't have an idea for a beautiful unifying framework, so",
"# we get an if statement.",
"if",
"recipe",
"==",
"'random_search_max_confidence_recipe'",
":",
"# pylint always checks against the default recipe here",
"# pylint: disable=no-value-for-parameter",
"run_recipe",
"(",
"sess",
"=",
"sess",
",",
"model",
"=",
"model",
",",
"x",
"=",
"x_data",
",",
"y",
"=",
"y_data",
",",
"eps",
"=",
"eps",
",",
"clip_min",
"=",
"clip_min",
",",
"clip_max",
"=",
"clip_max",
",",
"report_path",
"=",
"report_path",
")",
"else",
":",
"run_recipe",
"(",
"sess",
"=",
"sess",
",",
"model",
"=",
"model",
",",
"x",
"=",
"x_data",
",",
"y",
"=",
"y_data",
",",
"nb_classes",
"=",
"dataset",
".",
"NB_CLASSES",
",",
"eps",
"=",
"eps",
",",
"clip_min",
"=",
"clip_min",
",",
"clip_max",
"=",
"clip_max",
",",
"eps_iter",
"=",
"eps_iter",
",",
"nb_iter",
"=",
"nb_iter",
",",
"report_path",
"=",
"report_path",
",",
"eps_iter_small",
"=",
"eps_iter_small",
",",
"batch_size",
"=",
"batch_size",
")"
] |
Load a saved model, gather its predictions, and save a confidence report.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param which_set: 'train' or 'test'
:param nb_iter: int, number of iterations of attack algorithm
(note that different recipes will use this differently,
for example many will run two attacks, one with nb_iter
iterations and one with 25X more)
:param base_eps: float, epsilon parameter for threat model, on a scale of [0, 1].
Inferred from the dataset if not specified.
:param base_eps_iter: float, a step size used in different ways by different recipes.
Typically the step size for a PGD attack.
Inferred from the dataset if not specified.
:param base_eps_iter_small: float, a second step size for a more fine-grained attack.
Inferred from the dataset if not specified.
:param batch_size: int, batch size
|
[
"Load",
"a",
"saved",
"model",
"gather",
"its",
"predictions",
"and",
"save",
"a",
"confidence",
"report",
".",
":",
"param",
"filepath",
":",
"path",
"to",
"model",
"to",
"evaluate",
":",
"param",
"train_start",
":",
"index",
"of",
"first",
"training",
"set",
"example",
"to",
"use",
":",
"param",
"train_end",
":",
"index",
"of",
"last",
"training",
"set",
"example",
"to",
"use",
":",
"param",
"test_start",
":",
"index",
"of",
"first",
"test",
"set",
"example",
"to",
"use",
":",
"param",
"test_end",
":",
"index",
"of",
"last",
"test",
"set",
"example",
"to",
"use",
":",
"param",
"which_set",
":",
"train",
"or",
"test",
":",
"param",
"nb_iter",
":",
"int",
"number",
"of",
"iterations",
"of",
"attack",
"algorithm",
"(",
"note",
"that",
"different",
"recipes",
"will",
"use",
"this",
"differently",
"for",
"example",
"many",
"will",
"run",
"two",
"attacks",
"one",
"with",
"nb_iter",
"iterations",
"and",
"one",
"with",
"25X",
"more",
")",
":",
"param",
"base_eps",
":",
"float",
"epsilon",
"parameter",
"for",
"threat",
"model",
"on",
"a",
"scale",
"of",
"[",
"0",
"1",
"]",
".",
"Inferred",
"from",
"the",
"dataset",
"if",
"not",
"specified",
".",
":",
"param",
"base_eps_iter",
":",
"float",
"a",
"step",
"size",
"used",
"in",
"different",
"ways",
"by",
"different",
"recipes",
".",
"Typically",
"the",
"step",
"size",
"for",
"a",
"PGD",
"attack",
".",
"Inferred",
"from",
"the",
"dataset",
"if",
"not",
"specified",
".",
":",
"param",
"base_eps_iter_small",
":",
"float",
"a",
"second",
"step",
"size",
"for",
"a",
"more",
"fine",
"-",
"grained",
"attack",
".",
"Inferred",
"from",
"the",
"dataset",
"if",
"not",
"specified",
".",
":",
"param",
"batch_size",
":",
"int",
"batch",
"size"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/confidence_report.py#L124-L235
|
train
|
tensorflow/cleverhans
|
cleverhans/confidence_report.py
|
print_stats
|
def print_stats(correctness, confidence, name):
"""
Prints out accuracy, coverage, etc. statistics
:param correctness: ndarray
One bool per example specifying whether it was correctly classified
:param confidence: ndarray
The probability associated with each prediction
:param name: str
The name of this type of data (e.g. "clean", "MaxConfidence")
"""
accuracy = correctness.mean()
wrongness = 1 - correctness
denom1 = np.maximum(1, wrongness.sum())
ave_prob_on_mistake = (wrongness * confidence).sum() / denom1
assert ave_prob_on_mistake <= 1., ave_prob_on_mistake
denom2 = np.maximum(1, correctness.sum())
ave_prob_on_correct = (correctness * confidence).sum() / denom2
covered = confidence > 0.5
cov_half = covered.mean()
acc_half = (correctness * covered).sum() / np.maximum(1, covered.sum())
print('Accuracy on %s examples: %0.4f' % (name, accuracy))
print("Average prob on mistakes: %0.4f" % ave_prob_on_mistake)
print("Average prob on correct: %0.4f" % ave_prob_on_correct)
print("Accuracy when prob thresholded at .5: %0.4f" % acc_half)
print("Coverage when prob thresholded at .5: %0.4f" % cov_half)
success_rate = acc_half * cov_half
# Success is correctly classifying a covered example
print("Success rate at .5: %0.4f" % success_rate)
# Failure is misclassifying a covered example
failure_rate = (1. - acc_half) * cov_half
print("Failure rate at .5: %0.4f" % failure_rate)
print()
|
python
|
def print_stats(correctness, confidence, name):
"""
Prints out accuracy, coverage, etc. statistics
:param correctness: ndarray
One bool per example specifying whether it was correctly classified
:param confidence: ndarray
The probability associated with each prediction
:param name: str
The name of this type of data (e.g. "clean", "MaxConfidence")
"""
accuracy = correctness.mean()
wrongness = 1 - correctness
denom1 = np.maximum(1, wrongness.sum())
ave_prob_on_mistake = (wrongness * confidence).sum() / denom1
assert ave_prob_on_mistake <= 1., ave_prob_on_mistake
denom2 = np.maximum(1, correctness.sum())
ave_prob_on_correct = (correctness * confidence).sum() / denom2
covered = confidence > 0.5
cov_half = covered.mean()
acc_half = (correctness * covered).sum() / np.maximum(1, covered.sum())
print('Accuracy on %s examples: %0.4f' % (name, accuracy))
print("Average prob on mistakes: %0.4f" % ave_prob_on_mistake)
print("Average prob on correct: %0.4f" % ave_prob_on_correct)
print("Accuracy when prob thresholded at .5: %0.4f" % acc_half)
print("Coverage when prob thresholded at .5: %0.4f" % cov_half)
success_rate = acc_half * cov_half
# Success is correctly classifying a covered example
print("Success rate at .5: %0.4f" % success_rate)
# Failure is misclassifying a covered example
failure_rate = (1. - acc_half) * cov_half
print("Failure rate at .5: %0.4f" % failure_rate)
print()
|
[
"def",
"print_stats",
"(",
"correctness",
",",
"confidence",
",",
"name",
")",
":",
"accuracy",
"=",
"correctness",
".",
"mean",
"(",
")",
"wrongness",
"=",
"1",
"-",
"correctness",
"denom1",
"=",
"np",
".",
"maximum",
"(",
"1",
",",
"wrongness",
".",
"sum",
"(",
")",
")",
"ave_prob_on_mistake",
"=",
"(",
"wrongness",
"*",
"confidence",
")",
".",
"sum",
"(",
")",
"/",
"denom1",
"assert",
"ave_prob_on_mistake",
"<=",
"1.",
",",
"ave_prob_on_mistake",
"denom2",
"=",
"np",
".",
"maximum",
"(",
"1",
",",
"correctness",
".",
"sum",
"(",
")",
")",
"ave_prob_on_correct",
"=",
"(",
"correctness",
"*",
"confidence",
")",
".",
"sum",
"(",
")",
"/",
"denom2",
"covered",
"=",
"confidence",
">",
"0.5",
"cov_half",
"=",
"covered",
".",
"mean",
"(",
")",
"acc_half",
"=",
"(",
"correctness",
"*",
"covered",
")",
".",
"sum",
"(",
")",
"/",
"np",
".",
"maximum",
"(",
"1",
",",
"covered",
".",
"sum",
"(",
")",
")",
"print",
"(",
"'Accuracy on %s examples: %0.4f'",
"%",
"(",
"name",
",",
"accuracy",
")",
")",
"print",
"(",
"\"Average prob on mistakes: %0.4f\"",
"%",
"ave_prob_on_mistake",
")",
"print",
"(",
"\"Average prob on correct: %0.4f\"",
"%",
"ave_prob_on_correct",
")",
"print",
"(",
"\"Accuracy when prob thresholded at .5: %0.4f\"",
"%",
"acc_half",
")",
"print",
"(",
"\"Coverage when prob thresholded at .5: %0.4f\"",
"%",
"cov_half",
")",
"success_rate",
"=",
"acc_half",
"*",
"cov_half",
"# Success is correctly classifying a covered example",
"print",
"(",
"\"Success rate at .5: %0.4f\"",
"%",
"success_rate",
")",
"# Failure is misclassifying a covered example",
"failure_rate",
"=",
"(",
"1.",
"-",
"acc_half",
")",
"*",
"cov_half",
"print",
"(",
"\"Failure rate at .5: %0.4f\"",
"%",
"failure_rate",
")",
"print",
"(",
")"
] |
Prints out accuracy, coverage, etc. statistics
:param correctness: ndarray
One bool per example specifying whether it was correctly classified
:param confidence: ndarray
The probability associated with each prediction
:param name: str
The name of this type of data (e.g. "clean", "MaxConfidence")
|
[
"Prints",
"out",
"accuracy",
"coverage",
"etc",
".",
"statistics",
":",
"param",
"correctness",
":",
"ndarray",
"One",
"bool",
"per",
"example",
"specifying",
"whether",
"it",
"was",
"correctly",
"classified",
":",
"param",
"confidence",
":",
"ndarray",
"The",
"probability",
"associated",
"with",
"each",
"prediction",
":",
"param",
"name",
":",
"str",
"The",
"name",
"of",
"this",
"type",
"of",
"data",
"(",
"e",
".",
"g",
".",
"clean",
"MaxConfidence",
")"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/confidence_report.py#L238-L270
|
train
|
tensorflow/cleverhans
|
cleverhans/confidence_report.py
|
make_confidence_report
|
def make_confidence_report(filepath, train_start=TRAIN_START,
train_end=TRAIN_END,
test_start=TEST_START, test_end=TEST_END,
batch_size=BATCH_SIZE, which_set=WHICH_SET,
mc_batch_size=MC_BATCH_SIZE,
report_path=REPORT_PATH,
base_eps_iter=BASE_EPS_ITER,
nb_iter=NB_ITER, save_advx=SAVE_ADVX):
"""
Load a saved model, gather its predictions, and save a confidence report.
This function works by running a single MaxConfidence attack on each example.
This provides a reasonable estimate of the true failure rate quickly, so
long as the model does not suffer from gradient masking.
However, this estimate is mostly intended for development work and not
for publication. A more accurate estimate may be obtained by running
make_confidence_report_bundled.py instead.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param batch_size: size of evaluation batches
:param which_set: 'train' or 'test'
:param mc_batch_size: batch size for MaxConfidence attack
:param base_eps_iter: step size if the data were in [0,1]
(Step size will be rescaled proportional to the actual data range)
:param nb_iter: Number of iterations of PGD to run per class
:param save_advx: bool. If True, saves the adversarial examples to disk.
On by default, but can be turned off to save memory, etc.
"""
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Set logging level to see debug information
set_log_level(logging.INFO)
# Create TF session
sess = tf.Session()
if report_path is None:
assert filepath.endswith('.joblib')
report_path = filepath[:-len('.joblib')] + "_report.joblib"
with sess.as_default():
model = load(filepath)
assert len(model.get_params()) > 0
factory = model.dataset_factory
factory.kwargs['train_start'] = train_start
factory.kwargs['train_end'] = train_end
factory.kwargs['test_start'] = test_start
factory.kwargs['test_end'] = test_end
dataset = factory()
center = dataset.kwargs['center']
max_val = dataset.kwargs['max_val']
value_range = max_val * (1. + center)
min_value = 0. - center * max_val
if 'CIFAR' in str(factory.cls):
base_eps = 8. / 255.
if base_eps_iter is None:
base_eps_iter = 2. / 255.
elif 'MNIST' in str(factory.cls):
base_eps = .3
if base_eps_iter is None:
base_eps_iter = .1
else:
raise NotImplementedError(str(factory.cls))
mc_params = {'eps': base_eps * value_range,
'eps_iter': base_eps_iter * value_range,
'nb_iter': nb_iter,
'clip_min': min_value,
'clip_max': max_val}
x_data, y_data = dataset.get_set(which_set)
report = ConfidenceReport()
semantic = Semantic(model, center, max_val, sess)
mc = MaxConfidence(model, sess=sess)
jobs = [('clean', None, None, None, False),
('Semantic', semantic, None, None, False),
('mc', mc, mc_params, mc_batch_size, True)]
for job in jobs:
name, attack, attack_params, job_batch_size, save_this_job = job
if job_batch_size is None:
job_batch_size = batch_size
t1 = time.time()
if save_advx and save_this_job:
# If we want to save the adversarial examples to the filesystem, we need
# to fetch all of them. Otherwise they're just computed one batch at a
# time and discarded
# The path to save to
assert report_path.endswith('.joblib')
advx_path = report_path[:-len('.joblib')] + '_advx_' + name + '.npy'
# Fetch the adversarial examples
x_data = run_attack(sess, model, x_data, y_data, attack, attack_params,
batch_size=job_batch_size, devices=devices)
# Turn off the attack so `correctness_and_confidence` won't run it a
# second time.
attack = None
attack_params = None
# Save the adversarial examples
np.save(advx_path, x_data)
# Run correctness and confidence evaluation on adversarial examples
packed = correctness_and_confidence(sess, model, x_data, y_data,
batch_size=job_batch_size,
devices=devices,
attack=attack,
attack_params=attack_params)
t2 = time.time()
print("Evaluation took", t2 - t1, "seconds")
correctness, confidence = packed
report[name] = ConfidenceReportEntry(correctness=correctness,
confidence=confidence)
print_stats(correctness, confidence, name)
save(report_path, report)
|
python
|
def make_confidence_report(filepath, train_start=TRAIN_START,
train_end=TRAIN_END,
test_start=TEST_START, test_end=TEST_END,
batch_size=BATCH_SIZE, which_set=WHICH_SET,
mc_batch_size=MC_BATCH_SIZE,
report_path=REPORT_PATH,
base_eps_iter=BASE_EPS_ITER,
nb_iter=NB_ITER, save_advx=SAVE_ADVX):
"""
Load a saved model, gather its predictions, and save a confidence report.
This function works by running a single MaxConfidence attack on each example.
This provides a reasonable estimate of the true failure rate quickly, so
long as the model does not suffer from gradient masking.
However, this estimate is mostly intended for development work and not
for publication. A more accurate estimate may be obtained by running
make_confidence_report_bundled.py instead.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param batch_size: size of evaluation batches
:param which_set: 'train' or 'test'
:param mc_batch_size: batch size for MaxConfidence attack
:param base_eps_iter: step size if the data were in [0,1]
(Step size will be rescaled proportional to the actual data range)
:param nb_iter: Number of iterations of PGD to run per class
:param save_advx: bool. If True, saves the adversarial examples to disk.
On by default, but can be turned off to save memory, etc.
"""
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Set logging level to see debug information
set_log_level(logging.INFO)
# Create TF session
sess = tf.Session()
if report_path is None:
assert filepath.endswith('.joblib')
report_path = filepath[:-len('.joblib')] + "_report.joblib"
with sess.as_default():
model = load(filepath)
assert len(model.get_params()) > 0
factory = model.dataset_factory
factory.kwargs['train_start'] = train_start
factory.kwargs['train_end'] = train_end
factory.kwargs['test_start'] = test_start
factory.kwargs['test_end'] = test_end
dataset = factory()
center = dataset.kwargs['center']
max_val = dataset.kwargs['max_val']
value_range = max_val * (1. + center)
min_value = 0. - center * max_val
if 'CIFAR' in str(factory.cls):
base_eps = 8. / 255.
if base_eps_iter is None:
base_eps_iter = 2. / 255.
elif 'MNIST' in str(factory.cls):
base_eps = .3
if base_eps_iter is None:
base_eps_iter = .1
else:
raise NotImplementedError(str(factory.cls))
mc_params = {'eps': base_eps * value_range,
'eps_iter': base_eps_iter * value_range,
'nb_iter': nb_iter,
'clip_min': min_value,
'clip_max': max_val}
x_data, y_data = dataset.get_set(which_set)
report = ConfidenceReport()
semantic = Semantic(model, center, max_val, sess)
mc = MaxConfidence(model, sess=sess)
jobs = [('clean', None, None, None, False),
('Semantic', semantic, None, None, False),
('mc', mc, mc_params, mc_batch_size, True)]
for job in jobs:
name, attack, attack_params, job_batch_size, save_this_job = job
if job_batch_size is None:
job_batch_size = batch_size
t1 = time.time()
if save_advx and save_this_job:
# If we want to save the adversarial examples to the filesystem, we need
# to fetch all of them. Otherwise they're just computed one batch at a
# time and discarded
# The path to save to
assert report_path.endswith('.joblib')
advx_path = report_path[:-len('.joblib')] + '_advx_' + name + '.npy'
# Fetch the adversarial examples
x_data = run_attack(sess, model, x_data, y_data, attack, attack_params,
batch_size=job_batch_size, devices=devices)
# Turn off the attack so `correctness_and_confidence` won't run it a
# second time.
attack = None
attack_params = None
# Save the adversarial examples
np.save(advx_path, x_data)
# Run correctness and confidence evaluation on adversarial examples
packed = correctness_and_confidence(sess, model, x_data, y_data,
batch_size=job_batch_size,
devices=devices,
attack=attack,
attack_params=attack_params)
t2 = time.time()
print("Evaluation took", t2 - t1, "seconds")
correctness, confidence = packed
report[name] = ConfidenceReportEntry(correctness=correctness,
confidence=confidence)
print_stats(correctness, confidence, name)
save(report_path, report)
|
[
"def",
"make_confidence_report",
"(",
"filepath",
",",
"train_start",
"=",
"TRAIN_START",
",",
"train_end",
"=",
"TRAIN_END",
",",
"test_start",
"=",
"TEST_START",
",",
"test_end",
"=",
"TEST_END",
",",
"batch_size",
"=",
"BATCH_SIZE",
",",
"which_set",
"=",
"WHICH_SET",
",",
"mc_batch_size",
"=",
"MC_BATCH_SIZE",
",",
"report_path",
"=",
"REPORT_PATH",
",",
"base_eps_iter",
"=",
"BASE_EPS_ITER",
",",
"nb_iter",
"=",
"NB_ITER",
",",
"save_advx",
"=",
"SAVE_ADVX",
")",
":",
"# Set TF random seed to improve reproducibility",
"tf",
".",
"set_random_seed",
"(",
"1234",
")",
"# Set logging level to see debug information",
"set_log_level",
"(",
"logging",
".",
"INFO",
")",
"# Create TF session",
"sess",
"=",
"tf",
".",
"Session",
"(",
")",
"if",
"report_path",
"is",
"None",
":",
"assert",
"filepath",
".",
"endswith",
"(",
"'.joblib'",
")",
"report_path",
"=",
"filepath",
"[",
":",
"-",
"len",
"(",
"'.joblib'",
")",
"]",
"+",
"\"_report.joblib\"",
"with",
"sess",
".",
"as_default",
"(",
")",
":",
"model",
"=",
"load",
"(",
"filepath",
")",
"assert",
"len",
"(",
"model",
".",
"get_params",
"(",
")",
")",
">",
"0",
"factory",
"=",
"model",
".",
"dataset_factory",
"factory",
".",
"kwargs",
"[",
"'train_start'",
"]",
"=",
"train_start",
"factory",
".",
"kwargs",
"[",
"'train_end'",
"]",
"=",
"train_end",
"factory",
".",
"kwargs",
"[",
"'test_start'",
"]",
"=",
"test_start",
"factory",
".",
"kwargs",
"[",
"'test_end'",
"]",
"=",
"test_end",
"dataset",
"=",
"factory",
"(",
")",
"center",
"=",
"dataset",
".",
"kwargs",
"[",
"'center'",
"]",
"max_val",
"=",
"dataset",
".",
"kwargs",
"[",
"'max_val'",
"]",
"value_range",
"=",
"max_val",
"*",
"(",
"1.",
"+",
"center",
")",
"min_value",
"=",
"0.",
"-",
"center",
"*",
"max_val",
"if",
"'CIFAR'",
"in",
"str",
"(",
"factory",
".",
"cls",
")",
":",
"base_eps",
"=",
"8.",
"/",
"255.",
"if",
"base_eps_iter",
"is",
"None",
":",
"base_eps_iter",
"=",
"2.",
"/",
"255.",
"elif",
"'MNIST'",
"in",
"str",
"(",
"factory",
".",
"cls",
")",
":",
"base_eps",
"=",
".3",
"if",
"base_eps_iter",
"is",
"None",
":",
"base_eps_iter",
"=",
".1",
"else",
":",
"raise",
"NotImplementedError",
"(",
"str",
"(",
"factory",
".",
"cls",
")",
")",
"mc_params",
"=",
"{",
"'eps'",
":",
"base_eps",
"*",
"value_range",
",",
"'eps_iter'",
":",
"base_eps_iter",
"*",
"value_range",
",",
"'nb_iter'",
":",
"nb_iter",
",",
"'clip_min'",
":",
"min_value",
",",
"'clip_max'",
":",
"max_val",
"}",
"x_data",
",",
"y_data",
"=",
"dataset",
".",
"get_set",
"(",
"which_set",
")",
"report",
"=",
"ConfidenceReport",
"(",
")",
"semantic",
"=",
"Semantic",
"(",
"model",
",",
"center",
",",
"max_val",
",",
"sess",
")",
"mc",
"=",
"MaxConfidence",
"(",
"model",
",",
"sess",
"=",
"sess",
")",
"jobs",
"=",
"[",
"(",
"'clean'",
",",
"None",
",",
"None",
",",
"None",
",",
"False",
")",
",",
"(",
"'Semantic'",
",",
"semantic",
",",
"None",
",",
"None",
",",
"False",
")",
",",
"(",
"'mc'",
",",
"mc",
",",
"mc_params",
",",
"mc_batch_size",
",",
"True",
")",
"]",
"for",
"job",
"in",
"jobs",
":",
"name",
",",
"attack",
",",
"attack_params",
",",
"job_batch_size",
",",
"save_this_job",
"=",
"job",
"if",
"job_batch_size",
"is",
"None",
":",
"job_batch_size",
"=",
"batch_size",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"if",
"save_advx",
"and",
"save_this_job",
":",
"# If we want to save the adversarial examples to the filesystem, we need",
"# to fetch all of them. Otherwise they're just computed one batch at a",
"# time and discarded",
"# The path to save to",
"assert",
"report_path",
".",
"endswith",
"(",
"'.joblib'",
")",
"advx_path",
"=",
"report_path",
"[",
":",
"-",
"len",
"(",
"'.joblib'",
")",
"]",
"+",
"'_advx_'",
"+",
"name",
"+",
"'.npy'",
"# Fetch the adversarial examples",
"x_data",
"=",
"run_attack",
"(",
"sess",
",",
"model",
",",
"x_data",
",",
"y_data",
",",
"attack",
",",
"attack_params",
",",
"batch_size",
"=",
"job_batch_size",
",",
"devices",
"=",
"devices",
")",
"# Turn off the attack so `correctness_and_confidence` won't run it a",
"# second time.",
"attack",
"=",
"None",
"attack_params",
"=",
"None",
"# Save the adversarial examples",
"np",
".",
"save",
"(",
"advx_path",
",",
"x_data",
")",
"# Run correctness and confidence evaluation on adversarial examples",
"packed",
"=",
"correctness_and_confidence",
"(",
"sess",
",",
"model",
",",
"x_data",
",",
"y_data",
",",
"batch_size",
"=",
"job_batch_size",
",",
"devices",
"=",
"devices",
",",
"attack",
"=",
"attack",
",",
"attack_params",
"=",
"attack_params",
")",
"t2",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"\"Evaluation took\"",
",",
"t2",
"-",
"t1",
",",
"\"seconds\"",
")",
"correctness",
",",
"confidence",
"=",
"packed",
"report",
"[",
"name",
"]",
"=",
"ConfidenceReportEntry",
"(",
"correctness",
"=",
"correctness",
",",
"confidence",
"=",
"confidence",
")",
"print_stats",
"(",
"correctness",
",",
"confidence",
",",
"name",
")",
"save",
"(",
"report_path",
",",
"report",
")"
] |
Load a saved model, gather its predictions, and save a confidence report.
This function works by running a single MaxConfidence attack on each example.
This provides a reasonable estimate of the true failure rate quickly, so
long as the model does not suffer from gradient masking.
However, this estimate is mostly intended for development work and not
for publication. A more accurate estimate may be obtained by running
make_confidence_report_bundled.py instead.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param batch_size: size of evaluation batches
:param which_set: 'train' or 'test'
:param mc_batch_size: batch size for MaxConfidence attack
:param base_eps_iter: step size if the data were in [0,1]
(Step size will be rescaled proportional to the actual data range)
:param nb_iter: Number of iterations of PGD to run per class
:param save_advx: bool. If True, saves the adversarial examples to disk.
On by default, but can be turned off to save memory, etc.
|
[
"Load",
"a",
"saved",
"model",
"gather",
"its",
"predictions",
"and",
"save",
"a",
"confidence",
"report",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/confidence_report.py#L273-L404
|
train
|
tensorflow/cleverhans
|
cleverhans_tutorials/mnist_tutorial_keras_tf.py
|
mnist_tutorial
|
def mnist_tutorial(train_start=0, train_end=60000, test_start=0,
test_end=10000, nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
learning_rate=LEARNING_RATE, train_dir=TRAIN_DIR,
filename=FILENAME, load_model=LOAD_MODEL,
testing=False, label_smoothing=0.1):
"""
MNIST CleverHans tutorial
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:param train_dir: Directory storing the saved model
:param filename: Filename to save model under
:param load_model: True for load, False for not load
:param testing: if true, test error is calculated
:param label_smoothing: float, amount of label smoothing for cross entropy
:return: an AccuracyReport object
"""
tf.keras.backend.set_learning_phase(0)
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
if keras.backend.image_data_format() != 'channels_last':
raise NotImplementedError("this tutorial requires keras to be configured to channels_last format")
# Create TF session and set as Keras backend session
sess = tf.Session()
keras.backend.set_session(sess)
# Get MNIST test data
mnist = MNIST(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
x_train, y_train = mnist.get_set('train')
x_test, y_test = mnist.get_set('test')
# Obtain Image Parameters
img_rows, img_cols, nchannels = x_train.shape[1:4]
nb_classes = y_train.shape[1]
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
nchannels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
# Define TF model graph
model = cnn_model(img_rows=img_rows, img_cols=img_cols,
channels=nchannels, nb_filters=64,
nb_classes=nb_classes)
preds = model(x)
print("Defined TensorFlow model graph.")
def evaluate():
# Evaluate the accuracy of the MNIST model on legitimate test examples
eval_params = {'batch_size': batch_size}
acc = model_eval(sess, x, y, preds, x_test, y_test, args=eval_params)
report.clean_train_clean_eval = acc
# assert X_test.shape[0] == test_end - test_start, X_test.shape
print('Test accuracy on legitimate examples: %0.4f' % acc)
# Train an MNIST model
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate,
'train_dir': train_dir,
'filename': filename
}
rng = np.random.RandomState([2017, 8, 30])
if not os.path.exists(train_dir):
os.mkdir(train_dir)
ckpt = tf.train.get_checkpoint_state(train_dir)
print(train_dir, ckpt)
ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path
wrap = KerasModelWrapper(model)
if load_model and ckpt_path:
saver = tf.train.Saver()
print(ckpt_path)
saver.restore(sess, ckpt_path)
print("Model loaded from: {}".format(ckpt_path))
evaluate()
else:
print("Model was not loaded, training from scratch.")
loss = CrossEntropy(wrap, smoothing=label_smoothing)
train(sess, loss, x_train, y_train, evaluate=evaluate,
args=train_params, rng=rng)
# Calculate training error
if testing:
eval_params = {'batch_size': batch_size}
acc = model_eval(sess, x, y, preds, x_train, y_train, args=eval_params)
report.train_clean_train_clean_eval = acc
# Initialize the Fast Gradient Sign Method (FGSM) attack object and graph
fgsm = FastGradientMethod(wrap, sess=sess)
fgsm_params = {'eps': 0.3,
'clip_min': 0.,
'clip_max': 1.}
adv_x = fgsm.generate(x, **fgsm_params)
# Consider the attack to be constant
adv_x = tf.stop_gradient(adv_x)
preds_adv = model(adv_x)
# Evaluate the accuracy of the MNIST model on adversarial examples
eval_par = {'batch_size': batch_size}
acc = model_eval(sess, x, y, preds_adv, x_test, y_test, args=eval_par)
print('Test accuracy on adversarial examples: %0.4f\n' % acc)
report.clean_train_adv_eval = acc
# Calculating train error
if testing:
eval_par = {'batch_size': batch_size}
acc = model_eval(sess, x, y, preds_adv, x_train,
y_train, args=eval_par)
report.train_clean_train_adv_eval = acc
print("Repeating the process, using adversarial training")
# Redefine TF model graph
model_2 = cnn_model(img_rows=img_rows, img_cols=img_cols,
channels=nchannels, nb_filters=64,
nb_classes=nb_classes)
wrap_2 = KerasModelWrapper(model_2)
preds_2 = model_2(x)
fgsm2 = FastGradientMethod(wrap_2, sess=sess)
def attack(x):
return fgsm2.generate(x, **fgsm_params)
preds_2_adv = model_2(attack(x))
loss_2 = CrossEntropy(wrap_2, smoothing=label_smoothing, attack=attack)
def evaluate_2():
# Accuracy of adversarially trained model on legitimate test inputs
eval_params = {'batch_size': batch_size}
accuracy = model_eval(sess, x, y, preds_2, x_test, y_test,
args=eval_params)
print('Test accuracy on legitimate examples: %0.4f' % accuracy)
report.adv_train_clean_eval = accuracy
# Accuracy of the adversarially trained model on adversarial examples
accuracy = model_eval(sess, x, y, preds_2_adv, x_test,
y_test, args=eval_params)
print('Test accuracy on adversarial examples: %0.4f' % accuracy)
report.adv_train_adv_eval = accuracy
# Perform and evaluate adversarial training
train(sess, loss_2, x_train, y_train, evaluate=evaluate_2,
args=train_params, rng=rng)
# Calculate training errors
if testing:
eval_params = {'batch_size': batch_size}
accuracy = model_eval(sess, x, y, preds_2, x_train, y_train,
args=eval_params)
report.train_adv_train_clean_eval = accuracy
accuracy = model_eval(sess, x, y, preds_2_adv, x_train,
y_train, args=eval_params)
report.train_adv_train_adv_eval = accuracy
return report
|
python
|
def mnist_tutorial(train_start=0, train_end=60000, test_start=0,
test_end=10000, nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
learning_rate=LEARNING_RATE, train_dir=TRAIN_DIR,
filename=FILENAME, load_model=LOAD_MODEL,
testing=False, label_smoothing=0.1):
"""
MNIST CleverHans tutorial
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:param train_dir: Directory storing the saved model
:param filename: Filename to save model under
:param load_model: True for load, False for not load
:param testing: if true, test error is calculated
:param label_smoothing: float, amount of label smoothing for cross entropy
:return: an AccuracyReport object
"""
tf.keras.backend.set_learning_phase(0)
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
if keras.backend.image_data_format() != 'channels_last':
raise NotImplementedError("this tutorial requires keras to be configured to channels_last format")
# Create TF session and set as Keras backend session
sess = tf.Session()
keras.backend.set_session(sess)
# Get MNIST test data
mnist = MNIST(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
x_train, y_train = mnist.get_set('train')
x_test, y_test = mnist.get_set('test')
# Obtain Image Parameters
img_rows, img_cols, nchannels = x_train.shape[1:4]
nb_classes = y_train.shape[1]
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
nchannels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
# Define TF model graph
model = cnn_model(img_rows=img_rows, img_cols=img_cols,
channels=nchannels, nb_filters=64,
nb_classes=nb_classes)
preds = model(x)
print("Defined TensorFlow model graph.")
def evaluate():
# Evaluate the accuracy of the MNIST model on legitimate test examples
eval_params = {'batch_size': batch_size}
acc = model_eval(sess, x, y, preds, x_test, y_test, args=eval_params)
report.clean_train_clean_eval = acc
# assert X_test.shape[0] == test_end - test_start, X_test.shape
print('Test accuracy on legitimate examples: %0.4f' % acc)
# Train an MNIST model
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate,
'train_dir': train_dir,
'filename': filename
}
rng = np.random.RandomState([2017, 8, 30])
if not os.path.exists(train_dir):
os.mkdir(train_dir)
ckpt = tf.train.get_checkpoint_state(train_dir)
print(train_dir, ckpt)
ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path
wrap = KerasModelWrapper(model)
if load_model and ckpt_path:
saver = tf.train.Saver()
print(ckpt_path)
saver.restore(sess, ckpt_path)
print("Model loaded from: {}".format(ckpt_path))
evaluate()
else:
print("Model was not loaded, training from scratch.")
loss = CrossEntropy(wrap, smoothing=label_smoothing)
train(sess, loss, x_train, y_train, evaluate=evaluate,
args=train_params, rng=rng)
# Calculate training error
if testing:
eval_params = {'batch_size': batch_size}
acc = model_eval(sess, x, y, preds, x_train, y_train, args=eval_params)
report.train_clean_train_clean_eval = acc
# Initialize the Fast Gradient Sign Method (FGSM) attack object and graph
fgsm = FastGradientMethod(wrap, sess=sess)
fgsm_params = {'eps': 0.3,
'clip_min': 0.,
'clip_max': 1.}
adv_x = fgsm.generate(x, **fgsm_params)
# Consider the attack to be constant
adv_x = tf.stop_gradient(adv_x)
preds_adv = model(adv_x)
# Evaluate the accuracy of the MNIST model on adversarial examples
eval_par = {'batch_size': batch_size}
acc = model_eval(sess, x, y, preds_adv, x_test, y_test, args=eval_par)
print('Test accuracy on adversarial examples: %0.4f\n' % acc)
report.clean_train_adv_eval = acc
# Calculating train error
if testing:
eval_par = {'batch_size': batch_size}
acc = model_eval(sess, x, y, preds_adv, x_train,
y_train, args=eval_par)
report.train_clean_train_adv_eval = acc
print("Repeating the process, using adversarial training")
# Redefine TF model graph
model_2 = cnn_model(img_rows=img_rows, img_cols=img_cols,
channels=nchannels, nb_filters=64,
nb_classes=nb_classes)
wrap_2 = KerasModelWrapper(model_2)
preds_2 = model_2(x)
fgsm2 = FastGradientMethod(wrap_2, sess=sess)
def attack(x):
return fgsm2.generate(x, **fgsm_params)
preds_2_adv = model_2(attack(x))
loss_2 = CrossEntropy(wrap_2, smoothing=label_smoothing, attack=attack)
def evaluate_2():
# Accuracy of adversarially trained model on legitimate test inputs
eval_params = {'batch_size': batch_size}
accuracy = model_eval(sess, x, y, preds_2, x_test, y_test,
args=eval_params)
print('Test accuracy on legitimate examples: %0.4f' % accuracy)
report.adv_train_clean_eval = accuracy
# Accuracy of the adversarially trained model on adversarial examples
accuracy = model_eval(sess, x, y, preds_2_adv, x_test,
y_test, args=eval_params)
print('Test accuracy on adversarial examples: %0.4f' % accuracy)
report.adv_train_adv_eval = accuracy
# Perform and evaluate adversarial training
train(sess, loss_2, x_train, y_train, evaluate=evaluate_2,
args=train_params, rng=rng)
# Calculate training errors
if testing:
eval_params = {'batch_size': batch_size}
accuracy = model_eval(sess, x, y, preds_2, x_train, y_train,
args=eval_params)
report.train_adv_train_clean_eval = accuracy
accuracy = model_eval(sess, x, y, preds_2_adv, x_train,
y_train, args=eval_params)
report.train_adv_train_adv_eval = accuracy
return report
|
[
"def",
"mnist_tutorial",
"(",
"train_start",
"=",
"0",
",",
"train_end",
"=",
"60000",
",",
"test_start",
"=",
"0",
",",
"test_end",
"=",
"10000",
",",
"nb_epochs",
"=",
"NB_EPOCHS",
",",
"batch_size",
"=",
"BATCH_SIZE",
",",
"learning_rate",
"=",
"LEARNING_RATE",
",",
"train_dir",
"=",
"TRAIN_DIR",
",",
"filename",
"=",
"FILENAME",
",",
"load_model",
"=",
"LOAD_MODEL",
",",
"testing",
"=",
"False",
",",
"label_smoothing",
"=",
"0.1",
")",
":",
"tf",
".",
"keras",
".",
"backend",
".",
"set_learning_phase",
"(",
"0",
")",
"# Object used to keep track of (and return) key accuracies",
"report",
"=",
"AccuracyReport",
"(",
")",
"# Set TF random seed to improve reproducibility",
"tf",
".",
"set_random_seed",
"(",
"1234",
")",
"if",
"keras",
".",
"backend",
".",
"image_data_format",
"(",
")",
"!=",
"'channels_last'",
":",
"raise",
"NotImplementedError",
"(",
"\"this tutorial requires keras to be configured to channels_last format\"",
")",
"# Create TF session and set as Keras backend session",
"sess",
"=",
"tf",
".",
"Session",
"(",
")",
"keras",
".",
"backend",
".",
"set_session",
"(",
"sess",
")",
"# Get MNIST test data",
"mnist",
"=",
"MNIST",
"(",
"train_start",
"=",
"train_start",
",",
"train_end",
"=",
"train_end",
",",
"test_start",
"=",
"test_start",
",",
"test_end",
"=",
"test_end",
")",
"x_train",
",",
"y_train",
"=",
"mnist",
".",
"get_set",
"(",
"'train'",
")",
"x_test",
",",
"y_test",
"=",
"mnist",
".",
"get_set",
"(",
"'test'",
")",
"# Obtain Image Parameters",
"img_rows",
",",
"img_cols",
",",
"nchannels",
"=",
"x_train",
".",
"shape",
"[",
"1",
":",
"4",
"]",
"nb_classes",
"=",
"y_train",
".",
"shape",
"[",
"1",
"]",
"# Define input TF placeholder",
"x",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"shape",
"=",
"(",
"None",
",",
"img_rows",
",",
"img_cols",
",",
"nchannels",
")",
")",
"y",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"shape",
"=",
"(",
"None",
",",
"nb_classes",
")",
")",
"# Define TF model graph",
"model",
"=",
"cnn_model",
"(",
"img_rows",
"=",
"img_rows",
",",
"img_cols",
"=",
"img_cols",
",",
"channels",
"=",
"nchannels",
",",
"nb_filters",
"=",
"64",
",",
"nb_classes",
"=",
"nb_classes",
")",
"preds",
"=",
"model",
"(",
"x",
")",
"print",
"(",
"\"Defined TensorFlow model graph.\"",
")",
"def",
"evaluate",
"(",
")",
":",
"# Evaluate the accuracy of the MNIST model on legitimate test examples",
"eval_params",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"acc",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds",
",",
"x_test",
",",
"y_test",
",",
"args",
"=",
"eval_params",
")",
"report",
".",
"clean_train_clean_eval",
"=",
"acc",
"# assert X_test.shape[0] == test_end - test_start, X_test.shape",
"print",
"(",
"'Test accuracy on legitimate examples: %0.4f'",
"%",
"acc",
")",
"# Train an MNIST model",
"train_params",
"=",
"{",
"'nb_epochs'",
":",
"nb_epochs",
",",
"'batch_size'",
":",
"batch_size",
",",
"'learning_rate'",
":",
"learning_rate",
",",
"'train_dir'",
":",
"train_dir",
",",
"'filename'",
":",
"filename",
"}",
"rng",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"[",
"2017",
",",
"8",
",",
"30",
"]",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"train_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"train_dir",
")",
"ckpt",
"=",
"tf",
".",
"train",
".",
"get_checkpoint_state",
"(",
"train_dir",
")",
"print",
"(",
"train_dir",
",",
"ckpt",
")",
"ckpt_path",
"=",
"False",
"if",
"ckpt",
"is",
"None",
"else",
"ckpt",
".",
"model_checkpoint_path",
"wrap",
"=",
"KerasModelWrapper",
"(",
"model",
")",
"if",
"load_model",
"and",
"ckpt_path",
":",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
")",
"print",
"(",
"ckpt_path",
")",
"saver",
".",
"restore",
"(",
"sess",
",",
"ckpt_path",
")",
"print",
"(",
"\"Model loaded from: {}\"",
".",
"format",
"(",
"ckpt_path",
")",
")",
"evaluate",
"(",
")",
"else",
":",
"print",
"(",
"\"Model was not loaded, training from scratch.\"",
")",
"loss",
"=",
"CrossEntropy",
"(",
"wrap",
",",
"smoothing",
"=",
"label_smoothing",
")",
"train",
"(",
"sess",
",",
"loss",
",",
"x_train",
",",
"y_train",
",",
"evaluate",
"=",
"evaluate",
",",
"args",
"=",
"train_params",
",",
"rng",
"=",
"rng",
")",
"# Calculate training error",
"if",
"testing",
":",
"eval_params",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"acc",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds",
",",
"x_train",
",",
"y_train",
",",
"args",
"=",
"eval_params",
")",
"report",
".",
"train_clean_train_clean_eval",
"=",
"acc",
"# Initialize the Fast Gradient Sign Method (FGSM) attack object and graph",
"fgsm",
"=",
"FastGradientMethod",
"(",
"wrap",
",",
"sess",
"=",
"sess",
")",
"fgsm_params",
"=",
"{",
"'eps'",
":",
"0.3",
",",
"'clip_min'",
":",
"0.",
",",
"'clip_max'",
":",
"1.",
"}",
"adv_x",
"=",
"fgsm",
".",
"generate",
"(",
"x",
",",
"*",
"*",
"fgsm_params",
")",
"# Consider the attack to be constant",
"adv_x",
"=",
"tf",
".",
"stop_gradient",
"(",
"adv_x",
")",
"preds_adv",
"=",
"model",
"(",
"adv_x",
")",
"# Evaluate the accuracy of the MNIST model on adversarial examples",
"eval_par",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"acc",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds_adv",
",",
"x_test",
",",
"y_test",
",",
"args",
"=",
"eval_par",
")",
"print",
"(",
"'Test accuracy on adversarial examples: %0.4f\\n'",
"%",
"acc",
")",
"report",
".",
"clean_train_adv_eval",
"=",
"acc",
"# Calculating train error",
"if",
"testing",
":",
"eval_par",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"acc",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds_adv",
",",
"x_train",
",",
"y_train",
",",
"args",
"=",
"eval_par",
")",
"report",
".",
"train_clean_train_adv_eval",
"=",
"acc",
"print",
"(",
"\"Repeating the process, using adversarial training\"",
")",
"# Redefine TF model graph",
"model_2",
"=",
"cnn_model",
"(",
"img_rows",
"=",
"img_rows",
",",
"img_cols",
"=",
"img_cols",
",",
"channels",
"=",
"nchannels",
",",
"nb_filters",
"=",
"64",
",",
"nb_classes",
"=",
"nb_classes",
")",
"wrap_2",
"=",
"KerasModelWrapper",
"(",
"model_2",
")",
"preds_2",
"=",
"model_2",
"(",
"x",
")",
"fgsm2",
"=",
"FastGradientMethod",
"(",
"wrap_2",
",",
"sess",
"=",
"sess",
")",
"def",
"attack",
"(",
"x",
")",
":",
"return",
"fgsm2",
".",
"generate",
"(",
"x",
",",
"*",
"*",
"fgsm_params",
")",
"preds_2_adv",
"=",
"model_2",
"(",
"attack",
"(",
"x",
")",
")",
"loss_2",
"=",
"CrossEntropy",
"(",
"wrap_2",
",",
"smoothing",
"=",
"label_smoothing",
",",
"attack",
"=",
"attack",
")",
"def",
"evaluate_2",
"(",
")",
":",
"# Accuracy of adversarially trained model on legitimate test inputs",
"eval_params",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"accuracy",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds_2",
",",
"x_test",
",",
"y_test",
",",
"args",
"=",
"eval_params",
")",
"print",
"(",
"'Test accuracy on legitimate examples: %0.4f'",
"%",
"accuracy",
")",
"report",
".",
"adv_train_clean_eval",
"=",
"accuracy",
"# Accuracy of the adversarially trained model on adversarial examples",
"accuracy",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds_2_adv",
",",
"x_test",
",",
"y_test",
",",
"args",
"=",
"eval_params",
")",
"print",
"(",
"'Test accuracy on adversarial examples: %0.4f'",
"%",
"accuracy",
")",
"report",
".",
"adv_train_adv_eval",
"=",
"accuracy",
"# Perform and evaluate adversarial training",
"train",
"(",
"sess",
",",
"loss_2",
",",
"x_train",
",",
"y_train",
",",
"evaluate",
"=",
"evaluate_2",
",",
"args",
"=",
"train_params",
",",
"rng",
"=",
"rng",
")",
"# Calculate training errors",
"if",
"testing",
":",
"eval_params",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"accuracy",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds_2",
",",
"x_train",
",",
"y_train",
",",
"args",
"=",
"eval_params",
")",
"report",
".",
"train_adv_train_clean_eval",
"=",
"accuracy",
"accuracy",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds_2_adv",
",",
"x_train",
",",
"y_train",
",",
"args",
"=",
"eval_params",
")",
"report",
".",
"train_adv_train_adv_eval",
"=",
"accuracy",
"return",
"report"
] |
MNIST CleverHans tutorial
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:param train_dir: Directory storing the saved model
:param filename: Filename to save model under
:param load_model: True for load, False for not load
:param testing: if true, test error is calculated
:param label_smoothing: float, amount of label smoothing for cross entropy
:return: an AccuracyReport object
|
[
"MNIST",
"CleverHans",
"tutorial",
":",
"param",
"train_start",
":",
"index",
"of",
"first",
"training",
"set",
"example",
":",
"param",
"train_end",
":",
"index",
"of",
"last",
"training",
"set",
"example",
":",
"param",
"test_start",
":",
"index",
"of",
"first",
"test",
"set",
"example",
":",
"param",
"test_end",
":",
"index",
"of",
"last",
"test",
"set",
"example",
":",
"param",
"nb_epochs",
":",
"number",
"of",
"epochs",
"to",
"train",
"model",
":",
"param",
"batch_size",
":",
"size",
"of",
"training",
"batches",
":",
"param",
"learning_rate",
":",
"learning",
"rate",
"for",
"training",
":",
"param",
"train_dir",
":",
"Directory",
"storing",
"the",
"saved",
"model",
":",
"param",
"filename",
":",
"Filename",
"to",
"save",
"model",
"under",
":",
"param",
"load_model",
":",
"True",
"for",
"load",
"False",
"for",
"not",
"load",
":",
"param",
"testing",
":",
"if",
"true",
"test",
"error",
"is",
"calculated",
":",
"param",
"label_smoothing",
":",
"float",
"amount",
"of",
"label",
"smoothing",
"for",
"cross",
"entropy",
":",
"return",
":",
"an",
"AccuracyReport",
"object"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans_tutorials/mnist_tutorial_keras_tf.py#L41-L209
|
train
|
tensorflow/cleverhans
|
cleverhans/attacks/max_confidence.py
|
MaxConfidence.generate
|
def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: Keyword arguments for the base attacker
"""
assert self.parse_params(**kwargs)
labels, _nb_classes = self.get_or_guess_labels(x, kwargs)
adv_x = self.attack(x, labels)
return adv_x
|
python
|
def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: Keyword arguments for the base attacker
"""
assert self.parse_params(**kwargs)
labels, _nb_classes = self.get_or_guess_labels(x, kwargs)
adv_x = self.attack(x, labels)
return adv_x
|
[
"def",
"generate",
"(",
"self",
",",
"x",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"self",
".",
"parse_params",
"(",
"*",
"*",
"kwargs",
")",
"labels",
",",
"_nb_classes",
"=",
"self",
".",
"get_or_guess_labels",
"(",
"x",
",",
"kwargs",
")",
"adv_x",
"=",
"self",
".",
"attack",
"(",
"x",
",",
"labels",
")",
"return",
"adv_x"
] |
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: Keyword arguments for the base attacker
|
[
"Generate",
"symbolic",
"graph",
"for",
"adversarial",
"examples",
"and",
"return",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/max_confidence.py#L41-L53
|
train
|
tensorflow/cleverhans
|
cleverhans/attacks/max_confidence.py
|
MaxConfidence.attack
|
def attack(self, x, true_y):
"""
Runs the untargeted attack.
:param x: The input
:param true_y: The correct label for `x`. This attack aims to produce misclassification.
"""
adv_x_cls = []
prob_cls = []
m = tf.shape(x)[0]
true_y_idx = tf.argmax(true_y, axis=1)
expanded_x = tf.concat([x] * self.nb_classes, axis=0)
target_ys = [tf.to_float(tf.one_hot(tf.ones(m, dtype=tf.int32) * cls,
self.nb_classes))
for cls in range(self.nb_classes)]
target_y = tf.concat(target_ys, axis=0)
adv_x_cls = self.attack_class(expanded_x, target_y)
expanded_all_probs = self.model.get_probs(adv_x_cls)
adv_x_list = tf.split(adv_x_cls, self.nb_classes)
all_probs_list = tf.split(expanded_all_probs, self.nb_classes)
for cls in range(self.nb_classes):
target_y = target_ys[cls]
all_probs = all_probs_list[cls]
# We don't actually care whether we hit the target class.
# We care about the probability of the most likely wrong class
cur_prob_cls = tf.reduce_max(all_probs - true_y, axis=1)
# Knock out examples that are correctly classified.
# This is not needed to be optimal for t >= 0.5, but may as well do it
# to get better failure rate at lower thresholds.
chosen_cls = tf.argmax(all_probs, axis=1)
eligible = tf.to_float(tf.not_equal(true_y_idx, chosen_cls))
cur_prob_cls = cur_prob_cls * eligible
prob_cls.append(cur_prob_cls)
probs = tf.concat([tf.expand_dims(e, 1) for e in prob_cls], axis=1)
# Don't need to censor here because we knocked out the true class above
# probs = probs - true_y
most_confident = tf.argmax(probs, axis=1)
fused_mask = tf.one_hot(most_confident, self.nb_classes)
masks = tf.split(fused_mask, num_or_size_splits=self.nb_classes, axis=1)
shape = [m] + [1] * (len(x.get_shape()) - 1)
reshaped_masks = [tf.reshape(mask, shape) for mask in masks]
out = sum(adv_x * rmask for adv_x,
rmask in zip(adv_x_list, reshaped_masks))
return out
|
python
|
def attack(self, x, true_y):
"""
Runs the untargeted attack.
:param x: The input
:param true_y: The correct label for `x`. This attack aims to produce misclassification.
"""
adv_x_cls = []
prob_cls = []
m = tf.shape(x)[0]
true_y_idx = tf.argmax(true_y, axis=1)
expanded_x = tf.concat([x] * self.nb_classes, axis=0)
target_ys = [tf.to_float(tf.one_hot(tf.ones(m, dtype=tf.int32) * cls,
self.nb_classes))
for cls in range(self.nb_classes)]
target_y = tf.concat(target_ys, axis=0)
adv_x_cls = self.attack_class(expanded_x, target_y)
expanded_all_probs = self.model.get_probs(adv_x_cls)
adv_x_list = tf.split(adv_x_cls, self.nb_classes)
all_probs_list = tf.split(expanded_all_probs, self.nb_classes)
for cls in range(self.nb_classes):
target_y = target_ys[cls]
all_probs = all_probs_list[cls]
# We don't actually care whether we hit the target class.
# We care about the probability of the most likely wrong class
cur_prob_cls = tf.reduce_max(all_probs - true_y, axis=1)
# Knock out examples that are correctly classified.
# This is not needed to be optimal for t >= 0.5, but may as well do it
# to get better failure rate at lower thresholds.
chosen_cls = tf.argmax(all_probs, axis=1)
eligible = tf.to_float(tf.not_equal(true_y_idx, chosen_cls))
cur_prob_cls = cur_prob_cls * eligible
prob_cls.append(cur_prob_cls)
probs = tf.concat([tf.expand_dims(e, 1) for e in prob_cls], axis=1)
# Don't need to censor here because we knocked out the true class above
# probs = probs - true_y
most_confident = tf.argmax(probs, axis=1)
fused_mask = tf.one_hot(most_confident, self.nb_classes)
masks = tf.split(fused_mask, num_or_size_splits=self.nb_classes, axis=1)
shape = [m] + [1] * (len(x.get_shape()) - 1)
reshaped_masks = [tf.reshape(mask, shape) for mask in masks]
out = sum(adv_x * rmask for adv_x,
rmask in zip(adv_x_list, reshaped_masks))
return out
|
[
"def",
"attack",
"(",
"self",
",",
"x",
",",
"true_y",
")",
":",
"adv_x_cls",
"=",
"[",
"]",
"prob_cls",
"=",
"[",
"]",
"m",
"=",
"tf",
".",
"shape",
"(",
"x",
")",
"[",
"0",
"]",
"true_y_idx",
"=",
"tf",
".",
"argmax",
"(",
"true_y",
",",
"axis",
"=",
"1",
")",
"expanded_x",
"=",
"tf",
".",
"concat",
"(",
"[",
"x",
"]",
"*",
"self",
".",
"nb_classes",
",",
"axis",
"=",
"0",
")",
"target_ys",
"=",
"[",
"tf",
".",
"to_float",
"(",
"tf",
".",
"one_hot",
"(",
"tf",
".",
"ones",
"(",
"m",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"*",
"cls",
",",
"self",
".",
"nb_classes",
")",
")",
"for",
"cls",
"in",
"range",
"(",
"self",
".",
"nb_classes",
")",
"]",
"target_y",
"=",
"tf",
".",
"concat",
"(",
"target_ys",
",",
"axis",
"=",
"0",
")",
"adv_x_cls",
"=",
"self",
".",
"attack_class",
"(",
"expanded_x",
",",
"target_y",
")",
"expanded_all_probs",
"=",
"self",
".",
"model",
".",
"get_probs",
"(",
"adv_x_cls",
")",
"adv_x_list",
"=",
"tf",
".",
"split",
"(",
"adv_x_cls",
",",
"self",
".",
"nb_classes",
")",
"all_probs_list",
"=",
"tf",
".",
"split",
"(",
"expanded_all_probs",
",",
"self",
".",
"nb_classes",
")",
"for",
"cls",
"in",
"range",
"(",
"self",
".",
"nb_classes",
")",
":",
"target_y",
"=",
"target_ys",
"[",
"cls",
"]",
"all_probs",
"=",
"all_probs_list",
"[",
"cls",
"]",
"# We don't actually care whether we hit the target class.",
"# We care about the probability of the most likely wrong class",
"cur_prob_cls",
"=",
"tf",
".",
"reduce_max",
"(",
"all_probs",
"-",
"true_y",
",",
"axis",
"=",
"1",
")",
"# Knock out examples that are correctly classified.",
"# This is not needed to be optimal for t >= 0.5, but may as well do it",
"# to get better failure rate at lower thresholds.",
"chosen_cls",
"=",
"tf",
".",
"argmax",
"(",
"all_probs",
",",
"axis",
"=",
"1",
")",
"eligible",
"=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"not_equal",
"(",
"true_y_idx",
",",
"chosen_cls",
")",
")",
"cur_prob_cls",
"=",
"cur_prob_cls",
"*",
"eligible",
"prob_cls",
".",
"append",
"(",
"cur_prob_cls",
")",
"probs",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"expand_dims",
"(",
"e",
",",
"1",
")",
"for",
"e",
"in",
"prob_cls",
"]",
",",
"axis",
"=",
"1",
")",
"# Don't need to censor here because we knocked out the true class above",
"# probs = probs - true_y",
"most_confident",
"=",
"tf",
".",
"argmax",
"(",
"probs",
",",
"axis",
"=",
"1",
")",
"fused_mask",
"=",
"tf",
".",
"one_hot",
"(",
"most_confident",
",",
"self",
".",
"nb_classes",
")",
"masks",
"=",
"tf",
".",
"split",
"(",
"fused_mask",
",",
"num_or_size_splits",
"=",
"self",
".",
"nb_classes",
",",
"axis",
"=",
"1",
")",
"shape",
"=",
"[",
"m",
"]",
"+",
"[",
"1",
"]",
"*",
"(",
"len",
"(",
"x",
".",
"get_shape",
"(",
")",
")",
"-",
"1",
")",
"reshaped_masks",
"=",
"[",
"tf",
".",
"reshape",
"(",
"mask",
",",
"shape",
")",
"for",
"mask",
"in",
"masks",
"]",
"out",
"=",
"sum",
"(",
"adv_x",
"*",
"rmask",
"for",
"adv_x",
",",
"rmask",
"in",
"zip",
"(",
"adv_x_list",
",",
"reshaped_masks",
")",
")",
"return",
"out"
] |
Runs the untargeted attack.
:param x: The input
:param true_y: The correct label for `x`. This attack aims to produce misclassification.
|
[
"Runs",
"the",
"untargeted",
"attack",
".",
":",
"param",
"x",
":",
"The",
"input",
":",
"param",
"true_y",
":",
"The",
"correct",
"label",
"for",
"x",
".",
"This",
"attack",
"aims",
"to",
"produce",
"misclassification",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/max_confidence.py#L64-L110
|
train
|
tensorflow/cleverhans
|
cleverhans/attacks/max_confidence.py
|
MaxConfidence.attack_class
|
def attack_class(self, x, target_y):
"""
Run the attack on a specific target class.
:param x: tf Tensor. The input example.
:param target_y: tf Tensor. The attacker's desired target class.
Returns:
A targeted adversarial example, intended to be classified as the target class.
"""
adv = self.base_attacker.generate(x, y_target=target_y, **self.params)
return adv
|
python
|
def attack_class(self, x, target_y):
"""
Run the attack on a specific target class.
:param x: tf Tensor. The input example.
:param target_y: tf Tensor. The attacker's desired target class.
Returns:
A targeted adversarial example, intended to be classified as the target class.
"""
adv = self.base_attacker.generate(x, y_target=target_y, **self.params)
return adv
|
[
"def",
"attack_class",
"(",
"self",
",",
"x",
",",
"target_y",
")",
":",
"adv",
"=",
"self",
".",
"base_attacker",
".",
"generate",
"(",
"x",
",",
"y_target",
"=",
"target_y",
",",
"*",
"*",
"self",
".",
"params",
")",
"return",
"adv"
] |
Run the attack on a specific target class.
:param x: tf Tensor. The input example.
:param target_y: tf Tensor. The attacker's desired target class.
Returns:
A targeted adversarial example, intended to be classified as the target class.
|
[
"Run",
"the",
"attack",
"on",
"a",
"specific",
"target",
"class",
".",
":",
"param",
"x",
":",
"tf",
"Tensor",
".",
"The",
"input",
"example",
".",
":",
"param",
"target_y",
":",
"tf",
"Tensor",
".",
"The",
"attacker",
"s",
"desired",
"target",
"class",
".",
"Returns",
":",
"A",
"targeted",
"adversarial",
"example",
"intended",
"to",
"be",
"classified",
"as",
"the",
"target",
"class",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/max_confidence.py#L112-L121
|
train
|
tensorflow/cleverhans
|
cleverhans/utils.py
|
batch_indices
|
def batch_indices(batch_nb, data_length, batch_size):
"""
This helper function computes a batch start and end index
:param batch_nb: the batch number
:param data_length: the total length of the data being parsed by batches
:param batch_size: the number of inputs in each batch
:return: pair of (start, end) indices
"""
# Batch start and end index
start = int(batch_nb * batch_size)
end = int((batch_nb + 1) * batch_size)
# When there are not enough inputs left, we reuse some to complete the
# batch
if end > data_length:
shift = end - data_length
start -= shift
end -= shift
return start, end
|
python
|
def batch_indices(batch_nb, data_length, batch_size):
"""
This helper function computes a batch start and end index
:param batch_nb: the batch number
:param data_length: the total length of the data being parsed by batches
:param batch_size: the number of inputs in each batch
:return: pair of (start, end) indices
"""
# Batch start and end index
start = int(batch_nb * batch_size)
end = int((batch_nb + 1) * batch_size)
# When there are not enough inputs left, we reuse some to complete the
# batch
if end > data_length:
shift = end - data_length
start -= shift
end -= shift
return start, end
|
[
"def",
"batch_indices",
"(",
"batch_nb",
",",
"data_length",
",",
"batch_size",
")",
":",
"# Batch start and end index",
"start",
"=",
"int",
"(",
"batch_nb",
"*",
"batch_size",
")",
"end",
"=",
"int",
"(",
"(",
"batch_nb",
"+",
"1",
")",
"*",
"batch_size",
")",
"# When there are not enough inputs left, we reuse some to complete the",
"# batch",
"if",
"end",
">",
"data_length",
":",
"shift",
"=",
"end",
"-",
"data_length",
"start",
"-=",
"shift",
"end",
"-=",
"shift",
"return",
"start",
",",
"end"
] |
This helper function computes a batch start and end index
:param batch_nb: the batch number
:param data_length: the total length of the data being parsed by batches
:param batch_size: the number of inputs in each batch
:return: pair of (start, end) indices
|
[
"This",
"helper",
"function",
"computes",
"a",
"batch",
"start",
"and",
"end",
"index",
":",
"param",
"batch_nb",
":",
"the",
"batch",
"number",
":",
"param",
"data_length",
":",
"the",
"total",
"length",
"of",
"the",
"data",
"being",
"parsed",
"by",
"batches",
":",
"param",
"batch_size",
":",
"the",
"number",
"of",
"inputs",
"in",
"each",
"batch",
":",
"return",
":",
"pair",
"of",
"(",
"start",
"end",
")",
"indices"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L63-L82
|
train
|
tensorflow/cleverhans
|
cleverhans/utils.py
|
other_classes
|
def other_classes(nb_classes, class_ind):
"""
Returns a list of class indices excluding the class indexed by class_ind
:param nb_classes: number of classes in the task
:param class_ind: the class index to be omitted
:return: list of class indices excluding the class indexed by class_ind
"""
if class_ind < 0 or class_ind >= nb_classes:
error_str = "class_ind must be within the range (0, nb_classes - 1)"
raise ValueError(error_str)
other_classes_list = list(range(nb_classes))
other_classes_list.remove(class_ind)
return other_classes_list
|
python
|
def other_classes(nb_classes, class_ind):
"""
Returns a list of class indices excluding the class indexed by class_ind
:param nb_classes: number of classes in the task
:param class_ind: the class index to be omitted
:return: list of class indices excluding the class indexed by class_ind
"""
if class_ind < 0 or class_ind >= nb_classes:
error_str = "class_ind must be within the range (0, nb_classes - 1)"
raise ValueError(error_str)
other_classes_list = list(range(nb_classes))
other_classes_list.remove(class_ind)
return other_classes_list
|
[
"def",
"other_classes",
"(",
"nb_classes",
",",
"class_ind",
")",
":",
"if",
"class_ind",
"<",
"0",
"or",
"class_ind",
">=",
"nb_classes",
":",
"error_str",
"=",
"\"class_ind must be within the range (0, nb_classes - 1)\"",
"raise",
"ValueError",
"(",
"error_str",
")",
"other_classes_list",
"=",
"list",
"(",
"range",
"(",
"nb_classes",
")",
")",
"other_classes_list",
".",
"remove",
"(",
"class_ind",
")",
"return",
"other_classes_list"
] |
Returns a list of class indices excluding the class indexed by class_ind
:param nb_classes: number of classes in the task
:param class_ind: the class index to be omitted
:return: list of class indices excluding the class indexed by class_ind
|
[
"Returns",
"a",
"list",
"of",
"class",
"indices",
"excluding",
"the",
"class",
"indexed",
"by",
"class_ind",
":",
"param",
"nb_classes",
":",
"number",
"of",
"classes",
"in",
"the",
"task",
":",
"param",
"class_ind",
":",
"the",
"class",
"index",
"to",
"be",
"omitted",
":",
"return",
":",
"list",
"of",
"class",
"indices",
"excluding",
"the",
"class",
"indexed",
"by",
"class_ind"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L85-L99
|
train
|
tensorflow/cleverhans
|
cleverhans/utils.py
|
to_categorical
|
def to_categorical(y, nb_classes, num_classes=None):
"""
Converts a class vector (integers) to binary class matrix.
This is adapted from the Keras function with the same name.
:param y: class vector to be converted into a matrix
(integers from 0 to nb_classes).
:param nb_classes: nb_classes: total number of classes.
:param num_classses: depricated version of nb_classes
:return: A binary matrix representation of the input.
"""
if num_classes is not None:
if nb_classes is not None:
raise ValueError("Should not specify both nb_classes and its deprecated "
"alias, num_classes")
warnings.warn("`num_classes` is deprecated. Switch to `nb_classes`."
" `num_classes` may be removed on or after 2019-04-23.")
nb_classes = num_classes
del num_classes
y = np.array(y, dtype='int').ravel()
n = y.shape[0]
categorical = np.zeros((n, nb_classes))
categorical[np.arange(n), y] = 1
return categorical
|
python
|
def to_categorical(y, nb_classes, num_classes=None):
"""
Converts a class vector (integers) to binary class matrix.
This is adapted from the Keras function with the same name.
:param y: class vector to be converted into a matrix
(integers from 0 to nb_classes).
:param nb_classes: nb_classes: total number of classes.
:param num_classses: depricated version of nb_classes
:return: A binary matrix representation of the input.
"""
if num_classes is not None:
if nb_classes is not None:
raise ValueError("Should not specify both nb_classes and its deprecated "
"alias, num_classes")
warnings.warn("`num_classes` is deprecated. Switch to `nb_classes`."
" `num_classes` may be removed on or after 2019-04-23.")
nb_classes = num_classes
del num_classes
y = np.array(y, dtype='int').ravel()
n = y.shape[0]
categorical = np.zeros((n, nb_classes))
categorical[np.arange(n), y] = 1
return categorical
|
[
"def",
"to_categorical",
"(",
"y",
",",
"nb_classes",
",",
"num_classes",
"=",
"None",
")",
":",
"if",
"num_classes",
"is",
"not",
"None",
":",
"if",
"nb_classes",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"Should not specify both nb_classes and its deprecated \"",
"\"alias, num_classes\"",
")",
"warnings",
".",
"warn",
"(",
"\"`num_classes` is deprecated. Switch to `nb_classes`.\"",
"\" `num_classes` may be removed on or after 2019-04-23.\"",
")",
"nb_classes",
"=",
"num_classes",
"del",
"num_classes",
"y",
"=",
"np",
".",
"array",
"(",
"y",
",",
"dtype",
"=",
"'int'",
")",
".",
"ravel",
"(",
")",
"n",
"=",
"y",
".",
"shape",
"[",
"0",
"]",
"categorical",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"nb_classes",
")",
")",
"categorical",
"[",
"np",
".",
"arange",
"(",
"n",
")",
",",
"y",
"]",
"=",
"1",
"return",
"categorical"
] |
Converts a class vector (integers) to binary class matrix.
This is adapted from the Keras function with the same name.
:param y: class vector to be converted into a matrix
(integers from 0 to nb_classes).
:param nb_classes: nb_classes: total number of classes.
:param num_classses: depricated version of nb_classes
:return: A binary matrix representation of the input.
|
[
"Converts",
"a",
"class",
"vector",
"(",
"integers",
")",
"to",
"binary",
"class",
"matrix",
".",
"This",
"is",
"adapted",
"from",
"the",
"Keras",
"function",
"with",
"the",
"same",
"name",
".",
":",
"param",
"y",
":",
"class",
"vector",
"to",
"be",
"converted",
"into",
"a",
"matrix",
"(",
"integers",
"from",
"0",
"to",
"nb_classes",
")",
".",
":",
"param",
"nb_classes",
":",
"nb_classes",
":",
"total",
"number",
"of",
"classes",
".",
":",
"param",
"num_classses",
":",
"depricated",
"version",
"of",
"nb_classes",
":",
"return",
":",
"A",
"binary",
"matrix",
"representation",
"of",
"the",
"input",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L102-L124
|
train
|
tensorflow/cleverhans
|
cleverhans/utils.py
|
random_targets
|
def random_targets(gt, nb_classes):
"""
Take in an array of correct labels and randomly select a different label
for each label in the array. This is typically used to randomly select a
target class in targeted adversarial examples attacks (i.e., when the
search algorithm takes in both a source class and target class to compute
the adversarial example).
:param gt: the ground truth (correct) labels. They can be provided as a
1D vector or 2D array of one-hot encoded labels.
:param nb_classes: The number of classes for this task. The random class
will be chosen between 0 and nb_classes such that it
is different from the correct class.
:return: A numpy array holding the randomly-selected target classes
encoded as one-hot labels.
"""
# If the ground truth labels are encoded as one-hot, convert to labels.
if len(gt.shape) == 2:
gt = np.argmax(gt, axis=1)
# This vector will hold the randomly selected labels.
result = np.zeros(gt.shape, dtype=np.int32)
for class_ind in xrange(nb_classes):
# Compute all indices in that class.
in_cl = gt == class_ind
size = np.sum(in_cl)
# Compute the set of potential targets for this class.
potential_targets = other_classes(nb_classes, class_ind)
# Draw with replacement random targets among the potential targets.
result[in_cl] = np.random.choice(potential_targets, size=size)
# Encode vector of random labels as one-hot labels.
result = to_categorical(result, nb_classes)
result = result.astype(np.int32)
return result
|
python
|
def random_targets(gt, nb_classes):
"""
Take in an array of correct labels and randomly select a different label
for each label in the array. This is typically used to randomly select a
target class in targeted adversarial examples attacks (i.e., when the
search algorithm takes in both a source class and target class to compute
the adversarial example).
:param gt: the ground truth (correct) labels. They can be provided as a
1D vector or 2D array of one-hot encoded labels.
:param nb_classes: The number of classes for this task. The random class
will be chosen between 0 and nb_classes such that it
is different from the correct class.
:return: A numpy array holding the randomly-selected target classes
encoded as one-hot labels.
"""
# If the ground truth labels are encoded as one-hot, convert to labels.
if len(gt.shape) == 2:
gt = np.argmax(gt, axis=1)
# This vector will hold the randomly selected labels.
result = np.zeros(gt.shape, dtype=np.int32)
for class_ind in xrange(nb_classes):
# Compute all indices in that class.
in_cl = gt == class_ind
size = np.sum(in_cl)
# Compute the set of potential targets for this class.
potential_targets = other_classes(nb_classes, class_ind)
# Draw with replacement random targets among the potential targets.
result[in_cl] = np.random.choice(potential_targets, size=size)
# Encode vector of random labels as one-hot labels.
result = to_categorical(result, nb_classes)
result = result.astype(np.int32)
return result
|
[
"def",
"random_targets",
"(",
"gt",
",",
"nb_classes",
")",
":",
"# If the ground truth labels are encoded as one-hot, convert to labels.",
"if",
"len",
"(",
"gt",
".",
"shape",
")",
"==",
"2",
":",
"gt",
"=",
"np",
".",
"argmax",
"(",
"gt",
",",
"axis",
"=",
"1",
")",
"# This vector will hold the randomly selected labels.",
"result",
"=",
"np",
".",
"zeros",
"(",
"gt",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"for",
"class_ind",
"in",
"xrange",
"(",
"nb_classes",
")",
":",
"# Compute all indices in that class.",
"in_cl",
"=",
"gt",
"==",
"class_ind",
"size",
"=",
"np",
".",
"sum",
"(",
"in_cl",
")",
"# Compute the set of potential targets for this class.",
"potential_targets",
"=",
"other_classes",
"(",
"nb_classes",
",",
"class_ind",
")",
"# Draw with replacement random targets among the potential targets.",
"result",
"[",
"in_cl",
"]",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"potential_targets",
",",
"size",
"=",
"size",
")",
"# Encode vector of random labels as one-hot labels.",
"result",
"=",
"to_categorical",
"(",
"result",
",",
"nb_classes",
")",
"result",
"=",
"result",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"return",
"result"
] |
Take in an array of correct labels and randomly select a different label
for each label in the array. This is typically used to randomly select a
target class in targeted adversarial examples attacks (i.e., when the
search algorithm takes in both a source class and target class to compute
the adversarial example).
:param gt: the ground truth (correct) labels. They can be provided as a
1D vector or 2D array of one-hot encoded labels.
:param nb_classes: The number of classes for this task. The random class
will be chosen between 0 and nb_classes such that it
is different from the correct class.
:return: A numpy array holding the randomly-selected target classes
encoded as one-hot labels.
|
[
"Take",
"in",
"an",
"array",
"of",
"correct",
"labels",
"and",
"randomly",
"select",
"a",
"different",
"label",
"for",
"each",
"label",
"in",
"the",
"array",
".",
"This",
"is",
"typically",
"used",
"to",
"randomly",
"select",
"a",
"target",
"class",
"in",
"targeted",
"adversarial",
"examples",
"attacks",
"(",
"i",
".",
"e",
".",
"when",
"the",
"search",
"algorithm",
"takes",
"in",
"both",
"a",
"source",
"class",
"and",
"target",
"class",
"to",
"compute",
"the",
"adversarial",
"example",
")",
".",
":",
"param",
"gt",
":",
"the",
"ground",
"truth",
"(",
"correct",
")",
"labels",
".",
"They",
"can",
"be",
"provided",
"as",
"a",
"1D",
"vector",
"or",
"2D",
"array",
"of",
"one",
"-",
"hot",
"encoded",
"labels",
".",
":",
"param",
"nb_classes",
":",
"The",
"number",
"of",
"classes",
"for",
"this",
"task",
".",
"The",
"random",
"class",
"will",
"be",
"chosen",
"between",
"0",
"and",
"nb_classes",
"such",
"that",
"it",
"is",
"different",
"from",
"the",
"correct",
"class",
".",
":",
"return",
":",
"A",
"numpy",
"array",
"holding",
"the",
"randomly",
"-",
"selected",
"target",
"classes",
"encoded",
"as",
"one",
"-",
"hot",
"labels",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L127-L164
|
train
|
tensorflow/cleverhans
|
cleverhans/utils.py
|
pair_visual
|
def pair_visual(*args, **kwargs):
"""Deprecation wrapper"""
warnings.warn("`pair_visual` has moved to `cleverhans.plot.pyplot_image`. "
"cleverhans.utils.pair_visual may be removed on or after "
"2019-04-24.")
from cleverhans.plot.pyplot_image import pair_visual as new_pair_visual
return new_pair_visual(*args, **kwargs)
|
python
|
def pair_visual(*args, **kwargs):
"""Deprecation wrapper"""
warnings.warn("`pair_visual` has moved to `cleverhans.plot.pyplot_image`. "
"cleverhans.utils.pair_visual may be removed on or after "
"2019-04-24.")
from cleverhans.plot.pyplot_image import pair_visual as new_pair_visual
return new_pair_visual(*args, **kwargs)
|
[
"def",
"pair_visual",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"`pair_visual` has moved to `cleverhans.plot.pyplot_image`. \"",
"\"cleverhans.utils.pair_visual may be removed on or after \"",
"\"2019-04-24.\"",
")",
"from",
"cleverhans",
".",
"plot",
".",
"pyplot_image",
"import",
"pair_visual",
"as",
"new_pair_visual",
"return",
"new_pair_visual",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Deprecation wrapper
|
[
"Deprecation",
"wrapper"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L167-L173
|
train
|
tensorflow/cleverhans
|
cleverhans/utils.py
|
grid_visual
|
def grid_visual(*args, **kwargs):
"""Deprecation wrapper"""
warnings.warn("`grid_visual` has moved to `cleverhans.plot.pyplot_image`. "
"cleverhans.utils.grid_visual may be removed on or after "
"2019-04-24.")
from cleverhans.plot.pyplot_image import grid_visual as new_grid_visual
return new_grid_visual(*args, **kwargs)
|
python
|
def grid_visual(*args, **kwargs):
"""Deprecation wrapper"""
warnings.warn("`grid_visual` has moved to `cleverhans.plot.pyplot_image`. "
"cleverhans.utils.grid_visual may be removed on or after "
"2019-04-24.")
from cleverhans.plot.pyplot_image import grid_visual as new_grid_visual
return new_grid_visual(*args, **kwargs)
|
[
"def",
"grid_visual",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"`grid_visual` has moved to `cleverhans.plot.pyplot_image`. \"",
"\"cleverhans.utils.grid_visual may be removed on or after \"",
"\"2019-04-24.\"",
")",
"from",
"cleverhans",
".",
"plot",
".",
"pyplot_image",
"import",
"grid_visual",
"as",
"new_grid_visual",
"return",
"new_grid_visual",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Deprecation wrapper
|
[
"Deprecation",
"wrapper"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L176-L182
|
train
|
tensorflow/cleverhans
|
cleverhans/utils.py
|
get_logits_over_interval
|
def get_logits_over_interval(*args, **kwargs):
"""Deprecation wrapper"""
warnings.warn("`get_logits_over_interval` has moved to "
"`cleverhans.plot.pyplot_image`. "
"cleverhans.utils.get_logits_over_interval may be removed on "
"or after 2019-04-24.")
# pylint:disable=line-too-long
from cleverhans.plot.pyplot_image import get_logits_over_interval as new_get_logits_over_interval
return new_get_logits_over_interval(*args, **kwargs)
|
python
|
def get_logits_over_interval(*args, **kwargs):
"""Deprecation wrapper"""
warnings.warn("`get_logits_over_interval` has moved to "
"`cleverhans.plot.pyplot_image`. "
"cleverhans.utils.get_logits_over_interval may be removed on "
"or after 2019-04-24.")
# pylint:disable=line-too-long
from cleverhans.plot.pyplot_image import get_logits_over_interval as new_get_logits_over_interval
return new_get_logits_over_interval(*args, **kwargs)
|
[
"def",
"get_logits_over_interval",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"`get_logits_over_interval` has moved to \"",
"\"`cleverhans.plot.pyplot_image`. \"",
"\"cleverhans.utils.get_logits_over_interval may be removed on \"",
"\"or after 2019-04-24.\"",
")",
"# pylint:disable=line-too-long",
"from",
"cleverhans",
".",
"plot",
".",
"pyplot_image",
"import",
"get_logits_over_interval",
"as",
"new_get_logits_over_interval",
"return",
"new_get_logits_over_interval",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Deprecation wrapper
|
[
"Deprecation",
"wrapper"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L185-L193
|
train
|
tensorflow/cleverhans
|
cleverhans/utils.py
|
linear_extrapolation_plot
|
def linear_extrapolation_plot(*args, **kwargs):
"""Deprecation wrapper"""
warnings.warn("`linear_extrapolation_plot` has moved to "
"`cleverhans.plot.pyplot_image`. "
"cleverhans.utils.linear_extrapolation_plot may be removed on "
"or after 2019-04-24.")
# pylint:disable=line-too-long
from cleverhans.plot.pyplot_image import linear_extrapolation_plot as new_linear_extrapolation_plot
return new_linear_extrapolation_plot(*args, **kwargs)
|
python
|
def linear_extrapolation_plot(*args, **kwargs):
"""Deprecation wrapper"""
warnings.warn("`linear_extrapolation_plot` has moved to "
"`cleverhans.plot.pyplot_image`. "
"cleverhans.utils.linear_extrapolation_plot may be removed on "
"or after 2019-04-24.")
# pylint:disable=line-too-long
from cleverhans.plot.pyplot_image import linear_extrapolation_plot as new_linear_extrapolation_plot
return new_linear_extrapolation_plot(*args, **kwargs)
|
[
"def",
"linear_extrapolation_plot",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"`linear_extrapolation_plot` has moved to \"",
"\"`cleverhans.plot.pyplot_image`. \"",
"\"cleverhans.utils.linear_extrapolation_plot may be removed on \"",
"\"or after 2019-04-24.\"",
")",
"# pylint:disable=line-too-long",
"from",
"cleverhans",
".",
"plot",
".",
"pyplot_image",
"import",
"linear_extrapolation_plot",
"as",
"new_linear_extrapolation_plot",
"return",
"new_linear_extrapolation_plot",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Deprecation wrapper
|
[
"Deprecation",
"wrapper"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L196-L204
|
train
|
tensorflow/cleverhans
|
cleverhans/utils.py
|
create_logger
|
def create_logger(name):
"""
Create a logger object with the given name.
If this is the first time that we call this method, then initialize the
formatter.
"""
base = logging.getLogger("cleverhans")
if len(base.handlers) == 0:
ch = logging.StreamHandler()
formatter = logging.Formatter('[%(levelname)s %(asctime)s %(name)s] ' +
'%(message)s')
ch.setFormatter(formatter)
base.addHandler(ch)
return base
|
python
|
def create_logger(name):
"""
Create a logger object with the given name.
If this is the first time that we call this method, then initialize the
formatter.
"""
base = logging.getLogger("cleverhans")
if len(base.handlers) == 0:
ch = logging.StreamHandler()
formatter = logging.Formatter('[%(levelname)s %(asctime)s %(name)s] ' +
'%(message)s')
ch.setFormatter(formatter)
base.addHandler(ch)
return base
|
[
"def",
"create_logger",
"(",
"name",
")",
":",
"base",
"=",
"logging",
".",
"getLogger",
"(",
"\"cleverhans\"",
")",
"if",
"len",
"(",
"base",
".",
"handlers",
")",
"==",
"0",
":",
"ch",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"'[%(levelname)s %(asctime)s %(name)s] '",
"+",
"'%(message)s'",
")",
"ch",
".",
"setFormatter",
"(",
"formatter",
")",
"base",
".",
"addHandler",
"(",
"ch",
")",
"return",
"base"
] |
Create a logger object with the given name.
If this is the first time that we call this method, then initialize the
formatter.
|
[
"Create",
"a",
"logger",
"object",
"with",
"the",
"given",
"name",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L247-L262
|
train
|
tensorflow/cleverhans
|
cleverhans/utils.py
|
deterministic_dict
|
def deterministic_dict(normal_dict):
"""
Returns a version of `normal_dict` whose iteration order is always the same
"""
out = OrderedDict()
for key in sorted(normal_dict.keys()):
out[key] = normal_dict[key]
return out
|
python
|
def deterministic_dict(normal_dict):
"""
Returns a version of `normal_dict` whose iteration order is always the same
"""
out = OrderedDict()
for key in sorted(normal_dict.keys()):
out[key] = normal_dict[key]
return out
|
[
"def",
"deterministic_dict",
"(",
"normal_dict",
")",
":",
"out",
"=",
"OrderedDict",
"(",
")",
"for",
"key",
"in",
"sorted",
"(",
"normal_dict",
".",
"keys",
"(",
")",
")",
":",
"out",
"[",
"key",
"]",
"=",
"normal_dict",
"[",
"key",
"]",
"return",
"out"
] |
Returns a version of `normal_dict` whose iteration order is always the same
|
[
"Returns",
"a",
"version",
"of",
"normal_dict",
"whose",
"iteration",
"order",
"is",
"always",
"the",
"same"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L265-L272
|
train
|
tensorflow/cleverhans
|
cleverhans/utils.py
|
ordered_union
|
def ordered_union(l1, l2):
"""
Return the union of l1 and l2, with a deterministic ordering.
(Union of python sets does not necessarily have a consisten iteration
order)
:param l1: list of items
:param l2: list of items
:returns: list containing one copy of each item that is in l1 or in l2
"""
out = []
for e in l1 + l2:
if e not in out:
out.append(e)
return out
|
python
|
def ordered_union(l1, l2):
"""
Return the union of l1 and l2, with a deterministic ordering.
(Union of python sets does not necessarily have a consisten iteration
order)
:param l1: list of items
:param l2: list of items
:returns: list containing one copy of each item that is in l1 or in l2
"""
out = []
for e in l1 + l2:
if e not in out:
out.append(e)
return out
|
[
"def",
"ordered_union",
"(",
"l1",
",",
"l2",
")",
":",
"out",
"=",
"[",
"]",
"for",
"e",
"in",
"l1",
"+",
"l2",
":",
"if",
"e",
"not",
"in",
"out",
":",
"out",
".",
"append",
"(",
"e",
")",
"return",
"out"
] |
Return the union of l1 and l2, with a deterministic ordering.
(Union of python sets does not necessarily have a consisten iteration
order)
:param l1: list of items
:param l2: list of items
:returns: list containing one copy of each item that is in l1 or in l2
|
[
"Return",
"the",
"union",
"of",
"l1",
"and",
"l2",
"with",
"a",
"deterministic",
"ordering",
".",
"(",
"Union",
"of",
"python",
"sets",
"does",
"not",
"necessarily",
"have",
"a",
"consisten",
"iteration",
"order",
")",
":",
"param",
"l1",
":",
"list",
"of",
"items",
":",
"param",
"l2",
":",
"list",
"of",
"items",
":",
"returns",
":",
"list",
"containing",
"one",
"copy",
"of",
"each",
"item",
"that",
"is",
"in",
"l1",
"or",
"in",
"l2"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L275-L288
|
train
|
tensorflow/cleverhans
|
cleverhans/utils.py
|
safe_zip
|
def safe_zip(*args):
"""like zip but with these properties:
- returns a list, rather than an iterator. This is the old Python2 zip behavior.
- a guarantee that all arguments are the same length.
(normal zip silently drops entries to make them the same length)
"""
length = len(args[0])
if not all(len(arg) == length for arg in args):
raise ValueError("Lengths of arguments do not match: "
+ str([len(arg) for arg in args]))
return list(zip(*args))
|
python
|
def safe_zip(*args):
"""like zip but with these properties:
- returns a list, rather than an iterator. This is the old Python2 zip behavior.
- a guarantee that all arguments are the same length.
(normal zip silently drops entries to make them the same length)
"""
length = len(args[0])
if not all(len(arg) == length for arg in args):
raise ValueError("Lengths of arguments do not match: "
+ str([len(arg) for arg in args]))
return list(zip(*args))
|
[
"def",
"safe_zip",
"(",
"*",
"args",
")",
":",
"length",
"=",
"len",
"(",
"args",
"[",
"0",
"]",
")",
"if",
"not",
"all",
"(",
"len",
"(",
"arg",
")",
"==",
"length",
"for",
"arg",
"in",
"args",
")",
":",
"raise",
"ValueError",
"(",
"\"Lengths of arguments do not match: \"",
"+",
"str",
"(",
"[",
"len",
"(",
"arg",
")",
"for",
"arg",
"in",
"args",
"]",
")",
")",
"return",
"list",
"(",
"zip",
"(",
"*",
"args",
")",
")"
] |
like zip but with these properties:
- returns a list, rather than an iterator. This is the old Python2 zip behavior.
- a guarantee that all arguments are the same length.
(normal zip silently drops entries to make them the same length)
|
[
"like",
"zip",
"but",
"with",
"these",
"properties",
":",
"-",
"returns",
"a",
"list",
"rather",
"than",
"an",
"iterator",
".",
"This",
"is",
"the",
"old",
"Python2",
"zip",
"behavior",
".",
"-",
"a",
"guarantee",
"that",
"all",
"arguments",
"are",
"the",
"same",
"length",
".",
"(",
"normal",
"zip",
"silently",
"drops",
"entries",
"to",
"make",
"them",
"the",
"same",
"length",
")"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L291-L301
|
train
|
tensorflow/cleverhans
|
cleverhans/utils.py
|
shell_call
|
def shell_call(command, **kwargs):
"""Calls shell command with argument substitution.
Args:
command: command represented as a list. Each element of the list is one
token of the command. For example "cp a b" becomes ['cp', 'a', 'b']
If any element of the list looks like '${NAME}' then it will be replaced
by value from **kwargs with key 'NAME'.
**kwargs: dictionary with argument substitution
Returns:
output of the command
Raises:
subprocess.CalledProcessError if command return value is not zero
This function is useful when you need to do variable substitution prior
running the command. Below are few examples of how it works:
shell_call(['cp', 'a', 'b'], a='asd') calls command 'cp a b'
shell_call(['cp', '${a}', 'b'], a='asd') calls command 'cp asd b',
'${a}; was replaced with 'asd' before calling the command
"""
# Regular expression to find instances of '${NAME}' in a string
CMD_VARIABLE_RE = re.compile('^\\$\\{(\\w+)\\}$')
command = list(command)
for i in range(len(command)):
m = CMD_VARIABLE_RE.match(command[i])
if m:
var_id = m.group(1)
if var_id in kwargs:
command[i] = kwargs[var_id]
str_command = ' '.join(command)
logging.debug('Executing shell command: %s' % str_command)
return subprocess.check_output(command)
|
python
|
def shell_call(command, **kwargs):
"""Calls shell command with argument substitution.
Args:
command: command represented as a list. Each element of the list is one
token of the command. For example "cp a b" becomes ['cp', 'a', 'b']
If any element of the list looks like '${NAME}' then it will be replaced
by value from **kwargs with key 'NAME'.
**kwargs: dictionary with argument substitution
Returns:
output of the command
Raises:
subprocess.CalledProcessError if command return value is not zero
This function is useful when you need to do variable substitution prior
running the command. Below are few examples of how it works:
shell_call(['cp', 'a', 'b'], a='asd') calls command 'cp a b'
shell_call(['cp', '${a}', 'b'], a='asd') calls command 'cp asd b',
'${a}; was replaced with 'asd' before calling the command
"""
# Regular expression to find instances of '${NAME}' in a string
CMD_VARIABLE_RE = re.compile('^\\$\\{(\\w+)\\}$')
command = list(command)
for i in range(len(command)):
m = CMD_VARIABLE_RE.match(command[i])
if m:
var_id = m.group(1)
if var_id in kwargs:
command[i] = kwargs[var_id]
str_command = ' '.join(command)
logging.debug('Executing shell command: %s' % str_command)
return subprocess.check_output(command)
|
[
"def",
"shell_call",
"(",
"command",
",",
"*",
"*",
"kwargs",
")",
":",
"# Regular expression to find instances of '${NAME}' in a string",
"CMD_VARIABLE_RE",
"=",
"re",
".",
"compile",
"(",
"'^\\\\$\\\\{(\\\\w+)\\\\}$'",
")",
"command",
"=",
"list",
"(",
"command",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"command",
")",
")",
":",
"m",
"=",
"CMD_VARIABLE_RE",
".",
"match",
"(",
"command",
"[",
"i",
"]",
")",
"if",
"m",
":",
"var_id",
"=",
"m",
".",
"group",
"(",
"1",
")",
"if",
"var_id",
"in",
"kwargs",
":",
"command",
"[",
"i",
"]",
"=",
"kwargs",
"[",
"var_id",
"]",
"str_command",
"=",
"' '",
".",
"join",
"(",
"command",
")",
"logging",
".",
"debug",
"(",
"'Executing shell command: %s'",
"%",
"str_command",
")",
"return",
"subprocess",
".",
"check_output",
"(",
"command",
")"
] |
Calls shell command with argument substitution.
Args:
command: command represented as a list. Each element of the list is one
token of the command. For example "cp a b" becomes ['cp', 'a', 'b']
If any element of the list looks like '${NAME}' then it will be replaced
by value from **kwargs with key 'NAME'.
**kwargs: dictionary with argument substitution
Returns:
output of the command
Raises:
subprocess.CalledProcessError if command return value is not zero
This function is useful when you need to do variable substitution prior
running the command. Below are few examples of how it works:
shell_call(['cp', 'a', 'b'], a='asd') calls command 'cp a b'
shell_call(['cp', '${a}', 'b'], a='asd') calls command 'cp asd b',
'${a}; was replaced with 'asd' before calling the command
|
[
"Calls",
"shell",
"command",
"with",
"argument",
"substitution",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L304-L339
|
train
|
tensorflow/cleverhans
|
cleverhans/utils.py
|
deep_copy
|
def deep_copy(numpy_dict):
"""
Returns a copy of a dictionary whose values are numpy arrays.
Copies their values rather than copying references to them.
"""
out = {}
for key in numpy_dict:
out[key] = numpy_dict[key].copy()
return out
|
python
|
def deep_copy(numpy_dict):
"""
Returns a copy of a dictionary whose values are numpy arrays.
Copies their values rather than copying references to them.
"""
out = {}
for key in numpy_dict:
out[key] = numpy_dict[key].copy()
return out
|
[
"def",
"deep_copy",
"(",
"numpy_dict",
")",
":",
"out",
"=",
"{",
"}",
"for",
"key",
"in",
"numpy_dict",
":",
"out",
"[",
"key",
"]",
"=",
"numpy_dict",
"[",
"key",
"]",
".",
"copy",
"(",
")",
"return",
"out"
] |
Returns a copy of a dictionary whose values are numpy arrays.
Copies their values rather than copying references to them.
|
[
"Returns",
"a",
"copy",
"of",
"a",
"dictionary",
"whose",
"values",
"are",
"numpy",
"arrays",
".",
"Copies",
"their",
"values",
"rather",
"than",
"copying",
"references",
"to",
"them",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L341-L349
|
train
|
tensorflow/cleverhans
|
cleverhans/dataset.py
|
data_mnist
|
def data_mnist(datadir=tempfile.gettempdir(), train_start=0,
train_end=60000, test_start=0, test_end=10000):
"""
Load and preprocess MNIST dataset
:param datadir: path to folder where data should be stored
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:return: tuple of four arrays containing training data, training labels,
testing data and testing labels.
"""
assert isinstance(train_start, int)
assert isinstance(train_end, int)
assert isinstance(test_start, int)
assert isinstance(test_end, int)
X_train = download_and_parse_mnist_file(
'train-images-idx3-ubyte.gz', datadir=datadir) / 255.
Y_train = download_and_parse_mnist_file(
'train-labels-idx1-ubyte.gz', datadir=datadir)
X_test = download_and_parse_mnist_file(
't10k-images-idx3-ubyte.gz', datadir=datadir) / 255.
Y_test = download_and_parse_mnist_file(
't10k-labels-idx1-ubyte.gz', datadir=datadir)
X_train = np.expand_dims(X_train, -1)
X_test = np.expand_dims(X_test, -1)
X_train = X_train[train_start:train_end]
Y_train = Y_train[train_start:train_end]
X_test = X_test[test_start:test_end]
Y_test = Y_test[test_start:test_end]
Y_train = utils.to_categorical(Y_train, nb_classes=10)
Y_test = utils.to_categorical(Y_test, nb_classes=10)
return X_train, Y_train, X_test, Y_test
|
python
|
def data_mnist(datadir=tempfile.gettempdir(), train_start=0,
train_end=60000, test_start=0, test_end=10000):
"""
Load and preprocess MNIST dataset
:param datadir: path to folder where data should be stored
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:return: tuple of four arrays containing training data, training labels,
testing data and testing labels.
"""
assert isinstance(train_start, int)
assert isinstance(train_end, int)
assert isinstance(test_start, int)
assert isinstance(test_end, int)
X_train = download_and_parse_mnist_file(
'train-images-idx3-ubyte.gz', datadir=datadir) / 255.
Y_train = download_and_parse_mnist_file(
'train-labels-idx1-ubyte.gz', datadir=datadir)
X_test = download_and_parse_mnist_file(
't10k-images-idx3-ubyte.gz', datadir=datadir) / 255.
Y_test = download_and_parse_mnist_file(
't10k-labels-idx1-ubyte.gz', datadir=datadir)
X_train = np.expand_dims(X_train, -1)
X_test = np.expand_dims(X_test, -1)
X_train = X_train[train_start:train_end]
Y_train = Y_train[train_start:train_end]
X_test = X_test[test_start:test_end]
Y_test = Y_test[test_start:test_end]
Y_train = utils.to_categorical(Y_train, nb_classes=10)
Y_test = utils.to_categorical(Y_test, nb_classes=10)
return X_train, Y_train, X_test, Y_test
|
[
"def",
"data_mnist",
"(",
"datadir",
"=",
"tempfile",
".",
"gettempdir",
"(",
")",
",",
"train_start",
"=",
"0",
",",
"train_end",
"=",
"60000",
",",
"test_start",
"=",
"0",
",",
"test_end",
"=",
"10000",
")",
":",
"assert",
"isinstance",
"(",
"train_start",
",",
"int",
")",
"assert",
"isinstance",
"(",
"train_end",
",",
"int",
")",
"assert",
"isinstance",
"(",
"test_start",
",",
"int",
")",
"assert",
"isinstance",
"(",
"test_end",
",",
"int",
")",
"X_train",
"=",
"download_and_parse_mnist_file",
"(",
"'train-images-idx3-ubyte.gz'",
",",
"datadir",
"=",
"datadir",
")",
"/",
"255.",
"Y_train",
"=",
"download_and_parse_mnist_file",
"(",
"'train-labels-idx1-ubyte.gz'",
",",
"datadir",
"=",
"datadir",
")",
"X_test",
"=",
"download_and_parse_mnist_file",
"(",
"'t10k-images-idx3-ubyte.gz'",
",",
"datadir",
"=",
"datadir",
")",
"/",
"255.",
"Y_test",
"=",
"download_and_parse_mnist_file",
"(",
"'t10k-labels-idx1-ubyte.gz'",
",",
"datadir",
"=",
"datadir",
")",
"X_train",
"=",
"np",
".",
"expand_dims",
"(",
"X_train",
",",
"-",
"1",
")",
"X_test",
"=",
"np",
".",
"expand_dims",
"(",
"X_test",
",",
"-",
"1",
")",
"X_train",
"=",
"X_train",
"[",
"train_start",
":",
"train_end",
"]",
"Y_train",
"=",
"Y_train",
"[",
"train_start",
":",
"train_end",
"]",
"X_test",
"=",
"X_test",
"[",
"test_start",
":",
"test_end",
"]",
"Y_test",
"=",
"Y_test",
"[",
"test_start",
":",
"test_end",
"]",
"Y_train",
"=",
"utils",
".",
"to_categorical",
"(",
"Y_train",
",",
"nb_classes",
"=",
"10",
")",
"Y_test",
"=",
"utils",
".",
"to_categorical",
"(",
"Y_test",
",",
"nb_classes",
"=",
"10",
")",
"return",
"X_train",
",",
"Y_train",
",",
"X_test",
",",
"Y_test"
] |
Load and preprocess MNIST dataset
:param datadir: path to folder where data should be stored
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:return: tuple of four arrays containing training data, training labels,
testing data and testing labels.
|
[
"Load",
"and",
"preprocess",
"MNIST",
"dataset",
":",
"param",
"datadir",
":",
"path",
"to",
"folder",
"where",
"data",
"should",
"be",
"stored",
":",
"param",
"train_start",
":",
"index",
"of",
"first",
"training",
"set",
"example",
":",
"param",
"train_end",
":",
"index",
"of",
"last",
"training",
"set",
"example",
":",
"param",
"test_start",
":",
"index",
"of",
"first",
"test",
"set",
"example",
":",
"param",
"test_end",
":",
"index",
"of",
"last",
"test",
"set",
"example",
":",
"return",
":",
"tuple",
"of",
"four",
"arrays",
"containing",
"training",
"data",
"training",
"labels",
"testing",
"data",
"and",
"testing",
"labels",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/dataset.py#L230-L266
|
train
|
tensorflow/cleverhans
|
cleverhans/dataset.py
|
data_cifar10
|
def data_cifar10(train_start=0, train_end=50000, test_start=0, test_end=10000):
"""
Preprocess CIFAR10 dataset
:return:
"""
# These values are specific to CIFAR10
img_rows = 32
img_cols = 32
nb_classes = 10
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
if tf.keras.backend.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
x_train = x_train[train_start:train_end, :, :, :]
y_train = y_train[train_start:train_end, :]
x_test = x_test[test_start:test_end, :]
y_test = y_test[test_start:test_end, :]
return x_train, y_train, x_test, y_test
|
python
|
def data_cifar10(train_start=0, train_end=50000, test_start=0, test_end=10000):
"""
Preprocess CIFAR10 dataset
:return:
"""
# These values are specific to CIFAR10
img_rows = 32
img_cols = 32
nb_classes = 10
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
if tf.keras.backend.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
x_train = x_train[train_start:train_end, :, :, :]
y_train = y_train[train_start:train_end, :]
x_test = x_test[test_start:test_end, :]
y_test = y_test[test_start:test_end, :]
return x_train, y_train, x_test, y_test
|
[
"def",
"data_cifar10",
"(",
"train_start",
"=",
"0",
",",
"train_end",
"=",
"50000",
",",
"test_start",
"=",
"0",
",",
"test_end",
"=",
"10000",
")",
":",
"# These values are specific to CIFAR10",
"img_rows",
"=",
"32",
"img_cols",
"=",
"32",
"nb_classes",
"=",
"10",
"# the data, shuffled and split between train and test sets",
"(",
"x_train",
",",
"y_train",
")",
",",
"(",
"x_test",
",",
"y_test",
")",
"=",
"cifar10",
".",
"load_data",
"(",
")",
"if",
"tf",
".",
"keras",
".",
"backend",
".",
"image_data_format",
"(",
")",
"==",
"'channels_first'",
":",
"x_train",
"=",
"x_train",
".",
"reshape",
"(",
"x_train",
".",
"shape",
"[",
"0",
"]",
",",
"3",
",",
"img_rows",
",",
"img_cols",
")",
"x_test",
"=",
"x_test",
".",
"reshape",
"(",
"x_test",
".",
"shape",
"[",
"0",
"]",
",",
"3",
",",
"img_rows",
",",
"img_cols",
")",
"else",
":",
"x_train",
"=",
"x_train",
".",
"reshape",
"(",
"x_train",
".",
"shape",
"[",
"0",
"]",
",",
"img_rows",
",",
"img_cols",
",",
"3",
")",
"x_test",
"=",
"x_test",
".",
"reshape",
"(",
"x_test",
".",
"shape",
"[",
"0",
"]",
",",
"img_rows",
",",
"img_cols",
",",
"3",
")",
"x_train",
"=",
"x_train",
".",
"astype",
"(",
"'float32'",
")",
"x_test",
"=",
"x_test",
".",
"astype",
"(",
"'float32'",
")",
"x_train",
"/=",
"255",
"x_test",
"/=",
"255",
"print",
"(",
"'x_train shape:'",
",",
"x_train",
".",
"shape",
")",
"print",
"(",
"x_train",
".",
"shape",
"[",
"0",
"]",
",",
"'train samples'",
")",
"print",
"(",
"x_test",
".",
"shape",
"[",
"0",
"]",
",",
"'test samples'",
")",
"# convert class vectors to binary class matrices",
"y_train",
"=",
"np_utils",
".",
"to_categorical",
"(",
"y_train",
",",
"nb_classes",
")",
"y_test",
"=",
"np_utils",
".",
"to_categorical",
"(",
"y_test",
",",
"nb_classes",
")",
"x_train",
"=",
"x_train",
"[",
"train_start",
":",
"train_end",
",",
":",
",",
":",
",",
":",
"]",
"y_train",
"=",
"y_train",
"[",
"train_start",
":",
"train_end",
",",
":",
"]",
"x_test",
"=",
"x_test",
"[",
"test_start",
":",
"test_end",
",",
":",
"]",
"y_test",
"=",
"y_test",
"[",
"test_start",
":",
"test_end",
",",
":",
"]",
"return",
"x_train",
",",
"y_train",
",",
"x_test",
",",
"y_test"
] |
Preprocess CIFAR10 dataset
:return:
|
[
"Preprocess",
"CIFAR10",
"dataset",
":",
"return",
":"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/dataset.py#L269-L307
|
train
|
tensorflow/cleverhans
|
scripts/compute_accuracy.py
|
print_accuracies
|
def print_accuracies(filepath, train_start=TRAIN_START, train_end=TRAIN_END,
test_start=TEST_START, test_end=TEST_END,
batch_size=BATCH_SIZE, which_set=WHICH_SET,
base_eps_iter=BASE_EPS_ITER,
nb_iter=NB_ITER):
"""
Load a saved model and print out its accuracy on different data distributions
This function works by running a single attack on each example.
This provides a reasonable estimate of the true failure rate quickly, so
long as the model does not suffer from gradient masking.
However, this estimate is mostly intended for development work and not
for publication. A more accurate estimate may be obtained by running
an attack bundler instead.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param batch_size: size of evaluation batches
:param which_set: 'train' or 'test'
:param base_eps_iter: step size if the data were in [0,1]
(Step size will be rescaled proportional to the actual data range)
:param nb_iter: Number of iterations of PGD to run per class
"""
# Set TF random seed to improve reproducibility
tf.set_random_seed(20181014)
set_log_level(logging.INFO)
sess = tf.Session()
with sess.as_default():
model = load(filepath)
assert len(model.get_params()) > 0
factory = model.dataset_factory
factory.kwargs['train_start'] = train_start
factory.kwargs['train_end'] = train_end
factory.kwargs['test_start'] = test_start
factory.kwargs['test_end'] = test_end
dataset = factory()
x_data, y_data = dataset.get_set(which_set)
impl(sess, model, dataset, factory, x_data, y_data, base_eps_iter, nb_iter)
|
python
|
def print_accuracies(filepath, train_start=TRAIN_START, train_end=TRAIN_END,
test_start=TEST_START, test_end=TEST_END,
batch_size=BATCH_SIZE, which_set=WHICH_SET,
base_eps_iter=BASE_EPS_ITER,
nb_iter=NB_ITER):
"""
Load a saved model and print out its accuracy on different data distributions
This function works by running a single attack on each example.
This provides a reasonable estimate of the true failure rate quickly, so
long as the model does not suffer from gradient masking.
However, this estimate is mostly intended for development work and not
for publication. A more accurate estimate may be obtained by running
an attack bundler instead.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param batch_size: size of evaluation batches
:param which_set: 'train' or 'test'
:param base_eps_iter: step size if the data were in [0,1]
(Step size will be rescaled proportional to the actual data range)
:param nb_iter: Number of iterations of PGD to run per class
"""
# Set TF random seed to improve reproducibility
tf.set_random_seed(20181014)
set_log_level(logging.INFO)
sess = tf.Session()
with sess.as_default():
model = load(filepath)
assert len(model.get_params()) > 0
factory = model.dataset_factory
factory.kwargs['train_start'] = train_start
factory.kwargs['train_end'] = train_end
factory.kwargs['test_start'] = test_start
factory.kwargs['test_end'] = test_end
dataset = factory()
x_data, y_data = dataset.get_set(which_set)
impl(sess, model, dataset, factory, x_data, y_data, base_eps_iter, nb_iter)
|
[
"def",
"print_accuracies",
"(",
"filepath",
",",
"train_start",
"=",
"TRAIN_START",
",",
"train_end",
"=",
"TRAIN_END",
",",
"test_start",
"=",
"TEST_START",
",",
"test_end",
"=",
"TEST_END",
",",
"batch_size",
"=",
"BATCH_SIZE",
",",
"which_set",
"=",
"WHICH_SET",
",",
"base_eps_iter",
"=",
"BASE_EPS_ITER",
",",
"nb_iter",
"=",
"NB_ITER",
")",
":",
"# Set TF random seed to improve reproducibility",
"tf",
".",
"set_random_seed",
"(",
"20181014",
")",
"set_log_level",
"(",
"logging",
".",
"INFO",
")",
"sess",
"=",
"tf",
".",
"Session",
"(",
")",
"with",
"sess",
".",
"as_default",
"(",
")",
":",
"model",
"=",
"load",
"(",
"filepath",
")",
"assert",
"len",
"(",
"model",
".",
"get_params",
"(",
")",
")",
">",
"0",
"factory",
"=",
"model",
".",
"dataset_factory",
"factory",
".",
"kwargs",
"[",
"'train_start'",
"]",
"=",
"train_start",
"factory",
".",
"kwargs",
"[",
"'train_end'",
"]",
"=",
"train_end",
"factory",
".",
"kwargs",
"[",
"'test_start'",
"]",
"=",
"test_start",
"factory",
".",
"kwargs",
"[",
"'test_end'",
"]",
"=",
"test_end",
"dataset",
"=",
"factory",
"(",
")",
"x_data",
",",
"y_data",
"=",
"dataset",
".",
"get_set",
"(",
"which_set",
")",
"impl",
"(",
"sess",
",",
"model",
",",
"dataset",
",",
"factory",
",",
"x_data",
",",
"y_data",
",",
"base_eps_iter",
",",
"nb_iter",
")"
] |
Load a saved model and print out its accuracy on different data distributions
This function works by running a single attack on each example.
This provides a reasonable estimate of the true failure rate quickly, so
long as the model does not suffer from gradient masking.
However, this estimate is mostly intended for development work and not
for publication. A more accurate estimate may be obtained by running
an attack bundler instead.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param batch_size: size of evaluation batches
:param which_set: 'train' or 'test'
:param base_eps_iter: step size if the data were in [0,1]
(Step size will be rescaled proportional to the actual data range)
:param nb_iter: Number of iterations of PGD to run per class
|
[
"Load",
"a",
"saved",
"model",
"and",
"print",
"out",
"its",
"accuracy",
"on",
"different",
"data",
"distributions"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/scripts/compute_accuracy.py#L53-L98
|
train
|
tensorflow/cleverhans
|
scripts/compute_accuracy.py
|
impl
|
def impl(sess, model, dataset, factory, x_data, y_data,
base_eps_iter=BASE_EPS_ITER, nb_iter=NB_ITER,
batch_size=BATCH_SIZE):
"""
The actual implementation of the evaluation.
:param sess: tf.Session
:param model: cleverhans.model.Model
:param dataset: cleverhans.dataset.Dataset
:param factory: the dataset factory corresponding to `dataset`
:param x_data: numpy array of input examples
:param y_data: numpy array of class labels
:param base_eps_iter: step size for PGD if data were in [0, 1]
:param nb_iter: number of PGD iterations
:returns: dict mapping string adversarial example names to accuracies
"""
center = dataset.kwargs['center']
max_val = dataset.kwargs['max_val']
value_range = max_val * (1. + center)
min_value = 0. - center * max_val
if 'CIFAR' in str(factory.cls):
base_eps = 8. / 255.
if base_eps_iter is None:
base_eps_iter = 2. / 255.
elif 'MNIST' in str(factory.cls):
base_eps = .3
if base_eps_iter is None:
base_eps_iter = .1
else:
raise NotImplementedError(str(factory.cls))
pgd_params = {'eps': base_eps * value_range,
'eps_iter': base_eps_iter * value_range,
'nb_iter': nb_iter,
'clip_min': min_value,
'clip_max': max_val}
semantic = Semantic(model, center, max_val, sess)
pgd = ProjectedGradientDescent(model, sess=sess)
jobs = [('clean', None, None, None),
('Semantic', semantic, None, None),
('pgd', pgd, pgd_params, None)]
out = {}
for job in jobs:
name, attack, attack_params, job_batch_size = job
if job_batch_size is None:
job_batch_size = batch_size
t1 = time.time()
acc = accuracy(sess, model, x_data, y_data, batch_size=job_batch_size,
devices=devices, attack=attack, attack_params=attack_params)
t2 = time.time()
out[name] = acc
print("Accuracy on " + name + " examples: ", acc)
print("Evaluation took", t2 - t1, "seconds")
return out
|
python
|
def impl(sess, model, dataset, factory, x_data, y_data,
base_eps_iter=BASE_EPS_ITER, nb_iter=NB_ITER,
batch_size=BATCH_SIZE):
"""
The actual implementation of the evaluation.
:param sess: tf.Session
:param model: cleverhans.model.Model
:param dataset: cleverhans.dataset.Dataset
:param factory: the dataset factory corresponding to `dataset`
:param x_data: numpy array of input examples
:param y_data: numpy array of class labels
:param base_eps_iter: step size for PGD if data were in [0, 1]
:param nb_iter: number of PGD iterations
:returns: dict mapping string adversarial example names to accuracies
"""
center = dataset.kwargs['center']
max_val = dataset.kwargs['max_val']
value_range = max_val * (1. + center)
min_value = 0. - center * max_val
if 'CIFAR' in str(factory.cls):
base_eps = 8. / 255.
if base_eps_iter is None:
base_eps_iter = 2. / 255.
elif 'MNIST' in str(factory.cls):
base_eps = .3
if base_eps_iter is None:
base_eps_iter = .1
else:
raise NotImplementedError(str(factory.cls))
pgd_params = {'eps': base_eps * value_range,
'eps_iter': base_eps_iter * value_range,
'nb_iter': nb_iter,
'clip_min': min_value,
'clip_max': max_val}
semantic = Semantic(model, center, max_val, sess)
pgd = ProjectedGradientDescent(model, sess=sess)
jobs = [('clean', None, None, None),
('Semantic', semantic, None, None),
('pgd', pgd, pgd_params, None)]
out = {}
for job in jobs:
name, attack, attack_params, job_batch_size = job
if job_batch_size is None:
job_batch_size = batch_size
t1 = time.time()
acc = accuracy(sess, model, x_data, y_data, batch_size=job_batch_size,
devices=devices, attack=attack, attack_params=attack_params)
t2 = time.time()
out[name] = acc
print("Accuracy on " + name + " examples: ", acc)
print("Evaluation took", t2 - t1, "seconds")
return out
|
[
"def",
"impl",
"(",
"sess",
",",
"model",
",",
"dataset",
",",
"factory",
",",
"x_data",
",",
"y_data",
",",
"base_eps_iter",
"=",
"BASE_EPS_ITER",
",",
"nb_iter",
"=",
"NB_ITER",
",",
"batch_size",
"=",
"BATCH_SIZE",
")",
":",
"center",
"=",
"dataset",
".",
"kwargs",
"[",
"'center'",
"]",
"max_val",
"=",
"dataset",
".",
"kwargs",
"[",
"'max_val'",
"]",
"value_range",
"=",
"max_val",
"*",
"(",
"1.",
"+",
"center",
")",
"min_value",
"=",
"0.",
"-",
"center",
"*",
"max_val",
"if",
"'CIFAR'",
"in",
"str",
"(",
"factory",
".",
"cls",
")",
":",
"base_eps",
"=",
"8.",
"/",
"255.",
"if",
"base_eps_iter",
"is",
"None",
":",
"base_eps_iter",
"=",
"2.",
"/",
"255.",
"elif",
"'MNIST'",
"in",
"str",
"(",
"factory",
".",
"cls",
")",
":",
"base_eps",
"=",
".3",
"if",
"base_eps_iter",
"is",
"None",
":",
"base_eps_iter",
"=",
".1",
"else",
":",
"raise",
"NotImplementedError",
"(",
"str",
"(",
"factory",
".",
"cls",
")",
")",
"pgd_params",
"=",
"{",
"'eps'",
":",
"base_eps",
"*",
"value_range",
",",
"'eps_iter'",
":",
"base_eps_iter",
"*",
"value_range",
",",
"'nb_iter'",
":",
"nb_iter",
",",
"'clip_min'",
":",
"min_value",
",",
"'clip_max'",
":",
"max_val",
"}",
"semantic",
"=",
"Semantic",
"(",
"model",
",",
"center",
",",
"max_val",
",",
"sess",
")",
"pgd",
"=",
"ProjectedGradientDescent",
"(",
"model",
",",
"sess",
"=",
"sess",
")",
"jobs",
"=",
"[",
"(",
"'clean'",
",",
"None",
",",
"None",
",",
"None",
")",
",",
"(",
"'Semantic'",
",",
"semantic",
",",
"None",
",",
"None",
")",
",",
"(",
"'pgd'",
",",
"pgd",
",",
"pgd_params",
",",
"None",
")",
"]",
"out",
"=",
"{",
"}",
"for",
"job",
"in",
"jobs",
":",
"name",
",",
"attack",
",",
"attack_params",
",",
"job_batch_size",
"=",
"job",
"if",
"job_batch_size",
"is",
"None",
":",
"job_batch_size",
"=",
"batch_size",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"acc",
"=",
"accuracy",
"(",
"sess",
",",
"model",
",",
"x_data",
",",
"y_data",
",",
"batch_size",
"=",
"job_batch_size",
",",
"devices",
"=",
"devices",
",",
"attack",
"=",
"attack",
",",
"attack_params",
"=",
"attack_params",
")",
"t2",
"=",
"time",
".",
"time",
"(",
")",
"out",
"[",
"name",
"]",
"=",
"acc",
"print",
"(",
"\"Accuracy on \"",
"+",
"name",
"+",
"\" examples: \"",
",",
"acc",
")",
"print",
"(",
"\"Evaluation took\"",
",",
"t2",
"-",
"t1",
",",
"\"seconds\"",
")",
"return",
"out"
] |
The actual implementation of the evaluation.
:param sess: tf.Session
:param model: cleverhans.model.Model
:param dataset: cleverhans.dataset.Dataset
:param factory: the dataset factory corresponding to `dataset`
:param x_data: numpy array of input examples
:param y_data: numpy array of class labels
:param base_eps_iter: step size for PGD if data were in [0, 1]
:param nb_iter: number of PGD iterations
:returns: dict mapping string adversarial example names to accuracies
|
[
"The",
"actual",
"implementation",
"of",
"the",
"evaluation",
".",
":",
"param",
"sess",
":",
"tf",
".",
"Session",
":",
"param",
"model",
":",
"cleverhans",
".",
"model",
".",
"Model",
":",
"param",
"dataset",
":",
"cleverhans",
".",
"dataset",
".",
"Dataset",
":",
"param",
"factory",
":",
"the",
"dataset",
"factory",
"corresponding",
"to",
"dataset",
":",
"param",
"x_data",
":",
"numpy",
"array",
"of",
"input",
"examples",
":",
"param",
"y_data",
":",
"numpy",
"array",
"of",
"class",
"labels",
":",
"param",
"base_eps_iter",
":",
"step",
"size",
"for",
"PGD",
"if",
"data",
"were",
"in",
"[",
"0",
"1",
"]",
":",
"param",
"nb_iter",
":",
"number",
"of",
"PGD",
"iterations",
":",
"returns",
":",
"dict",
"mapping",
"string",
"adversarial",
"example",
"names",
"to",
"accuracies"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/scripts/compute_accuracy.py#L100-L159
|
train
|
tensorflow/cleverhans
|
scripts/compute_accuracy.py
|
main
|
def main(argv=None):
"""
Print accuracies
"""
try:
_name_of_script, filepath = argv
except ValueError:
raise ValueError(argv)
print_accuracies(filepath=filepath, test_start=FLAGS.test_start,
test_end=FLAGS.test_end, which_set=FLAGS.which_set,
nb_iter=FLAGS.nb_iter, base_eps_iter=FLAGS.base_eps_iter,
batch_size=FLAGS.batch_size)
|
python
|
def main(argv=None):
"""
Print accuracies
"""
try:
_name_of_script, filepath = argv
except ValueError:
raise ValueError(argv)
print_accuracies(filepath=filepath, test_start=FLAGS.test_start,
test_end=FLAGS.test_end, which_set=FLAGS.which_set,
nb_iter=FLAGS.nb_iter, base_eps_iter=FLAGS.base_eps_iter,
batch_size=FLAGS.batch_size)
|
[
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"try",
":",
"_name_of_script",
",",
"filepath",
"=",
"argv",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"argv",
")",
"print_accuracies",
"(",
"filepath",
"=",
"filepath",
",",
"test_start",
"=",
"FLAGS",
".",
"test_start",
",",
"test_end",
"=",
"FLAGS",
".",
"test_end",
",",
"which_set",
"=",
"FLAGS",
".",
"which_set",
",",
"nb_iter",
"=",
"FLAGS",
".",
"nb_iter",
",",
"base_eps_iter",
"=",
"FLAGS",
".",
"base_eps_iter",
",",
"batch_size",
"=",
"FLAGS",
".",
"batch_size",
")"
] |
Print accuracies
|
[
"Print",
"accuracies"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/scripts/compute_accuracy.py#L162-L173
|
train
|
tensorflow/cleverhans
|
cleverhans/future/torch/attacks/fast_gradient_method.py
|
fast_gradient_method
|
def fast_gradient_method(model_fn, x, eps, ord,
clip_min=None, clip_max=None, y=None, targeted=False, sanity_checks=False):
"""
PyTorch implementation of the Fast Gradient Method.
:param model_fn: a callable that takes an input tensor and returns the model logits.
:param x: input tensor.
:param eps: epsilon (input variation parameter); see https://arxiv.org/abs/1412.6572.
:param ord: Order of the norm (mimics NumPy). Possible values: np.inf, 1 or 2.
:param clip_min: (optional) float. Minimum float value for adversarial example components.
:param clip_max: (optional) float. Maximum float value for adversarial example components.
:param y: (optional) Tensor with true labels. If targeted is true, then provide the
target label. Otherwise, only provide this parameter if you'd like to use true
labels when crafting adversarial samples. Otherwise, model predictions are used
as labels to avoid the "label leaking" effect (explained in this paper:
https://arxiv.org/abs/1611.01236). Default is None.
:param targeted: (optional) bool. Is the attack targeted or untargeted?
Untargeted, the default, will try to make the label incorrect.
Targeted will instead try to move in the direction of being more like y.
:param sanity_checks: bool, if True, include asserts (Turn them off to use less runtime /
memory or for unit tests that intentionally pass strange input)
:return: a tensor for the adversarial example
"""
if ord not in [np.inf, 1, 2]:
raise ValueError("Norm order must be either np.inf, 1, or 2.")
asserts = []
# If a data range was specified, check that the input was in that range
if clip_min is not None:
assert_ge = torch.all(torch.ge(x, torch.tensor(clip_min, device=x.device, dtype=x.dtype)))
asserts.append(assert_ge)
if clip_max is not None:
assert_le = torch.all(torch.le(x, torch.tensor(clip_max, device=x.device, dtype=x.dtype)))
asserts.append(assert_le)
# x needs to be a leaf variable, of floating point type and have requires_grad being True for
# its grad to be computed and stored properly in a backward call
x = x.clone().detach().to(torch.float).requires_grad_(True)
if y is None:
# Using model predictions as ground truth to avoid label leaking
_, y = torch.max(model_fn(x), 1)
# Compute loss
loss_fn = torch.nn.CrossEntropyLoss()
loss = loss_fn(model_fn(x), y)
# If attack is targeted, minimize loss of target label rather than maximize loss of correct label
if targeted:
loss = -loss
# Define gradient of loss wrt input
loss.backward()
optimal_perturbation = optimize_linear(x.grad, eps, ord)
# Add perturbation to original example to obtain adversarial example
adv_x = x + optimal_perturbation
# If clipping is needed, reset all values outside of [clip_min, clip_max]
if (clip_min is not None) or (clip_max is not None):
# We don't currently support one-sided clipping
assert clip_min is not None and clip_max is not None
adv_x = torch.clamp(adv_x, clip_min, clip_max)
if sanity_checks:
assert np.all(asserts)
return adv_x
|
python
|
def fast_gradient_method(model_fn, x, eps, ord,
clip_min=None, clip_max=None, y=None, targeted=False, sanity_checks=False):
"""
PyTorch implementation of the Fast Gradient Method.
:param model_fn: a callable that takes an input tensor and returns the model logits.
:param x: input tensor.
:param eps: epsilon (input variation parameter); see https://arxiv.org/abs/1412.6572.
:param ord: Order of the norm (mimics NumPy). Possible values: np.inf, 1 or 2.
:param clip_min: (optional) float. Minimum float value for adversarial example components.
:param clip_max: (optional) float. Maximum float value for adversarial example components.
:param y: (optional) Tensor with true labels. If targeted is true, then provide the
target label. Otherwise, only provide this parameter if you'd like to use true
labels when crafting adversarial samples. Otherwise, model predictions are used
as labels to avoid the "label leaking" effect (explained in this paper:
https://arxiv.org/abs/1611.01236). Default is None.
:param targeted: (optional) bool. Is the attack targeted or untargeted?
Untargeted, the default, will try to make the label incorrect.
Targeted will instead try to move in the direction of being more like y.
:param sanity_checks: bool, if True, include asserts (Turn them off to use less runtime /
memory or for unit tests that intentionally pass strange input)
:return: a tensor for the adversarial example
"""
if ord not in [np.inf, 1, 2]:
raise ValueError("Norm order must be either np.inf, 1, or 2.")
asserts = []
# If a data range was specified, check that the input was in that range
if clip_min is not None:
assert_ge = torch.all(torch.ge(x, torch.tensor(clip_min, device=x.device, dtype=x.dtype)))
asserts.append(assert_ge)
if clip_max is not None:
assert_le = torch.all(torch.le(x, torch.tensor(clip_max, device=x.device, dtype=x.dtype)))
asserts.append(assert_le)
# x needs to be a leaf variable, of floating point type and have requires_grad being True for
# its grad to be computed and stored properly in a backward call
x = x.clone().detach().to(torch.float).requires_grad_(True)
if y is None:
# Using model predictions as ground truth to avoid label leaking
_, y = torch.max(model_fn(x), 1)
# Compute loss
loss_fn = torch.nn.CrossEntropyLoss()
loss = loss_fn(model_fn(x), y)
# If attack is targeted, minimize loss of target label rather than maximize loss of correct label
if targeted:
loss = -loss
# Define gradient of loss wrt input
loss.backward()
optimal_perturbation = optimize_linear(x.grad, eps, ord)
# Add perturbation to original example to obtain adversarial example
adv_x = x + optimal_perturbation
# If clipping is needed, reset all values outside of [clip_min, clip_max]
if (clip_min is not None) or (clip_max is not None):
# We don't currently support one-sided clipping
assert clip_min is not None and clip_max is not None
adv_x = torch.clamp(adv_x, clip_min, clip_max)
if sanity_checks:
assert np.all(asserts)
return adv_x
|
[
"def",
"fast_gradient_method",
"(",
"model_fn",
",",
"x",
",",
"eps",
",",
"ord",
",",
"clip_min",
"=",
"None",
",",
"clip_max",
"=",
"None",
",",
"y",
"=",
"None",
",",
"targeted",
"=",
"False",
",",
"sanity_checks",
"=",
"False",
")",
":",
"if",
"ord",
"not",
"in",
"[",
"np",
".",
"inf",
",",
"1",
",",
"2",
"]",
":",
"raise",
"ValueError",
"(",
"\"Norm order must be either np.inf, 1, or 2.\"",
")",
"asserts",
"=",
"[",
"]",
"# If a data range was specified, check that the input was in that range",
"if",
"clip_min",
"is",
"not",
"None",
":",
"assert_ge",
"=",
"torch",
".",
"all",
"(",
"torch",
".",
"ge",
"(",
"x",
",",
"torch",
".",
"tensor",
"(",
"clip_min",
",",
"device",
"=",
"x",
".",
"device",
",",
"dtype",
"=",
"x",
".",
"dtype",
")",
")",
")",
"asserts",
".",
"append",
"(",
"assert_ge",
")",
"if",
"clip_max",
"is",
"not",
"None",
":",
"assert_le",
"=",
"torch",
".",
"all",
"(",
"torch",
".",
"le",
"(",
"x",
",",
"torch",
".",
"tensor",
"(",
"clip_max",
",",
"device",
"=",
"x",
".",
"device",
",",
"dtype",
"=",
"x",
".",
"dtype",
")",
")",
")",
"asserts",
".",
"append",
"(",
"assert_le",
")",
"# x needs to be a leaf variable, of floating point type and have requires_grad being True for",
"# its grad to be computed and stored properly in a backward call",
"x",
"=",
"x",
".",
"clone",
"(",
")",
".",
"detach",
"(",
")",
".",
"to",
"(",
"torch",
".",
"float",
")",
".",
"requires_grad_",
"(",
"True",
")",
"if",
"y",
"is",
"None",
":",
"# Using model predictions as ground truth to avoid label leaking",
"_",
",",
"y",
"=",
"torch",
".",
"max",
"(",
"model_fn",
"(",
"x",
")",
",",
"1",
")",
"# Compute loss",
"loss_fn",
"=",
"torch",
".",
"nn",
".",
"CrossEntropyLoss",
"(",
")",
"loss",
"=",
"loss_fn",
"(",
"model_fn",
"(",
"x",
")",
",",
"y",
")",
"# If attack is targeted, minimize loss of target label rather than maximize loss of correct label",
"if",
"targeted",
":",
"loss",
"=",
"-",
"loss",
"# Define gradient of loss wrt input",
"loss",
".",
"backward",
"(",
")",
"optimal_perturbation",
"=",
"optimize_linear",
"(",
"x",
".",
"grad",
",",
"eps",
",",
"ord",
")",
"# Add perturbation to original example to obtain adversarial example",
"adv_x",
"=",
"x",
"+",
"optimal_perturbation",
"# If clipping is needed, reset all values outside of [clip_min, clip_max]",
"if",
"(",
"clip_min",
"is",
"not",
"None",
")",
"or",
"(",
"clip_max",
"is",
"not",
"None",
")",
":",
"# We don't currently support one-sided clipping",
"assert",
"clip_min",
"is",
"not",
"None",
"and",
"clip_max",
"is",
"not",
"None",
"adv_x",
"=",
"torch",
".",
"clamp",
"(",
"adv_x",
",",
"clip_min",
",",
"clip_max",
")",
"if",
"sanity_checks",
":",
"assert",
"np",
".",
"all",
"(",
"asserts",
")",
"return",
"adv_x"
] |
PyTorch implementation of the Fast Gradient Method.
:param model_fn: a callable that takes an input tensor and returns the model logits.
:param x: input tensor.
:param eps: epsilon (input variation parameter); see https://arxiv.org/abs/1412.6572.
:param ord: Order of the norm (mimics NumPy). Possible values: np.inf, 1 or 2.
:param clip_min: (optional) float. Minimum float value for adversarial example components.
:param clip_max: (optional) float. Maximum float value for adversarial example components.
:param y: (optional) Tensor with true labels. If targeted is true, then provide the
target label. Otherwise, only provide this parameter if you'd like to use true
labels when crafting adversarial samples. Otherwise, model predictions are used
as labels to avoid the "label leaking" effect (explained in this paper:
https://arxiv.org/abs/1611.01236). Default is None.
:param targeted: (optional) bool. Is the attack targeted or untargeted?
Untargeted, the default, will try to make the label incorrect.
Targeted will instead try to move in the direction of being more like y.
:param sanity_checks: bool, if True, include asserts (Turn them off to use less runtime /
memory or for unit tests that intentionally pass strange input)
:return: a tensor for the adversarial example
|
[
"PyTorch",
"implementation",
"of",
"the",
"Fast",
"Gradient",
"Method",
".",
":",
"param",
"model_fn",
":",
"a",
"callable",
"that",
"takes",
"an",
"input",
"tensor",
"and",
"returns",
"the",
"model",
"logits",
".",
":",
"param",
"x",
":",
"input",
"tensor",
".",
":",
"param",
"eps",
":",
"epsilon",
"(",
"input",
"variation",
"parameter",
")",
";",
"see",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1412",
".",
"6572",
".",
":",
"param",
"ord",
":",
"Order",
"of",
"the",
"norm",
"(",
"mimics",
"NumPy",
")",
".",
"Possible",
"values",
":",
"np",
".",
"inf",
"1",
"or",
"2",
".",
":",
"param",
"clip_min",
":",
"(",
"optional",
")",
"float",
".",
"Minimum",
"float",
"value",
"for",
"adversarial",
"example",
"components",
".",
":",
"param",
"clip_max",
":",
"(",
"optional",
")",
"float",
".",
"Maximum",
"float",
"value",
"for",
"adversarial",
"example",
"components",
".",
":",
"param",
"y",
":",
"(",
"optional",
")",
"Tensor",
"with",
"true",
"labels",
".",
"If",
"targeted",
"is",
"true",
"then",
"provide",
"the",
"target",
"label",
".",
"Otherwise",
"only",
"provide",
"this",
"parameter",
"if",
"you",
"d",
"like",
"to",
"use",
"true",
"labels",
"when",
"crafting",
"adversarial",
"samples",
".",
"Otherwise",
"model",
"predictions",
"are",
"used",
"as",
"labels",
"to",
"avoid",
"the",
"label",
"leaking",
"effect",
"(",
"explained",
"in",
"this",
"paper",
":",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1611",
".",
"01236",
")",
".",
"Default",
"is",
"None",
".",
":",
"param",
"targeted",
":",
"(",
"optional",
")",
"bool",
".",
"Is",
"the",
"attack",
"targeted",
"or",
"untargeted?",
"Untargeted",
"the",
"default",
"will",
"try",
"to",
"make",
"the",
"label",
"incorrect",
".",
"Targeted",
"will",
"instead",
"try",
"to",
"move",
"in",
"the",
"direction",
"of",
"being",
"more",
"like",
"y",
".",
":",
"param",
"sanity_checks",
":",
"bool",
"if",
"True",
"include",
"asserts",
"(",
"Turn",
"them",
"off",
"to",
"use",
"less",
"runtime",
"/",
"memory",
"or",
"for",
"unit",
"tests",
"that",
"intentionally",
"pass",
"strange",
"input",
")",
":",
"return",
":",
"a",
"tensor",
"for",
"the",
"adversarial",
"example"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/future/torch/attacks/fast_gradient_method.py#L8-L73
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/dev_toolkit/sample_attacks/fgsm/attack_fgsm.py
|
load_images
|
def load_images(input_dir, batch_shape):
"""Read png images from input directory in batches.
Args:
input_dir: input directory
batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]
Yields:
filenames: list file names without path of each image
Lenght of this list could be less than batch_size, in this case only
first few images of the result are elements of the minibatch.
images: array with all images from this batch
"""
images = np.zeros(batch_shape)
filenames = []
idx = 0
batch_size = batch_shape[0]
for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
with tf.gfile.Open(filepath) as f:
image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0
# Images for inception classifier are normalized to be in [-1, 1] interval.
images[idx, :, :, :] = image * 2.0 - 1.0
filenames.append(os.path.basename(filepath))
idx += 1
if idx == batch_size:
yield filenames, images
filenames = []
images = np.zeros(batch_shape)
idx = 0
if idx > 0:
yield filenames, images
|
python
|
def load_images(input_dir, batch_shape):
"""Read png images from input directory in batches.
Args:
input_dir: input directory
batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]
Yields:
filenames: list file names without path of each image
Lenght of this list could be less than batch_size, in this case only
first few images of the result are elements of the minibatch.
images: array with all images from this batch
"""
images = np.zeros(batch_shape)
filenames = []
idx = 0
batch_size = batch_shape[0]
for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
with tf.gfile.Open(filepath) as f:
image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0
# Images for inception classifier are normalized to be in [-1, 1] interval.
images[idx, :, :, :] = image * 2.0 - 1.0
filenames.append(os.path.basename(filepath))
idx += 1
if idx == batch_size:
yield filenames, images
filenames = []
images = np.zeros(batch_shape)
idx = 0
if idx > 0:
yield filenames, images
|
[
"def",
"load_images",
"(",
"input_dir",
",",
"batch_shape",
")",
":",
"images",
"=",
"np",
".",
"zeros",
"(",
"batch_shape",
")",
"filenames",
"=",
"[",
"]",
"idx",
"=",
"0",
"batch_size",
"=",
"batch_shape",
"[",
"0",
"]",
"for",
"filepath",
"in",
"tf",
".",
"gfile",
".",
"Glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"input_dir",
",",
"'*.png'",
")",
")",
":",
"with",
"tf",
".",
"gfile",
".",
"Open",
"(",
"filepath",
")",
"as",
"f",
":",
"image",
"=",
"np",
".",
"array",
"(",
"Image",
".",
"open",
"(",
"f",
")",
".",
"convert",
"(",
"'RGB'",
")",
")",
".",
"astype",
"(",
"np",
".",
"float",
")",
"/",
"255.0",
"# Images for inception classifier are normalized to be in [-1, 1] interval.",
"images",
"[",
"idx",
",",
":",
",",
":",
",",
":",
"]",
"=",
"image",
"*",
"2.0",
"-",
"1.0",
"filenames",
".",
"append",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"filepath",
")",
")",
"idx",
"+=",
"1",
"if",
"idx",
"==",
"batch_size",
":",
"yield",
"filenames",
",",
"images",
"filenames",
"=",
"[",
"]",
"images",
"=",
"np",
".",
"zeros",
"(",
"batch_shape",
")",
"idx",
"=",
"0",
"if",
"idx",
">",
"0",
":",
"yield",
"filenames",
",",
"images"
] |
Read png images from input directory in batches.
Args:
input_dir: input directory
batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]
Yields:
filenames: list file names without path of each image
Lenght of this list could be less than batch_size, in this case only
first few images of the result are elements of the minibatch.
images: array with all images from this batch
|
[
"Read",
"png",
"images",
"from",
"input",
"directory",
"in",
"batches",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/sample_attacks/fgsm/attack_fgsm.py#L47-L77
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/dev_toolkit/sample_attacks/fgsm/attack_fgsm.py
|
save_images
|
def save_images(images, filenames, output_dir):
"""Saves images to the output directory.
Args:
images: array with minibatch of images
filenames: list of filenames without path
If number of file names in this list less than number of images in
the minibatch then only first len(filenames) images will be saved.
output_dir: directory where to save images
"""
for i, filename in enumerate(filenames):
# Images for inception classifier are normalized to be in [-1, 1] interval,
# so rescale them back to [0, 1].
with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:
img = (((images[i, :, :, :] + 1.0) * 0.5) * 255.0).astype(np.uint8)
Image.fromarray(img).save(f, format='PNG')
|
python
|
def save_images(images, filenames, output_dir):
"""Saves images to the output directory.
Args:
images: array with minibatch of images
filenames: list of filenames without path
If number of file names in this list less than number of images in
the minibatch then only first len(filenames) images will be saved.
output_dir: directory where to save images
"""
for i, filename in enumerate(filenames):
# Images for inception classifier are normalized to be in [-1, 1] interval,
# so rescale them back to [0, 1].
with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:
img = (((images[i, :, :, :] + 1.0) * 0.5) * 255.0).astype(np.uint8)
Image.fromarray(img).save(f, format='PNG')
|
[
"def",
"save_images",
"(",
"images",
",",
"filenames",
",",
"output_dir",
")",
":",
"for",
"i",
",",
"filename",
"in",
"enumerate",
"(",
"filenames",
")",
":",
"# Images for inception classifier are normalized to be in [-1, 1] interval,",
"# so rescale them back to [0, 1].",
"with",
"tf",
".",
"gfile",
".",
"Open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"filename",
")",
",",
"'w'",
")",
"as",
"f",
":",
"img",
"=",
"(",
"(",
"(",
"images",
"[",
"i",
",",
":",
",",
":",
",",
":",
"]",
"+",
"1.0",
")",
"*",
"0.5",
")",
"*",
"255.0",
")",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"Image",
".",
"fromarray",
"(",
"img",
")",
".",
"save",
"(",
"f",
",",
"format",
"=",
"'PNG'",
")"
] |
Saves images to the output directory.
Args:
images: array with minibatch of images
filenames: list of filenames without path
If number of file names in this list less than number of images in
the minibatch then only first len(filenames) images will be saved.
output_dir: directory where to save images
|
[
"Saves",
"images",
"to",
"the",
"output",
"directory",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/sample_attacks/fgsm/attack_fgsm.py#L80-L95
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/dev_toolkit/sample_attacks/fgsm/attack_fgsm.py
|
main
|
def main(_):
"""Run the sample attack"""
# Images for inception classifier are normalized to be in [-1, 1] interval,
# eps is a difference between pixels so it should be in [0, 2] interval.
# Renormalizing epsilon from [0, 255] to [0, 2].
eps = 2.0 * FLAGS.max_epsilon / 255.0
batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
nb_classes = 1001
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
# Prepare graph
x_input = tf.placeholder(tf.float32, shape=batch_shape)
model = InceptionModel(nb_classes)
fgsm = FastGradientMethod(model)
x_adv = fgsm.generate(x_input, eps=eps, clip_min=-1., clip_max=1.)
# Run computation
saver = tf.train.Saver(slim.get_model_variables())
session_creator = tf.train.ChiefSessionCreator(
scaffold=tf.train.Scaffold(saver=saver),
checkpoint_filename_with_path=FLAGS.checkpoint_path,
master=FLAGS.master)
with tf.train.MonitoredSession(session_creator=session_creator) as sess:
for filenames, images in load_images(FLAGS.input_dir, batch_shape):
adv_images = sess.run(x_adv, feed_dict={x_input: images})
save_images(adv_images, filenames, FLAGS.output_dir)
|
python
|
def main(_):
"""Run the sample attack"""
# Images for inception classifier are normalized to be in [-1, 1] interval,
# eps is a difference between pixels so it should be in [0, 2] interval.
# Renormalizing epsilon from [0, 255] to [0, 2].
eps = 2.0 * FLAGS.max_epsilon / 255.0
batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
nb_classes = 1001
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
# Prepare graph
x_input = tf.placeholder(tf.float32, shape=batch_shape)
model = InceptionModel(nb_classes)
fgsm = FastGradientMethod(model)
x_adv = fgsm.generate(x_input, eps=eps, clip_min=-1., clip_max=1.)
# Run computation
saver = tf.train.Saver(slim.get_model_variables())
session_creator = tf.train.ChiefSessionCreator(
scaffold=tf.train.Scaffold(saver=saver),
checkpoint_filename_with_path=FLAGS.checkpoint_path,
master=FLAGS.master)
with tf.train.MonitoredSession(session_creator=session_creator) as sess:
for filenames, images in load_images(FLAGS.input_dir, batch_shape):
adv_images = sess.run(x_adv, feed_dict={x_input: images})
save_images(adv_images, filenames, FLAGS.output_dir)
|
[
"def",
"main",
"(",
"_",
")",
":",
"# Images for inception classifier are normalized to be in [-1, 1] interval,",
"# eps is a difference between pixels so it should be in [0, 2] interval.",
"# Renormalizing epsilon from [0, 255] to [0, 2].",
"eps",
"=",
"2.0",
"*",
"FLAGS",
".",
"max_epsilon",
"/",
"255.0",
"batch_shape",
"=",
"[",
"FLAGS",
".",
"batch_size",
",",
"FLAGS",
".",
"image_height",
",",
"FLAGS",
".",
"image_width",
",",
"3",
"]",
"nb_classes",
"=",
"1001",
"tf",
".",
"logging",
".",
"set_verbosity",
"(",
"tf",
".",
"logging",
".",
"INFO",
")",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
":",
"# Prepare graph",
"x_input",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"shape",
"=",
"batch_shape",
")",
"model",
"=",
"InceptionModel",
"(",
"nb_classes",
")",
"fgsm",
"=",
"FastGradientMethod",
"(",
"model",
")",
"x_adv",
"=",
"fgsm",
".",
"generate",
"(",
"x_input",
",",
"eps",
"=",
"eps",
",",
"clip_min",
"=",
"-",
"1.",
",",
"clip_max",
"=",
"1.",
")",
"# Run computation",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
"slim",
".",
"get_model_variables",
"(",
")",
")",
"session_creator",
"=",
"tf",
".",
"train",
".",
"ChiefSessionCreator",
"(",
"scaffold",
"=",
"tf",
".",
"train",
".",
"Scaffold",
"(",
"saver",
"=",
"saver",
")",
",",
"checkpoint_filename_with_path",
"=",
"FLAGS",
".",
"checkpoint_path",
",",
"master",
"=",
"FLAGS",
".",
"master",
")",
"with",
"tf",
".",
"train",
".",
"MonitoredSession",
"(",
"session_creator",
"=",
"session_creator",
")",
"as",
"sess",
":",
"for",
"filenames",
",",
"images",
"in",
"load_images",
"(",
"FLAGS",
".",
"input_dir",
",",
"batch_shape",
")",
":",
"adv_images",
"=",
"sess",
".",
"run",
"(",
"x_adv",
",",
"feed_dict",
"=",
"{",
"x_input",
":",
"images",
"}",
")",
"save_images",
"(",
"adv_images",
",",
"filenames",
",",
"FLAGS",
".",
"output_dir",
")"
] |
Run the sample attack
|
[
"Run",
"the",
"sample",
"attack"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/sample_attacks/fgsm/attack_fgsm.py#L127-L157
|
train
|
tensorflow/cleverhans
|
tutorials/future/torch/cifar10_tutorial.py
|
ld_cifar10
|
def ld_cifar10():
"""Load training and test data."""
train_transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
test_transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
train_dataset = torchvision.datasets.CIFAR10(root='/tmp/data', train=True, transform=train_transforms, download=True)
test_dataset = torchvision.datasets.CIFAR10(root='/tmp/data', train=False, transform=test_transforms, download=True)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=2)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=128, shuffle=False, num_workers=2)
return EasyDict(train=train_loader, test=test_loader)
|
python
|
def ld_cifar10():
"""Load training and test data."""
train_transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
test_transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
train_dataset = torchvision.datasets.CIFAR10(root='/tmp/data', train=True, transform=train_transforms, download=True)
test_dataset = torchvision.datasets.CIFAR10(root='/tmp/data', train=False, transform=test_transforms, download=True)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=2)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=128, shuffle=False, num_workers=2)
return EasyDict(train=train_loader, test=test_loader)
|
[
"def",
"ld_cifar10",
"(",
")",
":",
"train_transforms",
"=",
"torchvision",
".",
"transforms",
".",
"Compose",
"(",
"[",
"torchvision",
".",
"transforms",
".",
"ToTensor",
"(",
")",
"]",
")",
"test_transforms",
"=",
"torchvision",
".",
"transforms",
".",
"Compose",
"(",
"[",
"torchvision",
".",
"transforms",
".",
"ToTensor",
"(",
")",
"]",
")",
"train_dataset",
"=",
"torchvision",
".",
"datasets",
".",
"CIFAR10",
"(",
"root",
"=",
"'/tmp/data'",
",",
"train",
"=",
"True",
",",
"transform",
"=",
"train_transforms",
",",
"download",
"=",
"True",
")",
"test_dataset",
"=",
"torchvision",
".",
"datasets",
".",
"CIFAR10",
"(",
"root",
"=",
"'/tmp/data'",
",",
"train",
"=",
"False",
",",
"transform",
"=",
"test_transforms",
",",
"download",
"=",
"True",
")",
"train_loader",
"=",
"torch",
".",
"utils",
".",
"data",
".",
"DataLoader",
"(",
"train_dataset",
",",
"batch_size",
"=",
"128",
",",
"shuffle",
"=",
"True",
",",
"num_workers",
"=",
"2",
")",
"test_loader",
"=",
"torch",
".",
"utils",
".",
"data",
".",
"DataLoader",
"(",
"test_dataset",
",",
"batch_size",
"=",
"128",
",",
"shuffle",
"=",
"False",
",",
"num_workers",
"=",
"2",
")",
"return",
"EasyDict",
"(",
"train",
"=",
"train_loader",
",",
"test",
"=",
"test_loader",
")"
] |
Load training and test data.
|
[
"Load",
"training",
"and",
"test",
"data",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/tutorials/future/torch/cifar10_tutorial.py#L33-L41
|
train
|
tensorflow/cleverhans
|
cleverhans/plot/success_fail.py
|
plot_report_from_path
|
def plot_report_from_path(path, success_name=DEFAULT_SUCCESS_NAME,
fail_names=DEFAULT_FAIL_NAMES, label=None,
is_max_confidence=True,
linewidth=LINEWIDTH,
plot_upper_bound=True):
"""
Plots a success-fail curve from a confidence report stored on disk,
:param path: string filepath for the stored report.
(Should be the output of make_confidence_report*.py)
:param success_name: The name (confidence report key) of the data that
should be used to measure success rate
:param fail_names: A list of names (confidence report keys) of the data
that should be used to measure failure rate.
*Only one of these keys will be plotted*. Each key will be tried in
order until one is found in the report. This is to support both the
output of `make_confidence_report` and `make_confidence_report_bundled`.
:param label: Optional string. Name to use for this curve in the legend.
:param is_max_confidence: bool.
If True, when measuring the failure rate, treat the data as the output
of a maximum confidence attack procedure.
This means that the attack is optimal (assuming the underlying optimizer
is good enough, *which is probably false*, so interpret the plot
accordingly) for thresholds >= .5 but for lower thresholds the observed
failure rate is a lower bound on the true worst failure rate and the
observed coverage is an upper bound (assuming good enough optimization)
on the true failure rate.
The plot thus draws the threshold >= .5 portion of the curve with a solid
line and the upper and lower bounds with a dashed line.
See https://openreview.net/forum?id=H1g0piA9tQ for details.
If False, the attack procedure is regarded as an ad hoc way of obtaining
a loose lower bound, and thus the whole curve is drawn with dashed lines.
:param linewidth: thickness of the line to draw
:param plot_upper_bound: include upper bound on error rate in plot
"""
report = load(path)
plot_report(report, success_name, fail_names, label, is_max_confidence,
linewidth, plot_upper_bound)
|
python
|
def plot_report_from_path(path, success_name=DEFAULT_SUCCESS_NAME,
fail_names=DEFAULT_FAIL_NAMES, label=None,
is_max_confidence=True,
linewidth=LINEWIDTH,
plot_upper_bound=True):
"""
Plots a success-fail curve from a confidence report stored on disk,
:param path: string filepath for the stored report.
(Should be the output of make_confidence_report*.py)
:param success_name: The name (confidence report key) of the data that
should be used to measure success rate
:param fail_names: A list of names (confidence report keys) of the data
that should be used to measure failure rate.
*Only one of these keys will be plotted*. Each key will be tried in
order until one is found in the report. This is to support both the
output of `make_confidence_report` and `make_confidence_report_bundled`.
:param label: Optional string. Name to use for this curve in the legend.
:param is_max_confidence: bool.
If True, when measuring the failure rate, treat the data as the output
of a maximum confidence attack procedure.
This means that the attack is optimal (assuming the underlying optimizer
is good enough, *which is probably false*, so interpret the plot
accordingly) for thresholds >= .5 but for lower thresholds the observed
failure rate is a lower bound on the true worst failure rate and the
observed coverage is an upper bound (assuming good enough optimization)
on the true failure rate.
The plot thus draws the threshold >= .5 portion of the curve with a solid
line and the upper and lower bounds with a dashed line.
See https://openreview.net/forum?id=H1g0piA9tQ for details.
If False, the attack procedure is regarded as an ad hoc way of obtaining
a loose lower bound, and thus the whole curve is drawn with dashed lines.
:param linewidth: thickness of the line to draw
:param plot_upper_bound: include upper bound on error rate in plot
"""
report = load(path)
plot_report(report, success_name, fail_names, label, is_max_confidence,
linewidth, plot_upper_bound)
|
[
"def",
"plot_report_from_path",
"(",
"path",
",",
"success_name",
"=",
"DEFAULT_SUCCESS_NAME",
",",
"fail_names",
"=",
"DEFAULT_FAIL_NAMES",
",",
"label",
"=",
"None",
",",
"is_max_confidence",
"=",
"True",
",",
"linewidth",
"=",
"LINEWIDTH",
",",
"plot_upper_bound",
"=",
"True",
")",
":",
"report",
"=",
"load",
"(",
"path",
")",
"plot_report",
"(",
"report",
",",
"success_name",
",",
"fail_names",
",",
"label",
",",
"is_max_confidence",
",",
"linewidth",
",",
"plot_upper_bound",
")"
] |
Plots a success-fail curve from a confidence report stored on disk,
:param path: string filepath for the stored report.
(Should be the output of make_confidence_report*.py)
:param success_name: The name (confidence report key) of the data that
should be used to measure success rate
:param fail_names: A list of names (confidence report keys) of the data
that should be used to measure failure rate.
*Only one of these keys will be plotted*. Each key will be tried in
order until one is found in the report. This is to support both the
output of `make_confidence_report` and `make_confidence_report_bundled`.
:param label: Optional string. Name to use for this curve in the legend.
:param is_max_confidence: bool.
If True, when measuring the failure rate, treat the data as the output
of a maximum confidence attack procedure.
This means that the attack is optimal (assuming the underlying optimizer
is good enough, *which is probably false*, so interpret the plot
accordingly) for thresholds >= .5 but for lower thresholds the observed
failure rate is a lower bound on the true worst failure rate and the
observed coverage is an upper bound (assuming good enough optimization)
on the true failure rate.
The plot thus draws the threshold >= .5 portion of the curve with a solid
line and the upper and lower bounds with a dashed line.
See https://openreview.net/forum?id=H1g0piA9tQ for details.
If False, the attack procedure is regarded as an ad hoc way of obtaining
a loose lower bound, and thus the whole curve is drawn with dashed lines.
:param linewidth: thickness of the line to draw
:param plot_upper_bound: include upper bound on error rate in plot
|
[
"Plots",
"a",
"success",
"-",
"fail",
"curve",
"from",
"a",
"confidence",
"report",
"stored",
"on",
"disk",
":",
"param",
"path",
":",
"string",
"filepath",
"for",
"the",
"stored",
"report",
".",
"(",
"Should",
"be",
"the",
"output",
"of",
"make_confidence_report",
"*",
".",
"py",
")",
":",
"param",
"success_name",
":",
"The",
"name",
"(",
"confidence",
"report",
"key",
")",
"of",
"the",
"data",
"that",
"should",
"be",
"used",
"to",
"measure",
"success",
"rate",
":",
"param",
"fail_names",
":",
"A",
"list",
"of",
"names",
"(",
"confidence",
"report",
"keys",
")",
"of",
"the",
"data",
"that",
"should",
"be",
"used",
"to",
"measure",
"failure",
"rate",
".",
"*",
"Only",
"one",
"of",
"these",
"keys",
"will",
"be",
"plotted",
"*",
".",
"Each",
"key",
"will",
"be",
"tried",
"in",
"order",
"until",
"one",
"is",
"found",
"in",
"the",
"report",
".",
"This",
"is",
"to",
"support",
"both",
"the",
"output",
"of",
"make_confidence_report",
"and",
"make_confidence_report_bundled",
".",
":",
"param",
"label",
":",
"Optional",
"string",
".",
"Name",
"to",
"use",
"for",
"this",
"curve",
"in",
"the",
"legend",
".",
":",
"param",
"is_max_confidence",
":",
"bool",
".",
"If",
"True",
"when",
"measuring",
"the",
"failure",
"rate",
"treat",
"the",
"data",
"as",
"the",
"output",
"of",
"a",
"maximum",
"confidence",
"attack",
"procedure",
".",
"This",
"means",
"that",
"the",
"attack",
"is",
"optimal",
"(",
"assuming",
"the",
"underlying",
"optimizer",
"is",
"good",
"enough",
"*",
"which",
"is",
"probably",
"false",
"*",
"so",
"interpret",
"the",
"plot",
"accordingly",
")",
"for",
"thresholds",
">",
"=",
".",
"5",
"but",
"for",
"lower",
"thresholds",
"the",
"observed",
"failure",
"rate",
"is",
"a",
"lower",
"bound",
"on",
"the",
"true",
"worst",
"failure",
"rate",
"and",
"the",
"observed",
"coverage",
"is",
"an",
"upper",
"bound",
"(",
"assuming",
"good",
"enough",
"optimization",
")",
"on",
"the",
"true",
"failure",
"rate",
".",
"The",
"plot",
"thus",
"draws",
"the",
"threshold",
">",
"=",
".",
"5",
"portion",
"of",
"the",
"curve",
"with",
"a",
"solid",
"line",
"and",
"the",
"upper",
"and",
"lower",
"bounds",
"with",
"a",
"dashed",
"line",
".",
"See",
"https",
":",
"//",
"openreview",
".",
"net",
"/",
"forum?id",
"=",
"H1g0piA9tQ",
"for",
"details",
".",
"If",
"False",
"the",
"attack",
"procedure",
"is",
"regarded",
"as",
"an",
"ad",
"hoc",
"way",
"of",
"obtaining",
"a",
"loose",
"lower",
"bound",
"and",
"thus",
"the",
"whole",
"curve",
"is",
"drawn",
"with",
"dashed",
"lines",
".",
":",
"param",
"linewidth",
":",
"thickness",
"of",
"the",
"line",
"to",
"draw",
":",
"param",
"plot_upper_bound",
":",
"include",
"upper",
"bound",
"on",
"error",
"rate",
"in",
"plot"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/plot/success_fail.py#L18-L54
|
train
|
tensorflow/cleverhans
|
cleverhans/plot/success_fail.py
|
plot_report
|
def plot_report(report, success_name, fail_names, label=None,
is_max_confidence=True,
linewidth=LINEWIDTH,
plot_upper_bound=True):
"""
Plot a success fail curve from a confidence report
:param report: A confidence report
(the type of object saved by make_confidence_report.py)
:param success_name: see plot_report_from_path
:param fail_names: see plot_report_from_path
:param label: see plot_report_from_path
:param is_max_confidence: see plot_report_from_path
:param linewidth: see plot_report_from_path
"""
(fail_optimal, success_optimal, fail_lower_bound, fail_upper_bound,
success_bounded) = make_curve(report, success_name, fail_names)
assert len(fail_lower_bound) == len(fail_upper_bound)
fail_optimal = np.array(fail_optimal)
fail_lower_bound = np.array(fail_lower_bound)
fail_upper_bound = np.array(fail_upper_bound)
if is_max_confidence:
p, = pyplot.plot(fail_optimal, success_optimal, label=label,
linewidth=linewidth)
color = p.get_color()
pyplot.plot(fail_lower_bound, success_bounded, '--', color=color)
if plot_upper_bound:
pyplot.plot(fail_upper_bound, success_bounded, '--', color=color)
else:
# If the attack was not MaxConfidence, then this whole curve is just
# a loose lower bound
all_fail = np.concatenate((fail_optimal, fail_lower_bound), axis=0)
pyplot.plot(all_fail, success_optimal + success_bounded,
'--', label=label, linewidth=linewidth)
pyplot.xlabel("Failure rate on adversarial examples")
pyplot.ylabel("Success rate on clean examples")
gap = fail_upper_bound - fail_lower_bound
if gap.size > 0:
assert gap.min() >= 0.
print("Max gap: ", gap.max())
|
python
|
def plot_report(report, success_name, fail_names, label=None,
is_max_confidence=True,
linewidth=LINEWIDTH,
plot_upper_bound=True):
"""
Plot a success fail curve from a confidence report
:param report: A confidence report
(the type of object saved by make_confidence_report.py)
:param success_name: see plot_report_from_path
:param fail_names: see plot_report_from_path
:param label: see plot_report_from_path
:param is_max_confidence: see plot_report_from_path
:param linewidth: see plot_report_from_path
"""
(fail_optimal, success_optimal, fail_lower_bound, fail_upper_bound,
success_bounded) = make_curve(report, success_name, fail_names)
assert len(fail_lower_bound) == len(fail_upper_bound)
fail_optimal = np.array(fail_optimal)
fail_lower_bound = np.array(fail_lower_bound)
fail_upper_bound = np.array(fail_upper_bound)
if is_max_confidence:
p, = pyplot.plot(fail_optimal, success_optimal, label=label,
linewidth=linewidth)
color = p.get_color()
pyplot.plot(fail_lower_bound, success_bounded, '--', color=color)
if plot_upper_bound:
pyplot.plot(fail_upper_bound, success_bounded, '--', color=color)
else:
# If the attack was not MaxConfidence, then this whole curve is just
# a loose lower bound
all_fail = np.concatenate((fail_optimal, fail_lower_bound), axis=0)
pyplot.plot(all_fail, success_optimal + success_bounded,
'--', label=label, linewidth=linewidth)
pyplot.xlabel("Failure rate on adversarial examples")
pyplot.ylabel("Success rate on clean examples")
gap = fail_upper_bound - fail_lower_bound
if gap.size > 0:
assert gap.min() >= 0.
print("Max gap: ", gap.max())
|
[
"def",
"plot_report",
"(",
"report",
",",
"success_name",
",",
"fail_names",
",",
"label",
"=",
"None",
",",
"is_max_confidence",
"=",
"True",
",",
"linewidth",
"=",
"LINEWIDTH",
",",
"plot_upper_bound",
"=",
"True",
")",
":",
"(",
"fail_optimal",
",",
"success_optimal",
",",
"fail_lower_bound",
",",
"fail_upper_bound",
",",
"success_bounded",
")",
"=",
"make_curve",
"(",
"report",
",",
"success_name",
",",
"fail_names",
")",
"assert",
"len",
"(",
"fail_lower_bound",
")",
"==",
"len",
"(",
"fail_upper_bound",
")",
"fail_optimal",
"=",
"np",
".",
"array",
"(",
"fail_optimal",
")",
"fail_lower_bound",
"=",
"np",
".",
"array",
"(",
"fail_lower_bound",
")",
"fail_upper_bound",
"=",
"np",
".",
"array",
"(",
"fail_upper_bound",
")",
"if",
"is_max_confidence",
":",
"p",
",",
"=",
"pyplot",
".",
"plot",
"(",
"fail_optimal",
",",
"success_optimal",
",",
"label",
"=",
"label",
",",
"linewidth",
"=",
"linewidth",
")",
"color",
"=",
"p",
".",
"get_color",
"(",
")",
"pyplot",
".",
"plot",
"(",
"fail_lower_bound",
",",
"success_bounded",
",",
"'--'",
",",
"color",
"=",
"color",
")",
"if",
"plot_upper_bound",
":",
"pyplot",
".",
"plot",
"(",
"fail_upper_bound",
",",
"success_bounded",
",",
"'--'",
",",
"color",
"=",
"color",
")",
"else",
":",
"# If the attack was not MaxConfidence, then this whole curve is just",
"# a loose lower bound",
"all_fail",
"=",
"np",
".",
"concatenate",
"(",
"(",
"fail_optimal",
",",
"fail_lower_bound",
")",
",",
"axis",
"=",
"0",
")",
"pyplot",
".",
"plot",
"(",
"all_fail",
",",
"success_optimal",
"+",
"success_bounded",
",",
"'--'",
",",
"label",
"=",
"label",
",",
"linewidth",
"=",
"linewidth",
")",
"pyplot",
".",
"xlabel",
"(",
"\"Failure rate on adversarial examples\"",
")",
"pyplot",
".",
"ylabel",
"(",
"\"Success rate on clean examples\"",
")",
"gap",
"=",
"fail_upper_bound",
"-",
"fail_lower_bound",
"if",
"gap",
".",
"size",
">",
"0",
":",
"assert",
"gap",
".",
"min",
"(",
")",
">=",
"0.",
"print",
"(",
"\"Max gap: \"",
",",
"gap",
".",
"max",
"(",
")",
")"
] |
Plot a success fail curve from a confidence report
:param report: A confidence report
(the type of object saved by make_confidence_report.py)
:param success_name: see plot_report_from_path
:param fail_names: see plot_report_from_path
:param label: see plot_report_from_path
:param is_max_confidence: see plot_report_from_path
:param linewidth: see plot_report_from_path
|
[
"Plot",
"a",
"success",
"fail",
"curve",
"from",
"a",
"confidence",
"report",
":",
"param",
"report",
":",
"A",
"confidence",
"report",
"(",
"the",
"type",
"of",
"object",
"saved",
"by",
"make_confidence_report",
".",
"py",
")",
":",
"param",
"success_name",
":",
"see",
"plot_report_from_path",
":",
"param",
"fail_names",
":",
"see",
"plot_report_from_path",
":",
"param",
"label",
":",
"see",
"plot_report_from_path",
":",
"param",
"is_max_confidence",
":",
"see",
"plot_report_from_path",
":",
"param",
"linewidth",
":",
"see",
"plot_report_from_path"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/plot/success_fail.py#L57-L97
|
train
|
tensorflow/cleverhans
|
cleverhans/plot/success_fail.py
|
make_curve
|
def make_curve(report, success_name, fail_names):
"""
Make a success-failure curve.
:param report: A confidence report
(the type of object saved by make_confidence_report.py)
:param success_name: see plot_report_from_path
:param fail_names: see plot_report_from_path
:returns:
fail_optimal: list of failure rates on adversarial data for the optimal
(t >= .5) part of the curve. Each entry corresponds to a different
threshold. Thresholds are chosen to make the smoothest possible curve
from the available data, e.g. one threshold between each unique
confidence value observed in the data. To make sure that linear
interpolation between points in the curve never overestimates the
failure rate for a specific success rate, the curve also includes
extra points that increment the failure rate prior to any point
that increments the success rate, so the curve moves up and to the
right in a series of backwards "L" shapes rather than moving up
and to the right along diagonal lines. For large datasets these
maximally pessimistic points will usually not be visible and the
curve will appear smooth.
success_optimal: list of success rates on clean data on the optimal
part of the curve. Matches up with `fail_optimal`.
fail_lower_bound: list of observed failure rates on the t < .5 portion
of the curve where MaxConfidence is not optimal.
fail_upper_bound: list of upper bounds (assuming good enough optimization,
so not a true upper bound) on the failure rates on the t < .5 portion
of the curve where MaxConfidence is not optimal. Matches up with
`fail_lower_bound`.
success_bounded: success rates on the non-optimal part of the curve.
Matches up with `fail_lower_bound` and `fail_upper_bound`.
"""
success_results = report[success_name]
fail_name = None # pacify pylint
found = False
for fail_name in fail_names:
if fail_name in report:
found = True
break
if not found:
raise ValueError(fail_name + " not in report."
"Available keys: " + str(report.keys()))
fail_results = report[fail_name]
# "good" means drawn from the distribution where we measure success rate.
# "bad" means drawn from the distribution where we measure failure rate.
# From here on out we use those terms, to avoid confusion between examples
# that actually failed and examples that were drawn from the distribution
# where we measured failure rate.
old_all_probs_version = False
if isinstance(success_results, dict):
# This dictionary key lookup will trigger a deprecation warning if `success_results` is not the old dictionary
# style of report, so we don't want to do a dictionary lookup unless we really are using the old version.
old_all_probs_version = 'all_probs' in success_results
if old_all_probs_version:
warnings.warn("The 'all_probs' key is included only to support "
" old files from a private development codebase. "
"Support for this key can be dropped at any time "
" without warning.")
good_probs = success_results['all_probs']
bad_probs = fail_results['all_probs']
bad_corrects = fail_results['correctness_mask']
good_corrects = success_results['correctness_mask']
else:
if isinstance(success_results, dict):
# Still using dict, but using newer key names
warnings.warn("Support for dictionary confidence reports is deprecated. Switch to using the classes in "
"cleverhans.confidence_report. Support for old dictionary-style reports may be removed "
"on or after 2019-07-19.")
good_probs = success_results['confidence']
bad_probs = fail_results['confidence']
good_corrects = success_results['correctness']
bad_corrects = fail_results['correctness']
else:
# current version
good_probs = success_results.confidence
bad_probs = fail_results.confidence
good_corrects = success_results.correctness
bad_corrects = fail_results.correctness
good_triplets = [(prob, correct, True) for prob, correct
in safe_zip(good_probs, good_corrects)]
bad_triplets = [(prob, correct, False) for prob, correct
in safe_zip(bad_probs, bad_corrects)]
total_good = len(good_triplets)
total_bad = len(bad_triplets)
if total_good != 10000:
warnings.warn("Not using full test set? Found " + str(total_good) +
" examples for measuring success rate")
if total_bad != 10000:
warnings.warn("Not using full test set for adversarial examples?")
all_triplets = good_triplets + bad_triplets
all_triplets = sorted(all_triplets, key=lambda x: -x[0])
# Start with the case for threshold t = 1.
# Examples are covered only if prob > t (strict inequality)
# So initially nothing is covered
good_covered_and_correct = 0
bad_covered_and_incorrect = 0
# Number of examples that are bad, incorrect, and covered by
# a t >= 0.5, or that were merely covered by a t < 0.5
failure_opportunities = 0
next_idx = 0
fail_optimal = []
success_optimal = []
fail_upper_bound = []
fail_lower_bound = []
success_bounded = []
bounded = False
# NOTE: the loop always exits via an internal break statement.
# Copied the termination condition to the while statement for ease
# of reading.
while next_idx < len(all_triplets):
gs = float(good_covered_and_correct) / total_good
bf = float(bad_covered_and_incorrect) / total_bad
# Add results for current threshold to the list
if not bounded:
# Sometimes when there are big jumps the failure rate it makes
# artifacts in the plot, where there's a long linear track.
# This implies the real success-fail curve is linear when
# actually it just isn't sampled by the data.
# To avoid implying that the model reaches a higher success
# rate than it actually does, we avoid these plotting artifacts
# by introducing extra points that make the graph move horizontally
# to the right first, then vertically.
if len(fail_optimal) > 0:
prev_bf = fail_optimal[-1]
prev_gs = success_optimal[-1]
if gs > prev_gs and bf > prev_bf:
fail_optimal.append(bf)
success_optimal.append(prev_gs)
success_optimal.append(gs)
fail_optimal.append(bf)
else:
success_bounded.append(gs)
fail_lower_bound.append(bf)
fail_upper_bound.append(float(failure_opportunities) / total_bad)
if next_idx == len(all_triplets):
break
# next_prob_to_include is not quite the same thing as the threshold.
# The threshold is infinitesimally smaller than this value.
next_prob_to_include = all_triplets[next_idx][0]
# Process all ties
while next_prob_to_include == all_triplets[next_idx][0]:
_prob, correct, is_good = all_triplets[next_idx]
if is_good:
good_covered_and_correct += correct
else:
if next_prob_to_include <= .5:
failure_opportunities += 1
else:
failure_opportunities += 1 - correct
bad_covered_and_incorrect += 1 - correct
next_idx += 1
if next_idx == len(all_triplets):
break
if next_prob_to_include <= .5:
bounded = True
out = (fail_optimal, success_optimal, fail_lower_bound, fail_upper_bound,
success_bounded)
return out
|
python
|
def make_curve(report, success_name, fail_names):
"""
Make a success-failure curve.
:param report: A confidence report
(the type of object saved by make_confidence_report.py)
:param success_name: see plot_report_from_path
:param fail_names: see plot_report_from_path
:returns:
fail_optimal: list of failure rates on adversarial data for the optimal
(t >= .5) part of the curve. Each entry corresponds to a different
threshold. Thresholds are chosen to make the smoothest possible curve
from the available data, e.g. one threshold between each unique
confidence value observed in the data. To make sure that linear
interpolation between points in the curve never overestimates the
failure rate for a specific success rate, the curve also includes
extra points that increment the failure rate prior to any point
that increments the success rate, so the curve moves up and to the
right in a series of backwards "L" shapes rather than moving up
and to the right along diagonal lines. For large datasets these
maximally pessimistic points will usually not be visible and the
curve will appear smooth.
success_optimal: list of success rates on clean data on the optimal
part of the curve. Matches up with `fail_optimal`.
fail_lower_bound: list of observed failure rates on the t < .5 portion
of the curve where MaxConfidence is not optimal.
fail_upper_bound: list of upper bounds (assuming good enough optimization,
so not a true upper bound) on the failure rates on the t < .5 portion
of the curve where MaxConfidence is not optimal. Matches up with
`fail_lower_bound`.
success_bounded: success rates on the non-optimal part of the curve.
Matches up with `fail_lower_bound` and `fail_upper_bound`.
"""
success_results = report[success_name]
fail_name = None # pacify pylint
found = False
for fail_name in fail_names:
if fail_name in report:
found = True
break
if not found:
raise ValueError(fail_name + " not in report."
"Available keys: " + str(report.keys()))
fail_results = report[fail_name]
# "good" means drawn from the distribution where we measure success rate.
# "bad" means drawn from the distribution where we measure failure rate.
# From here on out we use those terms, to avoid confusion between examples
# that actually failed and examples that were drawn from the distribution
# where we measured failure rate.
old_all_probs_version = False
if isinstance(success_results, dict):
# This dictionary key lookup will trigger a deprecation warning if `success_results` is not the old dictionary
# style of report, so we don't want to do a dictionary lookup unless we really are using the old version.
old_all_probs_version = 'all_probs' in success_results
if old_all_probs_version:
warnings.warn("The 'all_probs' key is included only to support "
" old files from a private development codebase. "
"Support for this key can be dropped at any time "
" without warning.")
good_probs = success_results['all_probs']
bad_probs = fail_results['all_probs']
bad_corrects = fail_results['correctness_mask']
good_corrects = success_results['correctness_mask']
else:
if isinstance(success_results, dict):
# Still using dict, but using newer key names
warnings.warn("Support for dictionary confidence reports is deprecated. Switch to using the classes in "
"cleverhans.confidence_report. Support for old dictionary-style reports may be removed "
"on or after 2019-07-19.")
good_probs = success_results['confidence']
bad_probs = fail_results['confidence']
good_corrects = success_results['correctness']
bad_corrects = fail_results['correctness']
else:
# current version
good_probs = success_results.confidence
bad_probs = fail_results.confidence
good_corrects = success_results.correctness
bad_corrects = fail_results.correctness
good_triplets = [(prob, correct, True) for prob, correct
in safe_zip(good_probs, good_corrects)]
bad_triplets = [(prob, correct, False) for prob, correct
in safe_zip(bad_probs, bad_corrects)]
total_good = len(good_triplets)
total_bad = len(bad_triplets)
if total_good != 10000:
warnings.warn("Not using full test set? Found " + str(total_good) +
" examples for measuring success rate")
if total_bad != 10000:
warnings.warn("Not using full test set for adversarial examples?")
all_triplets = good_triplets + bad_triplets
all_triplets = sorted(all_triplets, key=lambda x: -x[0])
# Start with the case for threshold t = 1.
# Examples are covered only if prob > t (strict inequality)
# So initially nothing is covered
good_covered_and_correct = 0
bad_covered_and_incorrect = 0
# Number of examples that are bad, incorrect, and covered by
# a t >= 0.5, or that were merely covered by a t < 0.5
failure_opportunities = 0
next_idx = 0
fail_optimal = []
success_optimal = []
fail_upper_bound = []
fail_lower_bound = []
success_bounded = []
bounded = False
# NOTE: the loop always exits via an internal break statement.
# Copied the termination condition to the while statement for ease
# of reading.
while next_idx < len(all_triplets):
gs = float(good_covered_and_correct) / total_good
bf = float(bad_covered_and_incorrect) / total_bad
# Add results for current threshold to the list
if not bounded:
# Sometimes when there are big jumps the failure rate it makes
# artifacts in the plot, where there's a long linear track.
# This implies the real success-fail curve is linear when
# actually it just isn't sampled by the data.
# To avoid implying that the model reaches a higher success
# rate than it actually does, we avoid these plotting artifacts
# by introducing extra points that make the graph move horizontally
# to the right first, then vertically.
if len(fail_optimal) > 0:
prev_bf = fail_optimal[-1]
prev_gs = success_optimal[-1]
if gs > prev_gs and bf > prev_bf:
fail_optimal.append(bf)
success_optimal.append(prev_gs)
success_optimal.append(gs)
fail_optimal.append(bf)
else:
success_bounded.append(gs)
fail_lower_bound.append(bf)
fail_upper_bound.append(float(failure_opportunities) / total_bad)
if next_idx == len(all_triplets):
break
# next_prob_to_include is not quite the same thing as the threshold.
# The threshold is infinitesimally smaller than this value.
next_prob_to_include = all_triplets[next_idx][0]
# Process all ties
while next_prob_to_include == all_triplets[next_idx][0]:
_prob, correct, is_good = all_triplets[next_idx]
if is_good:
good_covered_and_correct += correct
else:
if next_prob_to_include <= .5:
failure_opportunities += 1
else:
failure_opportunities += 1 - correct
bad_covered_and_incorrect += 1 - correct
next_idx += 1
if next_idx == len(all_triplets):
break
if next_prob_to_include <= .5:
bounded = True
out = (fail_optimal, success_optimal, fail_lower_bound, fail_upper_bound,
success_bounded)
return out
|
[
"def",
"make_curve",
"(",
"report",
",",
"success_name",
",",
"fail_names",
")",
":",
"success_results",
"=",
"report",
"[",
"success_name",
"]",
"fail_name",
"=",
"None",
"# pacify pylint",
"found",
"=",
"False",
"for",
"fail_name",
"in",
"fail_names",
":",
"if",
"fail_name",
"in",
"report",
":",
"found",
"=",
"True",
"break",
"if",
"not",
"found",
":",
"raise",
"ValueError",
"(",
"fail_name",
"+",
"\" not in report.\"",
"\"Available keys: \"",
"+",
"str",
"(",
"report",
".",
"keys",
"(",
")",
")",
")",
"fail_results",
"=",
"report",
"[",
"fail_name",
"]",
"# \"good\" means drawn from the distribution where we measure success rate.",
"# \"bad\" means drawn from the distribution where we measure failure rate.",
"# From here on out we use those terms, to avoid confusion between examples",
"# that actually failed and examples that were drawn from the distribution",
"# where we measured failure rate.",
"old_all_probs_version",
"=",
"False",
"if",
"isinstance",
"(",
"success_results",
",",
"dict",
")",
":",
"# This dictionary key lookup will trigger a deprecation warning if `success_results` is not the old dictionary",
"# style of report, so we don't want to do a dictionary lookup unless we really are using the old version.",
"old_all_probs_version",
"=",
"'all_probs'",
"in",
"success_results",
"if",
"old_all_probs_version",
":",
"warnings",
".",
"warn",
"(",
"\"The 'all_probs' key is included only to support \"",
"\" old files from a private development codebase. \"",
"\"Support for this key can be dropped at any time \"",
"\" without warning.\"",
")",
"good_probs",
"=",
"success_results",
"[",
"'all_probs'",
"]",
"bad_probs",
"=",
"fail_results",
"[",
"'all_probs'",
"]",
"bad_corrects",
"=",
"fail_results",
"[",
"'correctness_mask'",
"]",
"good_corrects",
"=",
"success_results",
"[",
"'correctness_mask'",
"]",
"else",
":",
"if",
"isinstance",
"(",
"success_results",
",",
"dict",
")",
":",
"# Still using dict, but using newer key names",
"warnings",
".",
"warn",
"(",
"\"Support for dictionary confidence reports is deprecated. Switch to using the classes in \"",
"\"cleverhans.confidence_report. Support for old dictionary-style reports may be removed \"",
"\"on or after 2019-07-19.\"",
")",
"good_probs",
"=",
"success_results",
"[",
"'confidence'",
"]",
"bad_probs",
"=",
"fail_results",
"[",
"'confidence'",
"]",
"good_corrects",
"=",
"success_results",
"[",
"'correctness'",
"]",
"bad_corrects",
"=",
"fail_results",
"[",
"'correctness'",
"]",
"else",
":",
"# current version",
"good_probs",
"=",
"success_results",
".",
"confidence",
"bad_probs",
"=",
"fail_results",
".",
"confidence",
"good_corrects",
"=",
"success_results",
".",
"correctness",
"bad_corrects",
"=",
"fail_results",
".",
"correctness",
"good_triplets",
"=",
"[",
"(",
"prob",
",",
"correct",
",",
"True",
")",
"for",
"prob",
",",
"correct",
"in",
"safe_zip",
"(",
"good_probs",
",",
"good_corrects",
")",
"]",
"bad_triplets",
"=",
"[",
"(",
"prob",
",",
"correct",
",",
"False",
")",
"for",
"prob",
",",
"correct",
"in",
"safe_zip",
"(",
"bad_probs",
",",
"bad_corrects",
")",
"]",
"total_good",
"=",
"len",
"(",
"good_triplets",
")",
"total_bad",
"=",
"len",
"(",
"bad_triplets",
")",
"if",
"total_good",
"!=",
"10000",
":",
"warnings",
".",
"warn",
"(",
"\"Not using full test set? Found \"",
"+",
"str",
"(",
"total_good",
")",
"+",
"\" examples for measuring success rate\"",
")",
"if",
"total_bad",
"!=",
"10000",
":",
"warnings",
".",
"warn",
"(",
"\"Not using full test set for adversarial examples?\"",
")",
"all_triplets",
"=",
"good_triplets",
"+",
"bad_triplets",
"all_triplets",
"=",
"sorted",
"(",
"all_triplets",
",",
"key",
"=",
"lambda",
"x",
":",
"-",
"x",
"[",
"0",
"]",
")",
"# Start with the case for threshold t = 1.",
"# Examples are covered only if prob > t (strict inequality)",
"# So initially nothing is covered",
"good_covered_and_correct",
"=",
"0",
"bad_covered_and_incorrect",
"=",
"0",
"# Number of examples that are bad, incorrect, and covered by",
"# a t >= 0.5, or that were merely covered by a t < 0.5",
"failure_opportunities",
"=",
"0",
"next_idx",
"=",
"0",
"fail_optimal",
"=",
"[",
"]",
"success_optimal",
"=",
"[",
"]",
"fail_upper_bound",
"=",
"[",
"]",
"fail_lower_bound",
"=",
"[",
"]",
"success_bounded",
"=",
"[",
"]",
"bounded",
"=",
"False",
"# NOTE: the loop always exits via an internal break statement.",
"# Copied the termination condition to the while statement for ease",
"# of reading.",
"while",
"next_idx",
"<",
"len",
"(",
"all_triplets",
")",
":",
"gs",
"=",
"float",
"(",
"good_covered_and_correct",
")",
"/",
"total_good",
"bf",
"=",
"float",
"(",
"bad_covered_and_incorrect",
")",
"/",
"total_bad",
"# Add results for current threshold to the list",
"if",
"not",
"bounded",
":",
"# Sometimes when there are big jumps the failure rate it makes",
"# artifacts in the plot, where there's a long linear track.",
"# This implies the real success-fail curve is linear when",
"# actually it just isn't sampled by the data.",
"# To avoid implying that the model reaches a higher success",
"# rate than it actually does, we avoid these plotting artifacts",
"# by introducing extra points that make the graph move horizontally",
"# to the right first, then vertically.",
"if",
"len",
"(",
"fail_optimal",
")",
">",
"0",
":",
"prev_bf",
"=",
"fail_optimal",
"[",
"-",
"1",
"]",
"prev_gs",
"=",
"success_optimal",
"[",
"-",
"1",
"]",
"if",
"gs",
">",
"prev_gs",
"and",
"bf",
">",
"prev_bf",
":",
"fail_optimal",
".",
"append",
"(",
"bf",
")",
"success_optimal",
".",
"append",
"(",
"prev_gs",
")",
"success_optimal",
".",
"append",
"(",
"gs",
")",
"fail_optimal",
".",
"append",
"(",
"bf",
")",
"else",
":",
"success_bounded",
".",
"append",
"(",
"gs",
")",
"fail_lower_bound",
".",
"append",
"(",
"bf",
")",
"fail_upper_bound",
".",
"append",
"(",
"float",
"(",
"failure_opportunities",
")",
"/",
"total_bad",
")",
"if",
"next_idx",
"==",
"len",
"(",
"all_triplets",
")",
":",
"break",
"# next_prob_to_include is not quite the same thing as the threshold.",
"# The threshold is infinitesimally smaller than this value.",
"next_prob_to_include",
"=",
"all_triplets",
"[",
"next_idx",
"]",
"[",
"0",
"]",
"# Process all ties",
"while",
"next_prob_to_include",
"==",
"all_triplets",
"[",
"next_idx",
"]",
"[",
"0",
"]",
":",
"_prob",
",",
"correct",
",",
"is_good",
"=",
"all_triplets",
"[",
"next_idx",
"]",
"if",
"is_good",
":",
"good_covered_and_correct",
"+=",
"correct",
"else",
":",
"if",
"next_prob_to_include",
"<=",
".5",
":",
"failure_opportunities",
"+=",
"1",
"else",
":",
"failure_opportunities",
"+=",
"1",
"-",
"correct",
"bad_covered_and_incorrect",
"+=",
"1",
"-",
"correct",
"next_idx",
"+=",
"1",
"if",
"next_idx",
"==",
"len",
"(",
"all_triplets",
")",
":",
"break",
"if",
"next_prob_to_include",
"<=",
".5",
":",
"bounded",
"=",
"True",
"out",
"=",
"(",
"fail_optimal",
",",
"success_optimal",
",",
"fail_lower_bound",
",",
"fail_upper_bound",
",",
"success_bounded",
")",
"return",
"out"
] |
Make a success-failure curve.
:param report: A confidence report
(the type of object saved by make_confidence_report.py)
:param success_name: see plot_report_from_path
:param fail_names: see plot_report_from_path
:returns:
fail_optimal: list of failure rates on adversarial data for the optimal
(t >= .5) part of the curve. Each entry corresponds to a different
threshold. Thresholds are chosen to make the smoothest possible curve
from the available data, e.g. one threshold between each unique
confidence value observed in the data. To make sure that linear
interpolation between points in the curve never overestimates the
failure rate for a specific success rate, the curve also includes
extra points that increment the failure rate prior to any point
that increments the success rate, so the curve moves up and to the
right in a series of backwards "L" shapes rather than moving up
and to the right along diagonal lines. For large datasets these
maximally pessimistic points will usually not be visible and the
curve will appear smooth.
success_optimal: list of success rates on clean data on the optimal
part of the curve. Matches up with `fail_optimal`.
fail_lower_bound: list of observed failure rates on the t < .5 portion
of the curve where MaxConfidence is not optimal.
fail_upper_bound: list of upper bounds (assuming good enough optimization,
so not a true upper bound) on the failure rates on the t < .5 portion
of the curve where MaxConfidence is not optimal. Matches up with
`fail_lower_bound`.
success_bounded: success rates on the non-optimal part of the curve.
Matches up with `fail_lower_bound` and `fail_upper_bound`.
|
[
"Make",
"a",
"success",
"-",
"failure",
"curve",
".",
":",
"param",
"report",
":",
"A",
"confidence",
"report",
"(",
"the",
"type",
"of",
"object",
"saved",
"by",
"make_confidence_report",
".",
"py",
")",
":",
"param",
"success_name",
":",
"see",
"plot_report_from_path",
":",
"param",
"fail_names",
":",
"see",
"plot_report_from_path",
":",
"returns",
":",
"fail_optimal",
":",
"list",
"of",
"failure",
"rates",
"on",
"adversarial",
"data",
"for",
"the",
"optimal",
"(",
"t",
">",
"=",
".",
"5",
")",
"part",
"of",
"the",
"curve",
".",
"Each",
"entry",
"corresponds",
"to",
"a",
"different",
"threshold",
".",
"Thresholds",
"are",
"chosen",
"to",
"make",
"the",
"smoothest",
"possible",
"curve",
"from",
"the",
"available",
"data",
"e",
".",
"g",
".",
"one",
"threshold",
"between",
"each",
"unique",
"confidence",
"value",
"observed",
"in",
"the",
"data",
".",
"To",
"make",
"sure",
"that",
"linear",
"interpolation",
"between",
"points",
"in",
"the",
"curve",
"never",
"overestimates",
"the",
"failure",
"rate",
"for",
"a",
"specific",
"success",
"rate",
"the",
"curve",
"also",
"includes",
"extra",
"points",
"that",
"increment",
"the",
"failure",
"rate",
"prior",
"to",
"any",
"point",
"that",
"increments",
"the",
"success",
"rate",
"so",
"the",
"curve",
"moves",
"up",
"and",
"to",
"the",
"right",
"in",
"a",
"series",
"of",
"backwards",
"L",
"shapes",
"rather",
"than",
"moving",
"up",
"and",
"to",
"the",
"right",
"along",
"diagonal",
"lines",
".",
"For",
"large",
"datasets",
"these",
"maximally",
"pessimistic",
"points",
"will",
"usually",
"not",
"be",
"visible",
"and",
"the",
"curve",
"will",
"appear",
"smooth",
".",
"success_optimal",
":",
"list",
"of",
"success",
"rates",
"on",
"clean",
"data",
"on",
"the",
"optimal",
"part",
"of",
"the",
"curve",
".",
"Matches",
"up",
"with",
"fail_optimal",
".",
"fail_lower_bound",
":",
"list",
"of",
"observed",
"failure",
"rates",
"on",
"the",
"t",
"<",
".",
"5",
"portion",
"of",
"the",
"curve",
"where",
"MaxConfidence",
"is",
"not",
"optimal",
".",
"fail_upper_bound",
":",
"list",
"of",
"upper",
"bounds",
"(",
"assuming",
"good",
"enough",
"optimization",
"so",
"not",
"a",
"true",
"upper",
"bound",
")",
"on",
"the",
"failure",
"rates",
"on",
"the",
"t",
"<",
".",
"5",
"portion",
"of",
"the",
"curve",
"where",
"MaxConfidence",
"is",
"not",
"optimal",
".",
"Matches",
"up",
"with",
"fail_lower_bound",
".",
"success_bounded",
":",
"success",
"rates",
"on",
"the",
"non",
"-",
"optimal",
"part",
"of",
"the",
"curve",
".",
"Matches",
"up",
"with",
"fail_lower_bound",
"and",
"fail_upper_bound",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/plot/success_fail.py#L100-L274
|
train
|
tensorflow/cleverhans
|
examples/multigpu_advtrain/trainer.py
|
TrainManager.model_train
|
def model_train(self):
"""
Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param hparams.save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
"""
assert self.runner is not None, (
"""Runner is not initialized. TrainerSingleGPU or TrainerMultiGPU
instantiate a Runner object at initialization time.""")
hparams = self.hparams
batch_size = hparams.batch_size
nb_epochs = hparams.nb_epochs
train_dir = hparams.save_dir
filename = 'model.ckpt'
X_train = self.X_train
Y_train = self.Y_train
sess = self.sess
with sess.as_default():
X_batch = X_train[:batch_size]
Y_batch = Y_train[:batch_size]
self._init_tf(X_batch, Y_batch)
for epoch in six.moves.xrange(nb_epochs):
logging.info("Epoch " + str(epoch))
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / batch_size))
assert nb_batches * batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
self.rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
# Compute batch start and end indices
start, end = batch_indices(
batch, len(X_train), batch_size)
# Perform one training step
self._update_learning_params()
# Train step
X_batch = X_train[index_shuf[start:end]]
Y_batch = Y_train[index_shuf[start:end]]
self._run({'x_pre': X_batch, 'y': Y_batch})
self._sync_params()
# Clean up the queue
while not self.runner.is_finished():
self._run()
self._sync_params(forced=True)
assert end >= len(X_train), (
'Not all training examples are used.')
cur = time.time()
logging.info("\tEpoch took " + str(cur - prev) + " seconds")
prev = cur
self.eval()
# Save model
cond = ((epoch+1) % hparams.save_steps == 0
or epoch == nb_epochs)
if hparams.save and cond:
save_path = os.path.join(train_dir, filename)
saver = tf.train.Saver()
saver.save(sess, save_path)
logging.info("Model saved at: " + str(save_path))
logging.info("Completed model training.")
|
python
|
def model_train(self):
"""
Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param hparams.save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
"""
assert self.runner is not None, (
"""Runner is not initialized. TrainerSingleGPU or TrainerMultiGPU
instantiate a Runner object at initialization time.""")
hparams = self.hparams
batch_size = hparams.batch_size
nb_epochs = hparams.nb_epochs
train_dir = hparams.save_dir
filename = 'model.ckpt'
X_train = self.X_train
Y_train = self.Y_train
sess = self.sess
with sess.as_default():
X_batch = X_train[:batch_size]
Y_batch = Y_train[:batch_size]
self._init_tf(X_batch, Y_batch)
for epoch in six.moves.xrange(nb_epochs):
logging.info("Epoch " + str(epoch))
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / batch_size))
assert nb_batches * batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
self.rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
# Compute batch start and end indices
start, end = batch_indices(
batch, len(X_train), batch_size)
# Perform one training step
self._update_learning_params()
# Train step
X_batch = X_train[index_shuf[start:end]]
Y_batch = Y_train[index_shuf[start:end]]
self._run({'x_pre': X_batch, 'y': Y_batch})
self._sync_params()
# Clean up the queue
while not self.runner.is_finished():
self._run()
self._sync_params(forced=True)
assert end >= len(X_train), (
'Not all training examples are used.')
cur = time.time()
logging.info("\tEpoch took " + str(cur - prev) + " seconds")
prev = cur
self.eval()
# Save model
cond = ((epoch+1) % hparams.save_steps == 0
or epoch == nb_epochs)
if hparams.save and cond:
save_path = os.path.join(train_dir, filename)
saver = tf.train.Saver()
saver.save(sess, save_path)
logging.info("Model saved at: " + str(save_path))
logging.info("Completed model training.")
|
[
"def",
"model_train",
"(",
"self",
")",
":",
"assert",
"self",
".",
"runner",
"is",
"not",
"None",
",",
"(",
"\"\"\"Runner is not initialized. TrainerSingleGPU or TrainerMultiGPU\n instantiate a Runner object at initialization time.\"\"\"",
")",
"hparams",
"=",
"self",
".",
"hparams",
"batch_size",
"=",
"hparams",
".",
"batch_size",
"nb_epochs",
"=",
"hparams",
".",
"nb_epochs",
"train_dir",
"=",
"hparams",
".",
"save_dir",
"filename",
"=",
"'model.ckpt'",
"X_train",
"=",
"self",
".",
"X_train",
"Y_train",
"=",
"self",
".",
"Y_train",
"sess",
"=",
"self",
".",
"sess",
"with",
"sess",
".",
"as_default",
"(",
")",
":",
"X_batch",
"=",
"X_train",
"[",
":",
"batch_size",
"]",
"Y_batch",
"=",
"Y_train",
"[",
":",
"batch_size",
"]",
"self",
".",
"_init_tf",
"(",
"X_batch",
",",
"Y_batch",
")",
"for",
"epoch",
"in",
"six",
".",
"moves",
".",
"xrange",
"(",
"nb_epochs",
")",
":",
"logging",
".",
"info",
"(",
"\"Epoch \"",
"+",
"str",
"(",
"epoch",
")",
")",
"# Compute number of batches",
"nb_batches",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"float",
"(",
"len",
"(",
"X_train",
")",
")",
"/",
"batch_size",
")",
")",
"assert",
"nb_batches",
"*",
"batch_size",
">=",
"len",
"(",
"X_train",
")",
"# Indices to shuffle training set",
"index_shuf",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"X_train",
")",
")",
")",
"self",
".",
"rng",
".",
"shuffle",
"(",
"index_shuf",
")",
"prev",
"=",
"time",
".",
"time",
"(",
")",
"for",
"batch",
"in",
"range",
"(",
"nb_batches",
")",
":",
"# Compute batch start and end indices",
"start",
",",
"end",
"=",
"batch_indices",
"(",
"batch",
",",
"len",
"(",
"X_train",
")",
",",
"batch_size",
")",
"# Perform one training step",
"self",
".",
"_update_learning_params",
"(",
")",
"# Train step",
"X_batch",
"=",
"X_train",
"[",
"index_shuf",
"[",
"start",
":",
"end",
"]",
"]",
"Y_batch",
"=",
"Y_train",
"[",
"index_shuf",
"[",
"start",
":",
"end",
"]",
"]",
"self",
".",
"_run",
"(",
"{",
"'x_pre'",
":",
"X_batch",
",",
"'y'",
":",
"Y_batch",
"}",
")",
"self",
".",
"_sync_params",
"(",
")",
"# Clean up the queue",
"while",
"not",
"self",
".",
"runner",
".",
"is_finished",
"(",
")",
":",
"self",
".",
"_run",
"(",
")",
"self",
".",
"_sync_params",
"(",
"forced",
"=",
"True",
")",
"assert",
"end",
">=",
"len",
"(",
"X_train",
")",
",",
"(",
"'Not all training examples are used.'",
")",
"cur",
"=",
"time",
".",
"time",
"(",
")",
"logging",
".",
"info",
"(",
"\"\\tEpoch took \"",
"+",
"str",
"(",
"cur",
"-",
"prev",
")",
"+",
"\" seconds\"",
")",
"prev",
"=",
"cur",
"self",
".",
"eval",
"(",
")",
"# Save model",
"cond",
"=",
"(",
"(",
"epoch",
"+",
"1",
")",
"%",
"hparams",
".",
"save_steps",
"==",
"0",
"or",
"epoch",
"==",
"nb_epochs",
")",
"if",
"hparams",
".",
"save",
"and",
"cond",
":",
"save_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"train_dir",
",",
"filename",
")",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
")",
"saver",
".",
"save",
"(",
"sess",
",",
"save_path",
")",
"logging",
".",
"info",
"(",
"\"Model saved at: \"",
"+",
"str",
"(",
"save_path",
")",
")",
"logging",
".",
"info",
"(",
"\"Completed model training.\"",
")"
] |
Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param hparams.save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
|
[
"Train",
"a",
"TF",
"graph",
":",
"param",
"sess",
":",
"TF",
"session",
"to",
"use",
"when",
"training",
"the",
"graph",
":",
"param",
"x",
":",
"input",
"placeholder",
":",
"param",
"y",
":",
"output",
"placeholder",
"(",
"for",
"labels",
")",
":",
"param",
"predictions",
":",
"model",
"output",
"predictions",
":",
"param",
"X_train",
":",
"numpy",
"array",
"with",
"training",
"inputs",
":",
"param",
"Y_train",
":",
"numpy",
"array",
"with",
"training",
"outputs",
":",
"param",
"hparams",
".",
"save",
":",
"boolean",
"controlling",
"the",
"save",
"operation",
":",
"param",
"predictions_adv",
":",
"if",
"set",
"with",
"the",
"adversarial",
"example",
"tensor",
"will",
"run",
"adversarial",
"training",
":",
"param",
"evaluate",
":",
"function",
"that",
"is",
"run",
"after",
"each",
"training",
"iteration",
"(",
"typically",
"to",
"display",
"the",
"test",
"/",
"validation",
"accuracy",
")",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/trainer.py#L191-L274
|
train
|
tensorflow/cleverhans
|
examples/multigpu_advtrain/trainer.py
|
TrainerMultiGPU.clone_g0_inputs_on_ngpus
|
def clone_g0_inputs_on_ngpus(self, inputs, outputs, g0_inputs):
"""
Clone variables unused by the attack on all GPUs. Specifically, the
ground-truth label, y, has to be preserved until the training step.
:param inputs: A list of dictionaries as the inputs to each step.
:param outputs: A list of dictionaries as the outputs of each step.
:param g0_inputs: Initial variables to be cloned.
:return: Updated inputs and outputs.
"""
assert len(inputs) == len(outputs), (
'Inputs and outputs should have the same number of elements.')
inputs[0].update(g0_inputs)
outputs[0].update(g0_inputs)
# Copy g0_inputs forward
for i in range(1, len(inputs)):
# Create the graph for i'th step of attack
device_name = inputs[i]['x'].device
with tf.device(device_name):
with tf.variable_scope('step%d' % i):
for k, v in g0_inputs.iteritems():
if k not in inputs[i]:
v_copy = clone_variable(k, v)
inputs[i][k] = v_copy
outputs[i][k] = v_copy
return inputs, outputs
|
python
|
def clone_g0_inputs_on_ngpus(self, inputs, outputs, g0_inputs):
"""
Clone variables unused by the attack on all GPUs. Specifically, the
ground-truth label, y, has to be preserved until the training step.
:param inputs: A list of dictionaries as the inputs to each step.
:param outputs: A list of dictionaries as the outputs of each step.
:param g0_inputs: Initial variables to be cloned.
:return: Updated inputs and outputs.
"""
assert len(inputs) == len(outputs), (
'Inputs and outputs should have the same number of elements.')
inputs[0].update(g0_inputs)
outputs[0].update(g0_inputs)
# Copy g0_inputs forward
for i in range(1, len(inputs)):
# Create the graph for i'th step of attack
device_name = inputs[i]['x'].device
with tf.device(device_name):
with tf.variable_scope('step%d' % i):
for k, v in g0_inputs.iteritems():
if k not in inputs[i]:
v_copy = clone_variable(k, v)
inputs[i][k] = v_copy
outputs[i][k] = v_copy
return inputs, outputs
|
[
"def",
"clone_g0_inputs_on_ngpus",
"(",
"self",
",",
"inputs",
",",
"outputs",
",",
"g0_inputs",
")",
":",
"assert",
"len",
"(",
"inputs",
")",
"==",
"len",
"(",
"outputs",
")",
",",
"(",
"'Inputs and outputs should have the same number of elements.'",
")",
"inputs",
"[",
"0",
"]",
".",
"update",
"(",
"g0_inputs",
")",
"outputs",
"[",
"0",
"]",
".",
"update",
"(",
"g0_inputs",
")",
"# Copy g0_inputs forward",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"inputs",
")",
")",
":",
"# Create the graph for i'th step of attack",
"device_name",
"=",
"inputs",
"[",
"i",
"]",
"[",
"'x'",
"]",
".",
"device",
"with",
"tf",
".",
"device",
"(",
"device_name",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"'step%d'",
"%",
"i",
")",
":",
"for",
"k",
",",
"v",
"in",
"g0_inputs",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"not",
"in",
"inputs",
"[",
"i",
"]",
":",
"v_copy",
"=",
"clone_variable",
"(",
"k",
",",
"v",
")",
"inputs",
"[",
"i",
"]",
"[",
"k",
"]",
"=",
"v_copy",
"outputs",
"[",
"i",
"]",
"[",
"k",
"]",
"=",
"v_copy",
"return",
"inputs",
",",
"outputs"
] |
Clone variables unused by the attack on all GPUs. Specifically, the
ground-truth label, y, has to be preserved until the training step.
:param inputs: A list of dictionaries as the inputs to each step.
:param outputs: A list of dictionaries as the outputs of each step.
:param g0_inputs: Initial variables to be cloned.
:return: Updated inputs and outputs.
|
[
"Clone",
"variables",
"unused",
"by",
"the",
"attack",
"on",
"all",
"GPUs",
".",
"Specifically",
"the",
"ground",
"-",
"truth",
"label",
"y",
"has",
"to",
"be",
"preserved",
"until",
"the",
"training",
"step",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/trainer.py#L313-L341
|
train
|
tensorflow/cleverhans
|
cleverhans/attacks/lbfgs.py
|
LBFGS.generate
|
def generate(self, x, **kwargs):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param x: (required) A tensor with the inputs.
:param kwargs: See `parse_params`
"""
assert self.sess is not None, \
'Cannot use `generate` when no `sess` was provided'
self.parse_params(**kwargs)
if self.y_target is None:
self.y_target, nb_classes = self.get_or_guess_labels(x, kwargs)
self.targeted_attack = False
else:
_, nb_classes = self.get_or_guess_labels(x, kwargs)
self.targeted_attack = True
attack = LBFGS_impl(
self.sess, x, self.model.get_logits(x),
self.y_target, self.targeted_attack,
self.binary_search_steps, self.max_iterations, self.initial_const,
self.clip_min, self.clip_max, nb_classes, self.batch_size)
def lbfgs_wrap(x_val, y_val):
"""
Wrapper creating TensorFlow interface for use with py_func
"""
return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)
wrap = tf.py_func(lbfgs_wrap, [x, self.y_target], self.tf_dtype)
wrap.set_shape(x.get_shape())
return wrap
|
python
|
def generate(self, x, **kwargs):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param x: (required) A tensor with the inputs.
:param kwargs: See `parse_params`
"""
assert self.sess is not None, \
'Cannot use `generate` when no `sess` was provided'
self.parse_params(**kwargs)
if self.y_target is None:
self.y_target, nb_classes = self.get_or_guess_labels(x, kwargs)
self.targeted_attack = False
else:
_, nb_classes = self.get_or_guess_labels(x, kwargs)
self.targeted_attack = True
attack = LBFGS_impl(
self.sess, x, self.model.get_logits(x),
self.y_target, self.targeted_attack,
self.binary_search_steps, self.max_iterations, self.initial_const,
self.clip_min, self.clip_max, nb_classes, self.batch_size)
def lbfgs_wrap(x_val, y_val):
"""
Wrapper creating TensorFlow interface for use with py_func
"""
return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)
wrap = tf.py_func(lbfgs_wrap, [x, self.y_target], self.tf_dtype)
wrap.set_shape(x.get_shape())
return wrap
|
[
"def",
"generate",
"(",
"self",
",",
"x",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"self",
".",
"sess",
"is",
"not",
"None",
",",
"'Cannot use `generate` when no `sess` was provided'",
"self",
".",
"parse_params",
"(",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"y_target",
"is",
"None",
":",
"self",
".",
"y_target",
",",
"nb_classes",
"=",
"self",
".",
"get_or_guess_labels",
"(",
"x",
",",
"kwargs",
")",
"self",
".",
"targeted_attack",
"=",
"False",
"else",
":",
"_",
",",
"nb_classes",
"=",
"self",
".",
"get_or_guess_labels",
"(",
"x",
",",
"kwargs",
")",
"self",
".",
"targeted_attack",
"=",
"True",
"attack",
"=",
"LBFGS_impl",
"(",
"self",
".",
"sess",
",",
"x",
",",
"self",
".",
"model",
".",
"get_logits",
"(",
"x",
")",
",",
"self",
".",
"y_target",
",",
"self",
".",
"targeted_attack",
",",
"self",
".",
"binary_search_steps",
",",
"self",
".",
"max_iterations",
",",
"self",
".",
"initial_const",
",",
"self",
".",
"clip_min",
",",
"self",
".",
"clip_max",
",",
"nb_classes",
",",
"self",
".",
"batch_size",
")",
"def",
"lbfgs_wrap",
"(",
"x_val",
",",
"y_val",
")",
":",
"\"\"\"\n Wrapper creating TensorFlow interface for use with py_func\n \"\"\"",
"return",
"np",
".",
"array",
"(",
"attack",
".",
"attack",
"(",
"x_val",
",",
"y_val",
")",
",",
"dtype",
"=",
"self",
".",
"np_dtype",
")",
"wrap",
"=",
"tf",
".",
"py_func",
"(",
"lbfgs_wrap",
",",
"[",
"x",
",",
"self",
".",
"y_target",
"]",
",",
"self",
".",
"tf_dtype",
")",
"wrap",
".",
"set_shape",
"(",
"x",
".",
"get_shape",
"(",
")",
")",
"return",
"wrap"
] |
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param x: (required) A tensor with the inputs.
:param kwargs: See `parse_params`
|
[
"Return",
"a",
"tensor",
"that",
"constructs",
"adversarial",
"examples",
"for",
"the",
"given",
"input",
".",
"Generate",
"uses",
"tf",
".",
"py_func",
"in",
"order",
"to",
"operate",
"over",
"tensors",
".",
":",
"param",
"x",
":",
"(",
"required",
")",
"A",
"tensor",
"with",
"the",
"inputs",
".",
":",
"param",
"kwargs",
":",
"See",
"parse_params"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/lbfgs.py#L41-L74
|
train
|
tensorflow/cleverhans
|
cleverhans/attacks/lbfgs.py
|
LBFGS.parse_params
|
def parse_params(self,
y_target=None,
batch_size=1,
binary_search_steps=5,
max_iterations=1000,
initial_const=1e-2,
clip_min=0,
clip_max=1):
"""
:param y_target: (optional) A tensor with the one-hot target labels.
:param batch_size: The number of inputs to include in a batch and
process simultaneously.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
constant between norm of the purturbation
and cross-entropy loss of classification.
:param max_iterations: The maximum number of iterations.
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and cross-entropy loss of the classification.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
self.y_target = y_target
self.batch_size = batch_size
self.binary_search_steps = binary_search_steps
self.max_iterations = max_iterations
self.initial_const = initial_const
self.clip_min = clip_min
self.clip_max = clip_max
|
python
|
def parse_params(self,
y_target=None,
batch_size=1,
binary_search_steps=5,
max_iterations=1000,
initial_const=1e-2,
clip_min=0,
clip_max=1):
"""
:param y_target: (optional) A tensor with the one-hot target labels.
:param batch_size: The number of inputs to include in a batch and
process simultaneously.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
constant between norm of the purturbation
and cross-entropy loss of classification.
:param max_iterations: The maximum number of iterations.
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and cross-entropy loss of the classification.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
self.y_target = y_target
self.batch_size = batch_size
self.binary_search_steps = binary_search_steps
self.max_iterations = max_iterations
self.initial_const = initial_const
self.clip_min = clip_min
self.clip_max = clip_max
|
[
"def",
"parse_params",
"(",
"self",
",",
"y_target",
"=",
"None",
",",
"batch_size",
"=",
"1",
",",
"binary_search_steps",
"=",
"5",
",",
"max_iterations",
"=",
"1000",
",",
"initial_const",
"=",
"1e-2",
",",
"clip_min",
"=",
"0",
",",
"clip_max",
"=",
"1",
")",
":",
"self",
".",
"y_target",
"=",
"y_target",
"self",
".",
"batch_size",
"=",
"batch_size",
"self",
".",
"binary_search_steps",
"=",
"binary_search_steps",
"self",
".",
"max_iterations",
"=",
"max_iterations",
"self",
".",
"initial_const",
"=",
"initial_const",
"self",
".",
"clip_min",
"=",
"clip_min",
"self",
".",
"clip_max",
"=",
"clip_max"
] |
:param y_target: (optional) A tensor with the one-hot target labels.
:param batch_size: The number of inputs to include in a batch and
process simultaneously.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
constant between norm of the purturbation
and cross-entropy loss of classification.
:param max_iterations: The maximum number of iterations.
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and cross-entropy loss of the classification.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
|
[
":",
"param",
"y_target",
":",
"(",
"optional",
")",
"A",
"tensor",
"with",
"the",
"one",
"-",
"hot",
"target",
"labels",
".",
":",
"param",
"batch_size",
":",
"The",
"number",
"of",
"inputs",
"to",
"include",
"in",
"a",
"batch",
"and",
"process",
"simultaneously",
".",
":",
"param",
"binary_search_steps",
":",
"The",
"number",
"of",
"times",
"we",
"perform",
"binary",
"search",
"to",
"find",
"the",
"optimal",
"tradeoff",
"-",
"constant",
"between",
"norm",
"of",
"the",
"purturbation",
"and",
"cross",
"-",
"entropy",
"loss",
"of",
"classification",
".",
":",
"param",
"max_iterations",
":",
"The",
"maximum",
"number",
"of",
"iterations",
".",
":",
"param",
"initial_const",
":",
"The",
"initial",
"tradeoff",
"-",
"constant",
"to",
"use",
"to",
"tune",
"the",
"relative",
"importance",
"of",
"size",
"of",
"the",
"perturbation",
"and",
"cross",
"-",
"entropy",
"loss",
"of",
"the",
"classification",
".",
":",
"param",
"clip_min",
":",
"(",
"optional",
"float",
")",
"Minimum",
"input",
"component",
"value",
":",
"param",
"clip_max",
":",
"(",
"optional",
"float",
")",
"Maximum",
"input",
"component",
"value"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/lbfgs.py#L76-L105
|
train
|
tensorflow/cleverhans
|
cleverhans/attacks/lbfgs.py
|
LBFGS_impl.attack
|
def attack(self, x_val, targets):
"""
Perform the attack on the given instance for the given targets.
"""
def lbfgs_objective(adv_x, self, targets, oimgs, CONST):
""" returns the function value and the gradient for fmin_l_bfgs_b """
loss = self.sess.run(
self.loss,
feed_dict={
self.x: adv_x.reshape(oimgs.shape),
self.targeted_label: targets,
self.ori_img: oimgs,
self.const: CONST
})
grad = self.sess.run(
self.grad,
feed_dict={
self.x: adv_x.reshape(oimgs.shape),
self.targeted_label: targets,
self.ori_img: oimgs,
self.const: CONST
})
return loss, grad.flatten().astype(float)
def attack_success(out, target, targeted_attack):
""" returns attack result """
if targeted_attack:
return out == target
else:
return out != target
# begin the main part for the attack
from scipy.optimize import fmin_l_bfgs_b
oimgs = np.clip(x_val, self.clip_min, self.clip_max)
CONST = np.ones(self.batch_size) * self.initial_const
# set the lower and upper bounds accordingly
lower_bound = np.zeros(self.batch_size)
upper_bound = np.ones(self.batch_size) * 1e10
# set the box constraints for the optimization function
clip_min = self.clip_min * np.ones(oimgs.shape[:])
clip_max = self.clip_max * np.ones(oimgs.shape[:])
clip_bound = list(zip(clip_min.flatten(), clip_max.flatten()))
# placeholders for the best l2 and instance attack found so far
o_bestl2 = [1e10] * self.batch_size
o_bestattack = np.copy(oimgs)
for outer_step in range(self.binary_search_steps):
_logger.debug(" Binary search step %s of %s",
outer_step, self.binary_search_steps)
# The last iteration (if we run many steps) repeat the search once.
if self.repeat and outer_step == self.binary_search_steps - 1:
CONST = upper_bound
# optimization function
adv_x, _, __ = fmin_l_bfgs_b(
lbfgs_objective,
oimgs.flatten().astype(float),
args=(self, targets, oimgs, CONST),
bounds=clip_bound,
maxiter=self.max_iterations,
iprint=0)
adv_x = adv_x.reshape(oimgs.shape)
assert np.amax(adv_x) <= self.clip_max and \
np.amin(adv_x) >= self.clip_min, \
'fmin_l_bfgs_b returns are invalid'
# adjust the best result (i.e., the adversarial example with the
# smallest perturbation in terms of L_2 norm) found so far
preds = np.atleast_1d(
utils_tf.model_argmax(self.sess, self.x, self.logits,
adv_x))
_logger.debug("predicted labels are %s", preds)
l2s = np.zeros(self.batch_size)
for i in range(self.batch_size):
l2s[i] = np.sum(np.square(adv_x[i] - oimgs[i]))
for e, (l2, pred, ii) in enumerate(zip(l2s, preds, adv_x)):
if l2 < o_bestl2[e] and attack_success(pred, np.argmax(targets[e]),
self.targeted_attack):
o_bestl2[e] = l2
o_bestattack[e] = ii
# adjust the constant as needed
for e in range(self.batch_size):
if attack_success(preds[e], np.argmax(targets[e]),
self.targeted_attack):
# success, divide const by two
upper_bound[e] = min(upper_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
# failure, either multiply by 10 if no solution found yet
# or do binary search with the known upper bound
lower_bound[e] = max(lower_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
CONST[e] *= 10
_logger.debug(" Successfully generated adversarial examples "
"on %s of %s instances.",
sum(upper_bound < 1e9), self.batch_size)
o_bestl2 = np.array(o_bestl2)
mean = np.mean(np.sqrt(o_bestl2[o_bestl2 < 1e9]))
_logger.debug(" Mean successful distortion: {:.4g}".format(mean))
# return the best solution found
o_bestl2 = np.array(o_bestl2)
return o_bestattack
|
python
|
def attack(self, x_val, targets):
"""
Perform the attack on the given instance for the given targets.
"""
def lbfgs_objective(adv_x, self, targets, oimgs, CONST):
""" returns the function value and the gradient for fmin_l_bfgs_b """
loss = self.sess.run(
self.loss,
feed_dict={
self.x: adv_x.reshape(oimgs.shape),
self.targeted_label: targets,
self.ori_img: oimgs,
self.const: CONST
})
grad = self.sess.run(
self.grad,
feed_dict={
self.x: adv_x.reshape(oimgs.shape),
self.targeted_label: targets,
self.ori_img: oimgs,
self.const: CONST
})
return loss, grad.flatten().astype(float)
def attack_success(out, target, targeted_attack):
""" returns attack result """
if targeted_attack:
return out == target
else:
return out != target
# begin the main part for the attack
from scipy.optimize import fmin_l_bfgs_b
oimgs = np.clip(x_val, self.clip_min, self.clip_max)
CONST = np.ones(self.batch_size) * self.initial_const
# set the lower and upper bounds accordingly
lower_bound = np.zeros(self.batch_size)
upper_bound = np.ones(self.batch_size) * 1e10
# set the box constraints for the optimization function
clip_min = self.clip_min * np.ones(oimgs.shape[:])
clip_max = self.clip_max * np.ones(oimgs.shape[:])
clip_bound = list(zip(clip_min.flatten(), clip_max.flatten()))
# placeholders for the best l2 and instance attack found so far
o_bestl2 = [1e10] * self.batch_size
o_bestattack = np.copy(oimgs)
for outer_step in range(self.binary_search_steps):
_logger.debug(" Binary search step %s of %s",
outer_step, self.binary_search_steps)
# The last iteration (if we run many steps) repeat the search once.
if self.repeat and outer_step == self.binary_search_steps - 1:
CONST = upper_bound
# optimization function
adv_x, _, __ = fmin_l_bfgs_b(
lbfgs_objective,
oimgs.flatten().astype(float),
args=(self, targets, oimgs, CONST),
bounds=clip_bound,
maxiter=self.max_iterations,
iprint=0)
adv_x = adv_x.reshape(oimgs.shape)
assert np.amax(adv_x) <= self.clip_max and \
np.amin(adv_x) >= self.clip_min, \
'fmin_l_bfgs_b returns are invalid'
# adjust the best result (i.e., the adversarial example with the
# smallest perturbation in terms of L_2 norm) found so far
preds = np.atleast_1d(
utils_tf.model_argmax(self.sess, self.x, self.logits,
adv_x))
_logger.debug("predicted labels are %s", preds)
l2s = np.zeros(self.batch_size)
for i in range(self.batch_size):
l2s[i] = np.sum(np.square(adv_x[i] - oimgs[i]))
for e, (l2, pred, ii) in enumerate(zip(l2s, preds, adv_x)):
if l2 < o_bestl2[e] and attack_success(pred, np.argmax(targets[e]),
self.targeted_attack):
o_bestl2[e] = l2
o_bestattack[e] = ii
# adjust the constant as needed
for e in range(self.batch_size):
if attack_success(preds[e], np.argmax(targets[e]),
self.targeted_attack):
# success, divide const by two
upper_bound[e] = min(upper_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
# failure, either multiply by 10 if no solution found yet
# or do binary search with the known upper bound
lower_bound[e] = max(lower_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
CONST[e] *= 10
_logger.debug(" Successfully generated adversarial examples "
"on %s of %s instances.",
sum(upper_bound < 1e9), self.batch_size)
o_bestl2 = np.array(o_bestl2)
mean = np.mean(np.sqrt(o_bestl2[o_bestl2 < 1e9]))
_logger.debug(" Mean successful distortion: {:.4g}".format(mean))
# return the best solution found
o_bestl2 = np.array(o_bestl2)
return o_bestattack
|
[
"def",
"attack",
"(",
"self",
",",
"x_val",
",",
"targets",
")",
":",
"def",
"lbfgs_objective",
"(",
"adv_x",
",",
"self",
",",
"targets",
",",
"oimgs",
",",
"CONST",
")",
":",
"\"\"\" returns the function value and the gradient for fmin_l_bfgs_b \"\"\"",
"loss",
"=",
"self",
".",
"sess",
".",
"run",
"(",
"self",
".",
"loss",
",",
"feed_dict",
"=",
"{",
"self",
".",
"x",
":",
"adv_x",
".",
"reshape",
"(",
"oimgs",
".",
"shape",
")",
",",
"self",
".",
"targeted_label",
":",
"targets",
",",
"self",
".",
"ori_img",
":",
"oimgs",
",",
"self",
".",
"const",
":",
"CONST",
"}",
")",
"grad",
"=",
"self",
".",
"sess",
".",
"run",
"(",
"self",
".",
"grad",
",",
"feed_dict",
"=",
"{",
"self",
".",
"x",
":",
"adv_x",
".",
"reshape",
"(",
"oimgs",
".",
"shape",
")",
",",
"self",
".",
"targeted_label",
":",
"targets",
",",
"self",
".",
"ori_img",
":",
"oimgs",
",",
"self",
".",
"const",
":",
"CONST",
"}",
")",
"return",
"loss",
",",
"grad",
".",
"flatten",
"(",
")",
".",
"astype",
"(",
"float",
")",
"def",
"attack_success",
"(",
"out",
",",
"target",
",",
"targeted_attack",
")",
":",
"\"\"\" returns attack result \"\"\"",
"if",
"targeted_attack",
":",
"return",
"out",
"==",
"target",
"else",
":",
"return",
"out",
"!=",
"target",
"# begin the main part for the attack",
"from",
"scipy",
".",
"optimize",
"import",
"fmin_l_bfgs_b",
"oimgs",
"=",
"np",
".",
"clip",
"(",
"x_val",
",",
"self",
".",
"clip_min",
",",
"self",
".",
"clip_max",
")",
"CONST",
"=",
"np",
".",
"ones",
"(",
"self",
".",
"batch_size",
")",
"*",
"self",
".",
"initial_const",
"# set the lower and upper bounds accordingly",
"lower_bound",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"batch_size",
")",
"upper_bound",
"=",
"np",
".",
"ones",
"(",
"self",
".",
"batch_size",
")",
"*",
"1e10",
"# set the box constraints for the optimization function",
"clip_min",
"=",
"self",
".",
"clip_min",
"*",
"np",
".",
"ones",
"(",
"oimgs",
".",
"shape",
"[",
":",
"]",
")",
"clip_max",
"=",
"self",
".",
"clip_max",
"*",
"np",
".",
"ones",
"(",
"oimgs",
".",
"shape",
"[",
":",
"]",
")",
"clip_bound",
"=",
"list",
"(",
"zip",
"(",
"clip_min",
".",
"flatten",
"(",
")",
",",
"clip_max",
".",
"flatten",
"(",
")",
")",
")",
"# placeholders for the best l2 and instance attack found so far",
"o_bestl2",
"=",
"[",
"1e10",
"]",
"*",
"self",
".",
"batch_size",
"o_bestattack",
"=",
"np",
".",
"copy",
"(",
"oimgs",
")",
"for",
"outer_step",
"in",
"range",
"(",
"self",
".",
"binary_search_steps",
")",
":",
"_logger",
".",
"debug",
"(",
"\" Binary search step %s of %s\"",
",",
"outer_step",
",",
"self",
".",
"binary_search_steps",
")",
"# The last iteration (if we run many steps) repeat the search once.",
"if",
"self",
".",
"repeat",
"and",
"outer_step",
"==",
"self",
".",
"binary_search_steps",
"-",
"1",
":",
"CONST",
"=",
"upper_bound",
"# optimization function",
"adv_x",
",",
"_",
",",
"__",
"=",
"fmin_l_bfgs_b",
"(",
"lbfgs_objective",
",",
"oimgs",
".",
"flatten",
"(",
")",
".",
"astype",
"(",
"float",
")",
",",
"args",
"=",
"(",
"self",
",",
"targets",
",",
"oimgs",
",",
"CONST",
")",
",",
"bounds",
"=",
"clip_bound",
",",
"maxiter",
"=",
"self",
".",
"max_iterations",
",",
"iprint",
"=",
"0",
")",
"adv_x",
"=",
"adv_x",
".",
"reshape",
"(",
"oimgs",
".",
"shape",
")",
"assert",
"np",
".",
"amax",
"(",
"adv_x",
")",
"<=",
"self",
".",
"clip_max",
"and",
"np",
".",
"amin",
"(",
"adv_x",
")",
">=",
"self",
".",
"clip_min",
",",
"'fmin_l_bfgs_b returns are invalid'",
"# adjust the best result (i.e., the adversarial example with the",
"# smallest perturbation in terms of L_2 norm) found so far",
"preds",
"=",
"np",
".",
"atleast_1d",
"(",
"utils_tf",
".",
"model_argmax",
"(",
"self",
".",
"sess",
",",
"self",
".",
"x",
",",
"self",
".",
"logits",
",",
"adv_x",
")",
")",
"_logger",
".",
"debug",
"(",
"\"predicted labels are %s\"",
",",
"preds",
")",
"l2s",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"batch_size",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"batch_size",
")",
":",
"l2s",
"[",
"i",
"]",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"square",
"(",
"adv_x",
"[",
"i",
"]",
"-",
"oimgs",
"[",
"i",
"]",
")",
")",
"for",
"e",
",",
"(",
"l2",
",",
"pred",
",",
"ii",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"l2s",
",",
"preds",
",",
"adv_x",
")",
")",
":",
"if",
"l2",
"<",
"o_bestl2",
"[",
"e",
"]",
"and",
"attack_success",
"(",
"pred",
",",
"np",
".",
"argmax",
"(",
"targets",
"[",
"e",
"]",
")",
",",
"self",
".",
"targeted_attack",
")",
":",
"o_bestl2",
"[",
"e",
"]",
"=",
"l2",
"o_bestattack",
"[",
"e",
"]",
"=",
"ii",
"# adjust the constant as needed",
"for",
"e",
"in",
"range",
"(",
"self",
".",
"batch_size",
")",
":",
"if",
"attack_success",
"(",
"preds",
"[",
"e",
"]",
",",
"np",
".",
"argmax",
"(",
"targets",
"[",
"e",
"]",
")",
",",
"self",
".",
"targeted_attack",
")",
":",
"# success, divide const by two",
"upper_bound",
"[",
"e",
"]",
"=",
"min",
"(",
"upper_bound",
"[",
"e",
"]",
",",
"CONST",
"[",
"e",
"]",
")",
"if",
"upper_bound",
"[",
"e",
"]",
"<",
"1e9",
":",
"CONST",
"[",
"e",
"]",
"=",
"(",
"lower_bound",
"[",
"e",
"]",
"+",
"upper_bound",
"[",
"e",
"]",
")",
"/",
"2",
"else",
":",
"# failure, either multiply by 10 if no solution found yet",
"# or do binary search with the known upper bound",
"lower_bound",
"[",
"e",
"]",
"=",
"max",
"(",
"lower_bound",
"[",
"e",
"]",
",",
"CONST",
"[",
"e",
"]",
")",
"if",
"upper_bound",
"[",
"e",
"]",
"<",
"1e9",
":",
"CONST",
"[",
"e",
"]",
"=",
"(",
"lower_bound",
"[",
"e",
"]",
"+",
"upper_bound",
"[",
"e",
"]",
")",
"/",
"2",
"else",
":",
"CONST",
"[",
"e",
"]",
"*=",
"10",
"_logger",
".",
"debug",
"(",
"\" Successfully generated adversarial examples \"",
"\"on %s of %s instances.\"",
",",
"sum",
"(",
"upper_bound",
"<",
"1e9",
")",
",",
"self",
".",
"batch_size",
")",
"o_bestl2",
"=",
"np",
".",
"array",
"(",
"o_bestl2",
")",
"mean",
"=",
"np",
".",
"mean",
"(",
"np",
".",
"sqrt",
"(",
"o_bestl2",
"[",
"o_bestl2",
"<",
"1e9",
"]",
")",
")",
"_logger",
".",
"debug",
"(",
"\" Mean successful distortion: {:.4g}\"",
".",
"format",
"(",
"mean",
")",
")",
"# return the best solution found",
"o_bestl2",
"=",
"np",
".",
"array",
"(",
"o_bestl2",
")",
"return",
"o_bestattack"
] |
Perform the attack on the given instance for the given targets.
|
[
"Perform",
"the",
"attack",
"on",
"the",
"given",
"instance",
"for",
"the",
"given",
"targets",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/lbfgs.py#L166-L281
|
train
|
tensorflow/cleverhans
|
examples/multigpu_advtrain/model.py
|
MLPnGPU.set_device
|
def set_device(self, device_name):
"""
Set the device before the next fprop to create a new graph on the
specified device.
"""
device_name = unify_device_name(device_name)
self.device_name = device_name
for layer in self.layers:
layer.device_name = device_name
|
python
|
def set_device(self, device_name):
"""
Set the device before the next fprop to create a new graph on the
specified device.
"""
device_name = unify_device_name(device_name)
self.device_name = device_name
for layer in self.layers:
layer.device_name = device_name
|
[
"def",
"set_device",
"(",
"self",
",",
"device_name",
")",
":",
"device_name",
"=",
"unify_device_name",
"(",
"device_name",
")",
"self",
".",
"device_name",
"=",
"device_name",
"for",
"layer",
"in",
"self",
".",
"layers",
":",
"layer",
".",
"device_name",
"=",
"device_name"
] |
Set the device before the next fprop to create a new graph on the
specified device.
|
[
"Set",
"the",
"device",
"before",
"the",
"next",
"fprop",
"to",
"create",
"a",
"new",
"graph",
"on",
"the",
"specified",
"device",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/model.py#L206-L214
|
train
|
tensorflow/cleverhans
|
examples/multigpu_advtrain/model.py
|
MLPnGPU.create_sync_ops
|
def create_sync_ops(self, host_device):
"""
Return a list of assignment operations that syncs the parameters
of all model copies with the one on host_device.
:param host_device: (required str) the name of the device with latest
parameters
"""
host_device = unify_device_name(host_device)
sync_ops = []
for layer in self.layers:
if isinstance(layer, LayernGPU):
sync_ops += layer.create_sync_ops(host_device)
return sync_ops
|
python
|
def create_sync_ops(self, host_device):
"""
Return a list of assignment operations that syncs the parameters
of all model copies with the one on host_device.
:param host_device: (required str) the name of the device with latest
parameters
"""
host_device = unify_device_name(host_device)
sync_ops = []
for layer in self.layers:
if isinstance(layer, LayernGPU):
sync_ops += layer.create_sync_ops(host_device)
return sync_ops
|
[
"def",
"create_sync_ops",
"(",
"self",
",",
"host_device",
")",
":",
"host_device",
"=",
"unify_device_name",
"(",
"host_device",
")",
"sync_ops",
"=",
"[",
"]",
"for",
"layer",
"in",
"self",
".",
"layers",
":",
"if",
"isinstance",
"(",
"layer",
",",
"LayernGPU",
")",
":",
"sync_ops",
"+=",
"layer",
".",
"create_sync_ops",
"(",
"host_device",
")",
"return",
"sync_ops"
] |
Return a list of assignment operations that syncs the parameters
of all model copies with the one on host_device.
:param host_device: (required str) the name of the device with latest
parameters
|
[
"Return",
"a",
"list",
"of",
"assignment",
"operations",
"that",
"syncs",
"the",
"parameters",
"of",
"all",
"model",
"copies",
"with",
"the",
"one",
"on",
"host_device",
".",
":",
"param",
"host_device",
":",
"(",
"required",
"str",
")",
"the",
"name",
"of",
"the",
"device",
"with",
"latest",
"parameters"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/model.py#L216-L228
|
train
|
tensorflow/cleverhans
|
examples/multigpu_advtrain/model.py
|
LayernGPU.get_variable
|
def get_variable(self, name, initializer):
"""
Create and initialize a variable using a numpy array and set trainable.
:param name: (required str) name of the variable
:param initializer: a numpy array or a tensor
"""
v = tf.get_variable(name, shape=initializer.shape,
initializer=(lambda shape, dtype, partition_info:
initializer),
trainable=self.training)
return v
|
python
|
def get_variable(self, name, initializer):
"""
Create and initialize a variable using a numpy array and set trainable.
:param name: (required str) name of the variable
:param initializer: a numpy array or a tensor
"""
v = tf.get_variable(name, shape=initializer.shape,
initializer=(lambda shape, dtype, partition_info:
initializer),
trainable=self.training)
return v
|
[
"def",
"get_variable",
"(",
"self",
",",
"name",
",",
"initializer",
")",
":",
"v",
"=",
"tf",
".",
"get_variable",
"(",
"name",
",",
"shape",
"=",
"initializer",
".",
"shape",
",",
"initializer",
"=",
"(",
"lambda",
"shape",
",",
"dtype",
",",
"partition_info",
":",
"initializer",
")",
",",
"trainable",
"=",
"self",
".",
"training",
")",
"return",
"v"
] |
Create and initialize a variable using a numpy array and set trainable.
:param name: (required str) name of the variable
:param initializer: a numpy array or a tensor
|
[
"Create",
"and",
"initialize",
"a",
"variable",
"using",
"a",
"numpy",
"array",
"and",
"set",
"trainable",
".",
":",
"param",
"name",
":",
"(",
"required",
"str",
")",
"name",
"of",
"the",
"variable",
":",
"param",
"initializer",
":",
"a",
"numpy",
"array",
"or",
"a",
"tensor"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/model.py#L254-L264
|
train
|
tensorflow/cleverhans
|
examples/multigpu_advtrain/model.py
|
LayernGPU.set_input_shape_ngpu
|
def set_input_shape_ngpu(self, new_input_shape):
"""
Create and initialize layer parameters on the device previously set
in self.device_name.
:param new_input_shape: a list or tuple for the shape of the input.
"""
assert self.device_name, "Device name has not been set."
device_name = self.device_name
if self.input_shape is None:
# First time setting the input shape
self.input_shape = [None] + [int(d) for d in list(new_input_shape)]
if device_name in self.params_device:
# There is a copy of weights on this device
self.__dict__.update(self.params_device[device_name])
return
# Stop recursion
self.params_device[device_name] = {}
# Initialize weights on this device
with tf.device(device_name):
self.set_input_shape(self.input_shape)
keys_after = self.__dict__.keys()
if self.params_names is None:
# Prevent overriding training
self.params_names = [k for k in keys_after if isinstance(
self.__dict__[k], tf.Variable)]
params = {k: self.__dict__[k] for k in self.params_names}
self.params_device[device_name] = params
|
python
|
def set_input_shape_ngpu(self, new_input_shape):
"""
Create and initialize layer parameters on the device previously set
in self.device_name.
:param new_input_shape: a list or tuple for the shape of the input.
"""
assert self.device_name, "Device name has not been set."
device_name = self.device_name
if self.input_shape is None:
# First time setting the input shape
self.input_shape = [None] + [int(d) for d in list(new_input_shape)]
if device_name in self.params_device:
# There is a copy of weights on this device
self.__dict__.update(self.params_device[device_name])
return
# Stop recursion
self.params_device[device_name] = {}
# Initialize weights on this device
with tf.device(device_name):
self.set_input_shape(self.input_shape)
keys_after = self.__dict__.keys()
if self.params_names is None:
# Prevent overriding training
self.params_names = [k for k in keys_after if isinstance(
self.__dict__[k], tf.Variable)]
params = {k: self.__dict__[k] for k in self.params_names}
self.params_device[device_name] = params
|
[
"def",
"set_input_shape_ngpu",
"(",
"self",
",",
"new_input_shape",
")",
":",
"assert",
"self",
".",
"device_name",
",",
"\"Device name has not been set.\"",
"device_name",
"=",
"self",
".",
"device_name",
"if",
"self",
".",
"input_shape",
"is",
"None",
":",
"# First time setting the input shape",
"self",
".",
"input_shape",
"=",
"[",
"None",
"]",
"+",
"[",
"int",
"(",
"d",
")",
"for",
"d",
"in",
"list",
"(",
"new_input_shape",
")",
"]",
"if",
"device_name",
"in",
"self",
".",
"params_device",
":",
"# There is a copy of weights on this device",
"self",
".",
"__dict__",
".",
"update",
"(",
"self",
".",
"params_device",
"[",
"device_name",
"]",
")",
"return",
"# Stop recursion",
"self",
".",
"params_device",
"[",
"device_name",
"]",
"=",
"{",
"}",
"# Initialize weights on this device",
"with",
"tf",
".",
"device",
"(",
"device_name",
")",
":",
"self",
".",
"set_input_shape",
"(",
"self",
".",
"input_shape",
")",
"keys_after",
"=",
"self",
".",
"__dict__",
".",
"keys",
"(",
")",
"if",
"self",
".",
"params_names",
"is",
"None",
":",
"# Prevent overriding training",
"self",
".",
"params_names",
"=",
"[",
"k",
"for",
"k",
"in",
"keys_after",
"if",
"isinstance",
"(",
"self",
".",
"__dict__",
"[",
"k",
"]",
",",
"tf",
".",
"Variable",
")",
"]",
"params",
"=",
"{",
"k",
":",
"self",
".",
"__dict__",
"[",
"k",
"]",
"for",
"k",
"in",
"self",
".",
"params_names",
"}",
"self",
".",
"params_device",
"[",
"device_name",
"]",
"=",
"params"
] |
Create and initialize layer parameters on the device previously set
in self.device_name.
:param new_input_shape: a list or tuple for the shape of the input.
|
[
"Create",
"and",
"initialize",
"layer",
"parameters",
"on",
"the",
"device",
"previously",
"set",
"in",
"self",
".",
"device_name",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/model.py#L266-L297
|
train
|
tensorflow/cleverhans
|
examples/multigpu_advtrain/model.py
|
LayernGPU.create_sync_ops
|
def create_sync_ops(self, host_device):
"""Create an assignment operation for each weight on all devices. The
weight is assigned the value of the copy on the `host_device'.
"""
sync_ops = []
host_params = self.params_device[host_device]
for device, params in (self.params_device).iteritems():
if device == host_device:
continue
for k in self.params_names:
if isinstance(params[k], tf.Variable):
sync_ops += [tf.assign(params[k], host_params[k])]
return sync_ops
|
python
|
def create_sync_ops(self, host_device):
"""Create an assignment operation for each weight on all devices. The
weight is assigned the value of the copy on the `host_device'.
"""
sync_ops = []
host_params = self.params_device[host_device]
for device, params in (self.params_device).iteritems():
if device == host_device:
continue
for k in self.params_names:
if isinstance(params[k], tf.Variable):
sync_ops += [tf.assign(params[k], host_params[k])]
return sync_ops
|
[
"def",
"create_sync_ops",
"(",
"self",
",",
"host_device",
")",
":",
"sync_ops",
"=",
"[",
"]",
"host_params",
"=",
"self",
".",
"params_device",
"[",
"host_device",
"]",
"for",
"device",
",",
"params",
"in",
"(",
"self",
".",
"params_device",
")",
".",
"iteritems",
"(",
")",
":",
"if",
"device",
"==",
"host_device",
":",
"continue",
"for",
"k",
"in",
"self",
".",
"params_names",
":",
"if",
"isinstance",
"(",
"params",
"[",
"k",
"]",
",",
"tf",
".",
"Variable",
")",
":",
"sync_ops",
"+=",
"[",
"tf",
".",
"assign",
"(",
"params",
"[",
"k",
"]",
",",
"host_params",
"[",
"k",
"]",
")",
"]",
"return",
"sync_ops"
] |
Create an assignment operation for each weight on all devices. The
weight is assigned the value of the copy on the `host_device'.
|
[
"Create",
"an",
"assignment",
"operation",
"for",
"each",
"weight",
"on",
"all",
"devices",
".",
"The",
"weight",
"is",
"assigned",
"the",
"value",
"of",
"the",
"copy",
"on",
"the",
"host_device",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/model.py#L299-L311
|
train
|
tensorflow/cleverhans
|
cleverhans/attacks/virtual_adversarial_method.py
|
vatm
|
def vatm(model,
x,
logits,
eps,
num_iterations=1,
xi=1e-6,
clip_min=None,
clip_max=None,
scope=None):
"""
Tensorflow implementation of the perturbation method used for virtual
adversarial training: https://arxiv.org/abs/1507.00677
:param model: the model which returns the network unnormalized logits
:param x: the input placeholder
:param logits: the model's unnormalized output tensor (the input to
the softmax layer)
:param eps: the epsilon (input variation parameter)
:param num_iterations: the number of iterations
:param xi: the finite difference parameter
:param clip_min: optional parameter that can be used to set a minimum
value for components of the example returned
:param clip_max: optional parameter that can be used to set a maximum
value for components of the example returned
:param seed: the seed for random generator
:return: a tensor for the adversarial example
"""
with tf.name_scope(scope, "virtual_adversarial_perturbation"):
d = tf.random_normal(tf.shape(x), dtype=tf_dtype)
for _ in range(num_iterations):
d = xi * utils_tf.l2_batch_normalize(d)
logits_d = model.get_logits(x + d)
kl = utils_tf.kl_with_logits(logits, logits_d)
Hd = tf.gradients(kl, d)[0]
d = tf.stop_gradient(Hd)
d = eps * utils_tf.l2_batch_normalize(d)
adv_x = x + d
if (clip_min is not None) and (clip_max is not None):
adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
return adv_x
|
python
|
def vatm(model,
x,
logits,
eps,
num_iterations=1,
xi=1e-6,
clip_min=None,
clip_max=None,
scope=None):
"""
Tensorflow implementation of the perturbation method used for virtual
adversarial training: https://arxiv.org/abs/1507.00677
:param model: the model which returns the network unnormalized logits
:param x: the input placeholder
:param logits: the model's unnormalized output tensor (the input to
the softmax layer)
:param eps: the epsilon (input variation parameter)
:param num_iterations: the number of iterations
:param xi: the finite difference parameter
:param clip_min: optional parameter that can be used to set a minimum
value for components of the example returned
:param clip_max: optional parameter that can be used to set a maximum
value for components of the example returned
:param seed: the seed for random generator
:return: a tensor for the adversarial example
"""
with tf.name_scope(scope, "virtual_adversarial_perturbation"):
d = tf.random_normal(tf.shape(x), dtype=tf_dtype)
for _ in range(num_iterations):
d = xi * utils_tf.l2_batch_normalize(d)
logits_d = model.get_logits(x + d)
kl = utils_tf.kl_with_logits(logits, logits_d)
Hd = tf.gradients(kl, d)[0]
d = tf.stop_gradient(Hd)
d = eps * utils_tf.l2_batch_normalize(d)
adv_x = x + d
if (clip_min is not None) and (clip_max is not None):
adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
return adv_x
|
[
"def",
"vatm",
"(",
"model",
",",
"x",
",",
"logits",
",",
"eps",
",",
"num_iterations",
"=",
"1",
",",
"xi",
"=",
"1e-6",
",",
"clip_min",
"=",
"None",
",",
"clip_max",
"=",
"None",
",",
"scope",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"scope",
",",
"\"virtual_adversarial_perturbation\"",
")",
":",
"d",
"=",
"tf",
".",
"random_normal",
"(",
"tf",
".",
"shape",
"(",
"x",
")",
",",
"dtype",
"=",
"tf_dtype",
")",
"for",
"_",
"in",
"range",
"(",
"num_iterations",
")",
":",
"d",
"=",
"xi",
"*",
"utils_tf",
".",
"l2_batch_normalize",
"(",
"d",
")",
"logits_d",
"=",
"model",
".",
"get_logits",
"(",
"x",
"+",
"d",
")",
"kl",
"=",
"utils_tf",
".",
"kl_with_logits",
"(",
"logits",
",",
"logits_d",
")",
"Hd",
"=",
"tf",
".",
"gradients",
"(",
"kl",
",",
"d",
")",
"[",
"0",
"]",
"d",
"=",
"tf",
".",
"stop_gradient",
"(",
"Hd",
")",
"d",
"=",
"eps",
"*",
"utils_tf",
".",
"l2_batch_normalize",
"(",
"d",
")",
"adv_x",
"=",
"x",
"+",
"d",
"if",
"(",
"clip_min",
"is",
"not",
"None",
")",
"and",
"(",
"clip_max",
"is",
"not",
"None",
")",
":",
"adv_x",
"=",
"tf",
".",
"clip_by_value",
"(",
"adv_x",
",",
"clip_min",
",",
"clip_max",
")",
"return",
"adv_x"
] |
Tensorflow implementation of the perturbation method used for virtual
adversarial training: https://arxiv.org/abs/1507.00677
:param model: the model which returns the network unnormalized logits
:param x: the input placeholder
:param logits: the model's unnormalized output tensor (the input to
the softmax layer)
:param eps: the epsilon (input variation parameter)
:param num_iterations: the number of iterations
:param xi: the finite difference parameter
:param clip_min: optional parameter that can be used to set a minimum
value for components of the example returned
:param clip_max: optional parameter that can be used to set a maximum
value for components of the example returned
:param seed: the seed for random generator
:return: a tensor for the adversarial example
|
[
"Tensorflow",
"implementation",
"of",
"the",
"perturbation",
"method",
"used",
"for",
"virtual",
"adversarial",
"training",
":",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1507",
".",
"00677",
":",
"param",
"model",
":",
"the",
"model",
"which",
"returns",
"the",
"network",
"unnormalized",
"logits",
":",
"param",
"x",
":",
"the",
"input",
"placeholder",
":",
"param",
"logits",
":",
"the",
"model",
"s",
"unnormalized",
"output",
"tensor",
"(",
"the",
"input",
"to",
"the",
"softmax",
"layer",
")",
":",
"param",
"eps",
":",
"the",
"epsilon",
"(",
"input",
"variation",
"parameter",
")",
":",
"param",
"num_iterations",
":",
"the",
"number",
"of",
"iterations",
":",
"param",
"xi",
":",
"the",
"finite",
"difference",
"parameter",
":",
"param",
"clip_min",
":",
"optional",
"parameter",
"that",
"can",
"be",
"used",
"to",
"set",
"a",
"minimum",
"value",
"for",
"components",
"of",
"the",
"example",
"returned",
":",
"param",
"clip_max",
":",
"optional",
"parameter",
"that",
"can",
"be",
"used",
"to",
"set",
"a",
"maximum",
"value",
"for",
"components",
"of",
"the",
"example",
"returned",
":",
"param",
"seed",
":",
"the",
"seed",
"for",
"random",
"generator",
":",
"return",
":",
"a",
"tensor",
"for",
"the",
"adversarial",
"example"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/virtual_adversarial_method.py#L107-L145
|
train
|
tensorflow/cleverhans
|
cleverhans/attacks/virtual_adversarial_method.py
|
VirtualAdversarialMethod.generate
|
def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
return vatm(
self.model,
x,
self.model.get_logits(x),
eps=self.eps,
num_iterations=self.num_iterations,
xi=self.xi,
clip_min=self.clip_min,
clip_max=self.clip_max)
|
python
|
def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
return vatm(
self.model,
x,
self.model.get_logits(x),
eps=self.eps,
num_iterations=self.num_iterations,
xi=self.xi,
clip_min=self.clip_min,
clip_max=self.clip_max)
|
[
"def",
"generate",
"(",
"self",
",",
"x",
",",
"*",
"*",
"kwargs",
")",
":",
"# Parse and save attack-specific parameters",
"assert",
"self",
".",
"parse_params",
"(",
"*",
"*",
"kwargs",
")",
"return",
"vatm",
"(",
"self",
".",
"model",
",",
"x",
",",
"self",
".",
"model",
".",
"get_logits",
"(",
"x",
")",
",",
"eps",
"=",
"self",
".",
"eps",
",",
"num_iterations",
"=",
"self",
".",
"num_iterations",
",",
"xi",
"=",
"self",
".",
"xi",
",",
"clip_min",
"=",
"self",
".",
"clip_min",
",",
"clip_max",
"=",
"self",
".",
"clip_max",
")"
] |
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
|
[
"Generate",
"symbolic",
"graph",
"for",
"adversarial",
"examples",
"and",
"return",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/virtual_adversarial_method.py#L43-L61
|
train
|
tensorflow/cleverhans
|
cleverhans/attacks/virtual_adversarial_method.py
|
VirtualAdversarialMethod.parse_params
|
def parse_params(self,
eps=2.0,
nb_iter=None,
xi=1e-6,
clip_min=None,
clip_max=None,
num_iterations=None,
**kwargs):
"""
Take in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
Attack-specific parameters:
:param eps: (optional float )the epsilon (input variation parameter)
:param nb_iter: (optional) the number of iterations
Defaults to 1 if not specified
:param xi: (optional float) the finite difference parameter
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
:param num_iterations: Deprecated alias for `nb_iter`
"""
# Save attack-specific parameters
self.eps = eps
if num_iterations is not None:
warnings.warn("`num_iterations` is deprecated. Switch to `nb_iter`."
" The old name will be removed on or after 2019-04-26.")
# Note: when we remove the deprecated alias, we can put the default
# value of 1 for nb_iter back in the method signature
assert nb_iter is None
nb_iter = num_iterations
del num_iterations
if nb_iter is None:
nb_iter = 1
self.num_iterations = nb_iter
self.xi = xi
self.clip_min = clip_min
self.clip_max = clip_max
if len(kwargs.keys()) > 0:
warnings.warn("kwargs is unused and will be removed on or after "
"2019-04-26.")
return True
|
python
|
def parse_params(self,
eps=2.0,
nb_iter=None,
xi=1e-6,
clip_min=None,
clip_max=None,
num_iterations=None,
**kwargs):
"""
Take in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
Attack-specific parameters:
:param eps: (optional float )the epsilon (input variation parameter)
:param nb_iter: (optional) the number of iterations
Defaults to 1 if not specified
:param xi: (optional float) the finite difference parameter
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
:param num_iterations: Deprecated alias for `nb_iter`
"""
# Save attack-specific parameters
self.eps = eps
if num_iterations is not None:
warnings.warn("`num_iterations` is deprecated. Switch to `nb_iter`."
" The old name will be removed on or after 2019-04-26.")
# Note: when we remove the deprecated alias, we can put the default
# value of 1 for nb_iter back in the method signature
assert nb_iter is None
nb_iter = num_iterations
del num_iterations
if nb_iter is None:
nb_iter = 1
self.num_iterations = nb_iter
self.xi = xi
self.clip_min = clip_min
self.clip_max = clip_max
if len(kwargs.keys()) > 0:
warnings.warn("kwargs is unused and will be removed on or after "
"2019-04-26.")
return True
|
[
"def",
"parse_params",
"(",
"self",
",",
"eps",
"=",
"2.0",
",",
"nb_iter",
"=",
"None",
",",
"xi",
"=",
"1e-6",
",",
"clip_min",
"=",
"None",
",",
"clip_max",
"=",
"None",
",",
"num_iterations",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Save attack-specific parameters",
"self",
".",
"eps",
"=",
"eps",
"if",
"num_iterations",
"is",
"not",
"None",
":",
"warnings",
".",
"warn",
"(",
"\"`num_iterations` is deprecated. Switch to `nb_iter`.\"",
"\" The old name will be removed on or after 2019-04-26.\"",
")",
"# Note: when we remove the deprecated alias, we can put the default",
"# value of 1 for nb_iter back in the method signature",
"assert",
"nb_iter",
"is",
"None",
"nb_iter",
"=",
"num_iterations",
"del",
"num_iterations",
"if",
"nb_iter",
"is",
"None",
":",
"nb_iter",
"=",
"1",
"self",
".",
"num_iterations",
"=",
"nb_iter",
"self",
".",
"xi",
"=",
"xi",
"self",
".",
"clip_min",
"=",
"clip_min",
"self",
".",
"clip_max",
"=",
"clip_max",
"if",
"len",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
">",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"kwargs is unused and will be removed on or after \"",
"\"2019-04-26.\"",
")",
"return",
"True"
] |
Take in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
Attack-specific parameters:
:param eps: (optional float )the epsilon (input variation parameter)
:param nb_iter: (optional) the number of iterations
Defaults to 1 if not specified
:param xi: (optional float) the finite difference parameter
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
:param num_iterations: Deprecated alias for `nb_iter`
|
[
"Take",
"in",
"a",
"dictionary",
"of",
"parameters",
"and",
"applies",
"attack",
"-",
"specific",
"checks",
"before",
"saving",
"them",
"as",
"attributes",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/virtual_adversarial_method.py#L63-L104
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/cloud_client.py
|
iterate_with_exp_backoff
|
def iterate_with_exp_backoff(base_iter,
max_num_tries=6,
max_backoff=300.0,
start_backoff=4.0,
backoff_multiplier=2.0,
frac_random_backoff=0.25):
"""Iterate with exponential backoff on failures.
Useful to wrap results of datastore Query.fetch to avoid 429 error.
Args:
base_iter: basic iterator of generator object
max_num_tries: maximum number of tries for each request
max_backoff: maximum backoff, in seconds
start_backoff: initial value of backoff
backoff_multiplier: backoff multiplier
frac_random_backoff: fraction of the value of random part of the backoff
Yields:
values of yielded by base iterator
"""
try_number = 0
if hasattr(base_iter, '__iter__'):
base_iter = iter(base_iter)
while True:
try:
yield next(base_iter)
try_number = 0
except StopIteration:
break
except TooManyRequests as e:
logging.warning('TooManyRequests error: %s', tb.format_exc())
if try_number >= max_num_tries:
logging.error('Number of tries exceeded, too many requests: %s', e)
raise
# compute sleep time for truncated exponential backoff
sleep_time = start_backoff * math.pow(backoff_multiplier, try_number)
sleep_time *= (1.0 + frac_random_backoff * random.random())
sleep_time = min(sleep_time, max_backoff)
logging.warning('Too many requests error, '
'retrying with exponential backoff %.3f', sleep_time)
time.sleep(sleep_time)
try_number += 1
|
python
|
def iterate_with_exp_backoff(base_iter,
max_num_tries=6,
max_backoff=300.0,
start_backoff=4.0,
backoff_multiplier=2.0,
frac_random_backoff=0.25):
"""Iterate with exponential backoff on failures.
Useful to wrap results of datastore Query.fetch to avoid 429 error.
Args:
base_iter: basic iterator of generator object
max_num_tries: maximum number of tries for each request
max_backoff: maximum backoff, in seconds
start_backoff: initial value of backoff
backoff_multiplier: backoff multiplier
frac_random_backoff: fraction of the value of random part of the backoff
Yields:
values of yielded by base iterator
"""
try_number = 0
if hasattr(base_iter, '__iter__'):
base_iter = iter(base_iter)
while True:
try:
yield next(base_iter)
try_number = 0
except StopIteration:
break
except TooManyRequests as e:
logging.warning('TooManyRequests error: %s', tb.format_exc())
if try_number >= max_num_tries:
logging.error('Number of tries exceeded, too many requests: %s', e)
raise
# compute sleep time for truncated exponential backoff
sleep_time = start_backoff * math.pow(backoff_multiplier, try_number)
sleep_time *= (1.0 + frac_random_backoff * random.random())
sleep_time = min(sleep_time, max_backoff)
logging.warning('Too many requests error, '
'retrying with exponential backoff %.3f', sleep_time)
time.sleep(sleep_time)
try_number += 1
|
[
"def",
"iterate_with_exp_backoff",
"(",
"base_iter",
",",
"max_num_tries",
"=",
"6",
",",
"max_backoff",
"=",
"300.0",
",",
"start_backoff",
"=",
"4.0",
",",
"backoff_multiplier",
"=",
"2.0",
",",
"frac_random_backoff",
"=",
"0.25",
")",
":",
"try_number",
"=",
"0",
"if",
"hasattr",
"(",
"base_iter",
",",
"'__iter__'",
")",
":",
"base_iter",
"=",
"iter",
"(",
"base_iter",
")",
"while",
"True",
":",
"try",
":",
"yield",
"next",
"(",
"base_iter",
")",
"try_number",
"=",
"0",
"except",
"StopIteration",
":",
"break",
"except",
"TooManyRequests",
"as",
"e",
":",
"logging",
".",
"warning",
"(",
"'TooManyRequests error: %s'",
",",
"tb",
".",
"format_exc",
"(",
")",
")",
"if",
"try_number",
">=",
"max_num_tries",
":",
"logging",
".",
"error",
"(",
"'Number of tries exceeded, too many requests: %s'",
",",
"e",
")",
"raise",
"# compute sleep time for truncated exponential backoff",
"sleep_time",
"=",
"start_backoff",
"*",
"math",
".",
"pow",
"(",
"backoff_multiplier",
",",
"try_number",
")",
"sleep_time",
"*=",
"(",
"1.0",
"+",
"frac_random_backoff",
"*",
"random",
".",
"random",
"(",
")",
")",
"sleep_time",
"=",
"min",
"(",
"sleep_time",
",",
"max_backoff",
")",
"logging",
".",
"warning",
"(",
"'Too many requests error, '",
"'retrying with exponential backoff %.3f'",
",",
"sleep_time",
")",
"time",
".",
"sleep",
"(",
"sleep_time",
")",
"try_number",
"+=",
"1"
] |
Iterate with exponential backoff on failures.
Useful to wrap results of datastore Query.fetch to avoid 429 error.
Args:
base_iter: basic iterator of generator object
max_num_tries: maximum number of tries for each request
max_backoff: maximum backoff, in seconds
start_backoff: initial value of backoff
backoff_multiplier: backoff multiplier
frac_random_backoff: fraction of the value of random part of the backoff
Yields:
values of yielded by base iterator
|
[
"Iterate",
"with",
"exponential",
"backoff",
"on",
"failures",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/cloud_client.py#L167-L209
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/cloud_client.py
|
CompetitionStorageClient.list_blobs
|
def list_blobs(self, prefix=''):
"""Lists names of all blobs by their prefix."""
return [b.name for b in self.bucket.list_blobs(prefix=prefix)]
|
python
|
def list_blobs(self, prefix=''):
"""Lists names of all blobs by their prefix."""
return [b.name for b in self.bucket.list_blobs(prefix=prefix)]
|
[
"def",
"list_blobs",
"(",
"self",
",",
"prefix",
"=",
"''",
")",
":",
"return",
"[",
"b",
".",
"name",
"for",
"b",
"in",
"self",
".",
"bucket",
".",
"list_blobs",
"(",
"prefix",
"=",
"prefix",
")",
"]"
] |
Lists names of all blobs by their prefix.
|
[
"Lists",
"names",
"of",
"all",
"blobs",
"by",
"their",
"prefix",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/cloud_client.py#L43-L45
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/cloud_client.py
|
NoTransactionBatch.begin
|
def begin(self):
"""Begins a batch."""
if self._cur_batch:
raise ValueError('Previous batch is not committed.')
self._cur_batch = self._client.batch()
self._cur_batch.begin()
self._num_mutations = 0
|
python
|
def begin(self):
"""Begins a batch."""
if self._cur_batch:
raise ValueError('Previous batch is not committed.')
self._cur_batch = self._client.batch()
self._cur_batch.begin()
self._num_mutations = 0
|
[
"def",
"begin",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cur_batch",
":",
"raise",
"ValueError",
"(",
"'Previous batch is not committed.'",
")",
"self",
".",
"_cur_batch",
"=",
"self",
".",
"_client",
".",
"batch",
"(",
")",
"self",
".",
"_cur_batch",
".",
"begin",
"(",
")",
"self",
".",
"_num_mutations",
"=",
"0"
] |
Begins a batch.
|
[
"Begins",
"a",
"batch",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/cloud_client.py#L93-L99
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/cloud_client.py
|
NoTransactionBatch.rollback
|
def rollback(self):
"""Rolls back pending mutations.
Keep in mind that NoTransactionBatch splits all mutations into smaller
batches and commit them as soon as mutation buffer reaches maximum length.
That's why rollback method will only roll back pending mutations from the
buffer, but won't be able to rollback already committed mutations.
"""
try:
if self._cur_batch:
self._cur_batch.rollback()
except ValueError:
# ignore "Batch must be in progress to rollback" error
pass
self._cur_batch = None
self._num_mutations = 0
|
python
|
def rollback(self):
"""Rolls back pending mutations.
Keep in mind that NoTransactionBatch splits all mutations into smaller
batches and commit them as soon as mutation buffer reaches maximum length.
That's why rollback method will only roll back pending mutations from the
buffer, but won't be able to rollback already committed mutations.
"""
try:
if self._cur_batch:
self._cur_batch.rollback()
except ValueError:
# ignore "Batch must be in progress to rollback" error
pass
self._cur_batch = None
self._num_mutations = 0
|
[
"def",
"rollback",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"_cur_batch",
":",
"self",
".",
"_cur_batch",
".",
"rollback",
"(",
")",
"except",
"ValueError",
":",
"# ignore \"Batch must be in progress to rollback\" error",
"pass",
"self",
".",
"_cur_batch",
"=",
"None",
"self",
".",
"_num_mutations",
"=",
"0"
] |
Rolls back pending mutations.
Keep in mind that NoTransactionBatch splits all mutations into smaller
batches and commit them as soon as mutation buffer reaches maximum length.
That's why rollback method will only roll back pending mutations from the
buffer, but won't be able to rollback already committed mutations.
|
[
"Rolls",
"back",
"pending",
"mutations",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/cloud_client.py#L107-L122
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/cloud_client.py
|
NoTransactionBatch.put
|
def put(self, entity):
"""Adds mutation of the entity to the mutation buffer.
If mutation buffer reaches its capacity then this method commit all pending
mutations from the buffer and emties it.
Args:
entity: entity which should be put into the datastore
"""
self._cur_batch.put(entity)
self._num_mutations += 1
if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:
self.commit()
self.begin()
|
python
|
def put(self, entity):
"""Adds mutation of the entity to the mutation buffer.
If mutation buffer reaches its capacity then this method commit all pending
mutations from the buffer and emties it.
Args:
entity: entity which should be put into the datastore
"""
self._cur_batch.put(entity)
self._num_mutations += 1
if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:
self.commit()
self.begin()
|
[
"def",
"put",
"(",
"self",
",",
"entity",
")",
":",
"self",
".",
"_cur_batch",
".",
"put",
"(",
"entity",
")",
"self",
".",
"_num_mutations",
"+=",
"1",
"if",
"self",
".",
"_num_mutations",
">=",
"MAX_MUTATIONS_IN_BATCH",
":",
"self",
".",
"commit",
"(",
")",
"self",
".",
"begin",
"(",
")"
] |
Adds mutation of the entity to the mutation buffer.
If mutation buffer reaches its capacity then this method commit all pending
mutations from the buffer and emties it.
Args:
entity: entity which should be put into the datastore
|
[
"Adds",
"mutation",
"of",
"the",
"entity",
"to",
"the",
"mutation",
"buffer",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/cloud_client.py#L124-L137
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/cloud_client.py
|
NoTransactionBatch.delete
|
def delete(self, key):
"""Adds deletion of the entity with given key to the mutation buffer.
If mutation buffer reaches its capacity then this method commit all pending
mutations from the buffer and emties it.
Args:
key: key of the entity which should be deleted
"""
self._cur_batch.delete(key)
self._num_mutations += 1
if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:
self.commit()
self.begin()
|
python
|
def delete(self, key):
"""Adds deletion of the entity with given key to the mutation buffer.
If mutation buffer reaches its capacity then this method commit all pending
mutations from the buffer and emties it.
Args:
key: key of the entity which should be deleted
"""
self._cur_batch.delete(key)
self._num_mutations += 1
if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:
self.commit()
self.begin()
|
[
"def",
"delete",
"(",
"self",
",",
"key",
")",
":",
"self",
".",
"_cur_batch",
".",
"delete",
"(",
"key",
")",
"self",
".",
"_num_mutations",
"+=",
"1",
"if",
"self",
".",
"_num_mutations",
">=",
"MAX_MUTATIONS_IN_BATCH",
":",
"self",
".",
"commit",
"(",
")",
"self",
".",
"begin",
"(",
")"
] |
Adds deletion of the entity with given key to the mutation buffer.
If mutation buffer reaches its capacity then this method commit all pending
mutations from the buffer and emties it.
Args:
key: key of the entity which should be deleted
|
[
"Adds",
"deletion",
"of",
"the",
"entity",
"with",
"given",
"key",
"to",
"the",
"mutation",
"buffer",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/cloud_client.py#L139-L152
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/cloud_client.py
|
CompetitionDatastoreClient.get
|
def get(self, key, transaction=None):
"""Retrieves an entity given its key."""
return self._client.get(key, transaction=transaction)
|
python
|
def get(self, key, transaction=None):
"""Retrieves an entity given its key."""
return self._client.get(key, transaction=transaction)
|
[
"def",
"get",
"(",
"self",
",",
"key",
",",
"transaction",
"=",
"None",
")",
":",
"return",
"self",
".",
"_client",
".",
"get",
"(",
"key",
",",
"transaction",
"=",
"transaction",
")"
] |
Retrieves an entity given its key.
|
[
"Retrieves",
"an",
"entity",
"given",
"its",
"key",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/cloud_client.py#L239-L241
|
train
|
tensorflow/cleverhans
|
cleverhans_tutorials/mnist_tutorial_cw.py
|
mnist_tutorial_cw
|
def mnist_tutorial_cw(train_start=0, train_end=60000, test_start=0,
test_end=10000, viz_enabled=VIZ_ENABLED,
nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
source_samples=SOURCE_SAMPLES,
learning_rate=LEARNING_RATE,
attack_iterations=ATTACK_ITERATIONS,
model_path=MODEL_PATH,
targeted=TARGETED):
"""
MNIST tutorial for Carlini and Wagner's attack
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param viz_enabled: (boolean) activate plots of adversarial examples
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param nb_classes: number of output classes
:param source_samples: number of test inputs to attack
:param learning_rate: learning rate for training
:param model_path: path to the model file
:param targeted: should we run a targeted attack? or untargeted?
:return: an AccuracyReport object
"""
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Create TF session
sess = tf.Session()
print("Created TensorFlow session.")
set_log_level(logging.DEBUG)
# Get MNIST test data
mnist = MNIST(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
x_train, y_train = mnist.get_set('train')
x_test, y_test = mnist.get_set('test')
# Obtain Image Parameters
img_rows, img_cols, nchannels = x_train.shape[1:4]
nb_classes = y_train.shape[1]
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
nchannels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
nb_filters = 64
# Define TF model graph
model = ModelBasicCNN('model1', nb_classes, nb_filters)
preds = model.get_logits(x)
loss = CrossEntropy(model, smoothing=0.1)
print("Defined TensorFlow model graph.")
###########################################################################
# Training the model using TensorFlow
###########################################################################
# Train an MNIST model
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate,
'filename': os.path.split(model_path)[-1]
}
rng = np.random.RandomState([2017, 8, 30])
# check if we've trained before, and if we have, use that pre-trained model
if os.path.exists(model_path + ".meta"):
tf_model_load(sess, model_path)
else:
train(sess, loss, x_train, y_train, args=train_params, rng=rng)
saver = tf.train.Saver()
saver.save(sess, model_path)
# Evaluate the accuracy of the MNIST model on legitimate test examples
eval_params = {'batch_size': batch_size}
accuracy = model_eval(sess, x, y, preds, x_test, y_test, args=eval_params)
assert x_test.shape[0] == test_end - test_start, x_test.shape
print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
report.clean_train_clean_eval = accuracy
###########################################################################
# Craft adversarial examples using Carlini and Wagner's approach
###########################################################################
nb_adv_per_sample = str(nb_classes - 1) if targeted else '1'
print('Crafting ' + str(source_samples) + ' * ' + nb_adv_per_sample +
' adversarial examples')
print("This could take some time ...")
# Instantiate a CW attack object
cw = CarliniWagnerL2(model, sess=sess)
if viz_enabled:
assert source_samples == nb_classes
idxs = [np.where(np.argmax(y_test, axis=1) == i)[0][0]
for i in range(nb_classes)]
if targeted:
if viz_enabled:
# Initialize our array for grid visualization
grid_shape = (nb_classes, nb_classes, img_rows, img_cols,
nchannels)
grid_viz_data = np.zeros(grid_shape, dtype='f')
adv_inputs = np.array(
[[instance] * nb_classes for instance in x_test[idxs]],
dtype=np.float32)
else:
adv_inputs = np.array(
[[instance] * nb_classes for
instance in x_test[:source_samples]], dtype=np.float32)
one_hot = np.zeros((nb_classes, nb_classes))
one_hot[np.arange(nb_classes), np.arange(nb_classes)] = 1
adv_inputs = adv_inputs.reshape(
(source_samples * nb_classes, img_rows, img_cols, nchannels))
adv_ys = np.array([one_hot] * source_samples,
dtype=np.float32).reshape((source_samples *
nb_classes, nb_classes))
yname = "y_target"
else:
if viz_enabled:
# Initialize our array for grid visualization
grid_shape = (nb_classes, 2, img_rows, img_cols, nchannels)
grid_viz_data = np.zeros(grid_shape, dtype='f')
adv_inputs = x_test[idxs]
else:
adv_inputs = x_test[:source_samples]
adv_ys = None
yname = "y"
if targeted:
cw_params_batch_size = source_samples * nb_classes
else:
cw_params_batch_size = source_samples
cw_params = {'binary_search_steps': 1,
yname: adv_ys,
'max_iterations': attack_iterations,
'learning_rate': CW_LEARNING_RATE,
'batch_size': cw_params_batch_size,
'initial_const': 10}
adv = cw.generate_np(adv_inputs,
**cw_params)
eval_params = {'batch_size': np.minimum(nb_classes, source_samples)}
if targeted:
adv_accuracy = model_eval(
sess, x, y, preds, adv, adv_ys, args=eval_params)
else:
if viz_enabled:
err = model_eval(sess, x, y, preds, adv, y_test[idxs], args=eval_params)
adv_accuracy = 1 - err
else:
err = model_eval(sess, x, y, preds, adv, y_test[:source_samples],
args=eval_params)
adv_accuracy = 1 - err
if viz_enabled:
for j in range(nb_classes):
if targeted:
for i in range(nb_classes):
grid_viz_data[i, j] = adv[i * nb_classes + j]
else:
grid_viz_data[j, 0] = adv_inputs[j]
grid_viz_data[j, 1] = adv[j]
print(grid_viz_data.shape)
print('--------------------------------------')
# Compute the number of adversarial examples that were successfully found
print('Avg. rate of successful adv. examples {0:.4f}'.format(adv_accuracy))
report.clean_train_adv_eval = 1. - adv_accuracy
# Compute the average distortion introduced by the algorithm
percent_perturbed = np.mean(np.sum((adv - adv_inputs)**2,
axis=(1, 2, 3))**.5)
print('Avg. L_2 norm of perturbations {0:.4f}'.format(percent_perturbed))
# Close TF session
sess.close()
# Finally, block & display a grid of all the adversarial examples
if viz_enabled:
_ = grid_visual(grid_viz_data)
return report
|
python
|
def mnist_tutorial_cw(train_start=0, train_end=60000, test_start=0,
test_end=10000, viz_enabled=VIZ_ENABLED,
nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
source_samples=SOURCE_SAMPLES,
learning_rate=LEARNING_RATE,
attack_iterations=ATTACK_ITERATIONS,
model_path=MODEL_PATH,
targeted=TARGETED):
"""
MNIST tutorial for Carlini and Wagner's attack
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param viz_enabled: (boolean) activate plots of adversarial examples
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param nb_classes: number of output classes
:param source_samples: number of test inputs to attack
:param learning_rate: learning rate for training
:param model_path: path to the model file
:param targeted: should we run a targeted attack? or untargeted?
:return: an AccuracyReport object
"""
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Create TF session
sess = tf.Session()
print("Created TensorFlow session.")
set_log_level(logging.DEBUG)
# Get MNIST test data
mnist = MNIST(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
x_train, y_train = mnist.get_set('train')
x_test, y_test = mnist.get_set('test')
# Obtain Image Parameters
img_rows, img_cols, nchannels = x_train.shape[1:4]
nb_classes = y_train.shape[1]
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
nchannels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
nb_filters = 64
# Define TF model graph
model = ModelBasicCNN('model1', nb_classes, nb_filters)
preds = model.get_logits(x)
loss = CrossEntropy(model, smoothing=0.1)
print("Defined TensorFlow model graph.")
###########################################################################
# Training the model using TensorFlow
###########################################################################
# Train an MNIST model
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate,
'filename': os.path.split(model_path)[-1]
}
rng = np.random.RandomState([2017, 8, 30])
# check if we've trained before, and if we have, use that pre-trained model
if os.path.exists(model_path + ".meta"):
tf_model_load(sess, model_path)
else:
train(sess, loss, x_train, y_train, args=train_params, rng=rng)
saver = tf.train.Saver()
saver.save(sess, model_path)
# Evaluate the accuracy of the MNIST model on legitimate test examples
eval_params = {'batch_size': batch_size}
accuracy = model_eval(sess, x, y, preds, x_test, y_test, args=eval_params)
assert x_test.shape[0] == test_end - test_start, x_test.shape
print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
report.clean_train_clean_eval = accuracy
###########################################################################
# Craft adversarial examples using Carlini and Wagner's approach
###########################################################################
nb_adv_per_sample = str(nb_classes - 1) if targeted else '1'
print('Crafting ' + str(source_samples) + ' * ' + nb_adv_per_sample +
' adversarial examples')
print("This could take some time ...")
# Instantiate a CW attack object
cw = CarliniWagnerL2(model, sess=sess)
if viz_enabled:
assert source_samples == nb_classes
idxs = [np.where(np.argmax(y_test, axis=1) == i)[0][0]
for i in range(nb_classes)]
if targeted:
if viz_enabled:
# Initialize our array for grid visualization
grid_shape = (nb_classes, nb_classes, img_rows, img_cols,
nchannels)
grid_viz_data = np.zeros(grid_shape, dtype='f')
adv_inputs = np.array(
[[instance] * nb_classes for instance in x_test[idxs]],
dtype=np.float32)
else:
adv_inputs = np.array(
[[instance] * nb_classes for
instance in x_test[:source_samples]], dtype=np.float32)
one_hot = np.zeros((nb_classes, nb_classes))
one_hot[np.arange(nb_classes), np.arange(nb_classes)] = 1
adv_inputs = adv_inputs.reshape(
(source_samples * nb_classes, img_rows, img_cols, nchannels))
adv_ys = np.array([one_hot] * source_samples,
dtype=np.float32).reshape((source_samples *
nb_classes, nb_classes))
yname = "y_target"
else:
if viz_enabled:
# Initialize our array for grid visualization
grid_shape = (nb_classes, 2, img_rows, img_cols, nchannels)
grid_viz_data = np.zeros(grid_shape, dtype='f')
adv_inputs = x_test[idxs]
else:
adv_inputs = x_test[:source_samples]
adv_ys = None
yname = "y"
if targeted:
cw_params_batch_size = source_samples * nb_classes
else:
cw_params_batch_size = source_samples
cw_params = {'binary_search_steps': 1,
yname: adv_ys,
'max_iterations': attack_iterations,
'learning_rate': CW_LEARNING_RATE,
'batch_size': cw_params_batch_size,
'initial_const': 10}
adv = cw.generate_np(adv_inputs,
**cw_params)
eval_params = {'batch_size': np.minimum(nb_classes, source_samples)}
if targeted:
adv_accuracy = model_eval(
sess, x, y, preds, adv, adv_ys, args=eval_params)
else:
if viz_enabled:
err = model_eval(sess, x, y, preds, adv, y_test[idxs], args=eval_params)
adv_accuracy = 1 - err
else:
err = model_eval(sess, x, y, preds, adv, y_test[:source_samples],
args=eval_params)
adv_accuracy = 1 - err
if viz_enabled:
for j in range(nb_classes):
if targeted:
for i in range(nb_classes):
grid_viz_data[i, j] = adv[i * nb_classes + j]
else:
grid_viz_data[j, 0] = adv_inputs[j]
grid_viz_data[j, 1] = adv[j]
print(grid_viz_data.shape)
print('--------------------------------------')
# Compute the number of adversarial examples that were successfully found
print('Avg. rate of successful adv. examples {0:.4f}'.format(adv_accuracy))
report.clean_train_adv_eval = 1. - adv_accuracy
# Compute the average distortion introduced by the algorithm
percent_perturbed = np.mean(np.sum((adv - adv_inputs)**2,
axis=(1, 2, 3))**.5)
print('Avg. L_2 norm of perturbations {0:.4f}'.format(percent_perturbed))
# Close TF session
sess.close()
# Finally, block & display a grid of all the adversarial examples
if viz_enabled:
_ = grid_visual(grid_viz_data)
return report
|
[
"def",
"mnist_tutorial_cw",
"(",
"train_start",
"=",
"0",
",",
"train_end",
"=",
"60000",
",",
"test_start",
"=",
"0",
",",
"test_end",
"=",
"10000",
",",
"viz_enabled",
"=",
"VIZ_ENABLED",
",",
"nb_epochs",
"=",
"NB_EPOCHS",
",",
"batch_size",
"=",
"BATCH_SIZE",
",",
"source_samples",
"=",
"SOURCE_SAMPLES",
",",
"learning_rate",
"=",
"LEARNING_RATE",
",",
"attack_iterations",
"=",
"ATTACK_ITERATIONS",
",",
"model_path",
"=",
"MODEL_PATH",
",",
"targeted",
"=",
"TARGETED",
")",
":",
"# Object used to keep track of (and return) key accuracies",
"report",
"=",
"AccuracyReport",
"(",
")",
"# Set TF random seed to improve reproducibility",
"tf",
".",
"set_random_seed",
"(",
"1234",
")",
"# Create TF session",
"sess",
"=",
"tf",
".",
"Session",
"(",
")",
"print",
"(",
"\"Created TensorFlow session.\"",
")",
"set_log_level",
"(",
"logging",
".",
"DEBUG",
")",
"# Get MNIST test data",
"mnist",
"=",
"MNIST",
"(",
"train_start",
"=",
"train_start",
",",
"train_end",
"=",
"train_end",
",",
"test_start",
"=",
"test_start",
",",
"test_end",
"=",
"test_end",
")",
"x_train",
",",
"y_train",
"=",
"mnist",
".",
"get_set",
"(",
"'train'",
")",
"x_test",
",",
"y_test",
"=",
"mnist",
".",
"get_set",
"(",
"'test'",
")",
"# Obtain Image Parameters",
"img_rows",
",",
"img_cols",
",",
"nchannels",
"=",
"x_train",
".",
"shape",
"[",
"1",
":",
"4",
"]",
"nb_classes",
"=",
"y_train",
".",
"shape",
"[",
"1",
"]",
"# Define input TF placeholder",
"x",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"shape",
"=",
"(",
"None",
",",
"img_rows",
",",
"img_cols",
",",
"nchannels",
")",
")",
"y",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"shape",
"=",
"(",
"None",
",",
"nb_classes",
")",
")",
"nb_filters",
"=",
"64",
"# Define TF model graph",
"model",
"=",
"ModelBasicCNN",
"(",
"'model1'",
",",
"nb_classes",
",",
"nb_filters",
")",
"preds",
"=",
"model",
".",
"get_logits",
"(",
"x",
")",
"loss",
"=",
"CrossEntropy",
"(",
"model",
",",
"smoothing",
"=",
"0.1",
")",
"print",
"(",
"\"Defined TensorFlow model graph.\"",
")",
"###########################################################################",
"# Training the model using TensorFlow",
"###########################################################################",
"# Train an MNIST model",
"train_params",
"=",
"{",
"'nb_epochs'",
":",
"nb_epochs",
",",
"'batch_size'",
":",
"batch_size",
",",
"'learning_rate'",
":",
"learning_rate",
",",
"'filename'",
":",
"os",
".",
"path",
".",
"split",
"(",
"model_path",
")",
"[",
"-",
"1",
"]",
"}",
"rng",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"[",
"2017",
",",
"8",
",",
"30",
"]",
")",
"# check if we've trained before, and if we have, use that pre-trained model",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"model_path",
"+",
"\".meta\"",
")",
":",
"tf_model_load",
"(",
"sess",
",",
"model_path",
")",
"else",
":",
"train",
"(",
"sess",
",",
"loss",
",",
"x_train",
",",
"y_train",
",",
"args",
"=",
"train_params",
",",
"rng",
"=",
"rng",
")",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
")",
"saver",
".",
"save",
"(",
"sess",
",",
"model_path",
")",
"# Evaluate the accuracy of the MNIST model on legitimate test examples",
"eval_params",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"accuracy",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds",
",",
"x_test",
",",
"y_test",
",",
"args",
"=",
"eval_params",
")",
"assert",
"x_test",
".",
"shape",
"[",
"0",
"]",
"==",
"test_end",
"-",
"test_start",
",",
"x_test",
".",
"shape",
"print",
"(",
"'Test accuracy on legitimate test examples: {0}'",
".",
"format",
"(",
"accuracy",
")",
")",
"report",
".",
"clean_train_clean_eval",
"=",
"accuracy",
"###########################################################################",
"# Craft adversarial examples using Carlini and Wagner's approach",
"###########################################################################",
"nb_adv_per_sample",
"=",
"str",
"(",
"nb_classes",
"-",
"1",
")",
"if",
"targeted",
"else",
"'1'",
"print",
"(",
"'Crafting '",
"+",
"str",
"(",
"source_samples",
")",
"+",
"' * '",
"+",
"nb_adv_per_sample",
"+",
"' adversarial examples'",
")",
"print",
"(",
"\"This could take some time ...\"",
")",
"# Instantiate a CW attack object",
"cw",
"=",
"CarliniWagnerL2",
"(",
"model",
",",
"sess",
"=",
"sess",
")",
"if",
"viz_enabled",
":",
"assert",
"source_samples",
"==",
"nb_classes",
"idxs",
"=",
"[",
"np",
".",
"where",
"(",
"np",
".",
"argmax",
"(",
"y_test",
",",
"axis",
"=",
"1",
")",
"==",
"i",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"nb_classes",
")",
"]",
"if",
"targeted",
":",
"if",
"viz_enabled",
":",
"# Initialize our array for grid visualization",
"grid_shape",
"=",
"(",
"nb_classes",
",",
"nb_classes",
",",
"img_rows",
",",
"img_cols",
",",
"nchannels",
")",
"grid_viz_data",
"=",
"np",
".",
"zeros",
"(",
"grid_shape",
",",
"dtype",
"=",
"'f'",
")",
"adv_inputs",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"instance",
"]",
"*",
"nb_classes",
"for",
"instance",
"in",
"x_test",
"[",
"idxs",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"else",
":",
"adv_inputs",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"instance",
"]",
"*",
"nb_classes",
"for",
"instance",
"in",
"x_test",
"[",
":",
"source_samples",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"one_hot",
"=",
"np",
".",
"zeros",
"(",
"(",
"nb_classes",
",",
"nb_classes",
")",
")",
"one_hot",
"[",
"np",
".",
"arange",
"(",
"nb_classes",
")",
",",
"np",
".",
"arange",
"(",
"nb_classes",
")",
"]",
"=",
"1",
"adv_inputs",
"=",
"adv_inputs",
".",
"reshape",
"(",
"(",
"source_samples",
"*",
"nb_classes",
",",
"img_rows",
",",
"img_cols",
",",
"nchannels",
")",
")",
"adv_ys",
"=",
"np",
".",
"array",
"(",
"[",
"one_hot",
"]",
"*",
"source_samples",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
".",
"reshape",
"(",
"(",
"source_samples",
"*",
"nb_classes",
",",
"nb_classes",
")",
")",
"yname",
"=",
"\"y_target\"",
"else",
":",
"if",
"viz_enabled",
":",
"# Initialize our array for grid visualization",
"grid_shape",
"=",
"(",
"nb_classes",
",",
"2",
",",
"img_rows",
",",
"img_cols",
",",
"nchannels",
")",
"grid_viz_data",
"=",
"np",
".",
"zeros",
"(",
"grid_shape",
",",
"dtype",
"=",
"'f'",
")",
"adv_inputs",
"=",
"x_test",
"[",
"idxs",
"]",
"else",
":",
"adv_inputs",
"=",
"x_test",
"[",
":",
"source_samples",
"]",
"adv_ys",
"=",
"None",
"yname",
"=",
"\"y\"",
"if",
"targeted",
":",
"cw_params_batch_size",
"=",
"source_samples",
"*",
"nb_classes",
"else",
":",
"cw_params_batch_size",
"=",
"source_samples",
"cw_params",
"=",
"{",
"'binary_search_steps'",
":",
"1",
",",
"yname",
":",
"adv_ys",
",",
"'max_iterations'",
":",
"attack_iterations",
",",
"'learning_rate'",
":",
"CW_LEARNING_RATE",
",",
"'batch_size'",
":",
"cw_params_batch_size",
",",
"'initial_const'",
":",
"10",
"}",
"adv",
"=",
"cw",
".",
"generate_np",
"(",
"adv_inputs",
",",
"*",
"*",
"cw_params",
")",
"eval_params",
"=",
"{",
"'batch_size'",
":",
"np",
".",
"minimum",
"(",
"nb_classes",
",",
"source_samples",
")",
"}",
"if",
"targeted",
":",
"adv_accuracy",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds",
",",
"adv",
",",
"adv_ys",
",",
"args",
"=",
"eval_params",
")",
"else",
":",
"if",
"viz_enabled",
":",
"err",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds",
",",
"adv",
",",
"y_test",
"[",
"idxs",
"]",
",",
"args",
"=",
"eval_params",
")",
"adv_accuracy",
"=",
"1",
"-",
"err",
"else",
":",
"err",
"=",
"model_eval",
"(",
"sess",
",",
"x",
",",
"y",
",",
"preds",
",",
"adv",
",",
"y_test",
"[",
":",
"source_samples",
"]",
",",
"args",
"=",
"eval_params",
")",
"adv_accuracy",
"=",
"1",
"-",
"err",
"if",
"viz_enabled",
":",
"for",
"j",
"in",
"range",
"(",
"nb_classes",
")",
":",
"if",
"targeted",
":",
"for",
"i",
"in",
"range",
"(",
"nb_classes",
")",
":",
"grid_viz_data",
"[",
"i",
",",
"j",
"]",
"=",
"adv",
"[",
"i",
"*",
"nb_classes",
"+",
"j",
"]",
"else",
":",
"grid_viz_data",
"[",
"j",
",",
"0",
"]",
"=",
"adv_inputs",
"[",
"j",
"]",
"grid_viz_data",
"[",
"j",
",",
"1",
"]",
"=",
"adv",
"[",
"j",
"]",
"print",
"(",
"grid_viz_data",
".",
"shape",
")",
"print",
"(",
"'--------------------------------------'",
")",
"# Compute the number of adversarial examples that were successfully found",
"print",
"(",
"'Avg. rate of successful adv. examples {0:.4f}'",
".",
"format",
"(",
"adv_accuracy",
")",
")",
"report",
".",
"clean_train_adv_eval",
"=",
"1.",
"-",
"adv_accuracy",
"# Compute the average distortion introduced by the algorithm",
"percent_perturbed",
"=",
"np",
".",
"mean",
"(",
"np",
".",
"sum",
"(",
"(",
"adv",
"-",
"adv_inputs",
")",
"**",
"2",
",",
"axis",
"=",
"(",
"1",
",",
"2",
",",
"3",
")",
")",
"**",
".5",
")",
"print",
"(",
"'Avg. L_2 norm of perturbations {0:.4f}'",
".",
"format",
"(",
"percent_perturbed",
")",
")",
"# Close TF session",
"sess",
".",
"close",
"(",
")",
"# Finally, block & display a grid of all the adversarial examples",
"if",
"viz_enabled",
":",
"_",
"=",
"grid_visual",
"(",
"grid_viz_data",
")",
"return",
"report"
] |
MNIST tutorial for Carlini and Wagner's attack
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param viz_enabled: (boolean) activate plots of adversarial examples
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param nb_classes: number of output classes
:param source_samples: number of test inputs to attack
:param learning_rate: learning rate for training
:param model_path: path to the model file
:param targeted: should we run a targeted attack? or untargeted?
:return: an AccuracyReport object
|
[
"MNIST",
"tutorial",
"for",
"Carlini",
"and",
"Wagner",
"s",
"attack",
":",
"param",
"train_start",
":",
"index",
"of",
"first",
"training",
"set",
"example",
":",
"param",
"train_end",
":",
"index",
"of",
"last",
"training",
"set",
"example",
":",
"param",
"test_start",
":",
"index",
"of",
"first",
"test",
"set",
"example",
":",
"param",
"test_end",
":",
"index",
"of",
"last",
"test",
"set",
"example",
":",
"param",
"viz_enabled",
":",
"(",
"boolean",
")",
"activate",
"plots",
"of",
"adversarial",
"examples",
":",
"param",
"nb_epochs",
":",
"number",
"of",
"epochs",
"to",
"train",
"model",
":",
"param",
"batch_size",
":",
"size",
"of",
"training",
"batches",
":",
"param",
"nb_classes",
":",
"number",
"of",
"output",
"classes",
":",
"param",
"source_samples",
":",
"number",
"of",
"test",
"inputs",
"to",
"attack",
":",
"param",
"learning_rate",
":",
"learning",
"rate",
"for",
"training",
":",
"param",
"model_path",
":",
"path",
"to",
"the",
"model",
"file",
":",
"param",
"targeted",
":",
"should",
"we",
"run",
"a",
"targeted",
"attack?",
"or",
"untargeted?",
":",
"return",
":",
"an",
"AccuracyReport",
"object"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans_tutorials/mnist_tutorial_cw.py#L41-L235
|
train
|
tensorflow/cleverhans
|
cleverhans_tutorials/mnist_tutorial_tfe.py
|
attack_selection
|
def attack_selection(attack_string):
"""
Selects the Attack Class using string input.
:param attack_string: adversarial attack name in string format
:return: attack class defined in cleverhans.attacks_eager
"""
# List of Implemented attacks
attacks_list = AVAILABLE_ATTACKS.keys()
# Checking for requested attack in list of available attacks.
if attack_string is None:
raise AttributeError("Attack type is not specified, "
"list of available attacks\t".join(attacks_list))
if attack_string not in attacks_list:
raise AttributeError("Attack not available "
"list of available attacks\t".join(attacks_list))
# Mapping attack from string to class.
attack_class = AVAILABLE_ATTACKS[attack_string]
return attack_class
|
python
|
def attack_selection(attack_string):
"""
Selects the Attack Class using string input.
:param attack_string: adversarial attack name in string format
:return: attack class defined in cleverhans.attacks_eager
"""
# List of Implemented attacks
attacks_list = AVAILABLE_ATTACKS.keys()
# Checking for requested attack in list of available attacks.
if attack_string is None:
raise AttributeError("Attack type is not specified, "
"list of available attacks\t".join(attacks_list))
if attack_string not in attacks_list:
raise AttributeError("Attack not available "
"list of available attacks\t".join(attacks_list))
# Mapping attack from string to class.
attack_class = AVAILABLE_ATTACKS[attack_string]
return attack_class
|
[
"def",
"attack_selection",
"(",
"attack_string",
")",
":",
"# List of Implemented attacks",
"attacks_list",
"=",
"AVAILABLE_ATTACKS",
".",
"keys",
"(",
")",
"# Checking for requested attack in list of available attacks.",
"if",
"attack_string",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"\"Attack type is not specified, \"",
"\"list of available attacks\\t\"",
".",
"join",
"(",
"attacks_list",
")",
")",
"if",
"attack_string",
"not",
"in",
"attacks_list",
":",
"raise",
"AttributeError",
"(",
"\"Attack not available \"",
"\"list of available attacks\\t\"",
".",
"join",
"(",
"attacks_list",
")",
")",
"# Mapping attack from string to class.",
"attack_class",
"=",
"AVAILABLE_ATTACKS",
"[",
"attack_string",
"]",
"return",
"attack_class"
] |
Selects the Attack Class using string input.
:param attack_string: adversarial attack name in string format
:return: attack class defined in cleverhans.attacks_eager
|
[
"Selects",
"the",
"Attack",
"Class",
"using",
"string",
"input",
".",
":",
"param",
"attack_string",
":",
"adversarial",
"attack",
"name",
"in",
"string",
"format",
":",
"return",
":",
"attack",
"class",
"defined",
"in",
"cleverhans",
".",
"attacks_eager"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans_tutorials/mnist_tutorial_tfe.py#L54-L73
|
train
|
tensorflow/cleverhans
|
cleverhans_tutorials/mnist_tutorial_tfe.py
|
mnist_tutorial
|
def mnist_tutorial(train_start=0, train_end=60000, test_start=0,
test_end=10000, nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
learning_rate=LEARNING_RATE,
clean_train=True,
testing=False,
backprop_through_attack=False,
nb_filters=NB_FILTERS, num_threads=None,
attack_string=None):
"""
MNIST cleverhans tutorial
:param train_start: index of first training set example.
:param train_end: index of last training set example.
:param test_start: index of first test set example.
:param test_end: index of last test set example.
:param nb_epochs: number of epochs to train model.
:param batch_size: size of training batches.
:param learning_rate: learning rate for training.
:param clean_train: perform normal training on clean examples only
before performing adversarial training.
:param testing: if true, complete an AccuracyReport for unit tests
to verify that performance is adequate.
:param backprop_through_attack: If True, backprop through adversarial
example construction process during
adversarial training.
:param nb_filters: number of filters in the CNN used for training.
:param num_threads: number of threads used for running the process.
:param attack_string: attack name for crafting adversarial attacks and
adversarial training, in string format.
:return: an AccuracyReport object
"""
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Set logging level to see debug information
set_log_level(logging.DEBUG)
# Get MNIST test data
mnist = MNIST(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
X_train, Y_train = mnist.get_set('train')
X_test, Y_test = mnist.get_set('test')
# Use label smoothing
assert Y_train.shape[1] == 10
label_smooth = .1
Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)
# Train an MNIST model
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate
}
# Initialize the attack object
attack_class = attack_selection(attack_string)
attack_params = {'eps': 0.3, 'clip_min': 0.,
'clip_max': 1.}
rng = np.random.RandomState([2018, 6, 18])
if clean_train:
model = ModelBasicCNNTFE(nb_filters=nb_filters)
def evaluate_clean():
"""Evaluate the accuracy of the MNIST model on legitimate test
examples
"""
eval_params = {'batch_size': batch_size}
acc = model_eval(model, X_test, Y_test, args=eval_params)
report.clean_train_clean_eval = acc
assert X_test.shape[0] == test_end - test_start, X_test.shape
print('Test accuracy on legitimate examples: %0.4f' % acc)
train(model, X_train, Y_train, evaluate=evaluate_clean,
args=train_params, rng=rng, var_list=model.get_params())
if testing:
# Calculate training error
eval_params = {'batch_size': batch_size}
acc = model_eval(model, X_train, Y_train, args=eval_params)
report.train_clean_train_clean_eval = acc
# Evaluate the accuracy of the MNIST model on adversarial examples
eval_par = {'batch_size': batch_size}
attack = attack_class(model)
acc = model_eval(
model, X_test, Y_test, args=eval_par,
attack=attack, attack_args=attack_params)
print('Test accuracy on adversarial examples: %0.4f\n' % acc)
report.clean_train_adv_eval = acc
# Calculate training error
if testing:
eval_par = {'batch_size': batch_size}
acc = model_eval(
model, X_train, Y_train, args=eval_par,
attack=attack, attack_args=attack_params)
print('Train accuracy on adversarial examples: %0.4f\n' % acc)
report.train_clean_train_adv_eval = acc
attack = None
print("Repeating the process, using adversarial training")
model_adv_train = ModelBasicCNNTFE(nb_filters=nb_filters)
attack = attack_class(model_adv_train)
def evaluate_adv():
# Accuracy of adversarially trained model on legitimate test inputs
eval_params = {'batch_size': batch_size}
accuracy = model_eval(
model_adv_train, X_test, Y_test,
args=eval_params)
print('Test accuracy on legitimate examples: %0.4f' % accuracy)
report.adv_train_clean_eval = accuracy
# Accuracy of the adversarially trained model on adversarial examples
accuracy = model_eval(
model_adv_train, X_test, Y_test,
args=eval_params, attack=attack,
attack_args=attack_params)
print('Test accuracy on adversarial examples: %0.4f' % accuracy)
report.adv_train_adv_eval = accuracy
# Perform and evaluate adversarial training
train(model_adv_train, X_train, Y_train, evaluate=evaluate_adv,
args=train_params, rng=rng,
var_list=model_adv_train.get_params(),
attack=attack, attack_args=attack_params)
# Calculate training errors
if testing:
eval_params = {'batch_size': batch_size}
accuracy = model_eval(
model_adv_train, X_train, Y_train, args=eval_params,
attack=None, attack_args=None)
report.train_adv_train_clean_eval = accuracy
accuracy = model_eval(
model_adv_train, X_train, Y_train, args=eval_params,
attack=attack, attack_args=attack_params)
report.train_adv_train_adv_eval = accuracy
return report
|
python
|
def mnist_tutorial(train_start=0, train_end=60000, test_start=0,
test_end=10000, nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
learning_rate=LEARNING_RATE,
clean_train=True,
testing=False,
backprop_through_attack=False,
nb_filters=NB_FILTERS, num_threads=None,
attack_string=None):
"""
MNIST cleverhans tutorial
:param train_start: index of first training set example.
:param train_end: index of last training set example.
:param test_start: index of first test set example.
:param test_end: index of last test set example.
:param nb_epochs: number of epochs to train model.
:param batch_size: size of training batches.
:param learning_rate: learning rate for training.
:param clean_train: perform normal training on clean examples only
before performing adversarial training.
:param testing: if true, complete an AccuracyReport for unit tests
to verify that performance is adequate.
:param backprop_through_attack: If True, backprop through adversarial
example construction process during
adversarial training.
:param nb_filters: number of filters in the CNN used for training.
:param num_threads: number of threads used for running the process.
:param attack_string: attack name for crafting adversarial attacks and
adversarial training, in string format.
:return: an AccuracyReport object
"""
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Set logging level to see debug information
set_log_level(logging.DEBUG)
# Get MNIST test data
mnist = MNIST(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
X_train, Y_train = mnist.get_set('train')
X_test, Y_test = mnist.get_set('test')
# Use label smoothing
assert Y_train.shape[1] == 10
label_smooth = .1
Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)
# Train an MNIST model
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate
}
# Initialize the attack object
attack_class = attack_selection(attack_string)
attack_params = {'eps': 0.3, 'clip_min': 0.,
'clip_max': 1.}
rng = np.random.RandomState([2018, 6, 18])
if clean_train:
model = ModelBasicCNNTFE(nb_filters=nb_filters)
def evaluate_clean():
"""Evaluate the accuracy of the MNIST model on legitimate test
examples
"""
eval_params = {'batch_size': batch_size}
acc = model_eval(model, X_test, Y_test, args=eval_params)
report.clean_train_clean_eval = acc
assert X_test.shape[0] == test_end - test_start, X_test.shape
print('Test accuracy on legitimate examples: %0.4f' % acc)
train(model, X_train, Y_train, evaluate=evaluate_clean,
args=train_params, rng=rng, var_list=model.get_params())
if testing:
# Calculate training error
eval_params = {'batch_size': batch_size}
acc = model_eval(model, X_train, Y_train, args=eval_params)
report.train_clean_train_clean_eval = acc
# Evaluate the accuracy of the MNIST model on adversarial examples
eval_par = {'batch_size': batch_size}
attack = attack_class(model)
acc = model_eval(
model, X_test, Y_test, args=eval_par,
attack=attack, attack_args=attack_params)
print('Test accuracy on adversarial examples: %0.4f\n' % acc)
report.clean_train_adv_eval = acc
# Calculate training error
if testing:
eval_par = {'batch_size': batch_size}
acc = model_eval(
model, X_train, Y_train, args=eval_par,
attack=attack, attack_args=attack_params)
print('Train accuracy on adversarial examples: %0.4f\n' % acc)
report.train_clean_train_adv_eval = acc
attack = None
print("Repeating the process, using adversarial training")
model_adv_train = ModelBasicCNNTFE(nb_filters=nb_filters)
attack = attack_class(model_adv_train)
def evaluate_adv():
# Accuracy of adversarially trained model on legitimate test inputs
eval_params = {'batch_size': batch_size}
accuracy = model_eval(
model_adv_train, X_test, Y_test,
args=eval_params)
print('Test accuracy on legitimate examples: %0.4f' % accuracy)
report.adv_train_clean_eval = accuracy
# Accuracy of the adversarially trained model on adversarial examples
accuracy = model_eval(
model_adv_train, X_test, Y_test,
args=eval_params, attack=attack,
attack_args=attack_params)
print('Test accuracy on adversarial examples: %0.4f' % accuracy)
report.adv_train_adv_eval = accuracy
# Perform and evaluate adversarial training
train(model_adv_train, X_train, Y_train, evaluate=evaluate_adv,
args=train_params, rng=rng,
var_list=model_adv_train.get_params(),
attack=attack, attack_args=attack_params)
# Calculate training errors
if testing:
eval_params = {'batch_size': batch_size}
accuracy = model_eval(
model_adv_train, X_train, Y_train, args=eval_params,
attack=None, attack_args=None)
report.train_adv_train_clean_eval = accuracy
accuracy = model_eval(
model_adv_train, X_train, Y_train, args=eval_params,
attack=attack, attack_args=attack_params)
report.train_adv_train_adv_eval = accuracy
return report
|
[
"def",
"mnist_tutorial",
"(",
"train_start",
"=",
"0",
",",
"train_end",
"=",
"60000",
",",
"test_start",
"=",
"0",
",",
"test_end",
"=",
"10000",
",",
"nb_epochs",
"=",
"NB_EPOCHS",
",",
"batch_size",
"=",
"BATCH_SIZE",
",",
"learning_rate",
"=",
"LEARNING_RATE",
",",
"clean_train",
"=",
"True",
",",
"testing",
"=",
"False",
",",
"backprop_through_attack",
"=",
"False",
",",
"nb_filters",
"=",
"NB_FILTERS",
",",
"num_threads",
"=",
"None",
",",
"attack_string",
"=",
"None",
")",
":",
"# Object used to keep track of (and return) key accuracies",
"report",
"=",
"AccuracyReport",
"(",
")",
"# Set TF random seed to improve reproducibility",
"tf",
".",
"set_random_seed",
"(",
"1234",
")",
"# Set logging level to see debug information",
"set_log_level",
"(",
"logging",
".",
"DEBUG",
")",
"# Get MNIST test data",
"mnist",
"=",
"MNIST",
"(",
"train_start",
"=",
"train_start",
",",
"train_end",
"=",
"train_end",
",",
"test_start",
"=",
"test_start",
",",
"test_end",
"=",
"test_end",
")",
"X_train",
",",
"Y_train",
"=",
"mnist",
".",
"get_set",
"(",
"'train'",
")",
"X_test",
",",
"Y_test",
"=",
"mnist",
".",
"get_set",
"(",
"'test'",
")",
"# Use label smoothing",
"assert",
"Y_train",
".",
"shape",
"[",
"1",
"]",
"==",
"10",
"label_smooth",
"=",
".1",
"Y_train",
"=",
"Y_train",
".",
"clip",
"(",
"label_smooth",
"/",
"9.",
",",
"1.",
"-",
"label_smooth",
")",
"# Train an MNIST model",
"train_params",
"=",
"{",
"'nb_epochs'",
":",
"nb_epochs",
",",
"'batch_size'",
":",
"batch_size",
",",
"'learning_rate'",
":",
"learning_rate",
"}",
"# Initialize the attack object",
"attack_class",
"=",
"attack_selection",
"(",
"attack_string",
")",
"attack_params",
"=",
"{",
"'eps'",
":",
"0.3",
",",
"'clip_min'",
":",
"0.",
",",
"'clip_max'",
":",
"1.",
"}",
"rng",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"[",
"2018",
",",
"6",
",",
"18",
"]",
")",
"if",
"clean_train",
":",
"model",
"=",
"ModelBasicCNNTFE",
"(",
"nb_filters",
"=",
"nb_filters",
")",
"def",
"evaluate_clean",
"(",
")",
":",
"\"\"\"Evaluate the accuracy of the MNIST model on legitimate test\n examples\n \"\"\"",
"eval_params",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"acc",
"=",
"model_eval",
"(",
"model",
",",
"X_test",
",",
"Y_test",
",",
"args",
"=",
"eval_params",
")",
"report",
".",
"clean_train_clean_eval",
"=",
"acc",
"assert",
"X_test",
".",
"shape",
"[",
"0",
"]",
"==",
"test_end",
"-",
"test_start",
",",
"X_test",
".",
"shape",
"print",
"(",
"'Test accuracy on legitimate examples: %0.4f'",
"%",
"acc",
")",
"train",
"(",
"model",
",",
"X_train",
",",
"Y_train",
",",
"evaluate",
"=",
"evaluate_clean",
",",
"args",
"=",
"train_params",
",",
"rng",
"=",
"rng",
",",
"var_list",
"=",
"model",
".",
"get_params",
"(",
")",
")",
"if",
"testing",
":",
"# Calculate training error",
"eval_params",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"acc",
"=",
"model_eval",
"(",
"model",
",",
"X_train",
",",
"Y_train",
",",
"args",
"=",
"eval_params",
")",
"report",
".",
"train_clean_train_clean_eval",
"=",
"acc",
"# Evaluate the accuracy of the MNIST model on adversarial examples",
"eval_par",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"attack",
"=",
"attack_class",
"(",
"model",
")",
"acc",
"=",
"model_eval",
"(",
"model",
",",
"X_test",
",",
"Y_test",
",",
"args",
"=",
"eval_par",
",",
"attack",
"=",
"attack",
",",
"attack_args",
"=",
"attack_params",
")",
"print",
"(",
"'Test accuracy on adversarial examples: %0.4f\\n'",
"%",
"acc",
")",
"report",
".",
"clean_train_adv_eval",
"=",
"acc",
"# Calculate training error",
"if",
"testing",
":",
"eval_par",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"acc",
"=",
"model_eval",
"(",
"model",
",",
"X_train",
",",
"Y_train",
",",
"args",
"=",
"eval_par",
",",
"attack",
"=",
"attack",
",",
"attack_args",
"=",
"attack_params",
")",
"print",
"(",
"'Train accuracy on adversarial examples: %0.4f\\n'",
"%",
"acc",
")",
"report",
".",
"train_clean_train_adv_eval",
"=",
"acc",
"attack",
"=",
"None",
"print",
"(",
"\"Repeating the process, using adversarial training\"",
")",
"model_adv_train",
"=",
"ModelBasicCNNTFE",
"(",
"nb_filters",
"=",
"nb_filters",
")",
"attack",
"=",
"attack_class",
"(",
"model_adv_train",
")",
"def",
"evaluate_adv",
"(",
")",
":",
"# Accuracy of adversarially trained model on legitimate test inputs",
"eval_params",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"accuracy",
"=",
"model_eval",
"(",
"model_adv_train",
",",
"X_test",
",",
"Y_test",
",",
"args",
"=",
"eval_params",
")",
"print",
"(",
"'Test accuracy on legitimate examples: %0.4f'",
"%",
"accuracy",
")",
"report",
".",
"adv_train_clean_eval",
"=",
"accuracy",
"# Accuracy of the adversarially trained model on adversarial examples",
"accuracy",
"=",
"model_eval",
"(",
"model_adv_train",
",",
"X_test",
",",
"Y_test",
",",
"args",
"=",
"eval_params",
",",
"attack",
"=",
"attack",
",",
"attack_args",
"=",
"attack_params",
")",
"print",
"(",
"'Test accuracy on adversarial examples: %0.4f'",
"%",
"accuracy",
")",
"report",
".",
"adv_train_adv_eval",
"=",
"accuracy",
"# Perform and evaluate adversarial training",
"train",
"(",
"model_adv_train",
",",
"X_train",
",",
"Y_train",
",",
"evaluate",
"=",
"evaluate_adv",
",",
"args",
"=",
"train_params",
",",
"rng",
"=",
"rng",
",",
"var_list",
"=",
"model_adv_train",
".",
"get_params",
"(",
")",
",",
"attack",
"=",
"attack",
",",
"attack_args",
"=",
"attack_params",
")",
"# Calculate training errors",
"if",
"testing",
":",
"eval_params",
"=",
"{",
"'batch_size'",
":",
"batch_size",
"}",
"accuracy",
"=",
"model_eval",
"(",
"model_adv_train",
",",
"X_train",
",",
"Y_train",
",",
"args",
"=",
"eval_params",
",",
"attack",
"=",
"None",
",",
"attack_args",
"=",
"None",
")",
"report",
".",
"train_adv_train_clean_eval",
"=",
"accuracy",
"accuracy",
"=",
"model_eval",
"(",
"model_adv_train",
",",
"X_train",
",",
"Y_train",
",",
"args",
"=",
"eval_params",
",",
"attack",
"=",
"attack",
",",
"attack_args",
"=",
"attack_params",
")",
"report",
".",
"train_adv_train_adv_eval",
"=",
"accuracy",
"return",
"report"
] |
MNIST cleverhans tutorial
:param train_start: index of first training set example.
:param train_end: index of last training set example.
:param test_start: index of first test set example.
:param test_end: index of last test set example.
:param nb_epochs: number of epochs to train model.
:param batch_size: size of training batches.
:param learning_rate: learning rate for training.
:param clean_train: perform normal training on clean examples only
before performing adversarial training.
:param testing: if true, complete an AccuracyReport for unit tests
to verify that performance is adequate.
:param backprop_through_attack: If True, backprop through adversarial
example construction process during
adversarial training.
:param nb_filters: number of filters in the CNN used for training.
:param num_threads: number of threads used for running the process.
:param attack_string: attack name for crafting adversarial attacks and
adversarial training, in string format.
:return: an AccuracyReport object
|
[
"MNIST",
"cleverhans",
"tutorial",
":",
"param",
"train_start",
":",
"index",
"of",
"first",
"training",
"set",
"example",
".",
":",
"param",
"train_end",
":",
"index",
"of",
"last",
"training",
"set",
"example",
".",
":",
"param",
"test_start",
":",
"index",
"of",
"first",
"test",
"set",
"example",
".",
":",
"param",
"test_end",
":",
"index",
"of",
"last",
"test",
"set",
"example",
".",
":",
"param",
"nb_epochs",
":",
"number",
"of",
"epochs",
"to",
"train",
"model",
".",
":",
"param",
"batch_size",
":",
"size",
"of",
"training",
"batches",
".",
":",
"param",
"learning_rate",
":",
"learning",
"rate",
"for",
"training",
".",
":",
"param",
"clean_train",
":",
"perform",
"normal",
"training",
"on",
"clean",
"examples",
"only",
"before",
"performing",
"adversarial",
"training",
".",
":",
"param",
"testing",
":",
"if",
"true",
"complete",
"an",
"AccuracyReport",
"for",
"unit",
"tests",
"to",
"verify",
"that",
"performance",
"is",
"adequate",
".",
":",
"param",
"backprop_through_attack",
":",
"If",
"True",
"backprop",
"through",
"adversarial",
"example",
"construction",
"process",
"during",
"adversarial",
"training",
".",
":",
"param",
"nb_filters",
":",
"number",
"of",
"filters",
"in",
"the",
"CNN",
"used",
"for",
"training",
".",
":",
"param",
"num_threads",
":",
"number",
"of",
"threads",
"used",
"for",
"running",
"the",
"process",
".",
":",
"param",
"attack_string",
":",
"attack",
"name",
"for",
"crafting",
"adversarial",
"attacks",
"and",
"adversarial",
"training",
"in",
"string",
"format",
".",
":",
"return",
":",
"an",
"AccuracyReport",
"object"
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans_tutorials/mnist_tutorial_tfe.py#L76-L219
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/worker.py
|
sudo_remove_dirtree
|
def sudo_remove_dirtree(dir_name):
"""Removes directory tree as a superuser.
Args:
dir_name: name of the directory to remove.
This function is necessary to cleanup directories created from inside a
Docker, since they usually written as a root, thus have to be removed as a
root.
"""
try:
subprocess.check_output(['sudo', 'rm', '-rf', dir_name])
except subprocess.CalledProcessError as e:
raise WorkerError('Can''t remove directory {0}'.format(dir_name), e)
|
python
|
def sudo_remove_dirtree(dir_name):
"""Removes directory tree as a superuser.
Args:
dir_name: name of the directory to remove.
This function is necessary to cleanup directories created from inside a
Docker, since they usually written as a root, thus have to be removed as a
root.
"""
try:
subprocess.check_output(['sudo', 'rm', '-rf', dir_name])
except subprocess.CalledProcessError as e:
raise WorkerError('Can''t remove directory {0}'.format(dir_name), e)
|
[
"def",
"sudo_remove_dirtree",
"(",
"dir_name",
")",
":",
"try",
":",
"subprocess",
".",
"check_output",
"(",
"[",
"'sudo'",
",",
"'rm'",
",",
"'-rf'",
",",
"dir_name",
"]",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"raise",
"WorkerError",
"(",
"'Can'",
"'t remove directory {0}'",
".",
"format",
"(",
"dir_name",
")",
",",
"e",
")"
] |
Removes directory tree as a superuser.
Args:
dir_name: name of the directory to remove.
This function is necessary to cleanup directories created from inside a
Docker, since they usually written as a root, thus have to be removed as a
root.
|
[
"Removes",
"directory",
"tree",
"as",
"a",
"superuser",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/worker.py#L116-L129
|
train
|
tensorflow/cleverhans
|
examples/nips17_adversarial_competition/eval_infra/code/worker.py
|
main
|
def main(args):
"""Main function which runs worker."""
title = '## Starting evaluation of round {0} ##'.format(args.round_name)
logging.info('\n'
+ '#' * len(title) + '\n'
+ '#' * len(title) + '\n'
+ '##' + ' ' * (len(title)-2) + '##' + '\n'
+ title + '\n'
+ '#' * len(title) + '\n'
+ '#' * len(title) + '\n'
+ '##' + ' ' * (len(title)-2) + '##' + '\n')
if args.blacklisted_submissions:
logging.warning('BLACKLISTED SUBMISSIONS: %s',
args.blacklisted_submissions)
random.seed()
logging.info('Running nvidia-docker to ensure that GPU works')
shell_call(['docker', 'run', '--runtime=nvidia',
'--rm', 'nvidia/cuda', 'nvidia-smi'])
eval_worker = EvaluationWorker(
worker_id=args.worker_id,
storage_client=eval_lib.CompetitionStorageClient(
args.project_id, args.storage_bucket),
datastore_client=eval_lib.CompetitionDatastoreClient(
args.project_id, args.round_name),
storage_bucket=args.storage_bucket,
round_name=args.round_name,
dataset_name=args.dataset_name,
blacklisted_submissions=args.blacklisted_submissions,
num_defense_shards=args.num_defense_shards)
eval_worker.run_work()
|
python
|
def main(args):
"""Main function which runs worker."""
title = '## Starting evaluation of round {0} ##'.format(args.round_name)
logging.info('\n'
+ '#' * len(title) + '\n'
+ '#' * len(title) + '\n'
+ '##' + ' ' * (len(title)-2) + '##' + '\n'
+ title + '\n'
+ '#' * len(title) + '\n'
+ '#' * len(title) + '\n'
+ '##' + ' ' * (len(title)-2) + '##' + '\n')
if args.blacklisted_submissions:
logging.warning('BLACKLISTED SUBMISSIONS: %s',
args.blacklisted_submissions)
random.seed()
logging.info('Running nvidia-docker to ensure that GPU works')
shell_call(['docker', 'run', '--runtime=nvidia',
'--rm', 'nvidia/cuda', 'nvidia-smi'])
eval_worker = EvaluationWorker(
worker_id=args.worker_id,
storage_client=eval_lib.CompetitionStorageClient(
args.project_id, args.storage_bucket),
datastore_client=eval_lib.CompetitionDatastoreClient(
args.project_id, args.round_name),
storage_bucket=args.storage_bucket,
round_name=args.round_name,
dataset_name=args.dataset_name,
blacklisted_submissions=args.blacklisted_submissions,
num_defense_shards=args.num_defense_shards)
eval_worker.run_work()
|
[
"def",
"main",
"(",
"args",
")",
":",
"title",
"=",
"'## Starting evaluation of round {0} ##'",
".",
"format",
"(",
"args",
".",
"round_name",
")",
"logging",
".",
"info",
"(",
"'\\n'",
"+",
"'#'",
"*",
"len",
"(",
"title",
")",
"+",
"'\\n'",
"+",
"'#'",
"*",
"len",
"(",
"title",
")",
"+",
"'\\n'",
"+",
"'##'",
"+",
"' '",
"*",
"(",
"len",
"(",
"title",
")",
"-",
"2",
")",
"+",
"'##'",
"+",
"'\\n'",
"+",
"title",
"+",
"'\\n'",
"+",
"'#'",
"*",
"len",
"(",
"title",
")",
"+",
"'\\n'",
"+",
"'#'",
"*",
"len",
"(",
"title",
")",
"+",
"'\\n'",
"+",
"'##'",
"+",
"' '",
"*",
"(",
"len",
"(",
"title",
")",
"-",
"2",
")",
"+",
"'##'",
"+",
"'\\n'",
")",
"if",
"args",
".",
"blacklisted_submissions",
":",
"logging",
".",
"warning",
"(",
"'BLACKLISTED SUBMISSIONS: %s'",
",",
"args",
".",
"blacklisted_submissions",
")",
"random",
".",
"seed",
"(",
")",
"logging",
".",
"info",
"(",
"'Running nvidia-docker to ensure that GPU works'",
")",
"shell_call",
"(",
"[",
"'docker'",
",",
"'run'",
",",
"'--runtime=nvidia'",
",",
"'--rm'",
",",
"'nvidia/cuda'",
",",
"'nvidia-smi'",
"]",
")",
"eval_worker",
"=",
"EvaluationWorker",
"(",
"worker_id",
"=",
"args",
".",
"worker_id",
",",
"storage_client",
"=",
"eval_lib",
".",
"CompetitionStorageClient",
"(",
"args",
".",
"project_id",
",",
"args",
".",
"storage_bucket",
")",
",",
"datastore_client",
"=",
"eval_lib",
".",
"CompetitionDatastoreClient",
"(",
"args",
".",
"project_id",
",",
"args",
".",
"round_name",
")",
",",
"storage_bucket",
"=",
"args",
".",
"storage_bucket",
",",
"round_name",
"=",
"args",
".",
"round_name",
",",
"dataset_name",
"=",
"args",
".",
"dataset_name",
",",
"blacklisted_submissions",
"=",
"args",
".",
"blacklisted_submissions",
",",
"num_defense_shards",
"=",
"args",
".",
"num_defense_shards",
")",
"eval_worker",
".",
"run_work",
"(",
")"
] |
Main function which runs worker.
|
[
"Main",
"function",
"which",
"runs",
"worker",
"."
] |
97488e215760547b81afc53f5e5de8ba7da5bd98
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/worker.py#L900-L929
|
train
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.