Dataset schema (one record per source file):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 to 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 2 to 1.02M |
| avg_line_length | float64 | 1 to 417k |
| max_line_length | int64 | 1 to 987k |
| alphanum_fraction | float64 | 0 to 1 |
| content_no_comment | string | length 0 to 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
---
hexsha: f709817624225eac19cd625a3048f72b03e64a4b | size: 382 | ext: py | lang: Python
max_stars: wsgi.py | iamsayem/smart-editor @ 012ad2775cd33247642c629a2a92ec89e4462412 | ["MIT"] | count: null | events: null
max_issues: wsgi.py | iamsayem/smart-editor @ 012ad2775cd33247642c629a2a92ec89e4462412 | ["MIT"] | count: null | events: null
max_forks: wsgi.py | iamsayem/smart-editor @ 012ad2775cd33247642c629a2a92ec89e4462412 | ["MIT"] | count: null | events: null
content:
```python
"""
WSGI config for editor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
application = get_wsgi_application()
```
avg_line_length: 22.470588 | max_line_length: 78 | alphanum_fraction: 0.782723
content_no_comment:
```python
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
application = get_wsgi_application()
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
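The record above is a stock Django WSGI entry point. A minimal local smoke test, assuming the file is importable as `wsgi` and that its `DJANGO_SETTINGS_MODULE` actually resolves (both are assumptions about this repo's layout):

```python
# Hypothetical smoke test: serve the WSGI callable with the standard library only.
# `from wsgi import application` assumes wsgi.py is on the import path.
from wsgiref.simple_server import make_server

from wsgi import application

with make_server("", 8000, application) as httpd:
    httpd.serve_forever()  # then open http://localhost:8000/ to verify
```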
---
hexsha: f7098178cdc1fe45aac531ccf83efb684e2e1369 | size: 12,691 | ext: py | lang: Python
max_stars: src/python/pants/backend/docker/target_types.py | xyzst/pants @ d6a357fe67ee7e8e1aefeae625e107f5609f1717 | ["Apache-2.0"] | count: null | events: null
max_issues: src/python/pants/backend/docker/target_types.py | xyzst/pants @ d6a357fe67ee7e8e1aefeae625e107f5609f1717 | ["Apache-2.0"] | count: null | events: null
max_forks: src/python/pants/backend/docker/target_types.py | xyzst/pants @ d6a357fe67ee7e8e1aefeae625e107f5609f1717 | ["Apache-2.0"] | count: null | events: null
content:
```python
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
import re
from abc import ABC, abstractmethod
from textwrap import dedent
from typing import Callable, ClassVar, Iterator, Optional, cast
from typing_extensions import final
from pants.backend.docker.registries import ALL_DEFAULT_REGISTRIES
from pants.base.build_environment import get_buildroot
from pants.core.goals.run import RestartableField
from pants.engine.addresses import Address
from pants.engine.fs import GlobMatchErrorBehavior
from pants.engine.target import (
COMMON_TARGET_FIELDS,
AsyncFieldMixin,
BoolField,
Dependencies,
DictStringToStringField,
InvalidFieldException,
OptionalSingleSourceField,
StringField,
StringSequenceField,
Target,
)
from pants.util.docutil import doc_url
# Common help text to be applied to each field that supports value interpolation.
_interpolation_help = (
"{kind} may use placeholders in curly braces to be interpolated. The placeholders are derived "
"from various sources, such as the Dockerfile instructions and build args.\n\n"
)
class DockerImageBuildArgsField(StringSequenceField):
alias = "extra_build_args"
default = ()
help = (
"Build arguments (`--build-arg`) to use when building this image. "
"Entries are either strings in the form `ARG_NAME=value` to set an explicit value; "
"or just `ARG_NAME` to copy the value from Pants's own environment.\n\n"
"Use `[docker].build_args` to set default build args for all images."
)
class DockerImageContextRootField(StringField):
alias = "context_root"
help = (
"Specify which directory to use as the Docker build context root. This affects the file "
"paths to use for the `COPY` and `ADD` instructions. For example, whether "
"`COPY files/f.txt` should look for the file relative to the build root: "
"`<build root>/files/f.txt` vs relative to the BUILD file: "
"`<build root>/path_to_build_file/files/f.txt`.\n\n"
"Specify the `context_root` path as `files` for relative to build root, or as `./files` "
"for relative to the BUILD file.\n\n"
"If `context_root` is not specified, it defaults to `[docker].default_context_root`."
)
@classmethod
def compute_value(cls, raw_value: Optional[str], address: Address) -> Optional[str]:
value_or_default = super().compute_value(raw_value, address=address)
if isinstance(value_or_default, str) and value_or_default.startswith("/"):
val = value_or_default.strip("/")
raise InvalidFieldException(
f"The `{cls.alias}` field in target {address} must be a relative path, but was "
f"{value_or_default!r}. Use {val!r} for a path relative to the build root, or "
f"{'./' + val!r} for a path relative to the BUILD file (i.e. {os.path.join(address.spec_path, val)!r})."
)
return value_or_default
class DockerImageSourceField(OptionalSingleSourceField):
default = "Dockerfile"
# When the default glob value is in effect, we don't want the normal glob match error behavior
# to kick in for a missing Dockerfile, in case there are `instructions` provided, in which case
# we generate the Dockerfile instead. If there are no `instructions`, or there are both
# `instructions` and a Dockerfile hydrated from the `source` glob, we error out with a message
# to the user.
default_glob_match_error_behavior = GlobMatchErrorBehavior.ignore
help = (
"The Dockerfile to use when building the Docker image.\n\n"
"Use the `instructions` field instead if you prefer not having the Dockerfile in your "
"source tree."
)
class DockerImageInstructionsField(StringSequenceField):
alias = "instructions"
required = False
help = (
"The `Dockerfile` content, typically one instruction per list item.\n\n"
"Use the `source` field instead if you prefer having the Dockerfile in your source tree."
"\n\n"
+ dedent(
"""\
Example:
# example/BUILD
docker_image(
instructions=[
"FROM base/image:1.0",
"RUN echo example",
],
)
"""
)
)
class DockerImageTagsField(StringSequenceField):
alias = "image_tags"
default = ("latest",)
help = (
"Any tags to apply to the Docker image name (the version is usually applied as a tag).\n\n"
+ _interpolation_help.format(kind="tag")
+ f"See {doc_url('tagging-docker-images')}."
)
class DockerImageTargetStageField(StringField):
alias = "target_stage"
help = (
"Specify target build stage, rather than building the entire `Dockerfile`.\n\n"
"When using multi-stage build, you may name your stages, and can target them when building "
"to only selectively build a certain stage. See also the `--docker-build-target-stage` "
"option.\n\n"
"Read more about [multi-stage Docker builds]"
"(https://docs.docker.com/develop/develop-images/multistage-build/#stop-at-a-specific-build-stage)"
)
class DockerImageDependenciesField(Dependencies):
supports_transitive_excludes = True
class DockerImageRegistriesField(StringSequenceField):
alias = "registries"
default = (ALL_DEFAULT_REGISTRIES,)
help = (
"List of addresses or configured aliases to any Docker registries to use for the "
"built image.\n\n"
"The address is a domain name with optional port for your registry, and any registry "
"aliases are prefixed with `@` for addresses in the [docker].registries configuration "
"section.\n\n"
"By default, all configured registries with `default = true` are used.\n\n"
+ dedent(
"""\
Example:
# pants.toml
[docker.registries.my-registry-alias]
address = "myregistrydomain:port"
default = false # optional
# example/BUILD
docker_image(
registries = [
"@my-registry-alias",
"myregistrydomain:port",
],
)
"""
)
+ (
"The above example shows two valid `registry` options: using an alias to a configured "
"registry and the address to a registry verbatim in the BUILD file."
)
)
class DockerImageRepositoryField(StringField):
alias = "repository"
help = (
'The repository name for the Docker image. e.g. "<repository>/<name>".\n\n'
"It uses the `[docker].default_repository` by default.\n\n"
+ _interpolation_help.format(kind="repository")
+ "Additional placeholders for the repository field are: `name`, `directory` and "
"`parent_directory`.\n\nSee the documentation for `[docker].default_repository` for more "
"information."
)
class DockerImageSkipPushField(BoolField):
alias = "skip_push"
default = False
help = "If set to true, do not push this image to registries when running `./pants publish`."
OptionValueFormatter = Callable[[str], str]
class DockerBuildOptionFieldMixin(ABC):
"""Inherit this mixin class to provide options to `docker build`."""
docker_build_option: ClassVar[str]
@abstractmethod
def option_values(self, *, value_formatter: OptionValueFormatter) -> Iterator[str]:
"""Subclasses must implement this, to turn their `self.value` into none, one or more option
values."""
@final
def options(self, value_formatter: OptionValueFormatter) -> Iterator[str]:
for value in self.option_values(value_formatter=value_formatter):
yield from (self.docker_build_option, value)
class DockerImageBuildImageLabelsOptionField(DockerBuildOptionFieldMixin, DictStringToStringField):
alias = "image_labels"
help = (
"Provide image metadata.\n\n"
+ _interpolation_help.format(kind="label value")
+ "See [Docker labels](https://docs.docker.com/config/labels-custom-metadata/"
"#manage-labels-on-objects) for more information."
)
docker_build_option = "--label"
def option_values(self, value_formatter: OptionValueFormatter) -> Iterator[str]:
for label, value in (self.value or {}).items():
yield f"{label}={value_formatter(value)}"
class DockerImageBuildSecretsOptionField(
AsyncFieldMixin, DockerBuildOptionFieldMixin, DictStringToStringField
):
alias = "secrets"
help = (
"Secret files to expose to the build (only if BuildKit enabled).\n\n"
"Secrets may use absolute paths, or paths relative to your build root, or the BUILD file "
"if prefixed with `./`. The id should be valid as used by the Docker build `--secret` "
"option. See [Docker secrets](https://docs.docker.com/engine/swarm/secrets/) for more "
"information.\n\n"
+ dedent(
"""\
Example:
docker_image(
secrets={
"mysecret": "/var/secrets/some-secret",
"repo-secret": "src/proj/secrets/some-secret",
"target-secret": "./secrets/some-secret",
}
)
"""
)
)
docker_build_option = "--secret"
def option_values(self, **kwargs) -> Iterator[str]:
# os.path.join() discards preceding parts if encountering an abs path, e.g. if the secret
# `path` is an absolute path, the `buildroot` and `spec_path` will not be considered. Also,
# an empty path part is ignored.
for secret, path in (self.value or {}).items():
full_path = os.path.join(
get_buildroot(),
self.address.spec_path if re.match(r"\.{1,2}/", path) else "",
path,
)
yield f"id={secret},src={os.path.normpath(full_path)}"
class DockerImageBuildSSHOptionField(DockerBuildOptionFieldMixin, StringSequenceField):
alias = "ssh"
default = ()
help = (
"SSH agent socket or keys to expose to the build (only if BuildKit enabled) "
"(format: default|<id>[=<socket>|<key>[,<key>]])\n\n"
"The exposed agent and/or keys can then be used in your `Dockerfile` by mounting them in "
"your `RUN` instructions:\n\n"
" RUN --mount=type=ssh ...\n\n"
"See [Docker documentation](https://docs.docker.com/develop/develop-images"
"/build_enhancements/#using-ssh-to-access-private-data-in-builds) for more information."
)
docker_build_option = "--ssh"
def option_values(self, **kwargs) -> Iterator[str]:
yield from cast("tuple[str]", self.value)
class DockerImageTarget(Target):
alias = "docker_image"
core_fields = (
*COMMON_TARGET_FIELDS,
DockerImageBuildArgsField,
DockerImageDependenciesField,
DockerImageSourceField,
DockerImageInstructionsField,
DockerImageContextRootField,
DockerImageTagsField,
DockerImageRegistriesField,
DockerImageRepositoryField,
DockerImageBuildImageLabelsOptionField,
DockerImageBuildSecretsOptionField,
DockerImageBuildSSHOptionField,
DockerImageSkipPushField,
DockerImageTargetStageField,
RestartableField,
)
help = (
"The `docker_image` target describes how to build and tag a Docker image.\n\n"
"Any dependencies, as inferred or explicitly specified, will be included in the Docker "
"build context, after being packaged if applicable.\n\n"
"By default, will use a Dockerfile from the same directory as the BUILD file this target "
"is defined in. Point at another file with the `source` field, or use the `instructions` "
"field to have the Dockerfile contents verbatim directly in the BUILD file.\n\n"
"Dependencies on upstream/base images defined by another `docker_image` are inferred if "
"referenced by a build argument with a default value of the target address.\n\n"
+ dedent(
"""\
Example:
# src/docker/downstream/Dockerfile
ARG BASE=src/docker/upstream:image
FROM $BASE
...
"""
)
)
```
avg_line_length: 38.574468 | max_line_length: 120 | alphanum_fraction: 0.643685
content_no_comment:
```python
from __future__ import annotations
import os
import re
from abc import ABC, abstractmethod
from textwrap import dedent
from typing import Callable, ClassVar, Iterator, Optional, cast
from typing_extensions import final
from pants.backend.docker.registries import ALL_DEFAULT_REGISTRIES
from pants.base.build_environment import get_buildroot
from pants.core.goals.run import RestartableField
from pants.engine.addresses import Address
from pants.engine.fs import GlobMatchErrorBehavior
from pants.engine.target import (
COMMON_TARGET_FIELDS,
AsyncFieldMixin,
BoolField,
Dependencies,
DictStringToStringField,
InvalidFieldException,
OptionalSingleSourceField,
StringField,
StringSequenceField,
Target,
)
from pants.util.docutil import doc_url
_interpolation_help = (
"{kind} may use placeholders in curly braces to be interpolated. The placeholders are derived "
"from various sources, such as the Dockerfile instructions and build args.\n\n"
)
class DockerImageBuildArgsField(StringSequenceField):
alias = "extra_build_args"
default = ()
help = (
"Build arguments (`--build-arg`) to use when building this image. "
"Entries are either strings in the form `ARG_NAME=value` to set an explicit value; "
"or just `ARG_NAME` to copy the value from Pants's own environment.\n\n"
"Use `[docker].build_args` to set default build args for all images."
)
class DockerImageContextRootField(StringField):
alias = "context_root"
help = (
"Specify which directory to use as the Docker build context root. This affects the file "
"paths to use for the `COPY` and `ADD` instructions. For example, whether "
"`COPY files/f.txt` should look for the file relative to the build root: "
"`<build root>/files/f.txt` vs relative to the BUILD file: "
"`<build root>/path_to_build_file/files/f.txt`.\n\n"
"Specify the `context_root` path as `files` for relative to build root, or as `./files` "
"for relative to the BUILD file.\n\n"
"If `context_root` is not specified, it defaults to `[docker].default_context_root`."
)
@classmethod
def compute_value(cls, raw_value: Optional[str], address: Address) -> Optional[str]:
value_or_default = super().compute_value(raw_value, address=address)
if isinstance(value_or_default, str) and value_or_default.startswith("/"):
val = value_or_default.strip("/")
raise InvalidFieldException(
f"The `{cls.alias}` field in target {address} must be a relative path, but was "
f"{value_or_default!r}. Use {val!r} for a path relative to the build root, or "
f"{'./' + val!r} for a path relative to the BUILD file (i.e. {os.path.join(address.spec_path, val)!r})."
)
return value_or_default
class DockerImageSourceField(OptionalSingleSourceField):
default = "Dockerfile"
# When the default glob value is in effect, we don't want the normal glob match error behavior
default_glob_match_error_behavior = GlobMatchErrorBehavior.ignore
help = (
"The Dockerfile to use when building the Docker image.\n\n"
"Use the `instructions` field instead if you prefer not having the Dockerfile in your "
"source tree."
)
class DockerImageInstructionsField(StringSequenceField):
alias = "instructions"
required = False
help = (
"The `Dockerfile` content, typically one instruction per list item.\n\n"
"Use the `source` field instead if you prefer having the Dockerfile in your source tree."
"\n\n"
+ dedent(
"""\
Example:
# example/BUILD
docker_image(
instructions=[
"FROM base/image:1.0",
"RUN echo example",
],
)
"""
)
)
class DockerImageTagsField(StringSequenceField):
alias = "image_tags"
default = ("latest",)
help = (
"Any tags to apply to the Docker image name (the version is usually applied as a tag).\n\n"
+ _interpolation_help.format(kind="tag")
+ f"See {doc_url('tagging-docker-images')}."
)
class DockerImageTargetStageField(StringField):
alias = "target_stage"
help = (
"Specify target build stage, rather than building the entire `Dockerfile`.\n\n"
"When using multi-stage build, you may name your stages, and can target them when building "
"to only selectively build a certain stage. See also the `--docker-build-target-stage` "
"option.\n\n"
"Read more about [multi-stage Docker builds]"
"(https://docs.docker.com/develop/develop-images/multistage-build/#stop-at-a-specific-build-stage)"
)
class DockerImageDependenciesField(Dependencies):
supports_transitive_excludes = True
class DockerImageRegistriesField(StringSequenceField):
alias = "registries"
default = (ALL_DEFAULT_REGISTRIES,)
help = (
"List of addresses or configured aliases to any Docker registries to use for the "
"built image.\n\n"
"The address is a domain name with optional port for your registry, and any registry "
"aliases are prefixed with `@` for addresses in the [docker].registries configuration "
"section.\n\n"
"By default, all configured registries with `default = true` are used.\n\n"
+ dedent(
"""\
Example:
# pants.toml
[docker.registries.my-registry-alias]
address = "myregistrydomain:port"
default = false # optional
# example/BUILD
docker_image(
registries = [
"@my-registry-alias",
"myregistrydomain:port",
],
)
"""
)
+ (
"The above example shows two valid `registry` options: using an alias to a configured "
"registry and the address to a registry verbatim in the BUILD file."
)
)
class DockerImageRepositoryField(StringField):
alias = "repository"
help = (
'The repository name for the Docker image. e.g. "<repository>/<name>".\n\n'
"It uses the `[docker].default_repository` by default.\n\n"
+ _interpolation_help.format(kind="repository")
+ "Additional placeholders for the repository field are: `name`, `directory` and "
"`parent_directory`.\n\nSee the documentation for `[docker].default_repository` for more "
"information."
)
class DockerImageSkipPushField(BoolField):
alias = "skip_push"
default = False
help = "If set to true, do not push this image to registries when running `./pants publish`."
OptionValueFormatter = Callable[[str], str]
class DockerBuildOptionFieldMixin(ABC):
docker_build_option: ClassVar[str]
@abstractmethod
    def option_values(self, *, value_formatter: OptionValueFormatter) -> Iterator[str]:
        ...
@final
def options(self, value_formatter: OptionValueFormatter) -> Iterator[str]:
for value in self.option_values(value_formatter=value_formatter):
yield from (self.docker_build_option, value)
class DockerImageBuildImageLabelsOptionField(DockerBuildOptionFieldMixin, DictStringToStringField):
alias = "image_labels"
help = (
"Provide image metadata.\n\n"
+ _interpolation_help.format(kind="label value")
+ "See [Docker labels](https://docs.docker.com/config/labels-custom-metadata/"
"#manage-labels-on-objects) for more information."
)
docker_build_option = "--label"
def option_values(self, value_formatter: OptionValueFormatter) -> Iterator[str]:
for label, value in (self.value or {}).items():
yield f"{label}={value_formatter(value)}"
class DockerImageBuildSecretsOptionField(
AsyncFieldMixin, DockerBuildOptionFieldMixin, DictStringToStringField
):
alias = "secrets"
help = (
"Secret files to expose to the build (only if BuildKit enabled).\n\n"
"Secrets may use absolute paths, or paths relative to your build root, or the BUILD file "
"if prefixed with `./`. The id should be valid as used by the Docker build `--secret` "
"option. See [Docker secrets](https://docs.docker.com/engine/swarm/secrets/) for more "
"information.\n\n"
+ dedent(
"""\
Example:
docker_image(
secrets={
"mysecret": "/var/secrets/some-secret",
"repo-secret": "src/proj/secrets/some-secret",
"target-secret": "./secrets/some-secret",
}
)
"""
)
)
docker_build_option = "--secret"
def option_values(self, **kwargs) -> Iterator[str]:
for secret, path in (self.value or {}).items():
full_path = os.path.join(
get_buildroot(),
self.address.spec_path if re.match(r"\.{1,2}/", path) else "",
path,
)
yield f"id={secret},src={os.path.normpath(full_path)}"
class DockerImageBuildSSHOptionField(DockerBuildOptionFieldMixin, StringSequenceField):
alias = "ssh"
default = ()
help = (
"SSH agent socket or keys to expose to the build (only if BuildKit enabled) "
"(format: default|<id>[=<socket>|<key>[,<key>]])\n\n"
"The exposed agent and/or keys can then be used in your `Dockerfile` by mounting them in "
"your `RUN` instructions:\n\n"
" RUN --mount=type=ssh ...\n\n"
"See [Docker documentation](https://docs.docker.com/develop/develop-images"
"/build_enhancements/#using-ssh-to-access-private-data-in-builds) for more information."
)
docker_build_option = "--ssh"
def option_values(self, **kwargs) -> Iterator[str]:
yield from cast("tuple[str]", self.value)
class DockerImageTarget(Target):
alias = "docker_image"
core_fields = (
*COMMON_TARGET_FIELDS,
DockerImageBuildArgsField,
DockerImageDependenciesField,
DockerImageSourceField,
DockerImageInstructionsField,
DockerImageContextRootField,
DockerImageTagsField,
DockerImageRegistriesField,
DockerImageRepositoryField,
DockerImageBuildImageLabelsOptionField,
DockerImageBuildSecretsOptionField,
DockerImageBuildSSHOptionField,
DockerImageSkipPushField,
DockerImageTargetStageField,
RestartableField,
)
help = (
"The `docker_image` target describes how to build and tag a Docker image.\n\n"
"Any dependencies, as inferred or explicitly specified, will be included in the Docker "
"build context, after being packaged if applicable.\n\n"
"By default, will use a Dockerfile from the same directory as the BUILD file this target "
"is defined in. Point at another file with the `source` field, or use the `instructions` "
"field to have the Dockerfile contents verbatim directly in the BUILD file.\n\n"
"Dependencies on upstream/base images defined by another `docker_image` are inferred if "
"referenced by a build argument with a default value of the target address.\n\n"
+ dedent(
"""\
Example:
# src/docker/downstream/Dockerfile
ARG BASE=src/docker/upstream:image
FROM $BASE
...
"""
)
)
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
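The `docker_image` target above wires together many field classes. A hypothetical BUILD file exercising several of them at once; the field aliases come from the source, every value is invented for illustration:

```python
# example/BUILD (illustrative only; all values are made up)
docker_image(
    name="app",
    source="Dockerfile",                # DockerImageSourceField (the default)
    image_tags=["1.2.3", "latest"],     # DockerImageTagsField
    registries=["@my-registry-alias"],  # DockerImageRegistriesField
    repository="example/{name}",        # DockerImageRepositoryField
    extra_build_args=["VERSION=1.2.3"], # DockerImageBuildArgsField
    skip_push=False,                    # DockerImageSkipPushField
)
```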
---
hexsha: f70982161030e47bcbd5ca140e005db20ffc06d5 | size: 1,694 | ext: py | lang: Python
max_stars: test/writing/test_minimize.py | backwardn/policy_sentry @ 6676fba80b00bcfb3d3884ce5777168a9bbcbf71 | ["MIT"] | count: 1 | 2020-07-20T16:16:30.000Z to 2020-07-20T16:16:30.000Z
max_issues: test/writing/test_minimize.py | avineshwar/policy_sentry @ 1b52b50d97293109ac54350a6c09e48643c7170d | ["MIT"] | count: 14 | 2020-05-06T21:34:17.000Z to 2021-03-05T01:04:06.000Z
max_forks: test/writing/test_minimize.py | Mohib-hub/policy_sentry @ d04a69eb7cce2e184c986e0a364b57eea01ef4da | ["MIT"] | count: null | events: null
content:
```python
import unittest
from policy_sentry.writing.minimize import minimize_statement_actions
from policy_sentry.querying.all import get_all_actions
class MinimizeWildcardActionsTestCase(unittest.TestCase):
def test_minimize_statement_actions(self):
actions_to_minimize = [
"kms:CreateGrant",
"kms:CreateCustomKeyStore",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
]
desired_result = ["ec2:authorizes*", "kms:createc*", "kms:createg*"]
all_actions = get_all_actions(lowercase=True)
minchars = None
self.maxDiff = None
# minimized_actions_list = minimize_statement_actions(desired_actions, all_actions, minchars)
self.assertListEqual(
sorted(
minimize_statement_actions(actions_to_minimize, all_actions, minchars)
),
sorted(desired_result),
)
def test_minimize_statement_actions_funky_case(self):
actions_to_minimize = [
"kms:creategrant",
"kms:createcustomkeystore",
"ec2:authorizesecuritygroupegress",
"ec2:authorizesecuritygroupingress",
]
desired_result = ["ec2:authorizes*", "kms:createc*", "kms:createg*"]
all_actions = get_all_actions(lowercase=True)
minchars = None
self.maxDiff = None
# minimized_actions_list = minimize_statement_actions(desired_actions, all_actions, minchars)
self.assertListEqual(
sorted(
minimize_statement_actions(actions_to_minimize, all_actions, minchars)
),
sorted(desired_result),
)
```
avg_line_length: 37.644444 | max_line_length: 101 | alphanum_fraction: 0.656434
content_no_comment:
```python
import unittest
from policy_sentry.writing.minimize import minimize_statement_actions
from policy_sentry.querying.all import get_all_actions
class MinimizeWildcardActionsTestCase(unittest.TestCase):
def test_minimize_statement_actions(self):
actions_to_minimize = [
"kms:CreateGrant",
"kms:CreateCustomKeyStore",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
]
desired_result = ["ec2:authorizes*", "kms:createc*", "kms:createg*"]
all_actions = get_all_actions(lowercase=True)
minchars = None
self.maxDiff = None
self.assertListEqual(
sorted(
minimize_statement_actions(actions_to_minimize, all_actions, minchars)
),
sorted(desired_result),
)
def test_minimize_statement_actions_funky_case(self):
actions_to_minimize = [
"kms:creategrant",
"kms:createcustomkeystore",
"ec2:authorizesecuritygroupegress",
"ec2:authorizesecuritygroupingress",
]
desired_result = ["ec2:authorizes*", "kms:createc*", "kms:createg*"]
all_actions = get_all_actions(lowercase=True)
minchars = None
self.maxDiff = None
self.assertListEqual(
sorted(
minimize_statement_actions(actions_to_minimize, all_actions, minchars)
),
sorted(desired_result),
)
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
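The tests above pin down the behavior of `minimize_statement_actions`. A sketch of calling it directly; the inputs and the expected output are copied from the first test case, nothing else is assumed:

```python
# Sketch: reproduce one test case by hand.
from policy_sentry.querying.all import get_all_actions
from policy_sentry.writing.minimize import minimize_statement_actions

all_actions = get_all_actions(lowercase=True)
minimized = minimize_statement_actions(
    [
        "kms:CreateGrant",
        "kms:CreateCustomKeyStore",
        "ec2:AuthorizeSecurityGroupEgress",
        "ec2:AuthorizeSecurityGroupIngress",
    ],
    all_actions,
    None,  # minchars, as in the tests
)
print(sorted(minimized))  # per the tests: ['ec2:authorizes*', 'kms:createc*', 'kms:createg*']
```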
---
hexsha: f709848ee6e175d33f02d22031606644dbbc1dcf | size: 3,364 | ext: py | lang: Python
max_stars: ckanext/example_theme_docs/custom_emails/test_custom_emails.py | robin-NEC/ckan @ 71a82c4b0bb499fd3a6d1ccfd038b2231f50f92a | ["BSD-3-Clause"] | count: 1 | 2022-03-24T04:47:38.000Z to 2022-03-24T04:47:38.000Z
max_issues: ckanext/example_theme_docs/custom_emails/test_custom_emails.py | robin-NEC/ckan @ 71a82c4b0bb499fd3a6d1ccfd038b2231f50f92a | ["BSD-3-Clause"] | count: 1 | 2021-09-22T12:53:39.000Z to 2021-09-22T12:53:39.000Z
max_forks: ckanext/example_theme_docs/custom_emails/test_custom_emails.py | robin-NEC/ckan @ 71a82c4b0bb499fd3a6d1ccfd038b2231f50f92a | ["BSD-3-Clause"] | count: 2 | 2018-01-21T17:03:08.000Z to 2019-07-23T08:49:52.000Z
content:
```python
# encoding: utf-8
import os
import pytest
import ckan.model as model
import ckan.lib.mailer as mailer
from ckan.tests import factories
from ckan.lib.base import render
from ckan.common import config
from ckan.tests.lib.test_mailer import MailerBase
@pytest.mark.usefixtures("with_request_context", "clean_db", "with_plugins")
@pytest.mark.ckan_config("ckan.plugins", "example_theme_custom_emails")
class TestExampleCustomEmailsPlugin(MailerBase):
def _get_template_content(self, name):
templates_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "templates", "emails"
)
with open(os.path.join(templates_path, name), "r") as f:
return f.read()
def test_reset_password_custom_subject(self, mail_server):
user = factories.User()
user_obj = model.User.by_name(user["name"])
mailer.send_reset_link(user_obj)
# check it went to the mock smtp server
msgs = mail_server.get_smtp_messages()
assert len(msgs) == 1
msg = msgs[0]
extra_vars = {"site_title": config.get_value("ckan.site_title")}
expected = render(
"emails/reset_password_subject.txt", extra_vars
)
expected = expected.split("\n")[0]
subject = self.get_email_subject(msg[3])
assert expected == subject
assert "**test**" in subject
def test_reset_password_custom_body(self, mail_server):
user = factories.User()
user_obj = model.User.by_name(user["name"])
mailer.send_reset_link(user_obj)
# check it went to the mock smtp server
msgs = mail_server.get_smtp_messages()
assert len(msgs) == 1
msg = msgs[0]
extra_vars = {"reset_link": mailer.get_reset_link(user_obj)}
expected = render("emails/reset_password.txt", extra_vars)
body = self.get_email_body(msg[3]).decode()
assert expected == body.strip()
assert "**test**" in body
def test_invite_user_custom_subject(self, mail_server):
user = factories.User()
user_obj = model.User.by_name(user["name"])
mailer.send_invite(user_obj)
# check it went to the mock smtp server
msgs = mail_server.get_smtp_messages()
assert len(msgs) == 1
msg = msgs[0]
extra_vars = {
"site_title": config.get_value("ckan.site_title"),
}
expected = render("emails/invite_user_subject.txt", extra_vars)
expected = expected.split("\n")[0]
subject = self.get_email_subject(msg[3])
assert expected == subject
assert "**test**" in subject
def test_invite_user_custom_body(self, mail_server):
user = factories.User()
user_obj = model.User.by_name(user["name"])
mailer.send_invite(user_obj)
# check it went to the mock smtp server
msgs = mail_server.get_smtp_messages()
assert len(msgs) == 1
msg = msgs[0]
extra_vars = {
"reset_link": mailer.get_reset_link(user_obj),
"user_name": user["name"],
"site_title": config.get_value("ckan.site_title"),
}
expected = render("emails/invite_user.txt", extra_vars)
body = self.get_email_body(msg[3]).decode()
assert expected == body.strip()
assert "**test**" in body
```
avg_line_length: 32.660194 | max_line_length: 77 | alphanum_fraction: 0.636445
content_no_comment:
```python
import os
import pytest
import ckan.model as model
import ckan.lib.mailer as mailer
from ckan.tests import factories
from ckan.lib.base import render
from ckan.common import config
from ckan.tests.lib.test_mailer import MailerBase
@pytest.mark.usefixtures("with_request_context", "clean_db", "with_plugins")
@pytest.mark.ckan_config("ckan.plugins", "example_theme_custom_emails")
class TestExampleCustomEmailsPlugin(MailerBase):
def _get_template_content(self, name):
templates_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "templates", "emails"
)
with open(os.path.join(templates_path, name), "r") as f:
return f.read()
def test_reset_password_custom_subject(self, mail_server):
user = factories.User()
user_obj = model.User.by_name(user["name"])
mailer.send_reset_link(user_obj)
msgs = mail_server.get_smtp_messages()
assert len(msgs) == 1
msg = msgs[0]
extra_vars = {"site_title": config.get_value("ckan.site_title")}
expected = render(
"emails/reset_password_subject.txt", extra_vars
)
expected = expected.split("\n")[0]
subject = self.get_email_subject(msg[3])
assert expected == subject
assert "**test**" in subject
def test_reset_password_custom_body(self, mail_server):
user = factories.User()
user_obj = model.User.by_name(user["name"])
mailer.send_reset_link(user_obj)
msgs = mail_server.get_smtp_messages()
assert len(msgs) == 1
msg = msgs[0]
extra_vars = {"reset_link": mailer.get_reset_link(user_obj)}
expected = render("emails/reset_password.txt", extra_vars)
body = self.get_email_body(msg[3]).decode()
assert expected == body.strip()
assert "**test**" in body
def test_invite_user_custom_subject(self, mail_server):
user = factories.User()
user_obj = model.User.by_name(user["name"])
mailer.send_invite(user_obj)
msgs = mail_server.get_smtp_messages()
assert len(msgs) == 1
msg = msgs[0]
extra_vars = {
"site_title": config.get_value("ckan.site_title"),
}
expected = render("emails/invite_user_subject.txt", extra_vars)
expected = expected.split("\n")[0]
subject = self.get_email_subject(msg[3])
assert expected == subject
assert "**test**" in subject
def test_invite_user_custom_body(self, mail_server):
user = factories.User()
user_obj = model.User.by_name(user["name"])
mailer.send_invite(user_obj)
msgs = mail_server.get_smtp_messages()
assert len(msgs) == 1
msg = msgs[0]
extra_vars = {
"reset_link": mailer.get_reset_link(user_obj),
"user_name": user["name"],
"site_title": config.get_value("ckan.site_title"),
}
expected = render("emails/invite_user.txt", extra_vars)
body = self.get_email_body(msg[3]).decode()
assert expected == body.strip()
assert "**test**" in body
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
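Each test above renders the same template the mailer uses and compares it with the captured message. A minimal sketch of that render-and-compare pattern in isolation, assuming a configured CKAN test app with the plugin's templates active (the template name and the `**test**` marker are taken from the tests):

```python
# Sketch of the assertion pattern used above (requires a CKAN request context).
from ckan.lib.base import render
from ckan.common import config

extra_vars = {"site_title": config.get_value("ckan.site_title")}
# Subjects are single-line, so keep only the first rendered line.
expected_subject = render("emails/reset_password_subject.txt", extra_vars).split("\n")[0]
assert "**test**" in expected_subject  # the custom template marks itself with **test**
```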
---
hexsha: f709850ca1f40d6a987f9f5e257bf3085fc0b583 | size: 2,646 | ext: py | lang: Python
max_stars: .history/mercari/mercari_search_20201124185000.py | KustomApe/nerdape @ aef6fb2d1f8c364b26d91bf8570b4487a24de69a | ["MIT"] | count: null | events: null
max_issues: .history/mercari/mercari_search_20201124185000.py | KustomApe/nerdape @ aef6fb2d1f8c364b26d91bf8570b4487a24de69a | ["MIT"] | count: null | events: null
max_forks: .history/mercari/mercari_search_20201124185000.py | KustomApe/nerdape @ aef6fb2d1f8c364b26d91bf8570b4487a24de69a | ["MIT"] | count: null | events: null
content:
```python
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import pandas as pd
import re
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import PyQt5
import time
"""[Initial Settings]
初期設定
"""
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.add_argument('--lang-ja')
browser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')
"""[CSS Selector Settings]
CSSセレクターの設定
"""
PAGER = "li.pager-next a"
word = input("検索したいキーワードを入力してください:")
df_main = pd.DataFrame(columns=['在庫有無','タイトル','値段','URL'])
df_graf = pd.DataFrame(columns=['SOLD','PRICE'])
n = 1
browser.get("https://www.mercari.com/jp/search/?page=" + str(n) + "&keyword=" + word)
while True:
    item_boxlist = browser.find_elements_by_css_selector(".items-box")
    for item_box in item_boxlist:
        try:
            if len(item_box.find_elements_by_css_selector(".item-sold-out-badge")) > 0:
                sold = "SOLD"
            else:
                sold = "NOT SOLD"
            sub_title = item_box.find_element_by_class_name("items-box-body")
            title = sub_title.find_element_by_tag_name("h3").text
            item_price = item_box.find_element_by_css_selector(".items-box-price")
            price_text = item_price.text
            price_text = re.sub(r",", "", price_text).lstrip("¥ ")
            price_text_int = int(price_text)
            print(price_text_int)
            url = item_box.find_element_by_tag_name("a").get_attribute("href")
            data = pd.Series([sold, title, price_text_int, url], index=df_main.columns)
            grdata = pd.Series([sold, price_text_int], index=df_graf.columns)
            df_main = df_main.append(data, ignore_index=True)
            df_graf = df_graf.append(grdata, ignore_index=True)
        except Exception as e:
            print(e)
    # Stop when there is no next-page link left to follow.
    pager_links = browser.find_elements_by_css_selector(PAGER)
    if not pager_links:
        print('No items anymore...')
        break
    btn = pager_links[0].get_attribute('href')
    n += 1
    print('next url:{}'.format(btn))
    time.sleep(3)
    browser.get(btn)
    print('Moving to next page...')
print(df_main)
sns.stripplot(x='SOLD', y='PRICE', data=df_graf)
plt.show()
sns.pairplot(df_graf,hue="SOLD")
plt.show()
print('Writing out to CSV file...')
df_main.to_csv("pricedata.csv", encoding="utf_8_sig")
print("Done")
```
avg_line_length: 37.267606 | max_line_length: 93 | alphanum_fraction: 0.637188
content_no_comment:
```python
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import pandas as pd
import re
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import PyQt5
import time
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.add_argument('--lang-ja')
browser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')
PAGER = "li.pager-next a"
word = input("検索したいキーワードを入力してください:")
df_main = pd.DataFrame(columns=['在庫有無','タイトル','値段','URL'])
df_graf = pd.DataFrame(columns=['SOLD','PRICE'])
n = 1
browser.get("https://www.mercari.com/jp/search/?page=" + str(n) + "&keyword=" + word)
while True:
    item_boxlist = browser.find_elements_by_css_selector(".items-box")
    for item_box in item_boxlist:
        try:
            if len(item_box.find_elements_by_css_selector(".item-sold-out-badge")) > 0:
                sold = "SOLD"
            else:
                sold = "NOT SOLD"
            sub_title = item_box.find_element_by_class_name("items-box-body")
            title = sub_title.find_element_by_tag_name("h3").text
            item_price = item_box.find_element_by_css_selector(".items-box-price")
            price_text = item_price.text
            price_text = re.sub(r",", "", price_text).lstrip("¥ ")
            price_text_int = int(price_text)
            print(price_text_int)
            url = item_box.find_element_by_tag_name("a").get_attribute("href")
            data = pd.Series([sold, title, price_text_int, url], index=df_main.columns)
            grdata = pd.Series([sold, price_text_int], index=df_graf.columns)
            df_main = df_main.append(data, ignore_index=True)
            df_graf = df_graf.append(grdata, ignore_index=True)
        except Exception as e:
            print(e)
    pager_links = browser.find_elements_by_css_selector(PAGER)
    if not pager_links:
        print('No items anymore...')
        break
    btn = pager_links[0].get_attribute('href')
    n += 1
    print('next url:{}'.format(btn))
    time.sleep(3)
    browser.get(btn)
    print('Moving to next page...')
print(df_main)
sns.stripplot(x='SOLD', y='PRICE', data=df_graf)
plt.show()
sns.pairplot(df_graf,hue="SOLD")
plt.show()
print('Writing out to CSV file...')
df_main.to_csv("pricedata.csv", encoding="utf_8_sig")
print("Done")
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
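The scraper's price handling above strips the thousands separator and the yen prefix before casting to int. The same step isolated, with a made-up sample string:

```python
# Sketch of the price-cleaning step; the sample value is invented.
import re

price_text = "¥ 12,300"
price_int = int(re.sub(r",", "", price_text).lstrip("¥ "))
print(price_int)  # 12300
```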
---
hexsha: f70985406baacff2698cda8db75a4fcb1039b24d | size: 14,992 | ext: py | lang: Python
max_stars: Feature_Extraction.py | peiyan1234/NTU_UpperTUC_biomed @ f96766aa3f4de8cd9f37d252bd0f063d8f841069 | ["MIT"] | count: null | events: null
max_issues: Feature_Extraction.py | peiyan1234/NTU_UpperTUC_biomed @ f96766aa3f4de8cd9f37d252bd0f063d8f841069 | ["MIT"] | count: null | events: null
max_forks: Feature_Extraction.py | peiyan1234/NTU_UpperTUC_biomed @ f96766aa3f4de8cd9f37d252bd0f063d8f841069 | ["MIT"] | count: null | events: null
content:
```python
import argparse
import os
import glob
import copy
import csv
import json
import numpy as np
from PIL import Image
import nrrd
import radiomics
from radiomics import featureextractor
import SimpleITK as sitk
_pwd_ = os.getcwd()
data_Table = {}
Feature_Table = {}
hyperparameters = {}
hyperparameters['setting'] = {}
hyperparameters['force2D'] = True
hyperparameters['force2Ddimension'] = 0
def assert_paser_valid(args):
assert (os.path.exists(args.input_root)), "The image root folder cannot be found"
if args.Table != None:
assert (os.path.exists(args.Table)), "The data table cannot be found"
assert (len(args.Volume) != 0), "Input volume cannot be found"
assert (len(args.Mask) != 0), "Input Mask cannot be found"
assert (len(args.Mask) == len(args.Volume)), "The number of Masks is not consistent with the number of Volumes."
if os.path.exists(args.output_folder) == False:
os.mkdir(args.output_folder)
if args.Volume[0] == 'all':
assert (args.Mask[0]) == 'all', "-Mask: should be \'all\'"
assert (isinstance(eval(args.width), float) or
isinstance(eval(args.width), int)), "-width: should be a float/int number"
assert (isinstance(eval(args.level), float) or
isinstance(eval(args.level), int)), "-level: should be a float/int number"
def read_data_Table(Table_path):
global data_Table
data_csv = open(Table_path, 'r')
csv_reader = csv.reader(data_csv, delimiter = ',')
for row in csv_reader:
ID = row[0]
data_Table[ID] = row
data_csv.close()
def read_data(args):
global Feature_Table
Vols = []
Segs = []
Folder_Vol = os.path.join(args.input_root,
'crop_vol')
Folder_Seg = os.path.join(args.input_root,
'crop_msk')
if args.Volume[0] == 'all':
Vols = sorted( glob.glob( os.path.join(Folder_Vol,
'UC*')))
Segs = sorted( glob.glob( os.path.join(Folder_Seg,
'UC*')))
for _index_ in range(len(Vols)):
ID = os.path.basename(Vols[_index_]).split('_')[0]
Feature_Table[ID] = {}
Feature_Table[ID]['Type'] = 'UTUC'
Feature_Table[ID]['Sex'] = data_Table[ID][2]
Grade_info = data_Table[ID][4]
if ('High' in Grade_info or
'high' in Grade_info):
Feature_Table[ID]['Histological grade'] = 'HG'
elif ('Low' in Grade_info or
'low' in Grade_info):
Feature_Table[ID]['Histological grade'] = 'LG'
else:
Feature_Table[ID]['Histological grade'] = 'None'
if (data_Table[ID][6] == '' or
data_Table[ID][6] == None):
Feature_Table[ID]['T stage'] = 'None'
elif data_Table[ID][6] == 'A':
Feature_Table[ID]['T stage'] = 'a'
else:
Feature_Table[ID]['T stage'] = data_Table[ID][6]
Feature_Table[ID]['Lymph-Invasion'] = data_Table[ID][9]
Feature_Table[ID]['tumor'] = glob.glob( os.path.join(Vols[_index_],
'*.tif'))[0]
Feature_Table[ID]['mask'] = glob.glob( os.path.join(Segs[_index_],
'*.png'))[0]
else:
N = len(args.Volume)
for _index_ in range(N):
Vol = glob.glob( os.path.join(Folder_Vol,
f'{args.Volume[_index_]}*'))[0]
Seg = glob.glob( os.path.join(Folder_Seg,
f'{args.Mask[_index_]}*'))[0]
ID = os.path.basename(Vol).split('_')[0]
Feature_Table[ID] = {}
Feature_Table[ID]['Type'] = 'UTUC'
Feature_Table[ID]['Sex'] = data_Table[ID][2]
Grade_info = data_Table[ID][4]
if ('High' in Grade_info or
'high' in Grade_info):
Feature_Table[ID]['Histological grade'] = 'HG'
elif ('Low' in Grade_info or
'low' in Grade_info):
Feature_Table[ID]['Histological grade'] = 'LG'
else:
Feature_Table[ID]['Histological grade'] = 'None'
if (data_Table[ID][6] == '' or
data_Table[ID][6] == None):
Feature_Table[ID]['T stage'] = 'None'
else:
Feature_Table[ID]['T stage'] = data_Table[ID][6]
Feature_Table[ID]['Lymph-Invasion'] = data_Table[ID][9]
Feature_Table[ID]['tumor'] = glob.glob( os.path.join(Vol,
'*.tif'))[0]
Feature_Table[ID]['mask'] = glob.glob( os.path.join(Seg,
'*.png'))[0]
def Extract_features(args):
import matplotlib.pyplot as plt
global Feature_Table
global hyperparameters
args.width = eval(args.width)
args.level = eval(args.level)
Lower_bound = (args.level - (args.width/2))
hyperparameters['setting']['voxelArrayShift'] = Lower_bound
extractor = featureextractor.RadiomicsFeatureExtractor(**hyperparameters)
extractor.enableImageTypeByName('Wavelet',
customArgs={'level':1})
extractor.enableImageTypeByName('Square')
extractor.enableImageTypeByName('SquareRoot')
extractor.enableImageTypeByName('Logarithm')
extractor.enableImageTypeByName('Exponential')
extractor.enableImageTypeByName('Gradient',
customArgs={'gradientUseSpacing':False})
extractor.enableImageTypeByName('LBP2D',
customArgs={'lbp2Dmethod':'default',
'lbp2DRadius':3,
'lbp2DSamples':36})
extractor.enableAllFeatures()
for ID in Feature_Table.keys():
imageFilepath = Feature_Table[ID]['tumor']
maskFilepath = Feature_Table[ID]['mask']
img = sitk.ReadImage(imageFilepath)
np_img = sitk.GetArrayFromImage(img)
np_img = np_img * (args.width/65535) + Lower_bound
        np_img = np_img.astype(int)
#plt.imshow(np_img, cmap='gray')
#plt.show()
IMG = sitk.GetImageFromArray(np_img)
features = extractor.execute(IMG,
maskFilepath,
255)
F = {}
print(f'analyzing {ID}')
F['Original'] = {}
F['Wavelet'] = {}
F['Square'] = {}
F['SquareRoot'] = {}
F['Logarithm'] = {}
F['Exponential'] = {}
F['Gradient'] = {}
F['LBP2D'] = {}
for key in features.keys():
#print(f"Compute {key} : {features[key]}")
if 'diagnostics' in key:
continue
if 'original' in key:
F['Original'][key.split('original_')[1]] = float(features[key])
continue
if 'wavelet' in key:
F['Wavelet'][key.split('wavelet-')[1]] = float(features[key])
continue
if 'square_' in key:
F['Square'][key.split('square_')[1]] = float(features[key])
continue
if 'squareroot_' in key:
F['SquareRoot'][key.split('squareroot_')[1]] = float(features[key])
continue
            if 'logarithm_' in key:
                F['Logarithm'][key.split('logarithm_')[1]] = float(features[key])
                continue
if 'exponential' in key:
F['Exponential'][key.split('exponential_')[1]] = float(features[key])
continue
if 'gradient' in key:
F['Gradient'][key.split('gradient_')[1]] = float(features[key])
continue
if 'lbp-2D_' in key:
F['LBP2D'][key.split('lbp-2D_')[1]] = float(features[key])
continue
Feature_Table[ID]['Features'] = F
def normalization():
NumberOfpatients = len(list(Feature_Table.keys()))
base_ID = list(Feature_Table.keys())[0]
F = Feature_Table[base_ID]['Features']
buffer_list = [0.0] * NumberOfpatients
for _filter_ in list(F.keys()):
feature_types = list(F[_filter_].keys())
for _feature_ in feature_types:
_index_ = 0
_Max_ = Feature_Table[base_ID]['Features'][_filter_][_feature_]
_Min_ = Feature_Table[base_ID]['Features'][_filter_][_feature_]
for ID in list(Feature_Table.keys()):
feature_value = Feature_Table[ID]['Features'][_filter_][_feature_]
buffer_list[_index_] = feature_value
print(_filter_,
_feature_,
feature_value,
_Max_,
_Min_)
if feature_value > _Max_:
_Max_ = feature_value
if feature_value < _Min_:
_Min_ = feature_value
_index_ += 1
#Normalize to the range of [0, 1]
offset = 0.0
if (_Max_ - _Min_) == 0:
continue
scale_factor = (1.0 - 0.0)/(_Max_ - _Min_)
_index_ = 0
for ID in list(Feature_Table.keys()):
Feature_Table[ID]['Features'][_filter_][_feature_] = (offset +
scale_factor*(buffer_list[_index_] -
_Min_))
_index_ += 1
def save_results(args):
json_path = os.path.join(args.output_folder,
'Features.txt')
json_file = open(json_path, 'w')
json_content = json.dumps(Feature_Table,
indent = 4)
json_file.writelines(json_content)
json_file.close()
csv_path = os.path.join(args.output_folder,
'Features.csv')
csv_file = open(csv_path, 'w')
writer = csv.writer(csv_file, dialect='excel')
headers = []
headers.append('Subject')
first_key = list(Feature_Table.keys())[0]
inner_keys = list(Feature_Table[first_key].keys())
for inner_key in inner_keys:
if inner_key == 'Features':
Feature_keys = list(Feature_Table[first_key][inner_key].keys())
for Feature_key in Feature_keys:
_features_ = list(Feature_Table[first_key][inner_key][Feature_key].keys())
for _feature_ in _features_:
headers.append(f'{Feature_key}: ' + _feature_)
else:
headers.append(inner_key)
writer.writerow(headers)
_line_ = []
print(f"We totally analyze {len(list(Feature_Table.keys()))} participants")
for key in sorted(list(Feature_Table.keys())):
_line_ = []
_line_.append(key)
inner_keys = list(Feature_Table[key].keys())
for inner_key in inner_keys:
if inner_key == 'Features':
Feature_keys = list(Feature_Table[key][inner_key].keys())
for Feature_key in Feature_keys:
_features_ = list(Feature_Table[first_key][inner_key][Feature_key].keys())
for _feature_ in _features_:
_line_.append(Feature_Table[key][inner_key][Feature_key][_feature_])
else:
_line_.append(Feature_Table[key][inner_key])
writer.writerow(_line_)
csv_file.close()
a = zip(*csv.reader(open(csv_path, "r")))
csv.writer(open(csv_path, "w")).writerows(a)
def main():
API_description = """
***** Radiomics Analysis Platform *****
API Name: Radiomics Feature Analysis
Version: 1.0
Developer: Alvin Li
Email: d05548014@ntu.edu.tw
****************************************
"""
parser = argparse.ArgumentParser(prog='Feature_Extraction.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=API_description)
parser.add_argument('-input_root',
action = 'store',
type = str,
help = 'The absolute path to input root.')
parser.add_argument('-Table',
action = 'store',
type = str,
help = 'The absolute path to the DATA TABLE (*.csv).')
parser.add_argument('-Volume',
nargs = '+',
help = 'ex: -Volume Vol1.tif Vol2.tif ...')
parser.add_argument('-Mask',
nargs = '+',
help = 'ex: -Mask Msk1.png Msk2.png ...')
parser.add_argument('-output_folder',
action = 'store',
help = 'The absolute path to the output folder used to store extracted Feature Table')
parser.add_argument('-width',
action = 'store',
type = str,
help = 'window width')
parser.add_argument('-level',
action = 'store',
type = str,
help = 'window level')
parser.add_argument('-normalize',
action = 'store',
type = str,
help = 'True/False')
args = parser.parse_args()
assert_paser_valid(args)
read_data_Table(args.Table)
read_data(args)
Extract_features(args)
if args.normalize == 'True':
normalization()
save_results(args)
if __name__ == '__main__':
main()
```
avg_line_length: 28.02243 | max_line_length: 116 | alphanum_fraction: 0.480123
content_no_comment:
```python
import argparse
import os
import glob
import copy
import csv
import json
import numpy as np
from PIL import Image
import nrrd
import radiomics
from radiomics import featureextractor
import SimpleITK as sitk
_pwd_ = os.getcwd()
data_Table = {}
Feature_Table = {}
hyperparameters = {}
hyperparameters['setting'] = {}
hyperparameters['force2D'] = True
hyperparameters['force2Ddimension'] = 0
def assert_paser_valid(args):
assert (os.path.exists(args.input_root)), "The image root folder cannot be found"
if args.Table != None:
assert (os.path.exists(args.Table)), "The data table cannot be found"
assert (len(args.Volume) != 0), "Input volume cannot be found"
assert (len(args.Mask) != 0), "Input Mask cannot be found"
assert (len(args.Mask) == len(args.Volume)), "The number of Masks is not consistent with the number of Volumes."
if os.path.exists(args.output_folder) == False:
os.mkdir(args.output_folder)
if args.Volume[0] == 'all':
assert (args.Mask[0]) == 'all', "-Mask: should be \'all\'"
assert (isinstance(eval(args.width), float) or
isinstance(eval(args.width), int)), "-width: should be a float/int number"
assert (isinstance(eval(args.level), float) or
isinstance(eval(args.level), int)), "-level: should be a float/int number"
def read_data_Table(Table_path):
global data_Table
data_csv = open(Table_path, 'r')
csv_reader = csv.reader(data_csv, delimiter = ',')
for row in csv_reader:
ID = row[0]
data_Table[ID] = row
data_csv.close()
def read_data(args):
global Feature_Table
Vols = []
Segs = []
Folder_Vol = os.path.join(args.input_root,
'crop_vol')
Folder_Seg = os.path.join(args.input_root,
'crop_msk')
if args.Volume[0] == 'all':
Vols = sorted( glob.glob( os.path.join(Folder_Vol,
'UC*')))
Segs = sorted( glob.glob( os.path.join(Folder_Seg,
'UC*')))
for _index_ in range(len(Vols)):
ID = os.path.basename(Vols[_index_]).split('_')[0]
Feature_Table[ID] = {}
Feature_Table[ID]['Type'] = 'UTUC'
Feature_Table[ID]['Sex'] = data_Table[ID][2]
Grade_info = data_Table[ID][4]
if ('High' in Grade_info or
'high' in Grade_info):
Feature_Table[ID]['Histological grade'] = 'HG'
elif ('Low' in Grade_info or
'low' in Grade_info):
Feature_Table[ID]['Histological grade'] = 'LG'
else:
Feature_Table[ID]['Histological grade'] = 'None'
if (data_Table[ID][6] == '' or
data_Table[ID][6] == None):
Feature_Table[ID]['T stage'] = 'None'
elif data_Table[ID][6] == 'A':
Feature_Table[ID]['T stage'] = 'a'
else:
Feature_Table[ID]['T stage'] = data_Table[ID][6]
Feature_Table[ID]['Lymph-Invasion'] = data_Table[ID][9]
Feature_Table[ID]['tumor'] = glob.glob( os.path.join(Vols[_index_],
'*.tif'))[0]
Feature_Table[ID]['mask'] = glob.glob( os.path.join(Segs[_index_],
'*.png'))[0]
else:
N = len(args.Volume)
for _index_ in range(N):
Vol = glob.glob( os.path.join(Folder_Vol,
f'{args.Volume[_index_]}*'))[0]
Seg = glob.glob( os.path.join(Folder_Seg,
f'{args.Mask[_index_]}*'))[0]
ID = os.path.basename(Vol).split('_')[0]
Feature_Table[ID] = {}
Feature_Table[ID]['Type'] = 'UTUC'
Feature_Table[ID]['Sex'] = data_Table[ID][2]
Grade_info = data_Table[ID][4]
if ('High' in Grade_info or
'high' in Grade_info):
Feature_Table[ID]['Histological grade'] = 'HG'
elif ('Low' in Grade_info or
'low' in Grade_info):
Feature_Table[ID]['Histological grade'] = 'LG'
else:
Feature_Table[ID]['Histological grade'] = 'None'
if (data_Table[ID][6] == '' or
data_Table[ID][6] == None):
Feature_Table[ID]['T stage'] = 'None'
else:
Feature_Table[ID]['T stage'] = data_Table[ID][6]
Feature_Table[ID]['Lymph-Invasion'] = data_Table[ID][9]
Feature_Table[ID]['tumor'] = glob.glob( os.path.join(Vol,
'*.tif'))[0]
Feature_Table[ID]['mask'] = glob.glob( os.path.join(Seg,
'*.png'))[0]
def Extract_features(args):
import matplotlib.pyplot as plt
global Feature_Table
global hyperparameters
args.width = eval(args.width)
args.level = eval(args.level)
Lower_bound = (args.level - (args.width/2))
hyperparameters['setting']['voxelArrayShift'] = Lower_bound
extractor = featureextractor.RadiomicsFeatureExtractor(**hyperparameters)
extractor.enableImageTypeByName('Wavelet',
customArgs={'level':1})
extractor.enableImageTypeByName('Square')
extractor.enableImageTypeByName('SquareRoot')
extractor.enableImageTypeByName('Logarithm')
extractor.enableImageTypeByName('Exponential')
extractor.enableImageTypeByName('Gradient',
customArgs={'gradientUseSpacing':False})
extractor.enableImageTypeByName('LBP2D',
customArgs={'lbp2Dmethod':'default',
'lbp2DRadius':3,
'lbp2DSamples':36})
extractor.enableAllFeatures()
for ID in Feature_Table.keys():
imageFilepath = Feature_Table[ID]['tumor']
maskFilepath = Feature_Table[ID]['mask']
img = sitk.ReadImage(imageFilepath)
np_img = sitk.GetArrayFromImage(img)
np_img = np_img * (args.width/65535) + Lower_bound
        np_img = np_img.astype(int)
IMG = sitk.GetImageFromArray(np_img)
features = extractor.execute(IMG,
maskFilepath,
255)
F = {}
print(f'analyzing {ID}')
F['Original'] = {}
F['Wavelet'] = {}
F['Square'] = {}
F['SquareRoot'] = {}
F['Logarithm'] = {}
F['Exponential'] = {}
F['Gradient'] = {}
F['LBP2D'] = {}
for key in features.keys():
if 'diagnostics' in key:
continue
if 'original' in key:
F['Original'][key.split('original_')[1]] = float(features[key])
continue
if 'wavelet' in key:
F['Wavelet'][key.split('wavelet-')[1]] = float(features[key])
continue
if 'square_' in key:
F['Square'][key.split('square_')[1]] = float(features[key])
continue
if 'squareroot_' in key:
F['SquareRoot'][key.split('squareroot_')[1]] = float(features[key])
continue
            if 'logarithm_' in key:
                F['Logarithm'][key.split('logarithm_')[1]] = float(features[key])
                continue
if 'exponential' in key:
F['Exponential'][key.split('exponential_')[1]] = float(features[key])
continue
if 'gradient' in key:
F['Gradient'][key.split('gradient_')[1]] = float(features[key])
continue
if 'lbp-2D_' in key:
F['LBP2D'][key.split('lbp-2D_')[1]] = float(features[key])
continue
Feature_Table[ID]['Features'] = F
def normalization():
NumberOfpatients = len(list(Feature_Table.keys()))
base_ID = list(Feature_Table.keys())[0]
F = Feature_Table[base_ID]['Features']
buffer_list = [0.0] * NumberOfpatients
for _filter_ in list(F.keys()):
feature_types = list(F[_filter_].keys())
for _feature_ in feature_types:
_index_ = 0
_Max_ = Feature_Table[base_ID]['Features'][_filter_][_feature_]
_Min_ = Feature_Table[base_ID]['Features'][_filter_][_feature_]
for ID in list(Feature_Table.keys()):
feature_value = Feature_Table[ID]['Features'][_filter_][_feature_]
buffer_list[_index_] = feature_value
print(_filter_,
_feature_,
feature_value,
_Max_,
_Min_)
if feature_value > _Max_:
_Max_ = feature_value
if feature_value < _Min_:
_Min_ = feature_value
_index_ += 1
offset = 0.0
if (_Max_ - _Min_) == 0:
continue
scale_factor = (1.0 - 0.0)/(_Max_ - _Min_)
_index_ = 0
for ID in list(Feature_Table.keys()):
Feature_Table[ID]['Features'][_filter_][_feature_] = (offset +
scale_factor*(buffer_list[_index_] -
_Min_))
_index_ += 1
def save_results(args):
json_path = os.path.join(args.output_folder,
'Features.txt')
json_file = open(json_path, 'w')
json_content = json.dumps(Feature_Table,
indent = 4)
json_file.writelines(json_content)
json_file.close()
csv_path = os.path.join(args.output_folder,
'Features.csv')
csv_file = open(csv_path, 'w')
writer = csv.writer(csv_file, dialect='excel')
headers = []
headers.append('Subject')
first_key = list(Feature_Table.keys())[0]
inner_keys = list(Feature_Table[first_key].keys())
for inner_key in inner_keys:
if inner_key == 'Features':
Feature_keys = list(Feature_Table[first_key][inner_key].keys())
for Feature_key in Feature_keys:
_features_ = list(Feature_Table[first_key][inner_key][Feature_key].keys())
for _feature_ in _features_:
headers.append(f'{Feature_key}: ' + _feature_)
else:
headers.append(inner_key)
writer.writerow(headers)
_line_ = []
print(f"We totally analyze {len(list(Feature_Table.keys()))} participants")
for key in sorted(list(Feature_Table.keys())):
_line_ = []
_line_.append(key)
inner_keys = list(Feature_Table[key].keys())
for inner_key in inner_keys:
if inner_key == 'Features':
Feature_keys = list(Feature_Table[key][inner_key].keys())
for Feature_key in Feature_keys:
_features_ = list(Feature_Table[first_key][inner_key][Feature_key].keys())
for _feature_ in _features_:
_line_.append(Feature_Table[key][inner_key][Feature_key][_feature_])
else:
_line_.append(Feature_Table[key][inner_key])
writer.writerow(_line_)
csv_file.close()
a = zip(*csv.reader(open(csv_path, "r")))
csv.writer(open(csv_path, "w")).writerows(a)
def main():
API_description = """
***** Radiomics Analysis Platform *****
API Name: Radiomics Feature Analysis
Version: 1.0
Developer: Alvin Li
Email: d05548014@ntu.edu.tw
****************************************
"""
parser = argparse.ArgumentParser(prog='Feature_Extraction.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=API_description)
parser.add_argument('-input_root',
action = 'store',
type = str,
help = 'The absolute path to input root.')
parser.add_argument('-Table',
action = 'store',
type = str,
help = 'The absolute path to the DATA TABLE (*.csv).')
parser.add_argument('-Volume',
nargs = '+',
help = 'ex: -Volume Vol1.tif Vol2.tif ...')
parser.add_argument('-Mask',
nargs = '+',
help = 'ex: -Mask Msk1.png Msk2.png ...')
parser.add_argument('-output_folder',
action = 'store',
help = 'The absolute path to the output folder used to store extracted Feature Table')
parser.add_argument('-width',
action = 'store',
type = str,
help = 'window width')
parser.add_argument('-level',
action = 'store',
type = str,
help = 'window level')
parser.add_argument('-normalize',
action = 'store',
type = str,
help = 'True/False')
args = parser.parse_args()
assert_paser_valid(args)
read_data_Table(args.Table)
read_data(args)
Extract_features(args)
if args.normalize == 'True':
normalization()
save_results(args)
if __name__ == '__main__':
main()
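# A minimal invocation sketch for the CLI defined above (paths and window
# values are illustrative, not from the source):
#   python Feature_Extraction.py \
#       -input_root /data/study -Table /data/study/Table.csv \
#       -Volume Vol1.tif Vol2.tif -Mask Msk1.png Msk2.png \
#       -output_folder /data/output -width 400 -level 40 -normalize True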
| true
| true
|
f70985996cf54c23bb1a95550c2daac3207fa3fb
| 209
|
py
|
Python
|
backend/common/context_processors.py
|
olegpobedynskyi/Boilerplate-React-Django
|
79281a6254be3402bbe1c8216c98b84750f54646
|
[
"MIT"
] | 3
|
2020-02-06T01:06:29.000Z
|
2020-05-20T14:25:22.000Z
|
backend/common/context_processors.py
|
olegpobedynskyi/Boilerplate-React-Django
|
79281a6254be3402bbe1c8216c98b84750f54646
|
[
"MIT"
] | 19
|
2020-02-11T04:54:40.000Z
|
2022-02-26T23:03:01.000Z
|
backend/common/context_processors.py
|
davidpierre21/repository-monitor
|
0be5fbf1d5d404aa9e4952a0f02a44f1662efa91
|
[
"MIT"
] | 2
|
2021-01-28T16:00:01.000Z
|
2021-06-15T03:49:20.000Z
|
from django.conf import settings
def sentry_dsn(request):
return {
'SENTRY_DSN': settings.SENTRY_DSN
}
def commit_sha(request):
return {
'COMMIT_SHA': settings.COMMIT_SHA
}
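These processors only take effect once registered in the Django settings. A minimal sketch, assuming the module lives at common/context_processors.py as the file path above suggests:

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                'common.context_processors.sentry_dsn',
                'common.context_processors.commit_sha',
            ],
        },
    },
]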
| 14.928571
| 41
| 0.650718
|
from django.conf import settings
def sentry_dsn(request):
return {
'SENTRY_DSN': settings.SENTRY_DSN
}
def commit_sha(request):
return {
'COMMIT_SHA': settings.COMMIT_SHA
}
| true
| true
|
f70985e843e47bc768ffb3a11799ccf0e11fff29
| 1,454
|
py
|
Python
|
mlcollect/cnn/lenet.py
|
sanghuynh1501/mlcollect
|
e85fe6a08e14fa6502166c1a7bfffdcd8c3a25b2
|
[
"MIT"
] | null | null | null |
mlcollect/cnn/lenet.py
|
sanghuynh1501/mlcollect
|
e85fe6a08e14fa6502166c1a7bfffdcd8c3a25b2
|
[
"MIT"
] | null | null | null |
mlcollect/cnn/lenet.py
|
sanghuynh1501/mlcollect
|
e85fe6a08e14fa6502166c1a7bfffdcd8c3a25b2
|
[
"MIT"
] | null | null | null |
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as K
class LeNet:
@staticmethod
def build(width, height, depth, classes, last_active="softmax"):
# Initialize the model
model = Sequential()
input_shape = (height, width, depth)
# If we are using 'channels-first', update the input shape
if K.image_data_format() == 'channels_first':
input_shape = (depth, height, width)
# First set of CONV => RELU => POOL layers
model.add(Conv2D(20, (5, 5), padding='same', input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# Second set of CONV => RELU => POOL layers
model.add(Conv2D(50, (5, 5), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# First (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dense(classes))
model.add(Activation(last_active))
# return the constructed network architecture
return model
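A minimal usage sketch for the builder above (the input shape and training settings are illustrative assumptions, e.g. MNIST-sized grayscale images):

model = LeNet.build(width=28, height=28, depth=1, classes=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()  # shows the two CONV => RELU => POOL blocks and the FC head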
| 35.463415
| 78
| 0.652682
|
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as K
class LeNet:
@staticmethod
def build(width, height, depth, classes, last_active="softmax"):
model = Sequential()
input_shape = (height, width, depth)
if K.image_data_format() == 'channels_first':
input_shape = (depth, height, width)
model.add(Conv2D(20, (5, 5), padding='same', input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(50, (5, 5), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dense(classes))
model.add(Activation(last_active))
return model
| true
| true
|
f709888e7bb1e2b7e0336cde6b0426fffa9cbec5
| 896
|
py
|
Python
|
lightutils/sys/path.py
|
smilelight/lightUtils
|
e9b7ed35ed50cf6b7c6284fe60918ce4dc71beac
|
[
"MIT"
] | 2
|
2020-01-23T02:03:19.000Z
|
2020-12-13T09:05:45.000Z
|
lightutils/sys/path.py
|
smilelight/lightUtils
|
e9b7ed35ed50cf6b7c6284fe60918ce4dc71beac
|
[
"MIT"
] | null | null | null |
lightutils/sys/path.py
|
smilelight/lightUtils
|
e9b7ed35ed50cf6b7c6284fe60918ce4dc71beac
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
from ..common.file import get_file_name
from ..common.log import logger
def add_sys_path(file_path: str, project_name: str):
if not os.path.exists(file_path):
raise FileNotFoundError("{} not found".format(file_path))
flag = False
parent_path = os.path.abspath(file_path)
parent_name = get_file_name(parent_path)
project_path = None
    while parent_name:
        parent_path = os.path.dirname(parent_path)
        parent_name = get_file_name(parent_path)  # terminates once the filesystem root is reached
for child_name in os.listdir(parent_path):
if child_name == project_name:
flag = True
project_path = parent_path
break
if flag:
break
if flag:
sys.path.insert(0, project_path)
logger.info("已成功将{}添加至系统路径".format(project_path))
else:
raise FileNotFoundError("{} not found".format(project_name))
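# A minimal usage sketch (the project name 'my_project' is illustrative): from
# any script inside the tree, this puts the directory containing 'my_project'
# at the front of sys.path so absolute imports of the package resolve:
#   add_sys_path(__file__, 'my_project')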
| 28.903226
| 68
| 0.637277
|
import os
import sys
from ..common.file import get_file_name
from ..common.log import logger
def add_sys_path(file_path: str, project_name: str):
if not os.path.exists(file_path):
raise FileNotFoundError("{} not found".format(file_path))
flag = False
parent_path = os.path.abspath(file_path)
parent_name = get_file_name(parent_path)
project_path = None
    while parent_name:
        parent_path = os.path.dirname(parent_path)
        parent_name = get_file_name(parent_path)
for child_name in os.listdir(parent_path):
if child_name == project_name:
flag = True
project_path = parent_path
break
if flag:
break
if flag:
sys.path.insert(0, project_path)
logger.info("已成功将{}添加至系统路径".format(project_path))
else:
raise FileNotFoundError("{} not found".format(project_name))
| true
| true
|
f70988d5b64503f0de01827901e8b85c32db26c7
| 2,832
|
py
|
Python
|
theano/gof/__init__.py
|
JimmyRetza/Theano
|
72d83bce0d547d54ab3513bcba35c166979f7a6f
|
[
"BSD-3-Clause"
] | 9
|
2018-10-29T20:25:25.000Z
|
2021-11-17T11:03:17.000Z
|
theano/gof/__init__.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
theano/gof/__init__.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2020-01-06T20:28:42.000Z
|
2020-01-06T20:28:42.000Z
|
"""
gof.py
gof stands for Graph Optimization Framework.
The gof submodule of theano implements a framework
for manipulating programs described as graphs. The
gof module defines basic theano graph concepts:
-Apply nodes, which represent the application
of an Op to Variables. Together these make up a
graph.
-The Type, needed for Variables to make sense.
-The FunctionGraph, which defines how a subgraph
should be interpreted to implement a function.
-The Thunk, a callable object that becomes part
of the executable emitted by theano.
-Linkers/VMs, the objects that call Thunks in
sequence in order to execute a theano program.
Conceptually, gof is intended to be sufficiently abstract
that it could be used to implement a language other than
theano; i.e., theano is a domain-specific language for
numerical computation, created by implementing
tensor Variables and Ops that perform mathematical functions.
A different kind of domain-specific language could be
made by using gof with different Variables and Ops.
In practice, gof and the rest of theano are somewhat more
tightly intertwined.
Currently, gof also contains much of the C compilation
functionality. Ideally this should be refactored into
a different submodule.
For more details and discussion, see the theano-dev
e-mail thread "What is gof?".
"""
from __future__ import absolute_import, print_function, division
from theano.gof.cc import \
CLinker, OpWiseCLinker, DualLinker, HideC
from theano.gof.fg import \
CachedConstantError, InconsistencyError, MissingInputError, FunctionGraph
from theano.gof.destroyhandler import \
DestroyHandler
from theano.gof.graph import \
Apply, Variable, Constant, view_roots
from theano.gof.link import \
Container, Linker, LocalLinker, PerformLinker, WrapLinker, WrapLinkerMany
from theano.gof.op import \
Op, OpenMPOp, PureOp, COp, ops_with_inner_function
from theano.gof.type import EnumType, EnumList, CEnumType
from theano.gof.opt import (
Optimizer,
optimizer, inplace_optimizer,
SeqOptimizer,
MergeOptimizer,
LocalOptimizer, local_optimizer, LocalOptGroup,
OpSub, OpRemove, PatternSub,
NavigatorOptimizer, TopoOptimizer, EquilibriumOptimizer,
OpKeyOptimizer, CheckStackTraceOptimization)
from theano.gof.optdb import \
DB, LocalGroupDB, Query, \
EquilibriumDB, SequenceDB, ProxyDB
from theano.gof.toolbox import \
Feature, \
Bookkeeper, History, Validator, ReplaceValidate, NodeFinder,\
PrintListener, ReplacementDidntRemovedError, NoOutputFromInplace
from theano.gof.type import \
Type, Generic, generic
from theano.gof.utils import \
hashtype, object2, MethodNotDefined
from theano.gof.params_type import ParamsType, Params
import theano
if theano.config.cmodule.preload_cache:
cc.get_module_cache()
| 31.120879
| 77
| 0.784958
|
from __future__ import absolute_import, print_function, division
from theano.gof.cc import \
CLinker, OpWiseCLinker, DualLinker, HideC
from theano.gof.fg import \
CachedConstantError, InconsistencyError, MissingInputError, FunctionGraph
from theano.gof.destroyhandler import \
DestroyHandler
from theano.gof.graph import \
Apply, Variable, Constant, view_roots
from theano.gof.link import \
Container, Linker, LocalLinker, PerformLinker, WrapLinker, WrapLinkerMany
from theano.gof.op import \
Op, OpenMPOp, PureOp, COp, ops_with_inner_function
from theano.gof.type import EnumType, EnumList, CEnumType
from theano.gof.opt import (
Optimizer,
optimizer, inplace_optimizer,
SeqOptimizer,
MergeOptimizer,
LocalOptimizer, local_optimizer, LocalOptGroup,
OpSub, OpRemove, PatternSub,
NavigatorOptimizer, TopoOptimizer, EquilibriumOptimizer,
OpKeyOptimizer, CheckStackTraceOptimization)
from theano.gof.optdb import \
DB, LocalGroupDB, Query, \
EquilibriumDB, SequenceDB, ProxyDB
from theano.gof.toolbox import \
Feature, \
Bookkeeper, History, Validator, ReplaceValidate, NodeFinder,\
PrintListener, ReplacementDidntRemovedError, NoOutputFromInplace
from theano.gof.type import \
Type, Generic, generic
from theano.gof.utils import \
hashtype, object2, MethodNotDefined
from theano.gof.params_type import ParamsType, Params
import theano
if theano.config.cmodule.preload_cache:
cc.get_module_cache()
| true
| true
|
f7098960057372652381a92072a1f17f38411d41
| 8,769
|
py
|
Python
|
models.py
|
mattj241/FSWD_Capstone
|
a677f44ec5b6fc3c360d1cb94399c8d99bb6df00
|
[
"MIT"
] | null | null | null |
models.py
|
mattj241/FSWD_Capstone
|
a677f44ec5b6fc3c360d1cb94399c8d99bb6df00
|
[
"MIT"
] | null | null | null |
models.py
|
mattj241/FSWD_Capstone
|
a677f44ec5b6fc3c360d1cb94399c8d99bb6df00
|
[
"MIT"
] | null | null | null |
import os
import enum
from typing import Counter
from sqlalchemy import Column, String, Integer, create_engine
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import backref, relationship
from sqlalchemy.sql.expression import false, null
from sqlalchemy.sql.schema import ForeignKey, PrimaryKeyConstraint, Table, MetaData
from sqlalchemy.sql.sqltypes import Boolean, Float
from config import init_env_vars
Base = declarative_base()
init_env_vars()
### UNCOMMENT the vars below to enable local development
# database_name = os.getenv('DB_NAME')
# database_username = os.getenv('DB_USER')
# database_password = os.getenv('DB_PASSWORD')
# database_path = "postgresql://{}:{}@{}/{}"\
# .format(database_username, database_password, 'localhost:5432', database_name)
### HEROKU REQUIREMENTS
database_path = os.environ.get('DATABASE_URL').replace("://", "ql://", 1)
db = SQLAlchemy()
'''
setup_db(app)
    binds a Flask application and a SQLAlchemy service
'''
def setup_db(app, database_path=database_path):
app.config["SQLALCHEMY_DATABASE_URI"] = database_path
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.app = app
db.init_app(app)
db.create_all()
Migrate(app, db)
def session_revert():
db.session.rollback()
def session_close():
db.session.close()
'''
Schema Configuration
'''
class Reservation (db.Model):
__tablename__ = 'reservation'
id = Column(Integer, primary_key=True)
vehicle_id = Column(Integer, ForeignKey('vehicle.id'), nullable=False)
customer_id = Column(Integer, ForeignKey('customer.id'), nullable=False)
employee_id = Column(Integer, ForeignKey('employee.id'), nullable=False)
    # TODO: implement the reservation time attributes if time allows
# start_time =
# end_time =
cost = Column(Float, nullable=False)
reservation_open = Column(Boolean, nullable=False)
    vehicle = relationship('Vehicle', uselist=False, foreign_keys=[vehicle_id])
    customer = relationship('Customer', uselist=False, foreign_keys=[customer_id])
    employee = relationship('Employee', uselist=False, foreign_keys=[employee_id])
def __init__(self, vehicle_id, customer_id,
employee_id, cost, reservation_open):
self.vehicle_id = vehicle_id
self.customer_id = customer_id
self.employee_id = employee_id
self.cost = cost
self.reservation_open = reservation_open
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def get_cust_info(id):
return Customer.query.filter_by(id=id).first()
def get_emp_info(id):
return Employee.query.filter_by(id=id).first()
def get_veh_info(id):
return Vehicle.query.filter_by(id=id).first()
def format(self):
customer = Reservation.get_cust_info(self.customer_id)
employee = Reservation.get_emp_info(self.employee_id)
vehicle = Reservation.get_veh_info(self.vehicle_id)
return {
'id' : self.id,
'cost': self.cost,
'customer_name': customer.first_name + ' ' + customer.last_name,
'employee_name': employee.first_name + ' ' + employee.last_name,
'vehicle_id': self.vehicle_id,
'vehicle_make_and_model': vehicle.make + ' ' + vehicle.model,
'reservation_open' : self.reservation_open
}
class Vehicle(db.Model):
__tablename__= 'vehicle'
id = Column(Integer, primary_key=True)
make = Column(String, nullable=False)
model = Column(String, nullable=False)
year = Column(Integer, nullable=False)
body_style = Column(String)
color = Column(String)
currently_rented = Column(Boolean, nullable=False)
reservations = relationship('Reservation', back_populates='vehicle')
def __init__(self, make, model, year, body_style, color,
currently_rented):
self.make = make
self.model = model
self.year = year
self.body_style = body_style
self.color = color
self.currently_rented = currently_rented
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'make': self.make,
'model': self.model,
'year': self.year,
'body_style': self.body_style,
'color': self.color,
'currently_rented': self.currently_rented,
}
class Person(db.Model):
# __tablename__= 'person'
__abstract__ = True
# id = Column(Integer, primary_key=True)
first_name = Column(String, nullable=False)
last_name = Column(String, nullable=False)
address = Column(String, nullable=False)
type = Column(String(50))
__mapper_args__ = {
'polymorphic_on':type,
'polymorphic_identity':'person',
}
class Customer(Person):
__tablename__ = 'customer'
id = Column(Integer, primary_key=True)
reservations = relationship('Reservation', back_populates='customer')
__mapper_args__ = {
'polymorphic_identity':'customer'
}
def __init__(self, first_name, last_name, address, type):
self.first_name = first_name
self.last_name = last_name
self.address = address
self.type = type
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'first_name' : self.first_name,
'last_name' : self.last_name,
'address' : self.address,
'type' : self.type,
}
class Manager(Person):
__tablename__ = 'manager'
id = Column(Integer, primary_key=True)
employees = relationship('Employee', back_populates='manager')
__mapper_args__ = {
'polymorphic_identity':'manager'
}
def __init__(self, first_name, last_name, address, type):
self.first_name = first_name
self.last_name = last_name
self.address = address
self.type = type
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'first_name' : self.first_name,
'last_name' : self.last_name,
'address' : self.address,
'type' : self.type
}
class Employee(Person, db.Model):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
manager_id = Column(Integer, ForeignKey('manager.id'))
manager = relationship('Manager', back_populates='employees')
reservations = relationship('Reservation', back_populates='employee')
__mapper_args__ = {
'polymorphic_identity':'employee'
}
def __init__(self, first_name, last_name, address, type, manager_id):
self.first_name = first_name
self.last_name = last_name
self.address = address
self.type = type
self.manager_id = manager_id
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'first_name' : self.first_name,
'last_name' : self.last_name,
'address' : self.address,
'type' : self.type,
'manager_id' : self.manager_id
}
'''
Helper functions
'''
def get_vehicle(id):
if id <= 0:
return Vehicle.query.all()
else:
return Vehicle.query.filter_by(id=id).first()
def get_customer(id):
if not id:
return Customer.query.all()
else:
return Customer.query.filter_by(id=id).first()
def get_employee(id):
if not id:
return Employee.query.all()
else:
return Employee.query.filter_by(id=id).first()
def get_manager(id):
if not id:
return Manager.query.all()
else:
return Manager.query.filter_by(id=id).first()
def get_reservation():
return Reservation.query.all()
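A minimal wiring sketch for this module (the Flask app layout is an assumption, not from the source; setup_db expects DATABASE_URL to be set):

from flask import Flask
from models import setup_db, Vehicle

app = Flask(__name__)
setup_db(app)  # binds SQLAlchemy, creates the tables and configures migrations
with app.app_context():
    Vehicle(make='Toyota', model='Corolla', year=2020, body_style='sedan',
            color='blue', currently_rented=False).insert()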
| 28.015974
| 83
| 0.638841
|
import os
import enum
from typing import Counter
from sqlalchemy import Column, String, Integer, create_engine
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import backref, relationship
from sqlalchemy.sql.expression import false, null
from sqlalchemy.sql.schema import ForeignKey, PrimaryKeyConstraint, Table, MetaData
from sqlalchemy.sql.sqltypes import Boolean, Float
from config import init_env_vars
Base = declarative_base()
init_env_vars()
database_path = os.environ.get('DATABASE_URL').replace("://", "ql://", 1)
db = SQLAlchemy()
def setup_db(app, database_path=database_path):
app.config["SQLALCHEMY_DATABASE_URI"] = database_path
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.app = app
db.init_app(app)
db.create_all()
Migrate(app, db)
def session_revert():
db.session.rollback()
def session_close():
db.session.close()
class Reservation (db.Model):
__tablename__ = 'reservation'
id = Column(Integer, primary_key=True)
vehicle_id = Column(Integer, ForeignKey('vehicle.id'), nullable=False)
customer_id = Column(Integer, ForeignKey('customer.id'), nullable=False)
employee_id = Column(Integer, ForeignKey('employee.id'), nullable=False)
cost = Column(Float, nullable=False)
reservation_open = Column(Boolean, nullable=False)
    vehicle = relationship('Vehicle', uselist=False, foreign_keys=[vehicle_id])
    customer = relationship('Customer', uselist=False, foreign_keys=[customer_id])
    employee = relationship('Employee', uselist=False, foreign_keys=[employee_id])
def __init__(self, vehicle_id, customer_id,
employee_id, cost, reservation_open):
self.vehicle_id = vehicle_id
self.customer_id = customer_id
self.employee_id = employee_id
self.cost = cost
self.reservation_open = reservation_open
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def get_cust_info(id):
return Customer.query.filter_by(id=id).first()
def get_emp_info(id):
return Employee.query.filter_by(id=id).first()
def get_veh_info(id):
return Vehicle.query.filter_by(id=id).first()
def format(self):
customer = Reservation.get_cust_info(self.customer_id)
employee = Reservation.get_emp_info(self.employee_id)
vehicle = Reservation.get_veh_info(self.vehicle_id)
return {
'id' : self.id,
'cost': self.cost,
'customer_name': customer.first_name + ' ' + customer.last_name,
'employee_name': employee.first_name + ' ' + employee.last_name,
'vehicle_id': self.vehicle_id,
'vehicle_make_and_model': vehicle.make + ' ' + vehicle.model,
'reservation_open' : self.reservation_open
}
class Vehicle(db.Model):
__tablename__= 'vehicle'
id = Column(Integer, primary_key=True)
make = Column(String, nullable=False)
model = Column(String, nullable=False)
year = Column(Integer, nullable=False)
body_style = Column(String)
color = Column(String)
currently_rented = Column(Boolean, nullable=False)
reservations = relationship('Reservation', back_populates='vehicle')
def __init__(self, make, model, year, body_style, color,
currently_rented):
self.make = make
self.model = model
self.year = year
self.body_style = body_style
self.color = color
self.currently_rented = currently_rented
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'make': self.make,
'model': self.model,
'year': self.year,
'body_style': self.body_style,
'color': self.color,
'currently_rented': self.currently_rented,
}
class Person(db.Model):
__abstract__ = True
first_name = Column(String, nullable=False)
last_name = Column(String, nullable=False)
address = Column(String, nullable=False)
type = Column(String(50))
__mapper_args__ = {
'polymorphic_on':type,
'polymorphic_identity':'person',
}
class Customer(Person):
__tablename__ = 'customer'
id = Column(Integer, primary_key=True)
reservations = relationship('Reservation', back_populates='customer')
__mapper_args__ = {
'polymorphic_identity':'customer'
}
def __init__(self, first_name, last_name, address, type):
self.first_name = first_name
self.last_name = last_name
self.address = address
self.type = type
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'first_name' : self.first_name,
'last_name' : self.last_name,
'address' : self.address,
'type' : self.type,
}
class Manager(Person):
__tablename__ = 'manager'
id = Column(Integer, primary_key=True)
employees = relationship('Employee', back_populates='manager')
__mapper_args__ = {
'polymorphic_identity':'manager'
}
def __init__(self, first_name, last_name, address, type):
self.first_name = first_name
self.last_name = last_name
self.address = address
self.type = type
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'first_name' : self.first_name,
'last_name' : self.last_name,
'address' : self.address,
'type' : self.type
}
class Employee(Person, db.Model):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
manager_id = Column(Integer, ForeignKey('manager.id'))
manager = relationship('Manager', back_populates='employees')
reservations = relationship('Reservation', back_populates='employee')
__mapper_args__ = {
'polymorphic_identity':'employee'
}
def __init__(self, first_name, last_name, address, type, manager_id):
self.first_name = first_name
self.last_name = last_name
self.address = address
self.type = type
self.manager_id = manager_id
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'first_name' : self.first_name,
'last_name' : self.last_name,
'address' : self.address,
'type' : self.type,
'manager_id' : self.manager_id
}
def get_vehicle(id):
if id <= 0:
return Vehicle.query.all()
else:
return Vehicle.query.filter_by(id=id).first()
def get_customer(id):
if not id:
return Customer.query.all()
else:
return Customer.query.filter_by(id=id).first()
def get_employee(id):
if not id:
return Employee.query.all()
else:
return Employee.query.filter_by(id=id).first()
def get_manager(id):
if not id:
return Manager.query.all()
else:
return Manager.query.filter_by(id=id).first()
def get_reservation():
return Reservation.query.all()
| true
| true
|
f7098a56d5bc500eb89b46ae24674e262cb9574f
| 3,747
|
py
|
Python
|
pdf2pdfocr_multibackground.py
|
browntownington/pdf2pdfocr
|
21b8dc2bdaa9d059b2c858c27dd05a9a26235371
|
[
"Apache-2.0"
] | 136
|
2016-01-03T10:58:24.000Z
|
2022-03-20T23:01:24.000Z
|
pdf2pdfocr_multibackground.py
|
browntownington/pdf2pdfocr
|
21b8dc2bdaa9d059b2c858c27dd05a9a26235371
|
[
"Apache-2.0"
] | 27
|
2016-04-30T05:41:18.000Z
|
2022-02-26T12:00:36.000Z
|
pdf2pdfocr_multibackground.py
|
browntownington/pdf2pdfocr
|
21b8dc2bdaa9d059b2c858c27dd05a9a26235371
|
[
"Apache-2.0"
] | 22
|
2016-04-30T04:34:54.000Z
|
2021-08-30T21:01:13.000Z
|
#!/usr/bin/env python3
##############################################################################
# Copyright (c) 2016: Leonardo Cardoso
# https://github.com/LeoFCardoso/pdf2pdfocr
##############################################################################
# Emulate pdftk multibackground operator
# $1 - first file (foreground)
# $2 - second file (background)
# $3 - output file
# User should pass correct parameters. There is no parameter check.
####
# Depends on PyPDF2
#
import datetime
import sys
from PyPDF2 import PdfFileWriter, PdfFileReader
__author__ = 'Leonardo F. Cardoso'
#
verbose_mode = False # Used for debug
def debug(param):
try:
if verbose_mode:
tstamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
print("[{0}] [DEBUG]\t{1}".format(tstamp, param))
    except Exception:
        pass
output = PdfFileWriter()
# First file (image)
imagepdf = PdfFileReader(open(sys.argv[1], 'rb'), strict=False)
# Second file (text)
textpdf = PdfFileReader(open(sys.argv[2], 'rb'), strict=False)
# Copy pages to output with text
scale_tolerance = 0.001
for i in range(imagepdf.getNumPages()):
debug("Page: {0}".format(i + 1))
imagepage = imagepdf.getPage(i)
textpage = textpdf.getPage(i)
debug("Img (original): {0}".format(imagepage.mediaBox.upperRight))
debug("Text: {0}".format(textpage.mediaBox.upperRight))
# Handle rotation
rotate_angle = imagepage.get('/Rotate')
debug("Image page rotate angle is {0}".format(rotate_angle))
debug("Text page rotate angle is {0}".format(textpage.get('/Rotate')))
if rotate_angle is None:
rotate_angle = 0
#
image_page_x = imagepage.mediaBox.upperRight[0]
image_page_y = imagepage.mediaBox.upperRight[1]
    # With rotated pages (90 or 270 degrees), we have to switch x and y, to avoid a wrong scale operation
if rotate_angle == 90 or rotate_angle == 270:
image_page_x = imagepage.mediaBox.upperRight[1]
image_page_y = imagepage.mediaBox.upperRight[0]
#
debug("Img (dimensions after rotation): {0}, {1}".format(image_page_x, image_page_y))
factor_x = textpage.mediaBox.upperRight[0] / image_page_x
factor_y = textpage.mediaBox.upperRight[1] / image_page_y
debug("Factors: {0}, {1}".format(factor_x, factor_y))
debug("Corrected Factors: {0}, {1}".format(factor_x - 1, factor_y - 1))
# Try to avoid unnecessary scale operation
if abs(factor_x - 1) > scale_tolerance or abs(factor_y - 1) > scale_tolerance:
debug("Scaling...")
imagepage.scale(float(factor_x), float(factor_y))
# imagepage stay on top
if rotate_angle == 0 or rotate_angle == 360:
debug("Merge simple")
# TODO very slow in some PDFs
textpage.mergePage(imagepage)
else:
debug("Merge rotated")
# Tested values for translation with 90 degrees
if rotate_angle == 90:
textpage.mergeRotatedTranslatedPage(imagepage, (-1 * rotate_angle), image_page_y / 2,
image_page_y / 2, expand=False)
# Tested values for translation with 180 degrees
if rotate_angle == 180:
textpage.mergeRotatedTranslatedPage(imagepage, (-1 * rotate_angle), image_page_x / 2,
image_page_y / 2, expand=False)
# Tested values for translation with 270 degrees
if rotate_angle == 270:
textpage.mergeRotatedTranslatedPage(imagepage, (-1 * rotate_angle), image_page_x / 2,
image_page_x / 2, expand=False)
#
textpage.compressContentStreams()
output.addPage(textpage)
#
with open(sys.argv[3], 'wb') as f:
output.write(f)
#
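# A minimal invocation sketch (file names are illustrative); per the header,
# the argument order is foreground, background, output:
#   python3 pdf2pdfocr_multibackground.py foreground.pdf background.pdf merged.pdf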
| 39.03125
| 103
| 0.621564
| true
| true
|
|
f7098a7742e9c2007808daa26f100d2b2cd86f4a
| 2,913
|
py
|
Python
|
echoscope/__main__.py
|
treeyh/echoscope
|
ef8933ce9a5dfe2ac8fb6e82bad8d5fa0d72a6da
|
[
"MIT"
] | 1
|
2022-01-18T09:19:38.000Z
|
2022-01-18T09:19:38.000Z
|
echoscope/__main__.py
|
treeyh/echoscope
|
ef8933ce9a5dfe2ac8fb6e82bad8d5fa0d72a6da
|
[
"MIT"
] | null | null | null |
echoscope/__main__.py
|
treeyh/echoscope
|
ef8933ce9a5dfe2ac8fb6e82bad8d5fa0d72a6da
|
[
"MIT"
] | 1
|
2022-01-18T09:19:39.000Z
|
2022-01-18T09:19:39.000Z
|
# -*- coding: UTF-8 -*-
import sys
import logging
import argparse
import shutil
from typing import Dict, List
from echoscope.util import file_util, log_util
from echoscope.config import config
from echoscope.model import config_model
from echoscope.source import source, mysql_source, clickhouse_source
from echoscope.generate import generate, markdown_generate
from clickhouse_driver import Client, connect
# Map of data-source exporters, keyed by data source type
__source_map: Dict[str, source.Source] = {}
# Map of output-file generators, keyed by export type
__generate_map: Dict[str, generate.Generate] = {}
__generate = None
def init():
"""初始化
"""
file_util.mkdirs(config.LogPath, False)
log_util.log_init(config.LogPath)
mysqlSource = mysql_source.MysqlSource()
__source_map[config.DsMysql] = mysqlSource
__source_map[config.DsMariaDB] = mysqlSource
__source_map[config.DsClickHouse] = clickhouse_source.ClickhouseSource()
mdGenerate = markdown_generate.MarkdownGenerate(config.TemplatePath, config.MarkdownExportPath)
__generate_map[config.ExportTypeMarkdown] = mdGenerate
def _parse_option():
"""获取命令行参数
Returns:
[type]: [description]
"""
parser = argparse.ArgumentParser(description='Echoscope')
parser.add_argument('-g', '--generate', type=str, default='markdown',
help='generate file type. support: markdown')
options = parser.parse_args()
return options, sys.argv[1:]
def main():
init()
options, args = _parse_option()
shutil.rmtree(path=config.MarkdownExportPath, ignore_errors=True)
confMap: Dict[str, List[config_model.DataSourceConfig]] = {}
  # Generate a model file for each configured data source
for dsConfig in config.exportDsConfig:
logging.info("start generate model file: %s" % dsConfig)
ds = __source_map[dsConfig.dsType].export_model(conf=dsConfig)
dsConfig.ds = ds
filePath = __generate_map[options.generate].generate_index_file(conf=dsConfig, ds=ds)
logging.info("generate model index file path: %s" % filePath)
filePath = __generate_map[options.generate].generate_file(conf=dsConfig, ds=ds)
    if confMap.get(dsConfig.dsType, None) is None:
confMap[dsConfig.dsType] = [dsConfig]
else:
confMap[dsConfig.dsType].append(dsConfig)
logging.info("end generate model file path: %s" % filePath)
logging.info("start generate root index file ")
confss: List[List[config_model.DataSourceConfig]] = []
for dsType in config.SupportDsType:
print(dsType)
confs = confMap.get(dsType, None)
    if confs is None:
continue
print(dsType)
confss.append(confs)
__generate_map[config.ExportTypeMarkdown].generate_root_file(confss)
logging.info("end generate root index file ")
main()
# conn = connect('clickhouse://default:123456@10.0.3.94:9000/system')
# # client = Client(host='10.0.3.94', port=8123, user='default', password='123456')
# cursor = conn.cursor()
# cursor.execute('select version() as ver;')
# yz = cursor.fetchall()
# print(yz)
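# A minimal invocation sketch, using the flag defined in _parse_option
# (assuming config.exportDsConfig is populated):
#   python -m echoscope -g markdown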
| 26.724771
| 97
| 0.727085
|
import sys
import logging
import argparse
import shutil
from typing import Dict, List
from echoscope.util import file_util, log_util
from echoscope.config import config
from echoscope.model import config_model
from echoscope.source import source, mysql_source, clickhouse_source
from echoscope.generate import generate, markdown_generate
from clickhouse_driver import Client, connect
__source_map: Dict[str, source.Source] = {}
__generate_map: Dict[str, generate.Generate] = {}
__generate = None
def init():
file_util.mkdirs(config.LogPath, False)
log_util.log_init(config.LogPath)
mysqlSource = mysql_source.MysqlSource()
__source_map[config.DsMysql] = mysqlSource
__source_map[config.DsMariaDB] = mysqlSource
__source_map[config.DsClickHouse] = clickhouse_source.ClickhouseSource()
mdGenerate = markdown_generate.MarkdownGenerate(config.TemplatePath, config.MarkdownExportPath)
__generate_map[config.ExportTypeMarkdown] = mdGenerate
def _parse_option():
parser = argparse.ArgumentParser(description='Echoscope')
parser.add_argument('-g', '--generate', type=str, default='markdown',
help='generate file type. support: markdown')
options = parser.parse_args()
return options, sys.argv[1:]
def main():
init()
options, args = _parse_option()
shutil.rmtree(path=config.MarkdownExportPath, ignore_errors=True)
confMap: Dict[str, List[config_model.DataSourceConfig]] = {}
for dsConfig in config.exportDsConfig:
logging.info("start generate model file: %s" % dsConfig)
ds = __source_map[dsConfig.dsType].export_model(conf=dsConfig)
dsConfig.ds = ds
filePath = __generate_map[options.generate].generate_index_file(conf=dsConfig, ds=ds)
logging.info("generate model index file path: %s" % filePath)
filePath = __generate_map[options.generate].generate_file(conf=dsConfig, ds=ds)
    if confMap.get(dsConfig.dsType, None) is None:
confMap[dsConfig.dsType] = [dsConfig]
else:
confMap[dsConfig.dsType].append(dsConfig)
logging.info("end generate model file path: %s" % filePath)
logging.info("start generate root index file ")
confss: List[List[config_model.DataSourceConfig]] = []
for dsType in config.SupportDsType:
print(dsType)
confs = confMap.get(dsType, None)
    if confs is None:
continue
print(dsType)
confss.append(confs)
__generate_map[config.ExportTypeMarkdown].generate_root_file(confss)
logging.info("end generate root index file ")
main()
| true
| true
|
f7098c2080949e808ba6e5b35267749e8ad64cbe
| 3,540
|
py
|
Python
|
lib/kubernetes/client/models/v1_rolling_update_stateful_set_strategy.py
|
splunkenizer/splunk_as_a_service_app
|
97c4aaf927d2171bf131126cf9b70489ac75bc5a
|
[
"Apache-2.0"
] | 7
|
2019-12-21T00:14:14.000Z
|
2021-03-11T14:51:37.000Z
|
lib/kubernetes/client/models/v1_rolling_update_stateful_set_strategy.py
|
splunkenizer/splunk_as_a_service_app
|
97c4aaf927d2171bf131126cf9b70489ac75bc5a
|
[
"Apache-2.0"
] | 29
|
2019-10-09T11:16:21.000Z
|
2020-06-23T09:32:09.000Z
|
lib/kubernetes/client/models/v1_rolling_update_stateful_set_strategy.py
|
splunkenizer/splunk_as_a_service_app
|
97c4aaf927d2171bf131126cf9b70489ac75bc5a
|
[
"Apache-2.0"
] | 1
|
2021-05-07T10:13:31.000Z
|
2021-05-07T10:13:31.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1RollingUpdateStatefulSetStrategy(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'partition': 'int'
}
attribute_map = {
'partition': 'partition'
}
def __init__(self, partition=None):
"""
V1RollingUpdateStatefulSetStrategy - a model defined in Swagger
"""
self._partition = None
self.discriminator = None
if partition is not None:
self.partition = partition
@property
def partition(self):
"""
Gets the partition of this V1RollingUpdateStatefulSetStrategy.
Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.
:return: The partition of this V1RollingUpdateStatefulSetStrategy.
:rtype: int
"""
return self._partition
@partition.setter
def partition(self, partition):
"""
Sets the partition of this V1RollingUpdateStatefulSetStrategy.
Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.
:param partition: The partition of this V1RollingUpdateStatefulSetStrategy.
:type: int
"""
self._partition = partition
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1RollingUpdateStatefulSetStrategy):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
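A minimal usage sketch for this generated model:

strategy = V1RollingUpdateStatefulSetStrategy(partition=2)
print(strategy.to_dict())  # -> {'partition': 2}
print(strategy)            # pretty-printed via to_str()/pformat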
| 27.874016
| 108
| 0.55452
|
from pprint import pformat
from six import iteritems
import re
class V1RollingUpdateStatefulSetStrategy(object):
swagger_types = {
'partition': 'int'
}
attribute_map = {
'partition': 'partition'
}
def __init__(self, partition=None):
self._partition = None
self.discriminator = None
if partition is not None:
self.partition = partition
@property
def partition(self):
return self._partition
@partition.setter
def partition(self, partition):
self._partition = partition
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1RollingUpdateStatefulSetStrategy):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f7098c6d7d019f59843b2349c02ec2b299bce038
| 18,212
|
py
|
Python
|
optuna/visualization/matplotlib/_contour.py
|
keisukefukuda/optuna
|
ac4ea8d0c74726f8a603ba2cb0bfb7f4112f736e
|
[
"MIT"
] | null | null | null |
optuna/visualization/matplotlib/_contour.py
|
keisukefukuda/optuna
|
ac4ea8d0c74726f8a603ba2cb0bfb7f4112f736e
|
[
"MIT"
] | null | null | null |
optuna/visualization/matplotlib/_contour.py
|
keisukefukuda/optuna
|
ac4ea8d0c74726f8a603ba2cb0bfb7f4112f736e
|
[
"MIT"
] | null | null | null |
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
from optuna._experimental import experimental
from optuna.logging import get_logger
from optuna.study import Study
from optuna.study import StudyDirection
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
from optuna.visualization._utils import _check_plot_args
from optuna.visualization._utils import _get_param_values
from optuna.visualization.matplotlib._matplotlib_imports import _imports
from optuna.visualization.matplotlib._utils import _is_log_scale
from optuna.visualization.matplotlib._utils import _is_numerical
if _imports.is_successful():
from optuna.visualization.matplotlib._matplotlib_imports import Axes
from optuna.visualization.matplotlib._matplotlib_imports import Colormap
from optuna.visualization.matplotlib._matplotlib_imports import ContourSet
from optuna.visualization.matplotlib._matplotlib_imports import plt
_logger = get_logger(__name__)
AXES_PADDING_RATIO = 5e-2
@experimental("2.2.0")
def plot_contour(
study: Study,
params: Optional[List[str]] = None,
*,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "Axes":
"""Plot the parameter relationship as contour plot in a study with Matplotlib.
Note that, if a parameter contains missing values, a trial with missing values is not plotted.
.. seealso::
Please refer to :func:`optuna.visualization.plot_contour` for an example.
Warnings:
Output figures of this Matplotlib-based
:func:`~optuna.visualization.matplotlib.plot_contour` function would be different from
those of the Plotly-based :func:`~optuna.visualization.plot_contour`.
Example:
The following code snippet shows how to plot the parameter relationship as contour plot.
.. plot::
import optuna
def objective(trial):
x = trial.suggest_float("x", -100, 100)
y = trial.suggest_categorical("y", [-1, 0, 1])
return x ** 2 + y
sampler = optuna.samplers.TPESampler(seed=10)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=30)
optuna.visualization.matplotlib.plot_contour(study, params=["x", "y"])
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their target values.
params:
Parameter list to visualize. The default is all parameters.
target:
A function to specify the value to display. If it is :obj:`None` and ``study`` is being
used for single-objective optimization, the objective values are plotted.
.. note::
Specify this argument if ``study`` is being used for multi-objective optimization.
target_name:
Target's name to display on the color bar.
Returns:
A :class:`matplotlib.axes.Axes` object.
Raises:
:exc:`ValueError`:
If ``target`` is :obj:`None` and ``study`` is being used for multi-objective
optimization.
"""
_imports.check()
_check_plot_args(study, target, target_name)
_logger.warning(
"Output figures of this Matplotlib-based `plot_contour` function would be different from "
"those of the Plotly-based `plot_contour`."
)
return _get_contour_plot(study, params, target, target_name)
def _get_contour_plot(
study: Study,
params: Optional[List[str]] = None,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "Axes":
# Calculate basic numbers for plotting.
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
_, ax = plt.subplots()
return ax
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is None:
sorted_params = sorted(all_params)
elif len(params) <= 1:
_logger.warning("The length of params must be greater than 1.")
_, ax = plt.subplots()
return ax
else:
for input_p_name in params:
if input_p_name not in all_params:
raise ValueError("Parameter {} does not exist in your study.".format(input_p_name))
sorted_params = sorted(set(params))
n_params = len(sorted_params)
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
if n_params == 2:
# Set up the graph style.
fig, axs = plt.subplots()
axs.set_title("Contour Plot")
cmap = _set_cmap(study, target)
contour_point_num = 100
# Prepare data and draw contour plots.
if params:
x_param = params[0]
y_param = params[1]
else:
x_param = sorted_params[0]
y_param = sorted_params[1]
cs = _generate_contour_subplot(
trials, x_param, y_param, axs, cmap, contour_point_num, target
)
if isinstance(cs, ContourSet):
axcb = fig.colorbar(cs)
axcb.set_label(target_name)
else:
# Set up the graph style.
fig, axs = plt.subplots(n_params, n_params)
fig.suptitle("Contour Plot")
cmap = _set_cmap(study, target)
contour_point_num = 100
# Prepare data and draw contour plots.
cs_list = []
for x_i, x_param in enumerate(sorted_params):
for y_i, y_param in enumerate(sorted_params):
ax = axs[y_i, x_i]
cs = _generate_contour_subplot(
trials, x_param, y_param, ax, cmap, contour_point_num, target
)
if isinstance(cs, ContourSet):
cs_list.append(cs)
if cs_list:
axcb = fig.colorbar(cs_list[0], ax=axs)
axcb.set_label(target_name)
return axs
def _set_cmap(study: Study, target: Optional[Callable[[FrozenTrial], float]]) -> "Colormap":
cmap = "Blues_r" if target is None and study.direction == StudyDirection.MAXIMIZE else "Blues"
return plt.get_cmap(cmap)
class _LabelEncoder:
def __init__(self) -> None:
self.labels: List[str] = []
def fit(self, labels: List[str]) -> "_LabelEncoder":
self.labels = sorted(set(labels))
return self
def transform(self, labels: List[str]) -> List[int]:
return [self.labels.index(label) for label in labels]
def fit_transform(self, labels: List[str]) -> List[int]:
return self.fit(labels).transform(labels)
def get_labels(self) -> List[str]:
return self.labels
def get_indices(self) -> List[int]:
return list(range(len(self.labels)))
def _calculate_griddata(
trials: List[FrozenTrial],
x_param: str,
x_indices: List[Union[str, int, float]],
y_param: str,
y_indices: List[Union[str, int, float]],
contour_point_num: int,
target: Optional[Callable[[FrozenTrial], float]],
) -> Tuple[
np.ndarray,
np.ndarray,
np.ndarray,
List[Union[int, float]],
List[Union[int, float]],
List[Union[int, float]],
List[Union[int, float]],
List[int],
List[str],
List[int],
List[str],
int,
int,
]:
    # Extract values for the x, y, z axes from each trial.
x_values = []
y_values = []
z_values = []
x_range_values = []
y_range_values = []
for trial in trials:
contains_x_param = x_param in trial.params
if contains_x_param:
x_range_values.append(trial.params[x_param])
contains_y_param = y_param in trial.params
if contains_y_param:
y_range_values.append(trial.params[y_param])
if not contains_x_param or not contains_y_param:
continue
x_values.append(trial.params[x_param])
y_values.append(trial.params[y_param])
if target is None:
value = trial.value
else:
value = target(trial)
if isinstance(value, int):
value = float(value)
elif not isinstance(value, float):
raise ValueError(
"Trial{} has COMPLETE state, but its target value is non-numeric.".format(
trial.number
)
)
z_values.append(value)
# Return empty values when x or y has no value.
if len(x_values) == 0 or len(y_values) == 0:
return (
np.array([]),
np.array([]),
np.array([]),
x_values,
y_values,
[],
[],
[],
[],
[],
[],
0,
0,
)
# Add dummy values for grid data calculation when a parameter has one unique value.
x_values_dummy = []
y_values_dummy = []
if len(set(x_values)) == 1:
x_values_dummy = [x for x in x_indices if x not in x_values]
x_values = x_values + x_values_dummy * len(x_values)
y_values = y_values + (y_values * len(x_values_dummy))
z_values = z_values + (z_values * len(x_values_dummy))
if len(set(y_values)) == 1:
y_values_dummy = [y for y in y_indices if y not in y_values]
y_values = y_values + y_values_dummy * len(y_values)
x_values = x_values + (x_values * len(y_values_dummy))
z_values = z_values + (z_values * len(y_values_dummy))
# Convert categorical values to int.
cat_param_labels_x = [] # type: List[str]
cat_param_pos_x = [] # type: List[int]
cat_param_labels_y = [] # type: List[str]
cat_param_pos_y = [] # type: List[int]
if not _is_numerical(trials, x_param):
enc = _LabelEncoder()
x_range_values = enc.fit_transform(list(map(str, x_range_values)))
x_values = enc.transform(list(map(str, x_values)))
cat_param_labels_x = enc.get_labels()
cat_param_pos_x = enc.get_indices()
if not _is_numerical(trials, y_param):
enc = _LabelEncoder()
y_range_values = enc.fit_transform(list(map(str, y_range_values)))
y_values = enc.transform(list(map(str, y_values)))
cat_param_labels_y = enc.get_labels()
cat_param_pos_y = enc.get_indices()
# Calculate min and max of x and y.
x_values_min = min(x_range_values)
x_values_max = max(x_range_values)
y_values_min = min(y_range_values)
y_values_max = max(y_range_values)
# Calculate grid data points.
# For x and y, create 1-D array of evenly spaced coordinates on linear or log scale.
xi = np.array([])
yi = np.array([])
zi = np.array([])
if _is_log_scale(trials, x_param):
padding_x = (np.log10(x_values_max) - np.log10(x_values_min)) * AXES_PADDING_RATIO
x_values_min = np.power(10, np.log10(x_values_min) - padding_x)
x_values_max = np.power(10, np.log10(x_values_max) + padding_x)
xi = np.logspace(np.log10(x_values_min), np.log10(x_values_max), contour_point_num)
else:
padding_x = (x_values_max - x_values_min) * AXES_PADDING_RATIO
x_values_min -= padding_x
x_values_max += padding_x
xi = np.linspace(x_values_min, x_values_max, contour_point_num)
if _is_log_scale(trials, y_param):
padding_y = (np.log10(y_values_max) - np.log10(y_values_min)) * AXES_PADDING_RATIO
y_values_min = np.power(10, np.log10(y_values_min) - padding_y)
y_values_max = np.power(10, np.log10(y_values_max) + padding_y)
yi = np.logspace(np.log10(y_values_min), np.log10(y_values_max), contour_point_num)
else:
padding_y = (y_values_max - y_values_min) * AXES_PADDING_RATIO
y_values_min -= padding_y
y_values_max += padding_y
yi = np.linspace(y_values_min, y_values_max, contour_point_num)
# create irregularly spaced map of trial values
# and interpolate it with Plotly's interpolation formulation
if x_param != y_param:
zmap = _create_zmap(x_values, y_values, z_values, xi, yi)
zi = _interpolate_zmap(zmap, contour_point_num)
return (
xi,
yi,
zi,
x_values,
y_values,
[x_values_min, x_values_max],
[y_values_min, y_values_max],
cat_param_pos_x,
cat_param_labels_x,
cat_param_pos_y,
cat_param_labels_y,
len(x_values_dummy),
len(y_values_dummy),
)
def _generate_contour_subplot(
trials: List[FrozenTrial],
x_param: str,
y_param: str,
ax: "Axes",
cmap: "Colormap",
contour_point_num: int,
target: Optional[Callable[[FrozenTrial], float]],
) -> "ContourSet":
x_indices = sorted(set(_get_param_values(trials, x_param)))
y_indices = sorted(set(_get_param_values(trials, y_param)))
if len(x_indices) < 2:
_logger.warning("Param {} unique value length is less than 2.".format(x_param))
return ax
if len(y_indices) < 2:
_logger.warning("Param {} unique value length is less than 2.".format(y_param))
return ax
(
xi,
yi,
zi,
x_values,
y_values,
x_values_range,
y_values_range,
x_cat_param_pos,
x_cat_param_label,
y_cat_param_pos,
y_cat_param_label,
x_values_dummy_count,
y_values_dummy_count,
) = _calculate_griddata(
trials, x_param, x_indices, y_param, y_indices, contour_point_num, target
)
cs = None
ax.set(xlabel=x_param, ylabel=y_param)
ax.set_xlim(x_values_range[0], x_values_range[1])
ax.set_ylim(y_values_range[0], y_values_range[1])
if len(zi) > 0:
if _is_log_scale(trials, x_param):
ax.set_xscale("log")
if _is_log_scale(trials, y_param):
ax.set_yscale("log")
if x_param != y_param:
# Contour the gridded data.
ax.contour(xi, yi, zi, 15, linewidths=0.5, colors="k")
cs = ax.contourf(xi, yi, zi, 15, cmap=cmap.reversed())
# Plot data points.
if x_values_dummy_count > 0:
x_org_len = int(len(x_values) / (x_values_dummy_count + 1))
y_org_len = int(len(y_values) / (x_values_dummy_count + 1))
elif y_values_dummy_count > 0:
x_org_len = int(len(x_values) / (y_values_dummy_count + 1))
y_org_len = int(len(y_values) / (y_values_dummy_count + 1))
else:
x_org_len = len(x_values)
            y_org_len = len(y_values)
ax.scatter(
x_values[:x_org_len],
y_values[:y_org_len],
marker="o",
c="black",
s=20,
edgecolors="grey",
linewidth=2.0,
)
if x_cat_param_pos:
ax.set_xticks(x_cat_param_pos)
ax.set_xticklabels(x_cat_param_label)
if y_cat_param_pos:
ax.set_yticks(y_cat_param_pos)
ax.set_yticklabels(y_cat_param_label)
ax.label_outer()
return cs
def _create_zmap(
x_values: List[Union[int, float]],
y_values: List[Union[int, float]],
z_values: List[float],
xi: np.ndarray,
yi: np.ndarray,
) -> Dict[Tuple[int, int], float]:
# creates z-map from trial values and params.
# z-map is represented by hashmap of coordinate and trial value pairs
#
# coordinates are represented by tuple of integers, where the first item
# indicates x-axis index and the second item indicates y-axis index
# and refer to a position of trial value on irregular param grid
#
# since params were resampled either with linspace or logspace
# original params might not be on the x and y axes anymore
# so we are going with close approximations of trial value positions
zmap = dict()
for x, y, z in zip(x_values, y_values, z_values):
xindex = int(np.argmin(np.abs(xi - x)))
yindex = int(np.argmin(np.abs(yi - y)))
zmap[(xindex, yindex)] = z
return zmap
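# A minimal sketch of the mapping above (values are illustrative):
#   xi = np.linspace(0.0, 1.0, 101)
#   yi = np.linspace(0.0, 1.0, 101)
#   _create_zmap([0.5], [0.25], [1.23], xi, yi)  # -> {(50, 25): 1.23}
# i.e. each trial value is snapped to the nearest grid coordinate pair.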
def _interpolate_zmap(zmap: Dict[Tuple[int, int], float], contour_plot_num: int) -> np.ndarray:
# implements interpolation formulation used in Plotly
# to interpolate heatmaps and contour plots
# https://github.com/plotly/plotly.js/blob/master/src/traces/heatmap/interp2d.js#L30
# citing their doc:
#
# > Fill in missing data from a 2D array using an iterative
# > poisson equation solver with zero-derivative BC at edges.
# > Amazingly, this just amounts to repeatedly averaging all the existing
# > nearest neighbors
#
# Plotly's algorithm is equivalent to solve the following linear simultaneous equation.
# It is discretization form of the Poisson equation.
#
# z[x, y] = zmap[(x, y)] (if zmap[(x, y)] is given)
# 4 * z[x, y] = z[x-1, y] + z[x+1, y] + z[x, y-1] + z[x, y+1] (if zmap[(x, y)] is not given)
a_data = []
a_row = []
a_col = []
b = np.zeros(contour_plot_num**2)
for x in range(contour_plot_num):
for y in range(contour_plot_num):
grid_index = y * contour_plot_num + x
if (x, y) in zmap:
a_data.append(1)
a_row.append(grid_index)
a_col.append(grid_index)
b[grid_index] = zmap[(x, y)]
else:
for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
if 0 <= x + dx < contour_plot_num and 0 <= y + dy < contour_plot_num:
a_data.append(1)
a_row.append(grid_index)
a_col.append(grid_index)
a_data.append(-1)
a_row.append(grid_index)
a_col.append(grid_index + dy * contour_plot_num + dx)
z = scipy.sparse.linalg.spsolve(scipy.sparse.csc_matrix((a_data, (a_row, a_col))), b)
return z.reshape((contour_plot_num, contour_plot_num))
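# A minimal sanity-check sketch: with a single fixed value and zero-derivative
# boundaries, the Poisson fill propagates that value across the whole grid:
#   _interpolate_zmap({(0, 0): 2.0}, 3)  # -> a 3x3 array filled with 2.0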
| 34.82218
| 99
| 0.621623
|
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
from optuna._experimental import experimental
from optuna.logging import get_logger
from optuna.study import Study
from optuna.study import StudyDirection
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
from optuna.visualization._utils import _check_plot_args
from optuna.visualization._utils import _get_param_values
from optuna.visualization.matplotlib._matplotlib_imports import _imports
from optuna.visualization.matplotlib._utils import _is_log_scale
from optuna.visualization.matplotlib._utils import _is_numerical
if _imports.is_successful():
from optuna.visualization.matplotlib._matplotlib_imports import Axes
from optuna.visualization.matplotlib._matplotlib_imports import Colormap
from optuna.visualization.matplotlib._matplotlib_imports import ContourSet
from optuna.visualization.matplotlib._matplotlib_imports import plt
_logger = get_logger(__name__)
AXES_PADDING_RATIO = 5e-2
@experimental("2.2.0")
def plot_contour(
study: Study,
params: Optional[List[str]] = None,
*,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "Axes":
_imports.check()
_check_plot_args(study, target, target_name)
_logger.warning(
"Output figures of this Matplotlib-based `plot_contour` function would be different from "
"those of the Plotly-based `plot_contour`."
)
return _get_contour_plot(study, params, target, target_name)
def _get_contour_plot(
study: Study,
params: Optional[List[str]] = None,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "Axes":
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
_, ax = plt.subplots()
return ax
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is None:
sorted_params = sorted(all_params)
elif len(params) <= 1:
_logger.warning("The length of params must be greater than 1.")
_, ax = plt.subplots()
return ax
else:
for input_p_name in params:
if input_p_name not in all_params:
raise ValueError("Parameter {} does not exist in your study.".format(input_p_name))
sorted_params = sorted(set(params))
n_params = len(sorted_params)
plt.style.use("ggplot")
if n_params == 2:
fig, axs = plt.subplots()
axs.set_title("Contour Plot")
cmap = _set_cmap(study, target)
contour_point_num = 100
if params:
x_param = params[0]
y_param = params[1]
else:
x_param = sorted_params[0]
y_param = sorted_params[1]
cs = _generate_contour_subplot(
trials, x_param, y_param, axs, cmap, contour_point_num, target
)
if isinstance(cs, ContourSet):
axcb = fig.colorbar(cs)
axcb.set_label(target_name)
else:
fig, axs = plt.subplots(n_params, n_params)
fig.suptitle("Contour Plot")
cmap = _set_cmap(study, target)
contour_point_num = 100
cs_list = []
for x_i, x_param in enumerate(sorted_params):
for y_i, y_param in enumerate(sorted_params):
ax = axs[y_i, x_i]
cs = _generate_contour_subplot(
trials, x_param, y_param, ax, cmap, contour_point_num, target
)
if isinstance(cs, ContourSet):
cs_list.append(cs)
if cs_list:
axcb = fig.colorbar(cs_list[0], ax=axs)
axcb.set_label(target_name)
return axs
def _set_cmap(study: Study, target: Optional[Callable[[FrozenTrial], float]]) -> "Colormap":
cmap = "Blues_r" if target is None and study.direction == StudyDirection.MAXIMIZE else "Blues"
return plt.get_cmap(cmap)
class _LabelEncoder:
def __init__(self) -> None:
self.labels: List[str] = []
def fit(self, labels: List[str]) -> "_LabelEncoder":
self.labels = sorted(set(labels))
return self
def transform(self, labels: List[str]) -> List[int]:
return [self.labels.index(label) for label in labels]
def fit_transform(self, labels: List[str]) -> List[int]:
return self.fit(labels).transform(labels)
def get_labels(self) -> List[str]:
return self.labels
def get_indices(self) -> List[int]:
return list(range(len(self.labels)))
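# Editor's illustrative note (not in the original): for categorical axes,
# _LabelEncoder().fit_transform(["svm", "rf", "svm"]) returns [1, 0, 1],
# since the sorted label set is ["rf", "svm"]; get_labels() and
# get_indices() then supply the tick labels and tick positions.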
def _calculate_griddata(
trials: List[FrozenTrial],
x_param: str,
x_indices: List[Union[str, int, float]],
y_param: str,
y_indices: List[Union[str, int, float]],
contour_point_num: int,
target: Optional[Callable[[FrozenTrial], float]],
) -> Tuple[
np.ndarray,
np.ndarray,
np.ndarray,
List[Union[int, float]],
List[Union[int, float]],
List[Union[int, float]],
List[Union[int, float]],
List[int],
List[str],
List[int],
List[str],
int,
int,
]:
x_values = []
y_values = []
z_values = []
x_range_values = []
y_range_values = []
for trial in trials:
contains_x_param = x_param in trial.params
if contains_x_param:
x_range_values.append(trial.params[x_param])
contains_y_param = y_param in trial.params
if contains_y_param:
y_range_values.append(trial.params[y_param])
if not contains_x_param or not contains_y_param:
continue
x_values.append(trial.params[x_param])
y_values.append(trial.params[y_param])
if target is None:
value = trial.value
else:
value = target(trial)
if isinstance(value, int):
value = float(value)
elif not isinstance(value, float):
raise ValueError(
"Trial{} has COMPLETE state, but its target value is non-numeric.".format(
trial.number
)
)
z_values.append(value)
if len(x_values) == 0 or len(y_values) == 0:
return (
np.array([]),
np.array([]),
np.array([]),
x_values,
y_values,
[],
[],
[],
[],
[],
[],
0,
0,
)
x_values_dummy = []
y_values_dummy = []
if len(set(x_values)) == 1:
x_values_dummy = [x for x in x_indices if x not in x_values]
x_values = x_values + x_values_dummy * len(x_values)
y_values = y_values + (y_values * len(x_values_dummy))
z_values = z_values + (z_values * len(x_values_dummy))
if len(set(y_values)) == 1:
y_values_dummy = [y for y in y_indices if y not in y_values]
y_values = y_values + y_values_dummy * len(y_values)
x_values = x_values + (x_values * len(y_values_dummy))
z_values = z_values + (z_values * len(y_values_dummy))
cat_param_labels_x = []
cat_param_pos_x = []
cat_param_labels_y = []
cat_param_pos_y = []
if not _is_numerical(trials, x_param):
enc = _LabelEncoder()
x_range_values = enc.fit_transform(list(map(str, x_range_values)))
x_values = enc.transform(list(map(str, x_values)))
cat_param_labels_x = enc.get_labels()
cat_param_pos_x = enc.get_indices()
if not _is_numerical(trials, y_param):
enc = _LabelEncoder()
y_range_values = enc.fit_transform(list(map(str, y_range_values)))
y_values = enc.transform(list(map(str, y_values)))
cat_param_labels_y = enc.get_labels()
cat_param_pos_y = enc.get_indices()
x_values_min = min(x_range_values)
x_values_max = max(x_range_values)
y_values_min = min(y_range_values)
y_values_max = max(y_range_values)
xi = np.array([])
yi = np.array([])
zi = np.array([])
if _is_log_scale(trials, x_param):
padding_x = (np.log10(x_values_max) - np.log10(x_values_min)) * AXES_PADDING_RATIO
x_values_min = np.power(10, np.log10(x_values_min) - padding_x)
x_values_max = np.power(10, np.log10(x_values_max) + padding_x)
xi = np.logspace(np.log10(x_values_min), np.log10(x_values_max), contour_point_num)
else:
padding_x = (x_values_max - x_values_min) * AXES_PADDING_RATIO
x_values_min -= padding_x
x_values_max += padding_x
xi = np.linspace(x_values_min, x_values_max, contour_point_num)
if _is_log_scale(trials, y_param):
padding_y = (np.log10(y_values_max) - np.log10(y_values_min)) * AXES_PADDING_RATIO
y_values_min = np.power(10, np.log10(y_values_min) - padding_y)
y_values_max = np.power(10, np.log10(y_values_max) + padding_y)
yi = np.logspace(np.log10(y_values_min), np.log10(y_values_max), contour_point_num)
else:
padding_y = (y_values_max - y_values_min) * AXES_PADDING_RATIO
y_values_min -= padding_y
y_values_max += padding_y
yi = np.linspace(y_values_min, y_values_max, contour_point_num)
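# Editor's illustrative note (not in the original): on a log axis with
# bounds [1e-3, 1e1], the padding is (1 - (-3)) * 0.05 = 0.2 decades,
# widening the axis to [10**-3.2, 10**1.2] before the grid is sampled.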
if x_param != y_param:
zmap = _create_zmap(x_values, y_values, z_values, xi, yi)
zi = _interpolate_zmap(zmap, contour_point_num)
return (
xi,
yi,
zi,
x_values,
y_values,
[x_values_min, x_values_max],
[y_values_min, y_values_max],
cat_param_pos_x,
cat_param_labels_x,
cat_param_pos_y,
cat_param_labels_y,
len(x_values_dummy),
len(y_values_dummy),
)
def _generate_contour_subplot(
trials: List[FrozenTrial],
x_param: str,
y_param: str,
ax: "Axes",
cmap: "Colormap",
contour_point_num: int,
target: Optional[Callable[[FrozenTrial], float]],
) -> "ContourSet":
x_indices = sorted(set(_get_param_values(trials, x_param)))
y_indices = sorted(set(_get_param_values(trials, y_param)))
if len(x_indices) < 2:
_logger.warning("Param {} unique value length is less than 2.".format(x_param))
return ax
if len(y_indices) < 2:
_logger.warning("Param {} unique value length is less than 2.".format(y_param))
return ax
(
xi,
yi,
zi,
x_values,
y_values,
x_values_range,
y_values_range,
x_cat_param_pos,
x_cat_param_label,
y_cat_param_pos,
y_cat_param_label,
x_values_dummy_count,
y_values_dummy_count,
) = _calculate_griddata(
trials, x_param, x_indices, y_param, y_indices, contour_point_num, target
)
cs = None
ax.set(xlabel=x_param, ylabel=y_param)
ax.set_xlim(x_values_range[0], x_values_range[1])
ax.set_ylim(y_values_range[0], y_values_range[1])
if len(zi) > 0:
if _is_log_scale(trials, x_param):
ax.set_xscale("log")
if _is_log_scale(trials, y_param):
ax.set_yscale("log")
if x_param != y_param:
# Contour the gridded data.
ax.contour(xi, yi, zi, 15, linewidths=0.5, colors="k")
cs = ax.contourf(xi, yi, zi, 15, cmap=cmap.reversed())
# Plot data points.
if x_values_dummy_count > 0:
x_org_len = int(len(x_values) / (x_values_dummy_count + 1))
y_org_len = int(len(y_values) / (x_values_dummy_count + 1))
elif y_values_dummy_count > 0:
x_org_len = int(len(x_values) / (y_values_dummy_count + 1))
y_org_len = int(len(y_values) / (y_values_dummy_count + 1))
else:
x_org_len = len(x_values)
y_org_len = len(x_values)
ax.scatter(
x_values[:x_org_len],
y_values[:y_org_len],
marker="o",
c="black",
s=20,
edgecolors="grey",
linewidth=2.0,
)
if x_cat_param_pos:
ax.set_xticks(x_cat_param_pos)
ax.set_xticklabels(x_cat_param_label)
if y_cat_param_pos:
ax.set_yticks(y_cat_param_pos)
ax.set_yticklabels(y_cat_param_label)
ax.label_outer()
return cs
def _create_zmap(
x_values: List[Union[int, float]],
y_values: List[Union[int, float]],
z_values: List[float],
xi: np.ndarray,
yi: np.ndarray,
) -> Dict[Tuple[int, int], float]:
# creates z-map from trial values and params.
# z-map is represented by hashmap of coordinate and trial value pairs
#
# coordinates are represented by a tuple of integers, where the first item
# is the x-axis index and the second item is the y-axis index; together
# they refer to the position of a trial value on the irregular param grid
#
# since params were resampled either with linspace or logspace
# the original params may no longer lie exactly on the x and y axes,
# so we go with close approximations of the trial value positions
zmap = dict()
for x, y, z in zip(x_values, y_values, z_values):
xindex = int(np.argmin(np.abs(xi - x)))
yindex = int(np.argmin(np.abs(yi - y)))
zmap[(xindex, yindex)] = z
return zmap
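# Editor's illustrative note (not in the original): with
# xi = np.array([0.0, 0.5, 1.0]) and a trial at x = 0.4,
# int(np.argmin(np.abs(xi - 0.4))) == 1, so the trial's value is keyed
# under x-index 1 -- 0.4 snaps to its closest grid point, 0.5.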
def _interpolate_zmap(zmap: Dict[Tuple[int, int], float], contour_plot_num: int) -> np.ndarray:
# implements interpolation formulation used in Plotly
# to interpolate heatmaps and contour plots
# https://github.com/plotly/plotly.js/blob/master/src/traces/heatmap/interp2d.js#L30
# citing their doc:
#
# > Fill in missing data from a 2D array using an iterative
# > poisson equation solver with zero-derivative BC at edges.
# > Amazingly, this just amounts to repeatedly averaging all the existing
# > nearest neighbors
#
# Plotly's algorithm is equivalent to solving the following system of
# simultaneous linear equations, which is a discretized form of the
# Poisson equation.
#
# z[x, y] = zmap[(x, y)] (if zmap[(x, y)] is given)
# 4 * z[x, y] = z[x-1, y] + z[x+1, y] + z[x, y-1] + z[x, y+1] (if zmap[(x, y)] is not given)
a_data = []
a_row = []
a_col = []
b = np.zeros(contour_plot_num**2)
for x in range(contour_plot_num):
for y in range(contour_plot_num):
grid_index = y * contour_plot_num + x
if (x, y) in zmap:
a_data.append(1)
a_row.append(grid_index)
a_col.append(grid_index)
b[grid_index] = zmap[(x, y)]
else:
for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
if 0 <= x + dx < contour_plot_num and 0 <= y + dy < contour_plot_num:
a_data.append(1)
a_row.append(grid_index)
a_col.append(grid_index)
a_data.append(-1)
a_row.append(grid_index)
a_col.append(grid_index + dy * contour_plot_num + dx)
z = scipy.sparse.linalg.spsolve(scipy.sparse.csc_matrix((a_data, (a_row, a_col))), b)
return z.reshape((contour_plot_num, contour_plot_num))
| true
| true
|
f7098cc1020c0449102ed02912eddc4ceb9787ee
| 58
|
py
|
Python
|
hello world.py
|
RJ722/Repo-with-spaces
|
7efd9dcb35a760d7fd02ef88f9bdde3b26a846bb
|
[
"MIT"
] | null | null | null |
hello world.py
|
RJ722/Repo-with-spaces
|
7efd9dcb35a760d7fd02ef88f9bdde3b26a846bb
|
[
"MIT"
] | null | null | null |
hello world.py
|
RJ722/Repo-with-spaces
|
7efd9dcb35a760d7fd02ef88f9bdde3b26a846bb
|
[
"MIT"
] | null | null | null |
import this
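# (editor's note: importing "this" prints the Zen of Python, PEP 20, as a side effect)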
def hello_world():
print("Hello World")
| 9.666667
| 24
| 0.672414
|
import this
def hello_world():
print("Hello World")
| true
| true
|
f7098d8278771772864212ad3ce454b1a9c954ea
| 710
|
py
|
Python
|
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/sysconfig/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | 1
|
2021-05-24T10:08:51.000Z
|
2021-05-24T10:08:51.000Z
|
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/sysconfig/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | null | null | null |
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/sysconfig/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | 1
|
2021-01-28T01:57:41.000Z
|
2021-01-28T01:57:41.000Z
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""System configuration library.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.framework.versions import CXX11_ABI_FLAG
from tensorflow.python.framework.versions import MONOLITHIC_BUILD
from tensorflow.python.platform.sysconfig import get_build_info
from tensorflow.python.platform.sysconfig import get_compile_flags
from tensorflow.python.platform.sysconfig import get_include
from tensorflow.python.platform.sysconfig import get_lib
from tensorflow.python.platform.sysconfig import get_link_flags
del _print_function
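# Editor's usage sketch (not part of the generated file): these re-exports
# back `tf.sysconfig`, typically consulted when compiling custom ops, e.g.:
#
#     import tensorflow as tf
#     print(tf.sysconfig.get_include())                  # C++ header dir
#     print(" ".join(tf.sysconfig.get_compile_flags()))  # compiler flags
#     print(" ".join(tf.sysconfig.get_link_flags()))     # linker flags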
| 37.368421
| 82
| 0.856338
|
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.framework.versions import CXX11_ABI_FLAG
from tensorflow.python.framework.versions import MONOLITHIC_BUILD
from tensorflow.python.platform.sysconfig import get_build_info
from tensorflow.python.platform.sysconfig import get_compile_flags
from tensorflow.python.platform.sysconfig import get_include
from tensorflow.python.platform.sysconfig import get_lib
from tensorflow.python.platform.sysconfig import get_link_flags
del _print_function
| true
| true
|
f7098dfc12b93cf391e09b7933418a63cee34e7a
| 5,447
|
py
|
Python
|
pysnmp/CISCO-MGX82XX-MODULE-RSRC-PART-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/CISCO-MGX82XX-MODULE-RSRC-PART-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/CISCO-MGX82XX-MODULE-RSRC-PART-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module CISCO-MGX82XX-MODULE-RSRC-PART-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-MGX82XX-MODULE-RSRC-PART-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:50:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint")
cardGeneric, = mibBuilder.importSymbols("BASIS-MIB", "cardGeneric")
ciscoWan, = mibBuilder.importSymbols("CISCOWAN-SMI", "ciscoWan")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Counter32, Unsigned32, TimeTicks, Counter64, ModuleIdentity, Gauge32, Integer32, NotificationType, IpAddress, ObjectIdentity, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Unsigned32", "TimeTicks", "Counter64", "ModuleIdentity", "Gauge32", "Integer32", "NotificationType", "IpAddress", "ObjectIdentity", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ciscoMgx82xxModuleRsrcPartMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 351, 150, 73))
ciscoMgx82xxModuleRsrcPartMIB.setRevisions(('2003-04-18 00:00',))
if mibBuilder.loadTexts: ciscoMgx82xxModuleRsrcPartMIB.setLastUpdated('200304180000Z')
if mibBuilder.loadTexts: ciscoMgx82xxModuleRsrcPartMIB.setOrganization('Cisco Systems, Inc.')
cardResourcePartition = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 110, 2, 9))
cardLcnPartitionType = MibScalar((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noPartition", 1), ("controllerBased", 2), ("portControllerBased", 3))).clone('noPartition')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cardLcnPartitionType.setStatus('current')
cardResPartGrpTable = MibTable((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2), )
if mibBuilder.loadTexts: cardResPartGrpTable.setStatus('current')
cardResPartGrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1), ).setIndexNames((0, "CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartCtrlrNum"))
if mibBuilder.loadTexts: cardResPartGrpEntry.setStatus('current')
cardResPartCtrlrNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("par", 1), ("pnni", 2), ("tag", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cardResPartCtrlrNum.setStatus('current')
cardResPartRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("add", 1), ("del", 2), ("mod", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cardResPartRowStatus.setStatus('current')
cardResPartNumOfLcnAvail = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cardResPartNumOfLcnAvail.setStatus('current')
cmmRsrcPartMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 73, 2))
cmmRsrcPartMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 1))
cmmRsrcPartMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 2))
cmmRsrcPartCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 1, 1)).setObjects(("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cmmRsrcPartGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cmmRsrcPartCompliance = cmmRsrcPartCompliance.setStatus('current')
cmmRsrcPartGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 2, 1)).setObjects(("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardLcnPartitionType"), ("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartCtrlrNum"), ("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartRowStatus"), ("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartNumOfLcnAvail"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cmmRsrcPartGroup = cmmRsrcPartGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", cardResPartGrpTable=cardResPartGrpTable, ciscoMgx82xxModuleRsrcPartMIB=ciscoMgx82xxModuleRsrcPartMIB, cmmRsrcPartMIBConformance=cmmRsrcPartMIBConformance, cmmRsrcPartMIBCompliances=cmmRsrcPartMIBCompliances, cmmRsrcPartGroup=cmmRsrcPartGroup, cardResPartNumOfLcnAvail=cardResPartNumOfLcnAvail, cardResourcePartition=cardResourcePartition, cmmRsrcPartMIBGroups=cmmRsrcPartMIBGroups, cmmRsrcPartCompliance=cmmRsrcPartCompliance, cardResPartRowStatus=cardResPartRowStatus, cardResPartCtrlrNum=cardResPartCtrlrNum, cardLcnPartitionType=cardLcnPartitionType, PYSNMP_MODULE_ID=ciscoMgx82xxModuleRsrcPartMIB, cardResPartGrpEntry=cardResPartGrpEntry)
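# Editor's usage sketch (not part of the generated file; `mibBuilder` above
# is injected by the pysnmp loader rather than imported). Assuming this
# module sits on the pysnmp MIB search path, it can be loaded with:
#
#     from pysnmp.smi import builder, view
#     mib_builder = builder.MibBuilder()
#     mib_builder.loadModules('CISCO-MGX82XX-MODULE-RSRC-PART-MIB')
#     mib_view = view.MibViewController(mib_builder)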
| 123.795455
| 705
| 0.77382
|
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint")
cardGeneric, = mibBuilder.importSymbols("BASIS-MIB", "cardGeneric")
ciscoWan, = mibBuilder.importSymbols("CISCOWAN-SMI", "ciscoWan")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
Counter32, Unsigned32, TimeTicks, Counter64, ModuleIdentity, Gauge32, Integer32, NotificationType, IpAddress, ObjectIdentity, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Unsigned32", "TimeTicks", "Counter64", "ModuleIdentity", "Gauge32", "Integer32", "NotificationType", "IpAddress", "ObjectIdentity", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ciscoMgx82xxModuleRsrcPartMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 351, 150, 73))
ciscoMgx82xxModuleRsrcPartMIB.setRevisions(('2003-04-18 00:00',))
if mibBuilder.loadTexts: ciscoMgx82xxModuleRsrcPartMIB.setLastUpdated('200304180000Z')
if mibBuilder.loadTexts: ciscoMgx82xxModuleRsrcPartMIB.setOrganization('Cisco Systems, Inc.')
cardResourcePartition = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 110, 2, 9))
cardLcnPartitionType = MibScalar((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noPartition", 1), ("controllerBased", 2), ("portControllerBased", 3))).clone('noPartition')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cardLcnPartitionType.setStatus('current')
cardResPartGrpTable = MibTable((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2), )
if mibBuilder.loadTexts: cardResPartGrpTable.setStatus('current')
cardResPartGrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1), ).setIndexNames((0, "CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartCtrlrNum"))
if mibBuilder.loadTexts: cardResPartGrpEntry.setStatus('current')
cardResPartCtrlrNum = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("par", 1), ("pnni", 2), ("tag", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cardResPartCtrlrNum.setStatus('current')
cardResPartRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("add", 1), ("del", 2), ("mod", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cardResPartRowStatus.setStatus('current')
cardResPartNumOfLcnAvail = MibTableColumn((1, 3, 6, 1, 4, 1, 351, 110, 2, 9, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cardResPartNumOfLcnAvail.setStatus('current')
cmmRsrcPartMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 73, 2))
cmmRsrcPartMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 1))
cmmRsrcPartMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 2))
cmmRsrcPartCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 1, 1)).setObjects(("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cmmRsrcPartGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cmmRsrcPartCompliance = cmmRsrcPartCompliance.setStatus('current')
cmmRsrcPartGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 351, 150, 73, 2, 2, 1)).setObjects(("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardLcnPartitionType"), ("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartCtrlrNum"), ("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartRowStatus"), ("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", "cardResPartNumOfLcnAvail"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cmmRsrcPartGroup = cmmRsrcPartGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-MGX82XX-MODULE-RSRC-PART-MIB", cardResPartGrpTable=cardResPartGrpTable, ciscoMgx82xxModuleRsrcPartMIB=ciscoMgx82xxModuleRsrcPartMIB, cmmRsrcPartMIBConformance=cmmRsrcPartMIBConformance, cmmRsrcPartMIBCompliances=cmmRsrcPartMIBCompliances, cmmRsrcPartGroup=cmmRsrcPartGroup, cardResPartNumOfLcnAvail=cardResPartNumOfLcnAvail, cardResourcePartition=cardResourcePartition, cmmRsrcPartMIBGroups=cmmRsrcPartMIBGroups, cmmRsrcPartCompliance=cmmRsrcPartCompliance, cardResPartRowStatus=cardResPartRowStatus, cardResPartCtrlrNum=cardResPartCtrlrNum, cardLcnPartitionType=cardLcnPartitionType, PYSNMP_MODULE_ID=ciscoMgx82xxModuleRsrcPartMIB, cardResPartGrpEntry=cardResPartGrpEntry)
| true
| true
|
f7098ebb8ff1dfc1223860cb85b7c2ab64d03675
| 5,733
|
py
|
Python
|
mlpipeline_analyzer/visualizer/PipelineDiagram.py
|
sravankr96/ml-pipeline-analyzer-ws
|
6e6e336f2172643fdeb8034ea324362841dcade1
|
[
"MIT"
] | null | null | null |
mlpipeline_analyzer/visualizer/PipelineDiagram.py
|
sravankr96/ml-pipeline-analyzer-ws
|
6e6e336f2172643fdeb8034ea324362841dcade1
|
[
"MIT"
] | null | null | null |
mlpipeline_analyzer/visualizer/PipelineDiagram.py
|
sravankr96/ml-pipeline-analyzer-ws
|
6e6e336f2172643fdeb8034ea324362841dcade1
|
[
"MIT"
] | null | null | null |
from diagrams import Cluster, Diagram
from graphviz import Digraph
from .PipelineNode import PipelineNode
import sklearn
from sklearn import *
import regex as re
import warnings
#warnings.filterwarnings("ignore")
class PipelineDiagram:
def __init__(self, pipeline, file_name='ml_pipeline.png'):
self.pipe = pipeline
self.title = 'Machine Learning Pipeline'
self.title_param = 'Machine Learning Parameters Pipeline'
self.view = True
self.file_name = file_name
self.cn = PipelineNode()
def show(self, title=None):
self.title = title if title else self.title
self.pipe_len = len(list(self.pipe))
return self.create_diagram()
def show_params(self, title=None):
self.title_param = title if title else self.title_param
return self.create_param_diagram()
@staticmethod
def parent_classes(level=0, base='sklearn'):
if level != 0:
base = 'sklearn.' + base
return list(filter(lambda x: not re.search(r'^_.*', x), dir(eval(base))))
def all_classes(self):
l = self.parent_classes()
for i in self.parent_classes():
try:
eval(i)
except Exception:
l.remove(i)
class_list = {x: [eval('sklearn.' + x + '.' + y) for y in self.parent_classes(1, x)] for x in l}
return class_list
def get_link(self, path):
reg = re.findall(r"'(.*)'", str(path))[0]
link = 'https://scikit-learn.org/stable/modules/generated/{0}.html'.format(re.sub("".join(re.findall(r'\.(_.*\.)',reg)),'',reg))
return link
def find_category(self, obj):
temp = self.all_classes()
try:
comp = str(type(obj)).split('.')[1]
if type(obj) in temp[comp] and comp!='pipeline':
return (comp, obj, self.get_link(type(obj)))
if comp=='pipeline':
return list(map(self.find_category, [x[1] for x in obj.transformer_list]))
except Exception:
return ('Custom Function', obj, 'Function')
def find_category_params(self, obj):
try:
comp = str(type(obj)).split('.')[1]
if comp!='pipeline':
return (obj, self.get_param(obj))
if comp=='pipeline':
return list(map(self.find_category_params, [x[1] for x in obj.transformer_list]))
except Exception:
return (obj, 'Custom Function')
def get_param(self, obj):
try:
s = list(obj.get_params().items())
# note: the replacement uses a doubled backslash so graphviz receives a
# literal "\l"; a bare "\l" is a bad escape for re.sub on Python 3.7+
reg = re.sub(r"(,\s)'", r"\\l'", str(dict(filter(lambda x: '__' not in x[0], s))))
return re.sub(r'(\(.*\))', '', str(obj)) + '\n\n' + re.sub('{|}', '', reg)
except Exception:
return str(obj)
def all_params(self):
return list(map(self.find_category_params, self.pipe))
def all_categories(self):
return list(map(self.find_category, self.pipe))
def create_diagram(self):
with Diagram(self.title, show=False, filename=self.file_name) as pipe_diag:
inputs = [("data","Train Data"), ("data", "Validation Data"), ("data","Test Data")]
start = self.create_cluster("Input Data", inputs) >> self.cn.create_node(("Data Stream","Data Stream"))
self.traverse_pipeline(start)
return pipe_diag
def create_param_diagram(self):
self.g = Digraph('G', filename='ml_pipeline_params.gv')
self.g.graph_attr["rankdir"] = "LR"
self.create_cluster_params('Inputs', ['Train Data', 'Validation Data', 'Test Data'])
#self.g.edge('input','streamin')
#self.g.edge('streamout','Model')
self.traverse_pipeline_params()
self.g.view()
return self
def traverse_pipeline(self, curr):
self.descriptions = list(self.all_categories())
for i in self.descriptions:
if type(i) == list:
curr = curr >> self.create_cluster("Transformers", i)
else:
curr = curr >> self.cn.create_node(i)
return curr
def traverse_pipeline_params(self):
self.params = self.all_params()
for i in self.params:
if type(i) == list:
self.create_cluster_params('Transformers', [x[1] for x in i])
else:
self.g.node(str(i[0]), label=i[1], shape='box')
self.g.edge(self.input, str(i[0]))
self.input = str(i[0])
return self
def create_cluster(self, cluster_name, node_names):
with Cluster(cluster_name):
return list(map(self.cn.create_node, node_names))
def create_cluster_params(self, cluster_name, node_names):
with self.g.subgraph(name='cluster_'+cluster_name) as c:
inlabel = 'streamin_' + cluster_name
outlabel = 'streamout_' + cluster_name
c.attr(style='filled', color='green', URL='https://stackoverflow.com')
c.node_attr.update(style='filled', color='white')
c.node(outlabel, label='Stream', shape='box')
if cluster_name != 'Inputs':
c.node(inlabel, label='Stream', shape='box')
self.g.edge(self.input, inlabel)
c.node(outlabel, label='Union', shape='box')
for i in range(len(node_names)):
c.node(cluster_name+str(i), label=node_names[i], shape='box')
if cluster_name!='Inputs':
c.edge(inlabel, str(cluster_name+str(i)))
c.edge(cluster_name+str(i), outlabel)
self.input = outlabel
c.attr(label=cluster_name, URL='https://stackoverflow.com')
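# Editor's usage sketch (assumptions: `diagrams` and `graphviz` are
# installed and this class is imported from the package; the sklearn
# pipeline below is illustrative):
#
#     from sklearn.pipeline import make_pipeline
#     from sklearn.preprocessing import StandardScaler
#     from sklearn.linear_model import LogisticRegression
#
#     pipe = make_pipeline(StandardScaler(), LogisticRegression())
#     PipelineDiagram(pipe).show(title='My Pipeline')  # writes ml_pipeline.png
#     PipelineDiagram(pipe).show_params()              # writes ml_pipeline_params.gv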
| 39.267123
| 136
| 0.570382
|
from diagrams import Cluster, Diagram
from graphviz import Digraph
from .PipelineNode import PipelineNode
import sklearn
from sklearn import *
import regex as re
import warnings
class PipelineDiagram:
def __init__(self, pipeline, file_name='ml_pipeline.png'):
self.pipe = pipeline
self.title = 'Machine Learning Pipeline'
self.title_param = 'Machine Learning Parameters Pipeline'
self.view = True
self.file_name = file_name
self.cn = PipelineNode()
def show(self, title=None):
self.title = title if title else self.title
self.pipe_len = len(list(self.pipe))
return self.create_diagram()
def show_params(self, title=None):
self.title_param = title if title else self.title_param
return self.create_param_diagram()
@staticmethod
def parent_classes(level=0, base='sklearn'):
if level != 0:
base = 'sklearn.' + base
return list(filter(lambda x: not re.search(r'^_.*', x), dir(eval(base))))
def all_classes(self):
l = self.parent_classes()
for i in self.parent_classes():
try:
eval(i)
except Exception:
l.remove(i)
class_list = {x: [eval('sklearn.' + x + '.' + y) for y in self.parent_classes(1, x)] for x in l}
return class_list
def get_link(self, path):
reg = re.findall(r"'(.*)'", str(path))[0]
link = 'https://scikit-learn.org/stable/modules/generated/{0}.html'.format(re.sub("".join(re.findall(r'\.(_.*\.)',reg)),'',reg))
return link
def find_category(self, obj):
temp = self.all_classes()
try:
comp = str(type(obj)).split('.')[1]
if type(obj) in temp[comp] and comp!='pipeline':
return (comp, obj, self.get_link(type(obj)))
if comp=='pipeline':
return list(map(self.find_category, [x[1] for x in obj.transformer_list]))
except Exception:
return ('Custom Function', obj, 'Function')
def find_category_params(self, obj):
try:
comp = str(type(obj)).split('.')[1]
if comp!='pipeline':
return (obj, self.get_param(obj))
if comp=='pipeline':
return list(map(self.find_category_params, [x[1] for x in obj.transformer_list]))
except Exception:
return (obj, 'Custom Function')
def get_param(self, obj):
try:
s = list(obj.get_params().items())
reg = re.sub(r"(,\s)'", r"\\l'", str(dict(filter(lambda x: '__' not in x[0], s))))
return re.sub(r'(\(.*\))', '', str(obj)) + '\n\n' + re.sub('{|}', '', reg)
except Exception:
return str(obj)
def all_params(self):
return list(map(self.find_category_params, self.pipe))
def all_categories(self):
return list(map(self.find_category, self.pipe))
def create_diagram(self):
with Diagram(self.title, show=False, filename=self.file_name) as pipe_diag:
inputs = [("data","Train Data"), ("data", "Validation Data"), ("data","Test Data")]
start = self.create_cluster("Input Data", inputs) >> self.cn.create_node(("Data Stream","Data Stream"))
self.traverse_pipeline(start)
return pipe_diag
def create_param_diagram(self):
self.g = Digraph('G', filename='ml_pipeline_params.gv')
self.g.graph_attr["rankdir"] = "LR"
self.create_cluster_params('Inputs', ['Train Data', 'Validation Data', 'Test Data'])
self.traverse_pipeline_params()
self.g.view()
return self
def traverse_pipeline(self, curr):
self.descriptions = list(self.all_categories())
for i in self.descriptions:
if type(i) == list:
curr = curr >> self.create_cluster("Transformers", i)
else:
curr = curr >> self.cn.create_node(i)
return curr
def traverse_pipeline_params(self):
self.params = self.all_params()
for i in self.params:
if type(i) == list:
self.create_cluster_params('Transformers', [x[1] for x in i])
else:
self.g.node(str(i[0]), label=i[1], shape='box')
self.g.edge(self.input, str(i[0]))
self.input = str(i[0])
return self
def create_cluster(self, cluster_name, node_names):
with Cluster(cluster_name):
return list(map(self.cn.create_node, node_names))
def create_cluster_params(self, cluster_name, node_names):
with self.g.subgraph(name='cluster_'+cluster_name) as c:
inlabel = 'streamin_' + cluster_name
outlabel = 'streamout_' + cluster_name
c.attr(style='filled', color='green', URL='https://stackoverflow.com')
c.node_attr.update(style='filled', color='white')
c.node(outlabel, label='Stream', shape='box')
if cluster_name != 'Inputs':
c.node(inlabel, label='Stream', shape='box')
self.g.edge(self.input, inlabel)
c.node(outlabel, label='Union', shape='box')
for i in range(len(node_names)):
c.node(cluster_name+str(i), label=node_names[i], shape='box')
if cluster_name!='Inputs':
c.edge(inlabel, str(cluster_name+str(i)))
c.edge(cluster_name+str(i), outlabel)
self.input = outlabel
c.attr(label=cluster_name, URL='https://stackoverflow.com')
| true
| true
|
f7098fb90676aab0b7bddddef5e52fc6f77ed958
| 1,642
|
py
|
Python
|
sdks/python/apache_beam/typehints/row_type.py
|
NarimanAB/beam
|
6cedbac5bb42304f4af88634edd276b0b78e4e4e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 5,279
|
2016-12-29T04:00:44.000Z
|
2022-03-31T22:56:45.000Z
|
sdks/python/apache_beam/typehints/row_type.py
|
NarimanAB/beam
|
6cedbac5bb42304f4af88634edd276b0b78e4e4e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 14,149
|
2016-12-28T00:43:50.000Z
|
2022-03-31T23:50:22.000Z
|
sdks/python/apache_beam/typehints/row_type.py
|
NarimanAB/beam
|
6cedbac5bb42304f4af88634edd276b0b78e4e4e
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 3,763
|
2016-12-29T04:06:10.000Z
|
2022-03-31T22:25:49.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from apache_beam.typehints import typehints
class RowTypeConstraint(typehints.TypeConstraint):
def __init__(self, fields):
self._fields = tuple(fields)
def _consistent_with_check_(self, sub):
return self == sub
def type_check(self, instance):
from apache_beam import Row
return isinstance(instance, Row)
def _inner_types(self):
"""Iterates over the inner types of the composite type."""
return [field[1] for field in self._fields]
def __eq__(self, other):
return type(self) == type(other) and self._fields == other._fields
def __hash__(self):
return hash(self._fields)
def __repr__(self):
return 'Row(%s)' % ', '.join(
'%s=%s' % (name, typehints._unified_repr(t)) for name, t in self._fields)
def get_type_for(self, name):
return dict(self._fields)[name]
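# Editor's usage sketch (not part of the original file; the field names
# below are illustrative):
#
#     import apache_beam as beam
#     hint = RowTypeConstraint([('id', int), ('name', str)])
#     hint.type_check(beam.Row(id=1, name='a'))  # True (isinstance check only)
#     hint.get_type_for('name')                  # -> str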
| 32.196078
| 74
| 0.727771
|
from apache_beam.typehints import typehints
class RowTypeConstraint(typehints.TypeConstraint):
def __init__(self, fields):
self._fields = tuple(fields)
def _consistent_with_check_(self, sub):
return self == sub
def type_check(self, instance):
from apache_beam import Row
return isinstance(instance, Row)
def _inner_types(self):
return [field[1] for field in self._fields]
def __eq__(self, other):
return type(self) == type(other) and self._fields == other._fields
def __hash__(self):
return hash(self._fields)
def __repr__(self):
return 'Row(%s)' % ', '.join(
'%s=%s' % (name, typehints._unified_repr(t)) for name, t in self._fields)
def get_type_for(self, name):
return dict(self._fields)[name]
| true
| true
|
f70990d5f14342ae48272d9f8af48d74029ae394
| 18,002
|
py
|
Python
|
external-deps/python-lsp-server/test/plugins/test_completion.py
|
Earthman100/spyder
|
949ce0f9100a69504c70a5678e8589a05aee7d38
|
[
"MIT"
] | 493
|
2021-04-11T19:38:09.000Z
|
2022-03-31T16:24:55.000Z
|
external-deps/python-lsp-server/test/plugins/test_completion.py
|
Earthman100/spyder
|
949ce0f9100a69504c70a5678e8589a05aee7d38
|
[
"MIT"
] | 134
|
2021-04-10T00:09:00.000Z
|
2022-03-31T06:41:05.000Z
|
external-deps/python-lsp-server/test/plugins/test_completion.py
|
Earthman100/spyder
|
949ce0f9100a69504c70a5678e8589a05aee7d38
|
[
"MIT"
] | 69
|
2021-04-14T21:09:17.000Z
|
2022-03-30T05:55:38.000Z
|
# Copyright 2017-2020 Palantir Technologies, Inc.
# Copyright 2021- Python Language Server Contributors.
import math
import os
import sys
from pathlib import Path
from typing import NamedTuple, Dict
import pytest
from pylsp import uris, lsp
from pylsp.workspace import Document
from pylsp.plugins.jedi_completion import pylsp_completions as pylsp_jedi_completions
from pylsp.plugins.jedi_completion import pylsp_completion_item_resolve as pylsp_jedi_completion_item_resolve
from pylsp.plugins.rope_completion import pylsp_completions as pylsp_rope_completions
from pylsp._utils import JEDI_VERSION
PY2 = sys.version[0] == '2'
LINUX = sys.platform.startswith('linux')
CI = os.environ.get('CI')
LOCATION = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__))
)
DOC_URI = uris.from_fs_path(__file__)
DOC = """import os
print os.path.isabs("/tmp")
def hello():
pass
def _a_hello():
pass
class Hello():
@property
def world(self):
return None
def everyone(self, a, b, c=None, d=2):
pass
print Hello().world
print Hello().every
def documented_hello():
\"\"\"Sends a polite greeting\"\"\"
pass
"""
def test_rope_import_completion(config, workspace):
com_position = {'line': 0, 'character': 7}
doc = Document(DOC_URI, workspace, DOC)
items = pylsp_rope_completions(config, workspace, doc, com_position)
assert items is None
class TypeCase(NamedTuple):
document: str
position: dict
label: str
expected: lsp.CompletionItemKind
TYPE_CASES: Dict[str, TypeCase] = {
'variable': TypeCase(
document='test = 1\ntes',
position={'line': 1, 'character': 3},
label='test',
expected=lsp.CompletionItemKind.Variable
),
'function': TypeCase(
document='def test():\n pass\ntes',
position={'line': 2, 'character': 3},
label='test()',
expected=lsp.CompletionItemKind.Function
),
'keyword': TypeCase(
document='fro',
position={'line': 0, 'character': 3},
label='from',
expected=lsp.CompletionItemKind.Keyword
),
'file': TypeCase(
document='"' + __file__[:-2].replace('"', '\\"') + '"',
position={'line': 0, 'character': len(__file__) - 2},
label=Path(__file__).name + '"',
expected=lsp.CompletionItemKind.File
),
'module': TypeCase(
document='import statis',
position={'line': 0, 'character': 13},
label='statistics',
expected=lsp.CompletionItemKind.Module
),
'class': TypeCase(
document='KeyErr',
position={'line': 0, 'character': 6},
label='KeyError',
expected=lsp.CompletionItemKind.Class
),
'property': TypeCase(
document=(
'class A:\n'
' @property\n'
' def test(self):\n'
' pass\n'
'A().tes'
),
position={'line': 4, 'character': 5},
label='test',
expected=lsp.CompletionItemKind.Property
)
}
@pytest.mark.parametrize('case', list(TYPE_CASES.values()), ids=list(TYPE_CASES.keys()))
def test_jedi_completion_type(case, config, workspace):
# property support was introduced in 0.18
if case.expected == lsp.CompletionItemKind.Property and JEDI_VERSION.startswith('0.17'):
return
doc = Document(DOC_URI, workspace, case.document)
items = pylsp_jedi_completions(config, doc, case.position)
items = {i['label']: i for i in items}
assert items[case.label]['kind'] == case.expected
def test_jedi_completion(config, workspace):
# Over 'i' in os.path.isabs(...)
com_position = {'line': 1, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
labels = [i['label'] for i in items]
assert 'isfile(path)' in labels
# Test we don't throw with big character
pylsp_jedi_completions(config, doc, {'line': 1, 'character': 1000})
def test_jedi_completion_item_resolve(config, workspace):
# Over the blank line
com_position = {'line': 8, 'character': 0}
doc = Document(DOC_URI, workspace, DOC)
config.update({'plugins': {'jedi_completion': {'resolve_at_most': math.inf}}})
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c['label']: c for c in completions}
documented_hello_item = items['documented_hello()']
assert 'documentation' not in documented_hello_item
assert 'detail' not in documented_hello_item
resolved_documented_hello = pylsp_jedi_completion_item_resolve(
completion_item=documented_hello_item,
document=doc
)
assert 'Sends a polite greeting' in resolved_documented_hello['documentation']
def test_jedi_completion_with_fuzzy_enabled(config, workspace):
# Over 'i' in os.path.isabs(...)
config.update({'plugins': {'jedi_completion': {'fuzzy': True}}})
com_position = {'line': 1, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
expected = 'commonprefix(m)'
if JEDI_VERSION == '0.18.0':
expected = 'commonprefix(list)'
assert items[0]['label'] == expected
# Test we don't throw with big character
pylsp_jedi_completions(config, doc, {'line': 1, 'character': 1000})
def test_jedi_completion_resolve_at_most(config, workspace):
# Over 'i' in os.path.isabs(...)
com_position = {'line': 1, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
# Do not resolve any labels
config.update({'plugins': {'jedi_completion': {'resolve_at_most': 0}}})
items = pylsp_jedi_completions(config, doc, com_position)
labels = {i['label'] for i in items}
assert 'isabs' in labels
# Resolve all items
config.update({'plugins': {'jedi_completion': {'resolve_at_most': math.inf}}})
items = pylsp_jedi_completions(config, doc, com_position)
labels = {i['label'] for i in items}
assert 'isfile(path)' in labels
def test_rope_completion(config, workspace):
# Over 'i' in os.path.isabs(...)
com_position = {'line': 1, 'character': 15}
workspace.put_document(DOC_URI, source=DOC)
doc = workspace.get_document(DOC_URI)
items = pylsp_rope_completions(config, workspace, doc, com_position)
assert items
assert items[0]['label'] == 'isabs'
def test_jedi_completion_ordering(config, workspace):
# Over the blank line
com_position = {'line': 8, 'character': 0}
doc = Document(DOC_URI, workspace, DOC)
config.update({'plugins': {'jedi_completion': {'resolve_at_most': math.inf}}})
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c['label']: c['sortText'] for c in completions}
# Check that 'hidden' (underscore-prefixed) functions sort after unhidden ones
assert items['hello()'] < items['_a_hello()']
def test_jedi_property_completion(config, workspace):
# Over the 'w' in 'print Hello().world'
com_position = {'line': 18, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c['label']: c['sortText'] for c in completions}
# Ensure we can complete the 'world' property
assert 'world' in list(items.keys())[0]
def test_jedi_method_completion(config, workspace):
# Over the 'y' in 'print Hello().every'
com_position = {'line': 20, 'character': 19}
doc = Document(DOC_URI, workspace, DOC)
config.capabilities['textDocument'] = {'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
completions = pylsp_jedi_completions(config, doc, com_position)
everyone_method = [completion for completion in completions if completion['label'] == 'everyone(a, b, c, d)'][0]
# Ensure we only generate snippets for positional args
assert everyone_method['insertTextFormat'] == lsp.InsertTextFormat.Snippet
assert everyone_method['insertText'] == 'everyone(${1:a}, ${2:b})$0'
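# Editor's note (not in the original test): in LSP snippet syntax, ${1:a}
# and ${2:b} are numbered tab stops with placeholder text and $0 marks the
# final cursor position, so only the required positional args (a, b) become
# placeholders while c and d, which have defaults, are dropped.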
# Disable param snippets
config.update({'plugins': {'jedi_completion': {'include_params': False}}})
completions = pylsp_jedi_completions(config, doc, com_position)
everyone_method = [completion for completion in completions if completion['label'] == 'everyone(a, b, c, d)'][0]
assert 'insertTextFormat' not in everyone_method
assert everyone_method['insertText'] == 'everyone'
@pytest.mark.skipif(PY2 or (sys.platform.startswith('linux') and os.environ.get('CI') is not None),
reason="Requires Python 3; skipped on Linux CI because the PyQt5 wheels don't work there.")
def test_pyqt_completion(config, workspace):
# Over 'QA' in 'from PyQt5.QtWidgets import QApplication'
doc_pyqt = "from PyQt5.QtWidgets import QA"
com_position = {'line': 0, 'character': len(doc_pyqt)}
doc = Document(DOC_URI, workspace, doc_pyqt)
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions is not None
def test_numpy_completions(config, workspace):
doc_numpy = "import numpy as np; np."
com_position = {'line': 0, 'character': len(doc_numpy)}
doc = Document(DOC_URI, workspace, doc_numpy)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any('array' in i['label'] for i in items)
def test_pandas_completions(config, workspace):
doc_pandas = "import pandas as pd; pd."
com_position = {'line': 0, 'character': len(doc_pandas)}
doc = Document(DOC_URI, workspace, doc_pandas)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any('DataFrame' in i['label'] for i in items)
def test_matplotlib_completions(config, workspace):
doc_mpl = "import matplotlib.pyplot as plt; plt."
com_position = {'line': 0, 'character': len(doc_mpl)}
doc = Document(DOC_URI, workspace, doc_mpl)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any('plot' in i['label'] for i in items)
def test_snippets_completion(config, workspace):
doc_snippets = 'from collections import defaultdict \na=defaultdict'
com_position = {'line': 0, 'character': 35}
doc = Document(DOC_URI, workspace, doc_snippets)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]['insertText'] == 'defaultdict'
com_position = {'line': 1, 'character': len(doc_snippets)}
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]['insertText'] == 'defaultdict($0)'
assert completions[0]['insertTextFormat'] == lsp.InsertTextFormat.Snippet
def test_snippets_completion_at_most(config, workspace):
doc_snippets = 'from collections import defaultdict \na=defaultdict'
doc = Document(DOC_URI, workspace, doc_snippets)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
config.update({'plugins': {'jedi_completion': {'resolve_at_most': 0}}})
com_position = {'line': 1, 'character': len(doc_snippets)}
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]['insertText'] == 'defaultdict'
assert not completions[0].get('insertTextFormat', None)
def test_completion_with_class_objects(config, workspace):
doc_text = 'class FOOBAR(Object): pass\nFOOB'
com_position = {'line': 1, 'character': 4}
doc = Document(DOC_URI, workspace, doc_text)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {
'include_params': True,
'include_class_objects': True,
}}})
completions = pylsp_jedi_completions(config, doc, com_position)
assert len(completions) == 2
assert completions[0]['label'] == 'FOOBAR'
assert completions[0]['kind'] == lsp.CompletionItemKind.Class
assert completions[1]['label'] == 'FOOBAR object'
assert completions[1]['kind'] == lsp.CompletionItemKind.TypeParameter
def test_snippet_parsing(config, workspace):
doc = 'divmod'
completion_position = {'line': 0, 'character': 6}
doc = Document(DOC_URI, workspace, doc)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
completions = pylsp_jedi_completions(config, doc, completion_position)
out = 'divmod(${1:x}, ${2:y})$0'
if JEDI_VERSION == '0.18.0':
out = 'divmod(${1:a}, ${2:b})$0'
assert completions[0]['insertText'] == out
def test_multiline_import_snippets(config, workspace):
document = 'from datetime import(\n date,\n datetime)\na=date'
doc = Document(DOC_URI, workspace, document)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'date'
position = {'line': 2, 'character': 9}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'datetime'
def test_multiline_snippets(config, workspace):
document = 'from datetime import\\\n date,\\\n datetime \na=date'
doc = Document(DOC_URI, workspace, document)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'date'
position = {'line': 2, 'character': 9}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'datetime'
def test_multistatement_snippet(config, workspace):
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
document = 'a = 1; from datetime import date'
doc = Document(DOC_URI, workspace, document)
position = {'line': 0, 'character': len(document)}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'date'
document = 'from math import fmod; a = fmod'
doc = Document(DOC_URI, workspace, document)
position = {'line': 0, 'character': len(document)}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'fmod(${1:x}, ${2:y})$0'
def test_jedi_completion_extra_paths(tmpdir, workspace):
# Create a temp file with some content and pass its directory via extra_paths
temp_doc_content = '''
def spam():
pass
'''
p = tmpdir.mkdir("extra_path")
extra_paths = [str(p)]
p = p.join("foo.py")
p.write(temp_doc_content)
# Content of doc to test completion
doc_content = """import foo
foo.s"""
doc = Document(DOC_URI, workspace, doc_content)
# After 'foo.s' without extra paths
com_position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions is None
# Update config extra paths
settings = {'pylsp': {'plugins': {'jedi': {'extra_paths': extra_paths}}}}
doc.update_config(settings)
# After 'foo.s' with extra paths
com_position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]['label'] == 'spam()'
@pytest.mark.skipif(PY2 or not LINUX or not CI, reason="tested on linux and python 3 only")
def test_jedi_completion_environment(workspace):
# Content of doc to test completion
doc_content = '''import logh
'''
doc = Document(DOC_URI, workspace, doc_content)
# After 'import logh' with default environment
com_position = {'line': 0, 'character': 11}
assert os.path.isdir('/tmp/pyenv/')
settings = {'pylsp': {'plugins': {'jedi': {'environment': None}}}}
doc.update_config(settings)
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions is None
# Update config extra environment
env_path = '/tmp/pyenv/bin/python'
settings = {'pylsp': {'plugins': {'jedi': {'environment': env_path}}}}
doc.update_config(settings)
# After 'import logh' with new environment
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]['label'] == 'loghub'
resolved = pylsp_jedi_completion_item_resolve(completions[0], doc)
assert 'changelog generator' in resolved['documentation'].lower()
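# Editor's note (not in the original test): the `jedi.environment` setting
# points Jedi at a different Python interpreter, so completions reflect the
# packages installed in that environment (here, `loghub` inside /tmp/pyenv).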
def test_document_path_completions(tmpdir, workspace_other_root_path):
# Create a dummy module outside the workspace's root_path and try to get
# completions for it in another file placed next to it.
module_content = '''
def foo():
pass
'''
p = tmpdir.join("mymodule.py")
p.write(module_content)
# Content of doc to test completion
doc_content = """import mymodule
mymodule.f"""
doc_path = str(tmpdir) + os.path.sep + 'myfile.py'
doc_uri = uris.from_fs_path(doc_path)
doc = Document(doc_uri, workspace_other_root_path, doc_content)
com_position = {'line': 1, 'character': 10}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]['label'] == 'foo()'
| 35.577075
| 116
| 0.679313
|
import math
import os
import sys
from pathlib import Path
from typing import NamedTuple, Dict
import pytest
from pylsp import uris, lsp
from pylsp.workspace import Document
from pylsp.plugins.jedi_completion import pylsp_completions as pylsp_jedi_completions
from pylsp.plugins.jedi_completion import pylsp_completion_item_resolve as pylsp_jedi_completion_item_resolve
from pylsp.plugins.rope_completion import pylsp_completions as pylsp_rope_completions
from pylsp._utils import JEDI_VERSION
PY2 = sys.version[0] == '2'
LINUX = sys.platform.startswith('linux')
CI = os.environ.get('CI')
LOCATION = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__))
)
DOC_URI = uris.from_fs_path(__file__)
DOC = """import os
print os.path.isabs("/tmp")
def hello():
pass
def _a_hello():
pass
class Hello():
@property
def world(self):
return None
def everyone(self, a, b, c=None, d=2):
pass
print Hello().world
print Hello().every
def documented_hello():
\"\"\"Sends a polite greeting\"\"\"
pass
"""
def test_rope_import_completion(config, workspace):
com_position = {'line': 0, 'character': 7}
doc = Document(DOC_URI, workspace, DOC)
items = pylsp_rope_completions(config, workspace, doc, com_position)
assert items is None
class TypeCase(NamedTuple):
document: str
position: dict
label: str
expected: lsp.CompletionItemKind
TYPE_CASES: Dict[str, TypeCase] = {
'variable': TypeCase(
document='test = 1\ntes',
position={'line': 1, 'character': 3},
label='test',
expected=lsp.CompletionItemKind.Variable
),
'function': TypeCase(
document='def test():\n pass\ntes',
position={'line': 2, 'character': 3},
label='test()',
expected=lsp.CompletionItemKind.Function
),
'keyword': TypeCase(
document='fro',
position={'line': 0, 'character': 3},
label='from',
expected=lsp.CompletionItemKind.Keyword
),
'file': TypeCase(
document='"' + __file__[:-2].replace('"', '\\"') + '"',
position={'line': 0, 'character': len(__file__) - 2},
label=Path(__file__).name + '"',
expected=lsp.CompletionItemKind.File
),
'module': TypeCase(
document='import statis',
position={'line': 0, 'character': 13},
label='statistics',
expected=lsp.CompletionItemKind.Module
),
'class': TypeCase(
document='KeyErr',
position={'line': 0, 'character': 6},
label='KeyError',
expected=lsp.CompletionItemKind.Class
),
'property': TypeCase(
document=(
'class A:\n'
' @property\n'
' def test(self):\n'
' pass\n'
'A().tes'
),
position={'line': 4, 'character': 5},
label='test',
expected=lsp.CompletionItemKind.Property
)
}
@pytest.mark.parametrize('case', list(TYPE_CASES.values()), ids=list(TYPE_CASES.keys()))
def test_jedi_completion_type(case, config, workspace):
# property support was introduced in 0.18
if case.expected == lsp.CompletionItemKind.Property and JEDI_VERSION.startswith('0.17'):
return
doc = Document(DOC_URI, workspace, case.document)
items = pylsp_jedi_completions(config, doc, case.position)
items = {i['label']: i for i in items}
assert items[case.label]['kind'] == case.expected
def test_jedi_completion(config, workspace):
# Over 'i' in os.path.isabs(...)
com_position = {'line': 1, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
labels = [i['label'] for i in items]
assert 'isfile(path)' in labels
# Test we don't throw with big character
pylsp_jedi_completions(config, doc, {'line': 1, 'character': 1000})
def test_jedi_completion_item_resolve(config, workspace):
# Over the blank line
com_position = {'line': 8, 'character': 0}
doc = Document(DOC_URI, workspace, DOC)
config.update({'plugins': {'jedi_completion': {'resolve_at_most': math.inf}}})
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c['label']: c for c in completions}
documented_hello_item = items['documented_hello()']
assert 'documentation' not in documented_hello_item
assert 'detail' not in documented_hello_item
resolved_documented_hello = pylsp_jedi_completion_item_resolve(
completion_item=documented_hello_item,
document=doc
)
assert 'Sends a polite greeting' in resolved_documented_hello['documentation']
def test_jedi_completion_with_fuzzy_enabled(config, workspace):
# Over 'i' in os.path.isabs(...)
config.update({'plugins': {'jedi_completion': {'fuzzy': True}}})
com_position = {'line': 1, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
expected = 'commonprefix(m)'
if JEDI_VERSION == '0.18.0':
expected = 'commonprefix(list)'
assert items[0]['label'] == expected
# Test we don't throw with big character
pylsp_jedi_completions(config, doc, {'line': 1, 'character': 1000})
def test_jedi_completion_resolve_at_most(config, workspace):
# Over 'i' in os.path.isabs(...)
com_position = {'line': 1, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
# Do not resolve any labels
config.update({'plugins': {'jedi_completion': {'resolve_at_most': 0}}})
items = pylsp_jedi_completions(config, doc, com_position)
labels = {i['label'] for i in items}
assert 'isabs' in labels
# Resolve all items
config.update({'plugins': {'jedi_completion': {'resolve_at_most': math.inf}}})
items = pylsp_jedi_completions(config, doc, com_position)
labels = {i['label'] for i in items}
assert 'isfile(path)' in labels
def test_rope_completion(config, workspace):
# Over 'i' in os.path.isabs(...)
com_position = {'line': 1, 'character': 15}
workspace.put_document(DOC_URI, source=DOC)
doc = workspace.get_document(DOC_URI)
items = pylsp_rope_completions(config, workspace, doc, com_position)
assert items
assert items[0]['label'] == 'isabs'
def test_jedi_completion_ordering(config, workspace):
# Over the blank line
com_position = {'line': 8, 'character': 0}
doc = Document(DOC_URI, workspace, DOC)
config.update({'plugins': {'jedi_completion': {'resolve_at_most': math.inf}}})
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c['label']: c['sortText'] for c in completions}
# And that 'hidden' functions come after unhidden ones
assert items['hello()'] < items['_a_hello()']
def test_jedi_property_completion(config, workspace):
# Over the 'w' in 'print Hello().world'
com_position = {'line': 18, 'character': 15}
doc = Document(DOC_URI, workspace, DOC)
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c['label']: c['sortText'] for c in completions}
# Ensure we can complete the 'world' property
assert 'world' in list(items.keys())[0]
def test_jedi_method_completion(config, workspace):
# Over the 'y' in 'print Hello().every'
com_position = {'line': 20, 'character': 19}
doc = Document(DOC_URI, workspace, DOC)
config.capabilities['textDocument'] = {'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
completions = pylsp_jedi_completions(config, doc, com_position)
everyone_method = [completion for completion in completions if completion['label'] == 'everyone(a, b, c, d)'][0]
# Ensure we only generate snippets for positional args
assert everyone_method['insertTextFormat'] == lsp.InsertTextFormat.Snippet
assert everyone_method['insertText'] == 'everyone(${1:a}, ${2:b})$0'
# Disable param snippets
config.update({'plugins': {'jedi_completion': {'include_params': False}}})
completions = pylsp_jedi_completions(config, doc, com_position)
everyone_method = [completion for completion in completions if completion['label'] == 'everyone(a, b, c, d)'][0]
assert 'insertTextFormat' not in everyone_method
assert everyone_method['insertText'] == 'everyone'
@pytest.mark.skipif(PY2 or (sys.platform.startswith('linux') and os.environ.get('CI') is not None),
reason="Test in Python 3 and not on CIs on Linux because wheels don't work on them.")
def test_pyqt_completion(config, workspace):
# Over 'QA' in 'from PyQt5.QtWidgets import QApplication'
doc_pyqt = "from PyQt5.QtWidgets import QA"
com_position = {'line': 0, 'character': len(doc_pyqt)}
doc = Document(DOC_URI, workspace, doc_pyqt)
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions is not None
def test_numpy_completions(config, workspace):
doc_numpy = "import numpy as np; np."
com_position = {'line': 0, 'character': len(doc_numpy)}
doc = Document(DOC_URI, workspace, doc_numpy)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any('array' in i['label'] for i in items)
def test_pandas_completions(config, workspace):
doc_pandas = "import pandas as pd; pd."
com_position = {'line': 0, 'character': len(doc_pandas)}
doc = Document(DOC_URI, workspace, doc_pandas)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any('DataFrame' in i['label'] for i in items)
def test_matplotlib_completions(config, workspace):
doc_mpl = "import matplotlib.pyplot as plt; plt."
com_position = {'line': 0, 'character': len(doc_mpl)}
doc = Document(DOC_URI, workspace, doc_mpl)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any('plot' in i['label'] for i in items)
def test_snippets_completion(config, workspace):
doc_snippets = 'from collections import defaultdict \na=defaultdict'
com_position = {'line': 0, 'character': 35}
doc = Document(DOC_URI, workspace, doc_snippets)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]['insertText'] == 'defaultdict'
com_position = {'line': 1, 'character': len(doc_snippets)}
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]['insertText'] == 'defaultdict($0)'
assert completions[0]['insertTextFormat'] == lsp.InsertTextFormat.Snippet
def test_snippets_completion_at_most(config, workspace):
doc_snippets = 'from collections import defaultdict \na=defaultdict'
doc = Document(DOC_URI, workspace, doc_snippets)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
config.update({'plugins': {'jedi_completion': {'resolve_at_most': 0}}})
com_position = {'line': 1, 'character': len(doc_snippets)}
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]['insertText'] == 'defaultdict'
assert not completions[0].get('insertTextFormat', None)
def test_completion_with_class_objects(config, workspace):
doc_text = 'class FOOBAR(Object): pass\nFOOB'
com_position = {'line': 1, 'character': 4}
doc = Document(DOC_URI, workspace, doc_text)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {
'include_params': True,
'include_class_objects': True,
}}})
completions = pylsp_jedi_completions(config, doc, com_position)
assert len(completions) == 2
assert completions[0]['label'] == 'FOOBAR'
assert completions[0]['kind'] == lsp.CompletionItemKind.Class
assert completions[1]['label'] == 'FOOBAR object'
assert completions[1]['kind'] == lsp.CompletionItemKind.TypeParameter
def test_snippet_parsing(config, workspace):
doc = 'divmod'
completion_position = {'line': 0, 'character': 6}
doc = Document(DOC_URI, workspace, doc)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
completions = pylsp_jedi_completions(config, doc, completion_position)
out = 'divmod(${1:x}, ${2:y})$0'
if JEDI_VERSION == '0.18.0':
out = 'divmod(${1:a}, ${2:b})$0'
assert completions[0]['insertText'] == out
def test_multiline_import_snippets(config, workspace):
document = 'from datetime import(\n date,\n datetime)\na=date'
doc = Document(DOC_URI, workspace, document)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'date'
position = {'line': 2, 'character': 9}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'datetime'
def test_multiline_snippets(config, workspace):
document = 'from datetime import\\\n date,\\\n datetime \na=date'
doc = Document(DOC_URI, workspace, document)
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'date'
position = {'line': 2, 'character': 9}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'datetime'
def test_multistatement_snippet(config, workspace):
config.capabilities['textDocument'] = {
'completion': {'completionItem': {'snippetSupport': True}}}
config.update({'plugins': {'jedi_completion': {'include_params': True}}})
document = 'a = 1; from datetime import date'
doc = Document(DOC_URI, workspace, document)
position = {'line': 0, 'character': len(document)}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'date'
document = 'from math import fmod; a = fmod'
doc = Document(DOC_URI, workspace, document)
position = {'line': 0, 'character': len(document)}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]['insertText'] == 'fmod(${1:x}, ${2:y})$0'
def test_jedi_completion_extra_paths(tmpdir, workspace):
# Create a tempfile with some content and pass to extra_paths
temp_doc_content = '''
def spam():
pass
'''
p = tmpdir.mkdir("extra_path")
extra_paths = [str(p)]
p = p.join("foo.py")
p.write(temp_doc_content)
# Content of doc to test completion
doc_content = """import foo
foo.s"""
doc = Document(DOC_URI, workspace, doc_content)
# After 'foo.s' without extra paths
com_position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions is None
# Update config extra paths
settings = {'pylsp': {'plugins': {'jedi': {'extra_paths': extra_paths}}}}
doc.update_config(settings)
# After 'foo.s' with extra paths
com_position = {'line': 1, 'character': 5}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]['label'] == 'spam()'
@pytest.mark.skipif(PY2 or not LINUX or not CI, reason="tested on linux and python 3 only")
def test_jedi_completion_environment(workspace):
# Content of doc to test completion
doc_content = '''import logh
'''
doc = Document(DOC_URI, workspace, doc_content)
# After 'import logh' with default environment
com_position = {'line': 0, 'character': 11}
assert os.path.isdir('/tmp/pyenv/')
settings = {'pylsp': {'plugins': {'jedi': {'environment': None}}}}
doc.update_config(settings)
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions is None
# Update config extra environment
env_path = '/tmp/pyenv/bin/python'
settings = {'pylsp': {'plugins': {'jedi': {'environment': env_path}}}}
doc.update_config(settings)
# After 'import logh' with new environment
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]['label'] == 'loghub'
resolved = pylsp_jedi_completion_item_resolve(completions[0], doc)
assert 'changelog generator' in resolved['documentation'].lower()
def test_document_path_completions(tmpdir, workspace_other_root_path):
# Create a dummy module out of the workspace's root_path and try to get
# completions for it in another file placed next to it.
module_content = '''
def foo():
pass
'''
p = tmpdir.join("mymodule.py")
p.write(module_content)
# Content of doc to test completion
doc_content = """import mymodule
mymodule.f"""
doc_path = str(tmpdir) + os.path.sep + 'myfile.py'
doc_uri = uris.from_fs_path(doc_path)
doc = Document(doc_uri, workspace_other_root_path, doc_content)
com_position = {'line': 1, 'character': 10}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]['label'] == 'foo()'
| true
| true
|
f709914a96349d3d19379fbb812de6ce94a78611
| 1,591
|
py
|
Python
|
youtube_mp3.py
|
sliatecinos/saladeaula
|
35ee1a47f3a62c6e17d831b8b08bf209eab3305d
|
[
"Unlicense"
] | 1
|
2022-01-11T21:05:33.000Z
|
2022-01-11T21:05:33.000Z
|
youtube_mp3.py
|
sliatecinos/saladeaula
|
35ee1a47f3a62c6e17d831b8b08bf209eab3305d
|
[
"Unlicense"
] | null | null | null |
youtube_mp3.py
|
sliatecinos/saladeaula
|
35ee1a47f3a62c6e17d831b8b08bf209eab3305d
|
[
"Unlicense"
] | null | null | null |
# ================================================================
# YouTube Downloader (.MP4 para .MP3) :: github.com/sliatecinos
# ================================================================
from pytube import YouTube
from pydub import AudioSegment
import os
import time
link = input('\nEntre com o link:')
yt = YouTube(link)
# Title of the video:
print('Titulo:\t',yt.title)
# Nro. of views:
print('Nro. de views:\t',yt.views)
# Lenght of the video:
print('Tamanho:\t',yt.length)
# # Description of the video:
# print('Descricao:\t',yt.description)
# Rating:
print('Avaliaçoes:\t',yt.rating)
# Author:
print('Publicado por:\t',yt.author)
# Getting only audio from video:
video = yt.streams.filter(only_audio=True).first()
res = input('Continuar?(y/n):\t')
destino = 'C:\\Users\\sliatecinos\\Músicas\\'
if res.lower() == 'y':
# Starting download:
print('Time start:', time.strftime("%b %d %Y %H:%M:%S", time.localtime()))
print('Download em andamento....')
out_file = video.download(destino)
print('Download completado!!')
mp4_audio = AudioSegment.from_file(out_file, format="mp4")
base, ext = os.path.splitext(out_file)
mp4_audio.export(base + '.mp3', format="mp3")
print('Conversao pra MP3, com sucesso!!!')
files_in_directory = os.listdir(destino)
filtered_files = [file for file in files_in_directory if file.endswith(".mp4")]
for file in filtered_files:
path_to_file = os.path.join(destino, file)
os.remove(path_to_file)
print('Time end:', time.strftime("%b %d %Y %H:%M:%S", time.localtime()))
| 27.431034
| 83
| 0.615965
|
from pytube import YouTube
from pydub import AudioSegment
import os
import time
link = input('\nEntre com o link:')
yt = YouTube(link)
print('Titulo:\t',yt.title)
print('Nro. de views:\t',yt.views)
print('Tamanho:\t',yt.length)
print('Avaliaçoes:\t',yt.rating)
print('Publicado por:\t',yt.author)
video = yt.streams.filter(only_audio=True).first()
res = input('Continuar?(y/n):\t')
destino = 'C:\\Users\\sliatecinos\\Músicas\\'
if res.lower() == 'y':
print('Time start:', time.strftime("%b %d %Y %H:%M:%S", time.localtime()))
print('Download em andamento....')
out_file = video.download(destino)
print('Download completado!!')
mp4_audio = AudioSegment.from_file(out_file, format="mp4")
base, ext = os.path.splitext(out_file)
mp4_audio.export(base + '.mp3', format="mp3")
print('Conversao pra MP3, com sucesso!!!')
files_in_directory = os.listdir(destino)
filtered_files = [file for file in files_in_directory if file.endswith(".mp4")]
for file in filtered_files:
path_to_file = os.path.join(destino, file)
os.remove(path_to_file)
print('Time end:', time.strftime("%b %d %Y %H:%M:%S", time.localtime()))
| true
| true
|
f709916599da40d4a4b314b76e753b26221d2c76
| 2,398
|
py
|
Python
|
how_to_use_custom_keras_objects.py
|
XiaowanYi/Attention_vgg16
|
32c68ae048ea3f3de96c74a1df78d1f58894eee7
|
[
"MIT"
] | 3
|
2020-12-13T12:50:14.000Z
|
2021-09-19T09:28:42.000Z
|
how_to_use_custom_keras_objects.py
|
XiaowanYi/Attention_vgg16
|
32c68ae048ea3f3de96c74a1df78d1f58894eee7
|
[
"MIT"
] | null | null | null |
how_to_use_custom_keras_objects.py
|
XiaowanYi/Attention_vgg16
|
32c68ae048ea3f3de96c74a1df78d1f58894eee7
|
[
"MIT"
] | 1
|
2021-05-29T08:43:28.000Z
|
2021-05-29T08:43:28.000Z
|
"""
This script has a few examples of how to use custom keras objects
which are defined in `keras_custom_objects`
"""
'''
1. Use a custom EarlyStopping criterion:
In our case, it is RelativeEarlyStopping, which terminates training
if the monitored improvement between two epochs is less than 0.1%.
'''
import keras_custom_objects as KO
custom_earlystopping = KO.RelativeEarlyStopping(monitor='val_loss',
min_perc_delta=0.001, # perc means percentage
patience=patience,
verbose=2,
mode='min'
)
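# For orientation, a minimal sketch of how such a relative criterion could be
# implemented (illustrative only -- the real implementation lives in
# keras_custom_objects and may differ; EarlyStopping is assumed to come from
# the same keras installation, and `patience` above is assumed to be defined
# by the user beforehand):
from keras.callbacks import EarlyStopping
class SketchRelativeEarlyStopping(EarlyStopping):
    def __init__(self, min_perc_delta=0.001, **kwargs):
        super(SketchRelativeEarlyStopping, self).__init__(**kwargs)
        self.min_perc_delta = min_perc_delta
        self.previous = None
        self.wait = 0
    def on_epoch_end(self, epoch, logs=None):
        current = (logs or {}).get(self.monitor)
        if current is None:
            return
        if self.previous is not None:
            # relative (percentage) improvement between two consecutive epochs
            improvement = abs(self.previous - current) / max(abs(self.previous), 1e-12)
            if improvement < self.min_perc_delta:
                self.wait += 1
                if self.wait >= self.patience:
                    self.model.stop_training = True
            else:
                self.wait = 0
        self.previous = current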
'''
2. Use custom fitting function:
In our case, we want to extend the original fit_generator with extra functionalities
such as not using multiprocessing for validation to avoid duplicating validation data,
and being able to re-weight validation instances the same way when training instances are
weighted under a certain scheme.
The way I created these custom keras functions is by no means the most accurate/elegant way
of achieving the goal. Feel free to modify or do it your way and do let me know if you find a better
way to do so. Thanks!
'''
import keras_custom_objects as KO
# because the custom functions are defined under the CustomModel class which is inherited
# from the Model class, we now must define our model via KO.CustomModel
model = KO.CustomModel(inputs=some_layer.input, outputs=some_other_layer.output)
# and then you can call custom fitting no different to the original case
model.fit_generator_custom(train_generator,
steps_per_epoch=train_steps,
epochs=epochs,
validation_data=val_generator,
validation_steps=val_steps,
class_weight=class_weighting, # this weight will now also apply to validation instances
verbose=1,
callbacks=[tensorboard, earlystopping, checkpoint],
max_queue_size=40,
workers=14,
use_multiprocessing=True) # in fact use_multiprocessing=False for validation set
| 47.96
| 115
| 0.610926
|
import keras_custom_objects as KO
custom_earlystopping = KO.RelativeEarlyStopping(monitor='val_loss',
min_perc_delta=0.001,
patience=patience,
verbose=2,
mode='min'
)
import keras_custom_objects as KO
model = KO.CustomModel(inputs=some_layer.input, outputs=some_other_layer.output)
model.fit_generator_custom(train_generator,
steps_per_epoch=train_steps,
epochs=epochs,
validation_data=val_generator,
validation_steps=val_steps,
class_weight=class_weighting,
verbose=1,
callbacks=[tensorboard, earlystopping, checkpoint],
max_queue_size=40,
workers=14,
use_multiprocessing=True)
| true
| true
|
f7099171822142f65064fc71bc2ffdaf986681bf
| 689
|
py
|
Python
|
nw/tests/__init__.py
|
valhuber/Logic-Bank
|
3f31b47786ce3fae53fd96af8795cd693e20547b
|
[
"BSD-3-Clause"
] | 1
|
2021-06-28T20:37:09.000Z
|
2021-06-28T20:37:09.000Z
|
nw/tests/__init__.py
|
valhuber/Logic-Bank
|
3f31b47786ce3fae53fd96af8795cd693e20547b
|
[
"BSD-3-Clause"
] | 2
|
2020-09-30T14:10:54.000Z
|
2020-09-30T14:11:43.000Z
|
nw/tests/__init__.py
|
valhuber/Logic-Bank
|
3f31b47786ce3fae53fd96af8795cd693e20547b
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from shutil import copyfile
from logic_bank.util import prt
def setup_db():
""" copy db/database-gold.db over db/database.db"""
print("\n" + prt("restoring database-gold\n"))
basedir = os.path.abspath(os.path.dirname(__file__))
basedir = os.path.dirname(basedir)
print("\n********************************\n"
" IMPORTANT - create database.db from database-gold.db in " + basedir + "/nw/db/\n" +
" - from -- " + prt("") +
"\n********************************")
nw_loc = os.path.join(basedir, "db/database.db")
nw_source = os.path.join(basedir, "db/database-gold.db")
copyfile(src=nw_source, dst=nw_loc)
| 32.809524
| 96
| 0.555878
|
import os
from shutil import copyfile
from logic_bank.util import prt
def setup_db():
print("\n" + prt("restoring database-gold\n"))
basedir = os.path.abspath(os.path.dirname(__file__))
basedir = os.path.dirname(basedir)
print("\n********************************\n"
" IMPORTANT - create database.db from database-gold.db in " + basedir + "/nw/db/\n" +
" - from -- " + prt("") +
"\n********************************")
nw_loc = os.path.join(basedir, "db/database.db")
nw_source = os.path.join(basedir, "db/database-gold.db")
copyfile(src=nw_source, dst=nw_loc)
| true
| true
|
f70991fbf0ccbff4d648e3f23be1363474a8332b
| 6,435
|
py
|
Python
|
py_work/AI/ML/FeatureSelection.py
|
kotori-y/kotori_work
|
51ebfdf49571ae34c246dc5b37cc86e25f4ccf3d
|
[
"MIT"
] | 6
|
2020-05-14T09:47:04.000Z
|
2021-06-05T03:03:45.000Z
|
py_work/AI/ML/FeatureSelection.py
|
kotori-y/kotori_work
|
51ebfdf49571ae34c246dc5b37cc86e25f4ccf3d
|
[
"MIT"
] | null | null | null |
py_work/AI/ML/FeatureSelection.py
|
kotori-y/kotori_work
|
51ebfdf49571ae34c246dc5b37cc86e25f4ccf3d
|
[
"MIT"
] | 4
|
2020-04-20T13:17:27.000Z
|
2021-08-07T19:44:50.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 24 21:46:41 2019
You are not expected to understand my codes!
@Author: Kotori_Y
@Blog: blog.moyule.me
@Weibo: Kotori-Y
@Mail: yzjkid9@gmail.com
I love Megumi forever!
"""
print(__doc__)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split,KFold
from sklearn.metrics import accuracy_score,precision_score,recall_score
import pandas as pd
import time
import os
from tqdm import tqdm
kf = KFold(n_splits=5)#kfold
start = time.perf_counter()  # time.clock() was removed in Python 3.8
#os.chdir(r'E:\student\yzy\Importance')
#files = os.listdir()
#os.makedirs('FeatureAna')
#df = df.sample(frac=1).reset_index(drop=True)
#df.drop('SMILES',axis=1,inplace=True)
#y = df.pop('Label')
#fold = 0
####################################### 5-Fold #######################################
#df_i = pd.DataFrame()#create a dataframe for importance
#df_m = pd.DataFrame()#create a dataframe for metrics
#for train_index, test_index in kf.split(df):
# col = list(df.columns)
# fold += 1
# X_train, x_test = df.iloc[train_index], df.iloc[test_index]
# Y_train, y_test = y.iloc[train_index], y.iloc[test_index]
# X = X_train.copy()
# x = x_test.copy()
#
# for _ in tqdm(range(len(df.columns))):
#
# rfc = RandomForestClassifier(n_estimators=500,n_jobs=-1)
## print('----------------Fitting----------------')
# rfc.fit(X,Y_train)
#
# fea = pd.DataFrame(
# {
# 'Feature':col,
# 'Importance':rfc.feature_importances_,
# 'Fold':'fold_{}'.format(fold),
# 'Class':len(col)
# }
# )
# fea.sort_values('Importance',ascending=False,inplace=True)
# df_i = pd.concat([df_i,fea],ignore_index=True)
#
# #cal correlate metrics
# acc = accuracy_score(y_test,rfc.predict(x))
# pre = precision_score(y_test,rfc.predict(x))
# rec = recall_score(y_test,rfc.predict(x))
#
# me = pd.DataFrame(
# {
# 'Precision':[pre],
# 'Recall':[rec],
# 'Accuracy':[acc],
# 'Fold':['fold_{}'.format(fold)],
# 'Class':[len(col)]
# }
# )
# df_m = pd.concat([df_m,me],ignore_index=True)
#
# #drop the most unimportant feature
# drop = list(fea['Feature'])[-1]
#
# X.drop(drop,axis=1,inplace=True)
# x.drop(drop,axis=1,inplace=True)
# col.remove(drop)
#
# del rfc,fea,me
#
#
#end = time.clock()
#
#print(end-start)
#
#df_i.to_csv('Importances.csv')
#df_m.to_csv('Metrics.csv')
###########################################################################################
####################################### ONCE #######################################
def Selection(file, filepath):
    """Recursive feature elimination: repeatedly fit a random forest, record
    feature importances and test metrics, then drop the least important
    feature until every feature has been ranked."""
os.chdir(filepath)
print('-----{} start-----'.format(file.replace('.csv','')))
    df_i = pd.DataFrame()#create a dataframe for importance
    df_m = pd.DataFrame()#create a dataframe for metrics
#df_1 = pd.read_csv(r'E:\student\kotori\Lemon\backup\2C9_In_MACCS-1.csv')
#df_0 = pd.read_csv(r'E:\student\kotori\Lemon\backup\2C9_In_MACCS-0.csv')
#df_1 = df_1.sample(len(df_0),replace=True)
#df = pd.concat([df_1,df_0],ignore_index=True,sort=False)
df = pd.read_csv(file)
df = df.sample(frac=1).reset_index(drop=True)
# df = df.iloc[:,3:]
# try:
# df.drop('SMILES',axis=1,inplace=True)
# except:
# df.drop('Smiles',axis=1,inplace=True)
y = df.pop('grades')
col = list(df.columns)
X_train,x_test,Y_train,y_test = train_test_split(df,y,test_size=0.2)
X = X_train.copy()
x = x_test.copy()
for _ in tqdm(range(len(df.columns))):
rfc = RandomForestClassifier(n_estimators=500,n_jobs=-1)
# print('----------------Fitting----------------')
rfc.fit(X,Y_train)
fea = pd.DataFrame(
{
'Feature':col
,'Importance':rfc.feature_importances_
,'Class':len(col)
}
)
fea.sort_values('Importance',ascending=False,inplace=True)
df_i = pd.concat([df_i,fea],ignore_index=True,sort=False)
#cal correlate metrics
acc = accuracy_score(y_test,rfc.predict(x))
pre = precision_score(y_test,rfc.predict(x))
rec = recall_score(y_test,rfc.predict(x))
me = pd.DataFrame(
{
'Precision':[pre]
,'Recall':[rec]
,'Accuracy':[acc]
#,'Fold':['fold_{}'.format(fold)]
,'Class':[len(col)]
}
)
df_m = pd.concat([df_m,me],ignore_index=True,sort=False)
#drop the most unimportant feature
drop = list(fea['Feature'])[-1]
X.drop(drop,axis=1,inplace=True)
x.drop(drop,axis=1,inplace=True)
col.remove(drop)
del rfc,fea,me
#file = '2C9_In_MACCS'
#df_i.to_csv('FeatureAna/{}_Importances_oversampling.csv'.format(file),index=False)
#df_m.to_csv('FeatureAna/{}_Metrics_oversampling.csv'.format(file),index=False)
return df_i,df_m
def main():
    print("Input the absolute path of the folder where your files are located and ensure each file only contains 'SMILES', 'Label' and the feature vectors\n")
filepath = input("The absolute path: ")
files = os.listdir(filepath)
for file in files:
df_i, df_m = Selection(file,filepath)
# os.chdir(r'E:\student\yzy\All')
#
# part_1_class = list(range(1000,1717))
#
# df_i_a = df_i[df_i['Class'].isin(part_1_class)]
# df_i_b = df_i[~df_i['Class'].isin(part_1_class)]
# df_i.iloc[:,:].to_csv(file.replace('.csv','') + '_Importances.csv',index=False)
# df_m.to_csv(file.replace('.csv','') + '_Metrics.csv',index=False)
df_i.to_csv('{}_Importances.csv'.format(file.replace('.csv','')))
if '__main__' == __name__:
main()
#,'Fold':'fold_{}'.format(fold)
| 30.9375
| 141
| 0.524165
|
print(__doc__)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split,KFold
from sklearn.metrics import accuracy_score,precision_score,recall_score
import pandas as pd
import time
import os
from tqdm import tqdm
kf = KFold(n_splits=5)
start = time.perf_counter()
| true
| true
|
f70993686240ee83242c38feb61999ddede668e5
| 8,456
|
py
|
Python
|
anvil/utils/generic.py
|
AndresMWeber/Anvil
|
9cd202183ac998983c2bf6e55cc46bbc0ca1a78e
|
[
"Apache-2.0"
] | 3
|
2019-11-22T04:38:06.000Z
|
2022-01-19T08:27:18.000Z
|
anvil/utils/generic.py
|
AndresMWeber/Anvil
|
9cd202183ac998983c2bf6e55cc46bbc0ca1a78e
|
[
"Apache-2.0"
] | 28
|
2018-02-01T20:39:42.000Z
|
2018-04-26T17:25:23.000Z
|
anvil/utils/generic.py
|
AndresMWeber/Anvil
|
9cd202183ac998983c2bf6e55cc46bbc0ca1a78e
|
[
"Apache-2.0"
] | 1
|
2018-03-11T06:47:26.000Z
|
2018-03-11T06:47:26.000Z
|
from six import iteritems, itervalues
from collections import OrderedDict, MutableMapping, Iterable
from functools import wraps
import anvil.config as cfg
def to_list(query):
if isinstance(query, list):
return query
elif isinstance(query, str):
return [query]
elif isinstance(query, dict):
return [query]
elif not query:
return list()
try:
return list(query)
except TypeError:
return [query]
def to_size_list(query, desired_length):
query_list = to_list(query) if query else [None]
if len(query_list) > desired_length:
return query_list[:desired_length]
else:
return query_list + [query_list[-1]] * (desired_length - len(query_list))
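# Illustrative usage (added example; not part of the original module):
def _example_to_size_list():
    assert to_size_list([1, 2], 4) == [1, 2, 2, 2]  # padded with last element
    assert to_size_list([1, 2, 3], 2) == [1, 2]  # truncated
    assert to_size_list(None, 2) == [None, None]  # falsy input -> [None], padded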
def to_camel_case(input_string):
tokens = input_string.split('_')
return tokens[0] + ''.join([token.capitalize() for token in tokens[1:]])
def gen_flatten_dict_depth_two(d):
"""Taken from:
https://stackoverflow.com/questions/3835192/flatten-a-dictionary-of-dictionaries-2-levels-deep-of-lists-in-python
Given the d_inner, return an iterator that provides all the nodes from within.
"""
for d_inner in itervalues(d):
if isinstance(d_inner, dict):
            for nodes in itervalues(d_inner):
                for node in to_list(nodes):
                    yield node
        else:
            for node in to_list(d_inner):
                yield node
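# Illustrative usage (added example; the data is hypothetical): yields every
# leaf node from a mapping that is at most two dictionaries deep.
def _example_gen_flatten_dict_depth_two():
    nested = {'a': {'x': [1, 2], 'y': [3]}, 'b': [4]}
    return list(gen_flatten_dict_depth_two(nested))  # [1, 2, 3, 4] in dict order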
def get_dict_depth(d=None, level=0):
"""Returns maximum depth of the hierarchy"""
if not isinstance(d, dict) or not d:
return level
return max(get_dict_depth(d[k], level=level + 1) for k in d)
def get_dict_key_matches(key, dictionary):
for k, v in iteritems(dictionary):
if k == key:
return {k: v}
elif isinstance(v, dict):
return get_dict_key_matches(key, v)
def dict_to_keys_list(d, keys=None):
keys = keys if keys is not None else []
if isinstance(d, dict):
for k, v in iteritems(d):
keys.append(k)
dict_to_keys_list(v, keys)
else:
keys.append(d)
return keys
def dict_deep_sort(cls, obj):
"""Recursively sort list or dict nested lists
Taken from: http://goo.gl/tQfDP6
"""
if isinstance(obj, dict):
_sorted = OrderedDict()
for key in sorted(list(obj)):
_sorted[key] = cls.deep_sort(obj[key])
elif isinstance(obj, list):
new_list = []
for val in obj:
new_list.append(cls.deep_sort(val))
_sorted = sorted(new_list)
else:
_sorted = obj
return _sorted
def to_str_dict(d):
data = {}
for k, v in iteritems(d):
try:
data.update({str(k): str(v)})
except TypeError:
pass
return data
def pop_dict_keys(d, keys):
popped = []
for key in keys:
try:
popped.append(d.pop(key))
except KeyError:
pass
return popped
def merge_dicts(*args, **kwargs):
"""Outputs a merged dictionary from inputs. Overwrites data if there are conflicts from left to right.
:param args: (dict), tuple of input dictionaries
:param kwargs: dict, input kwargs to merge
:return: dict, combined data.
"""
data = {}
for input_dict in [arg for arg in args if isinstance(arg, dict)] + [kwargs]:
data.update(input_dict)
return data
def dict_compare(d1, d2):
"""Taken from: https://stackoverflow.com/questions/4527942/comparing-two-dictionaries-in-python"""
d1_keys = set(list(d1))
d2_keys = set(list(d2))
intersect_keys = d1_keys.intersection(d2_keys)
added = d1_keys - d2_keys
removed = d2_keys - d1_keys
modified = {o: (d1[o], d2[o]) for o in intersect_keys if d1[o] != d2[o]}
same = set(o for o in intersect_keys if d1[o] == d2[o])
return added, removed, modified, same
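# Illustrative usage (added example; the data is hypothetical):
def _example_dict_compare():
    added, removed, modified, same = dict_compare(
        {'a': 1, 'b': 2}, {'b': 3, 'c': 4})
    # added == {'a'}, removed == {'c'}, modified == {'b': (2, 3)}, same == set()
    return added, removed, modified, same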
def dict_to_flat_dict(d, full_path=True, parent_key='', sep='_'):
"""Got from https://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys
:param d: dict, input dictionary
:param full_path: bool, whether to store the full path as the key or the final key for that dictionary item.
:param parent_key: str, keeps track of the dictionary path taken, do not set.
:param sep: str, arbitary separator to delineate path separation in the parent_key string.
:return: dict, flat dictionary with all keys as full path keys.
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key and full_path else k
if isinstance(v, MutableMapping):
            # Pass parent_key by keyword; positionally, new_key would land in
            # the full_path parameter and drop the accumulated key path.
            items.extend(dict_to_flat_dict(
                v, full_path=full_path, parent_key=new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
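# Illustrative usage (added example; the data is hypothetical). With the
# keyword fix above, nested keys are joined along the path when full_path=True:
def _example_dict_to_flat_dict():
    nested = {'a': {'b': 1}, 'c': 2}
    assert dict_to_flat_dict(nested) == {'a_b': 1, 'c': 2}
    assert dict_to_flat_dict(nested, full_path=False) == {'b': 1, 'c': 2}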
merge_value_LUT = {
dict: lambda d1, d2: merge_dicts(d2),
list: lambda l1, l2: l1 + to_list(l2),
str: lambda s1, s2: s1 + str(s2),
'replace': lambda e1, e2: e2,
}
class Map(dict):
"""A dot notation accessible dictionary class extension.
Taken from: https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary
Example:
m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(Map, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in iteritems(arg):
self[k] = v
if kwargs:
for k, v in iteritems(kwargs):
self[k] = v
def deep_update(self, d, path=None):
if path is None:
path = []
for k, v in iteritems(d):
if isinstance(v, dict):
self.deep_update(v, path=path + [k])
else:
self._merge_value(path + [k], v)
def flatten(self):
return gen_flatten_dict_depth_two(self)
def to_flat_dict(self, full_path=False):
return dict_to_flat_dict(self, full_path=full_path)
def to_value_list(self):
result = []
        # Use an explicit loop: under Python 3, map() is lazy, so the
        # result.extend calls would never actually run.
        for n in itervalues(self.to_flat_dict()):
            result.extend(n if isinstance(n, Iterable) else to_list(n))
return result
def _merge_value(self, path, v):
"""Stably merge values without overwriting or messing up Map object.
This is used since we have a slightly customized way of adding entries and do not want the base Map object
to start getting stale data. If a path does not exist, we will add a default Map object in that place
unless it is the final path, in which case we merge with the existing (or not) value.
:param path: list, list of keys we will traverse down.
:param v: object, any type of object we are adding to that nested/base dict.
"""
current_map = self
for p in path[:-1]:
current_map = current_map.setdefault(p, self.__class__())
current_v = current_map.setdefault(path[-1], None)
current_map[path[-1]] = merge_value_LUT.get(type(current_v), merge_value_LUT['replace'])(current_v, v)
def __getattr__(self, attr):
"""Passthrough function for dictionary.get"""
return self.get(attr)
def __setattr__(self, key, value):
"""Passthrough function for dictionary item setter"""
self.__setitem__(key, value)
def __setitem__(self, key, value):
"""Updates both setitem and instance dictionary key value"""
super(Map, self).__setitem__(key, value)
self.__dict__[key] = value
def __delattr__(self, item):
"""Passthrough for dictionary delete item."""
self.__delitem__(item)
def __delitem__(self, key):
"""Deletes both the attribute and the instance dictionary"""
super(Map, self).__delitem__(key)
del self.__dict__[key]
    def __eq__(self, other):
        """Determines if the dictionary is equivalent to the other dictionary."""
        # dict_compare returns a 4-tuple (always truthy), so reduce it to a
        # real boolean instead of returning the tuple directly.
        added, removed, modified, _ = dict_compare(self.__dict__, other)
        return not (added or removed or modified)
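# Illustrative usage of Map (added example; the data is hypothetical):
# attribute access plus deep_update merging according to merge_value_LUT.
def _example_map_usage():
    m = Map({'tags': ['a'], 'meta': {'count': 1}})
    m.deep_update({'tags': ['b'], 'meta': {'count': 2}})
    assert m.tags == ['a', 'b']  # lists are concatenated
    assert m['meta']['count'] == 2  # plain values are replaced
    return m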
def extend_parent_kwarg(number_of_parents):
def inner(f):
@wraps(f)
def wrapper(abstract_grouping, *args, **kwargs):
kwargs[cfg.PARENT] = iter(to_size_list(kwargs.get(cfg.PARENT), number_of_parents))
return f(abstract_grouping, *args, **kwargs)
return wrapper
return inner
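# Illustrative usage (added example; the parent value is hypothetical): the
# decorator pads the parent kwarg to one entry per expected parent and exposes
# it as an iterator.
def _example_extend_parent_kwarg():
    @extend_parent_kwarg(2)
    def build(abstract_grouping, **kwargs):
        return list(kwargs[cfg.PARENT])
    return build(None, **{cfg.PARENT: 'node_a'})  # -> ['node_a', 'node_a']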
| 31.670412
| 117
| 0.626301
|
from six import iteritems, itervalues
from collections import OrderedDict, MutableMapping, Iterable
from functools import wraps
import anvil.config as cfg
def to_list(query):
if isinstance(query, list):
return query
elif isinstance(query, str):
return [query]
elif isinstance(query, dict):
return [query]
elif not query:
return list()
try:
return list(query)
except TypeError:
return [query]
def to_size_list(query, desired_length):
query_list = to_list(query) if query else [None]
if len(query_list) > desired_length:
return query_list[:desired_length]
else:
return query_list + [query_list[-1]] * (desired_length - len(query_list))
def to_camel_case(input_string):
tokens = input_string.split('_')
return tokens[0] + ''.join([token.capitalize() for token in tokens[1:]])
def gen_flatten_dict_depth_two(d):
for d_inner in itervalues(d):
if isinstance(d_inner, dict):
            for nodes in itervalues(d_inner):
                for node in to_list(nodes):
                    yield node
        else:
            for node in to_list(d_inner):
                yield node
def get_dict_depth(d=None, level=0):
if not isinstance(d, dict) or not d:
return level
return max(get_dict_depth(d[k], level=level + 1) for k in d)
def get_dict_key_matches(key, dictionary):
for k, v in iteritems(dictionary):
if k == key:
return {k: v}
elif isinstance(v, dict):
return get_dict_key_matches(key, v)
def dict_to_keys_list(d, keys=None):
keys = keys if keys is not None else []
if isinstance(d, dict):
for k, v in iteritems(d):
keys.append(k)
dict_to_keys_list(v, keys)
else:
keys.append(d)
return keys
def dict_deep_sort(cls, obj):
if isinstance(obj, dict):
_sorted = OrderedDict()
for key in sorted(list(obj)):
_sorted[key] = cls.deep_sort(obj[key])
elif isinstance(obj, list):
new_list = []
for val in obj:
new_list.append(cls.deep_sort(val))
_sorted = sorted(new_list)
else:
_sorted = obj
return _sorted
def to_str_dict(d):
data = {}
for k, v in iteritems(d):
try:
data.update({str(k): str(v)})
except TypeError:
pass
return data
def pop_dict_keys(d, keys):
popped = []
for key in keys:
try:
popped.append(d.pop(key))
except KeyError:
pass
return popped
def merge_dicts(*args, **kwargs):
data = {}
for input_dict in [arg for arg in args if isinstance(arg, dict)] + [kwargs]:
data.update(input_dict)
return data
def dict_compare(d1, d2):
d1_keys = set(list(d1))
d2_keys = set(list(d2))
intersect_keys = d1_keys.intersection(d2_keys)
added = d1_keys - d2_keys
removed = d2_keys - d1_keys
modified = {o: (d1[o], d2[o]) for o in intersect_keys if d1[o] != d2[o]}
same = set(o for o in intersect_keys if d1[o] == d2[o])
return added, removed, modified, same
def dict_to_flat_dict(d, full_path=True, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key and full_path else k
if isinstance(v, MutableMapping):
            items.extend(dict_to_flat_dict(
                v, full_path=full_path, parent_key=new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
merge_value_LUT = {
dict: lambda d1, d2: merge_dicts(d2),
list: lambda l1, l2: l1 + to_list(l2),
str: lambda s1, s2: s1 + str(s2),
'replace': lambda e1, e2: e2,
}
class Map(dict):
def __init__(self, *args, **kwargs):
super(Map, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in iteritems(arg):
self[k] = v
if kwargs:
for k, v in iteritems(kwargs):
self[k] = v
def deep_update(self, d, path=None):
if path is None:
path = []
for k, v in iteritems(d):
if isinstance(v, dict):
self.deep_update(v, path=path + [k])
else:
self._merge_value(path + [k], v)
def flatten(self):
return gen_flatten_dict_depth_two(self)
def to_flat_dict(self, full_path=False):
return dict_to_flat_dict(self, full_path=full_path)
def to_value_list(self):
result = []
        for n in itervalues(self.to_flat_dict()):
            result.extend(n if isinstance(n, Iterable) else to_list(n))
return result
def _merge_value(self, path, v):
current_map = self
for p in path[:-1]:
current_map = current_map.setdefault(p, self.__class__())
current_v = current_map.setdefault(path[-1], None)
current_map[path[-1]] = merge_value_LUT.get(type(current_v), merge_value_LUT['replace'])(current_v, v)
def __getattr__(self, attr):
return self.get(attr)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(Map, self).__setitem__(key, value)
self.__dict__[key] = value
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super(Map, self).__delitem__(key)
del self.__dict__[key]
def __eq__(self, other):
        added, removed, modified, _ = dict_compare(self.__dict__, other)
        return not (added or removed or modified)
def extend_parent_kwarg(number_of_parents):
def inner(f):
@wraps(f)
def wrapper(abstract_grouping, *args, **kwargs):
kwargs[cfg.PARENT] = iter(to_size_list(kwargs.get(cfg.PARENT), number_of_parents))
return f(abstract_grouping, *args, **kwargs)
return wrapper
return inner
| true
| true
|
f709949ef53472ddbc169d6ea68a3922395cec12
| 49,392
|
py
|
Python
|
core/domain/rights_manager.py
|
mzaman07/oppia
|
cac5737ba63a0a209d47d20f3b464495da12bd59
|
[
"Apache-2.0"
] | 1
|
2022-02-22T09:27:22.000Z
|
2022-02-22T09:27:22.000Z
|
core/domain/rights_manager.py
|
mzaman07/oppia
|
cac5737ba63a0a209d47d20f3b464495da12bd59
|
[
"Apache-2.0"
] | null | null | null |
core/domain/rights_manager.py
|
mzaman07/oppia
|
cac5737ba63a0a209d47d20f3b464495da12bd59
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects and functions that manage rights for various user actions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import copy
import logging
from constants import constants
from core.domain import activity_services
from core.domain import change_domain
from core.domain import role_services
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
import feconf
import python_utils
import utils
current_user_services = models.Registry.import_current_user_services()
(collection_models, exp_models,) = models.Registry.import_models([
models.NAMES.collection, models.NAMES.exploration
])
# IMPORTANT: Ensure that all changes to how these cmds are interpreted preserve
# backward-compatibility with previous exploration snapshots in the datastore.
# Do not modify the definitions of CMD keys that already exist.
CMD_CREATE_NEW = 'create_new'
CMD_CHANGE_ROLE = 'change_role'
CMD_CHANGE_EXPLORATION_STATUS = 'change_exploration_status'
CMD_CHANGE_COLLECTION_STATUS = 'change_collection_status'
CMD_CHANGE_PRIVATE_VIEWABILITY = 'change_private_viewability'
CMD_RELEASE_OWNERSHIP = 'release_ownership'
CMD_UPDATE_FIRST_PUBLISHED_MSEC = 'update_first_published_msec'
ACTIVITY_STATUS_PRIVATE = constants.ACTIVITY_STATUS_PRIVATE
ACTIVITY_STATUS_PUBLIC = constants.ACTIVITY_STATUS_PUBLIC
ROLE_OWNER = 'owner'
ROLE_EDITOR = 'editor'
ROLE_VOICE_ARTIST = 'voice artist'
ROLE_VIEWER = 'viewer'
ROLE_NONE = 'none'
ROLE_ADMIN = 'admin'
ROLE_MODERATOR = 'moderator'
# The allowed list of roles which can be used in change_role command.
ALLOWED_ROLES = [ROLE_OWNER, ROLE_EDITOR, ROLE_VOICE_ARTIST, ROLE_VIEWER]
# The allowed list of statuses which can be used in the
# change_exploration_status and change_collection_status commands.
ALLOWED_STATUS = [ACTIVITY_STATUS_PRIVATE, ACTIVITY_STATUS_PUBLIC]
COMMON_ALLOWED_COMMANDS = [{
'name': CMD_CREATE_NEW,
'required_attribute_names': [],
'optional_attribute_names': []
}, {
'name': CMD_CHANGE_ROLE,
'required_attribute_names': ['assignee_id', 'old_role', 'new_role'],
'optional_attribute_names': [],
'allowed_values': {'new_role': ALLOWED_ROLES, 'old_role': ALLOWED_ROLES}
}, {
'name': CMD_CHANGE_PRIVATE_VIEWABILITY,
'required_attribute_names': [
'old_viewable_if_private', 'new_viewable_if_private'],
'optional_attribute_names': []
}, {
'name': CMD_RELEASE_OWNERSHIP,
'required_attribute_names': [],
'optional_attribute_names': [],
}, {
'name': CMD_UPDATE_FIRST_PUBLISHED_MSEC,
'required_attribute_names': [
'old_first_published_msec', 'new_first_published_msec'],
'optional_attribute_names': [],
}]
class ActivityRights(python_utils.OBJECT):
"""Domain object for the rights/publication status of an activity (an
exploration or a collection).
"""
def __init__(
self, exploration_id, owner_ids, editor_ids, voice_artist_ids,
viewer_ids, community_owned=False, cloned_from=None,
status=ACTIVITY_STATUS_PRIVATE, viewable_if_private=False,
first_published_msec=None):
self.id = exploration_id
self.owner_ids = owner_ids
self.editor_ids = editor_ids
self.voice_artist_ids = voice_artist_ids
self.viewer_ids = viewer_ids
self.community_owned = community_owned
self.cloned_from = cloned_from
self.status = status
self.viewable_if_private = viewable_if_private
self.first_published_msec = first_published_msec
def validate(self):
"""Validates an ActivityRights object.
Raises:
utils.ValidationError: if any of the owners, editors, voice artists
and viewers lists overlap, or if a community-owned exploration
has owners, editors, voice artists or viewers specified.
"""
if self.community_owned:
if (self.owner_ids or self.editor_ids or self.voice_artist_ids or
self.viewer_ids):
raise utils.ValidationError(
'Community-owned explorations should have no owners, '
'editors, voice artists or viewers specified.')
if self.community_owned and self.status == ACTIVITY_STATUS_PRIVATE:
raise utils.ValidationError(
'Community-owned explorations cannot be private.')
if self.status != ACTIVITY_STATUS_PRIVATE and self.viewer_ids:
raise utils.ValidationError(
'Public explorations should have no viewers specified.')
owner_editor = set(self.owner_ids) & set(self.editor_ids)
owner_voice_artist = set(self.owner_ids) & set(self.voice_artist_ids)
owner_viewer = set(self.owner_ids) & set(self.viewer_ids)
editor_voice_artist = set(self.editor_ids) & set(self.voice_artist_ids)
editor_viewer = set(self.editor_ids) & set(self.viewer_ids)
voice_artist_viewer = set(self.voice_artist_ids) & set(self.viewer_ids)
if owner_editor:
raise utils.ValidationError(
'A user cannot be both an owner and an editor: %s' %
owner_editor)
if owner_voice_artist:
raise utils.ValidationError(
'A user cannot be both an owner and a voice artist: %s' %
owner_voice_artist)
if owner_viewer:
raise utils.ValidationError(
'A user cannot be both an owner and a viewer: %s' %
owner_viewer)
if editor_voice_artist:
raise utils.ValidationError(
'A user cannot be both an editor and a voice artist: %s' %
editor_voice_artist)
if editor_viewer:
raise utils.ValidationError(
'A user cannot be both an editor and a viewer: %s' %
editor_viewer)
if voice_artist_viewer:
raise utils.ValidationError(
'A user cannot be both a voice artist and a viewer: %s' %
voice_artist_viewer)
def to_dict(self):
"""Returns a dict suitable for use by the frontend.
Returns:
dict. A dict version of ActivityRights suitable for use by the
frontend.
"""
if self.community_owned:
return {
'cloned_from': self.cloned_from,
'status': self.status,
'community_owned': True,
'owner_names': [],
'editor_names': [],
'voice_artist_names': [],
'viewer_names': [],
'viewable_if_private': self.viewable_if_private,
}
else:
return {
'cloned_from': self.cloned_from,
'status': self.status,
'community_owned': False,
'owner_names': user_services.get_human_readable_user_ids(
self.owner_ids),
'editor_names': user_services.get_human_readable_user_ids(
self.editor_ids),
'voice_artist_names': user_services.get_human_readable_user_ids(
self.voice_artist_ids),
'viewer_names': user_services.get_human_readable_user_ids(
self.viewer_ids),
'viewable_if_private': self.viewable_if_private,
}
def is_owner(self, user_id):
"""Checks whether given user is owner of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity owner.
"""
return bool(user_id in self.owner_ids)
def is_editor(self, user_id):
"""Checks whether given user is editor of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity editor.
"""
return bool(user_id in self.editor_ids)
def is_voice_artist(self, user_id):
"""Checks whether given user is voice artist of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity voice artist.
"""
return bool(user_id in self.voice_artist_ids)
def is_viewer(self, user_id):
"""Checks whether given user is viewer of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity viewer.
"""
return bool(user_id in self.viewer_ids)
def is_published(self):
"""Checks whether activity is published.
Returns:
bool. Whether activity is published.
"""
return bool(self.status == ACTIVITY_STATUS_PUBLIC)
def is_private(self):
"""Checks whether activity is private.
Returns:
bool. Whether activity is private.
"""
return bool(self.status == ACTIVITY_STATUS_PRIVATE)
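# Illustrative check (added example; the ids are hypothetical): the same user
# in two role lists violates the pairwise disjointness enforced by validate().
def _example_invalid_activity_rights():
    rights = ActivityRights('exp_id', ['uid_1'], ['uid_1'], [], [])
    try:
        rights.validate()
    except utils.ValidationError:
        return True  # 'A user cannot be both an owner and an editor'
    return False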
class ActivityRightsChange(change_domain.BaseChange):
"""Domain object class for an activity rights change.
The allowed commands, together with the attributes:
- 'create_new'
- 'change_role' (with assignee_id, old_role, new_role)
- 'change_exploration_status' (with old_status, new_status)
- 'change_collection_status' (with old_status, new_status)
- 'change_private_viewability' (with
old_viewable_if_private, new_viewable_if_private)
- 'release_ownership'
- 'update_first_published_msec' (with
old_first_published_msec, new_first_published_msec)
A role must be one of the ALLOWED_ROLES.
A status must be one of the ALLOWED_STATUS.
"""
ALLOWED_COMMANDS = COMMON_ALLOWED_COMMANDS
class ExplorationRightsChange(ActivityRightsChange):
"""Domain object class for an exploration rights change."""
ALLOWED_COMMANDS = copy.deepcopy(COMMON_ALLOWED_COMMANDS)
ALLOWED_COMMANDS.append({
'name': CMD_CHANGE_EXPLORATION_STATUS,
'required_attribute_names': ['old_status', 'new_status'],
'optional_attribute_names': [],
'allowed_values': {
'old_status': ALLOWED_STATUS, 'new_status': ALLOWED_STATUS}
})
class CollectionRightsChange(ActivityRightsChange):
"""Domain object class for an collection rights change."""
ALLOWED_COMMANDS = copy.deepcopy(COMMON_ALLOWED_COMMANDS)
ALLOWED_COMMANDS.append({
'name': CMD_CHANGE_COLLECTION_STATUS,
'required_attribute_names': ['old_status', 'new_status'],
'optional_attribute_names': [],
'allowed_values': {
'old_status': ALLOWED_STATUS, 'new_status': ALLOWED_STATUS}
})
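def _example_change_role_cmd():
    """Illustrative commit command (added example; the assignee id is
    hypothetical) satisfying the CMD_CHANGE_ROLE schema declared in
    COMMON_ALLOWED_COMMANDS above.
    """
    return {
        'cmd': CMD_CHANGE_ROLE,
        'assignee_id': 'uid_abc123',
        'old_role': ROLE_VIEWER,
        'new_role': ROLE_EDITOR,
    }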
def get_activity_rights_from_model(activity_rights_model, activity_type):
"""Constructs an ActivityRights object from the given activity rights model.
Args:
activity_rights_model: ActivityRightsModel. Activity rights from the
datastore.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
Returns:
ActivityRights. The rights object created from the model.
"""
return ActivityRights(
activity_rights_model.id,
activity_rights_model.owner_ids,
activity_rights_model.editor_ids,
activity_rights_model.voice_artist_ids,
activity_rights_model.viewer_ids,
community_owned=activity_rights_model.community_owned,
cloned_from=(
activity_rights_model.cloned_from
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION else None),
status=activity_rights_model.status,
viewable_if_private=activity_rights_model.viewable_if_private,
first_published_msec=activity_rights_model.first_published_msec
)
def _save_activity_rights(
committer_id, activity_rights, activity_type, commit_message,
commit_cmds):
"""Saves an ExplorationRights or CollectionRights domain object to the
datastore.
Args:
committer_id: str. ID of the committer.
activity_rights: ActivityRights. The rights object for the given
activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
commit_message: str. Descriptive message for the commit.
commit_cmds: list(dict). A list of commands describing what kind of
commit was done.
"""
activity_rights.validate()
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
model_cls = exp_models.ExplorationRightsModel
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
model_cls = collection_models.CollectionRightsModel
model = model_cls.get(activity_rights.id, strict=False)
model.owner_ids = activity_rights.owner_ids
model.editor_ids = activity_rights.editor_ids
model.viewer_ids = activity_rights.viewer_ids
model.voice_artist_ids = activity_rights.voice_artist_ids
model.community_owned = activity_rights.community_owned
model.status = activity_rights.status
model.viewable_if_private = activity_rights.viewable_if_private
model.first_published_msec = activity_rights.first_published_msec
model.commit(committer_id, commit_message, commit_cmds)
def _update_exploration_summary(activity_rights):
"""Updates the exploration summary for the activity associated with the
given rights object.
    The ID of the rights object is the same as the ID of the associated activity.
Args:
activity_rights: ActivityRights. The rights object for the given
activity.
"""
# TODO(msl): Get rid of inline imports by refactoring code.
from core.domain import exp_services
exp_services.update_exploration_summary(
activity_rights.id, None)
def _update_collection_summary(activity_rights):
"""Updates the collection summary for the given activity associated with
the given rights object.
    The ID of the rights object is the same as the ID of the associated activity.
Args:
activity_rights: ActivityRights. The rights object for the given
activity.
"""
from core.domain import collection_services
collection_services.update_collection_summary(
activity_rights.id, None)
def _update_activity_summary(activity_type, activity_rights):
"""Updates the activity summary for the given activity associated with
the given rights object.
    The ID of the rights object is the same as the ID of the associated activity.
Args:
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
activity_rights: ActivityRights. The rights object for the given
activity.
"""
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
_update_exploration_summary(activity_rights)
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
_update_collection_summary(activity_rights)
def update_activity_first_published_msec(
activity_type, activity_id, first_published_msec):
"""Updates the first_published_msec field for the given activity.
The caller is responsible for ensuring that this value is not already
set before updating it.
Args:
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
activity_id: str. ID of the activity.
first_published_msec: float. First publication time in milliseconds
since the Epoch.
"""
activity_rights = _get_activity_rights(activity_type, activity_id)
commit_cmds = [{
'cmd': CMD_UPDATE_FIRST_PUBLISHED_MSEC,
'old_first_published_msec': activity_rights.first_published_msec,
'new_first_published_msec': first_published_msec
}]
activity_rights.first_published_msec = first_published_msec
_save_activity_rights(
feconf.SYSTEM_COMMITTER_ID, activity_rights, activity_type,
'set first published time in msec', commit_cmds)
def create_new_exploration_rights(exploration_id, committer_id):
"""Creates a new exploration rights object and saves it to the datastore.
Subscribes the committer to the new exploration.
Args:
exploration_id: str. ID of the exploration.
committer_id: str. ID of the committer.
"""
exploration_rights = ActivityRights(
exploration_id, [committer_id], [], [], [])
commit_cmds = [{'cmd': CMD_CREATE_NEW}]
exp_models.ExplorationRightsModel(
id=exploration_rights.id,
owner_ids=exploration_rights.owner_ids,
editor_ids=exploration_rights.editor_ids,
voice_artist_ids=exploration_rights.voice_artist_ids,
viewer_ids=exploration_rights.viewer_ids,
community_owned=exploration_rights.community_owned,
status=exploration_rights.status,
viewable_if_private=exploration_rights.viewable_if_private,
first_published_msec=exploration_rights.first_published_msec,
).commit(committer_id, 'Created new exploration', commit_cmds)
subscription_services.subscribe_to_exploration(
committer_id, exploration_id)
def get_exploration_rights(exploration_id, strict=True):
"""Retrieves the rights for this exploration from the datastore.
Args:
exploration_id: str. ID of the exploration.
strict: bool. Whether to raise an error if there is no exploration
matching the given ID.
Returns:
ActivityRights. The rights object for the given exploration.
Raises:
EntityNotFoundError. The exploration with ID exploration_id was not
found in the datastore.
"""
model = exp_models.ExplorationRightsModel.get(
exploration_id, strict=strict)
if model is None:
return None
return get_activity_rights_from_model(
model, constants.ACTIVITY_TYPE_EXPLORATION)
def get_multiple_exploration_rights_by_ids(exp_ids):
"""Returns a list of ActivityRights objects for given exploration ids.
Args:
exp_ids: list(str). List of exploration ids.
Returns:
        list(ActivityRights|None). List of ActivityRights objects for the
            given explorations, with None for explorations that do not exist.
"""
exp_rights_models = exp_models.ExplorationRightsModel.get_multi(
exp_ids)
exp_models_list = []
for model in exp_rights_models:
if model is None:
exp_models_list.append(None)
else:
exp_models_list.append(
get_activity_rights_from_model(
model, constants.ACTIVITY_TYPE_EXPLORATION))
return exp_models_list
def is_exploration_private(exploration_id):
"""Returns whether exploration is private.
Args:
exploration_id: str. ID of the exploration.
Returns:
bool. Whether the exploration is private or not.
"""
exploration_rights = get_exploration_rights(exploration_id)
return exploration_rights.status == ACTIVITY_STATUS_PRIVATE
def is_exploration_public(exploration_id):
"""Returns whether exploration is public.
Args:
exploration_id: str. ID of the exploration.
Returns:
bool. Whether the exploration is public.
"""
exploration_rights = get_exploration_rights(exploration_id)
return exploration_rights.status == ACTIVITY_STATUS_PUBLIC
def is_exploration_cloned(exploration_id):
"""Returns whether the exploration is a clone of another exploration.
Args:
exploration_id: str. ID of the exploration.
Returns:
bool. Whether the exploration is a clone of another exploration.
"""
exploration_rights = get_exploration_rights(exploration_id)
return bool(exploration_rights.cloned_from)
def create_new_collection_rights(collection_id, committer_id):
"""Creates a new collection rights object and saves it to the datastore.
Subscribes the committer to the new collection.
Args:
collection_id: str. ID of the collection.
committer_id: str. ID of the committer.
"""
collection_rights = ActivityRights(
collection_id, [committer_id], [], [], [])
commit_cmds = [{'cmd': CMD_CREATE_NEW}]
collection_models.CollectionRightsModel(
id=collection_rights.id,
owner_ids=collection_rights.owner_ids,
editor_ids=collection_rights.editor_ids,
voice_artist_ids=collection_rights.voice_artist_ids,
viewer_ids=collection_rights.viewer_ids,
community_owned=collection_rights.community_owned,
status=collection_rights.status,
viewable_if_private=collection_rights.viewable_if_private,
first_published_msec=collection_rights.first_published_msec
).commit(committer_id, 'Created new collection', commit_cmds)
subscription_services.subscribe_to_collection(committer_id, collection_id)
def get_collection_rights(collection_id, strict=True):
"""Retrieves the rights for this collection from the datastore.
Args:
collection_id: str. ID of the collection.
        strict: bool. Whether to raise an error if there is no collection
            matching the given ID.
Returns:
ActivityRights. The rights object for the collection.
Raises:
EntityNotFoundError. The collection with ID collection_id is not found
in the datastore.
"""
model = collection_models.CollectionRightsModel.get(
collection_id, strict=strict)
if model is None:
return None
return get_activity_rights_from_model(
model, constants.ACTIVITY_TYPE_COLLECTION)
def get_collection_owner_names(collection_id):
"""Retrieves the owners for this collection from the datastore.
Args:
collection_id: str. ID of the collection.
Returns:
list(str). Human-readable usernames (or truncated email addresses) of
owners for this collection.
"""
collection_rights = get_collection_rights(collection_id)
return user_services.get_human_readable_user_ids(
collection_rights.owner_ids)
def is_collection_private(collection_id):
"""Returns whether the collection is private.
Args:
collection_id: str. ID of the collection.
Returns:
bool. Whether the collection is private.
"""
collection_rights = get_collection_rights(collection_id)
return collection_rights.status == ACTIVITY_STATUS_PRIVATE
def is_collection_public(collection_id):
"""Returns whether the collection is public.
Args:
collection_id: str. ID of the collection.
Returns:
bool. Whether the collection is public.
"""
collection_rights = get_collection_rights(collection_id)
return collection_rights.status == ACTIVITY_STATUS_PUBLIC
def _get_activity_rights(activity_type, activity_id):
"""Retrieves the rights object for the given activity
based on its type.
Args:
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
activity_id: str. ID of the activity.
Returns:
ActivityRights. The rights object associated with the given activity.
Raises:
Exception. activity_type provided is unknown.
"""
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
return get_exploration_rights(activity_id, strict=False)
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
return get_collection_rights(activity_id, strict=False)
else:
raise Exception(
'Cannot get activity rights for unknown activity type: %s' % (
activity_type))
def check_can_access_activity(user, activity_rights):
"""Checks whether the user can access given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
        activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the given activity can be accessed by the given user.
"""
if activity_rights is None:
return False
elif activity_rights.is_published():
return bool(
role_services.ACTION_PLAY_ANY_PUBLIC_ACTIVITY in user.actions)
elif activity_rights.is_private():
return bool(
(role_services.ACTION_PLAY_ANY_PRIVATE_ACTIVITY in user.actions) or
activity_rights.is_viewer(user.user_id) or
activity_rights.is_owner(user.user_id) or
activity_rights.is_editor(user.user_id) or
activity_rights.is_voice_artist(user.user_id) or
activity_rights.viewable_if_private)
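# Illustrative access check (the UserActionsInfo object is assumed to come
# from the caller, e.g. a request handler; the ID is hypothetical):
#
#     rights = get_exploration_rights('exp_123', strict=False)
#     if not check_can_access_activity(user, rights):
#         raise Exception('Access denied.')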
def check_can_edit_activity(user, activity_rights):
"""Checks whether the user can edit given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the given user can edit this activity.
"""
if activity_rights is None:
return False
if role_services.ACTION_EDIT_OWNED_ACTIVITY not in user.actions:
return False
if (activity_rights.is_owner(user.user_id) or
activity_rights.is_editor(user.user_id)):
return True
if (activity_rights.community_owned or
(role_services.ACTION_EDIT_ANY_ACTIVITY in user.actions)):
return True
if (activity_rights.is_published() and
(role_services.ACTION_EDIT_ANY_PUBLIC_ACTIVITY in
user.actions)):
return True
return False
def check_can_voiceover_activity(user, activity_rights):
"""Checks whether the user can voiceover given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the given user can voiceover this activity.
"""
if activity_rights is None:
return False
if role_services.ACTION_EDIT_OWNED_ACTIVITY not in user.actions:
return False
if (activity_rights.is_owner(user.user_id) or
activity_rights.is_editor(user.user_id) or
activity_rights.is_voice_artist(user.user_id)):
return True
if (activity_rights.community_owned or
(role_services.ACTION_EDIT_ANY_ACTIVITY in user.actions)):
return True
if (activity_rights.is_published() and
(role_services.ACTION_EDIT_ANY_PUBLIC_ACTIVITY in
user.actions)):
return True
return False
def check_can_save_activity(user, activity_rights):
"""Checks whether the user can save given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can save given activity.
"""
return (check_can_edit_activity(user, activity_rights) or (
check_can_voiceover_activity(user, activity_rights)))
def check_can_delete_activity(user, activity_rights):
"""Checks whether the user can delete given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can delete given activity.
"""
if activity_rights is None:
return False
if role_services.ACTION_DELETE_ANY_ACTIVITY in user.actions:
return True
elif (activity_rights.is_private() and
(role_services.ACTION_DELETE_OWNED_PRIVATE_ACTIVITY in user.actions)
and activity_rights.is_owner(user.user_id)):
return True
elif (activity_rights.is_published() and
(role_services.ACTION_DELETE_ANY_PUBLIC_ACTIVITY in user.actions)):
return True
return False
def check_can_modify_activity_roles(user, activity_rights):
"""Checks whether the user can modify roles for given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can modify roles for given activity.
"""
if activity_rights is None:
return False
if (activity_rights.community_owned or
activity_rights.cloned_from):
return False
if (role_services.ACTION_MODIFY_ROLES_FOR_ANY_ACTIVITY in
user.actions):
return True
if (role_services.ACTION_MODIFY_ROLES_FOR_OWNED_ACTIVITY in
user.actions):
if activity_rights.is_owner(user.user_id):
return True
return False
def check_can_release_ownership(user, activity_rights):
"""Checks whether the user can release ownership for given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can release ownership for given activity.
"""
if activity_rights is None:
return False
if activity_rights.is_private():
return False
return check_can_modify_activity_roles(
user, activity_rights)
def check_can_publish_activity(user, activity_rights):
"""Checks whether the user can publish given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can publish given activity.
"""
if activity_rights is None:
return False
if activity_rights.cloned_from:
return False
if activity_rights.is_published():
return False
if role_services.ACTION_PUBLISH_ANY_ACTIVITY in user.actions:
return True
if role_services.ACTION_PUBLISH_OWNED_ACTIVITY in user.actions:
if activity_rights.is_owner(user.user_id):
return True
return False
def check_can_unpublish_activity(user, activity_rights):
"""Checks whether the user can unpublish given activity.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
activity_rights: ActivityRights or None. Rights object for the given
activity.
Returns:
bool. Whether the user can unpublish given activity.
"""
if activity_rights is None:
return False
if activity_rights.community_owned:
return False
if activity_rights.is_published():
if role_services.ACTION_UNPUBLISH_ANY_PUBLIC_ACTIVITY in user.actions:
return True
return False
def _assign_role(
committer, assignee_id, new_role, activity_id, activity_type):
"""Assigns a new role to the user.
Args:
committer: UserActionsInfo. UserActionInfo object for the user
who is performing the action.
assignee_id: str. ID of the user whose role is being changed.
new_role: str. The name of the new role: One of
ROLE_OWNER
ROLE_EDITOR
ROLE_VOICE_ARTIST
ROLE_VIEWER
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
Raises:
Exception. The committer does not have rights to modify a role.
Exception. The user already owns the activity.
Exception. The user can already edit the activity.
Exception. The user can already voiceover the activity.
Exception. The activity is already publicly editable.
Exception. The activity is already publicly translatable.
Exception. The user can already view the activity.
Exception. The activity is already publicly viewable.
Exception. The role is invalid.
"""
committer_id = committer.user_id
activity_rights = _get_activity_rights(activity_type, activity_id)
if not check_can_modify_activity_roles(committer, activity_rights):
logging.error(
'User %s tried to allow user %s to be a(n) %s of activity %s '
'but was refused permission.' % (
committer_id, assignee_id, new_role, activity_id))
raise Exception(
'UnauthorizedUserException: Could not assign new role.')
assignee_username = user_services.get_username(assignee_id)
old_role = ROLE_NONE
if new_role == ROLE_OWNER:
if activity_rights.is_owner(assignee_id):
raise Exception('This user already owns this %s.' % activity_type)
activity_rights.owner_ids.append(assignee_id)
if assignee_id in activity_rights.viewer_ids:
activity_rights.viewer_ids.remove(assignee_id)
old_role = ROLE_VIEWER
if assignee_id in activity_rights.editor_ids:
activity_rights.editor_ids.remove(assignee_id)
old_role = ROLE_EDITOR
if assignee_id in activity_rights.voice_artist_ids:
activity_rights.voice_artist_ids.remove(assignee_id)
old_role = ROLE_VOICE_ARTIST
elif new_role == ROLE_EDITOR:
if (activity_rights.is_editor(assignee_id) or
activity_rights.is_owner(assignee_id)):
raise Exception(
'This user already can edit this %s.' % activity_type)
activity_rights.editor_ids.append(assignee_id)
if assignee_id in activity_rights.voice_artist_ids:
activity_rights.voice_artist_ids.remove(assignee_id)
old_role = ROLE_VOICE_ARTIST
if assignee_id in activity_rights.viewer_ids:
activity_rights.viewer_ids.remove(assignee_id)
old_role = ROLE_VIEWER
elif new_role == ROLE_VOICE_ARTIST:
if (activity_rights.is_editor(assignee_id) or
activity_rights.is_voice_artist(assignee_id) or
activity_rights.is_owner(assignee_id)):
raise Exception(
'This user already can voiceover this %s.' % activity_type)
activity_rights.voice_artist_ids.append(assignee_id)
if assignee_id in activity_rights.viewer_ids:
activity_rights.viewer_ids.remove(assignee_id)
old_role = ROLE_VIEWER
elif new_role == ROLE_VIEWER:
if (activity_rights.is_owner(assignee_id) or
activity_rights.is_editor(assignee_id) or
activity_rights.is_viewer(assignee_id)):
raise Exception(
'This user already can view this %s.' % activity_type)
if activity_rights.status != ACTIVITY_STATUS_PRIVATE:
raise Exception(
'Public %ss can be viewed by anyone.' % activity_type)
activity_rights.viewer_ids.append(assignee_id)
else:
raise Exception('Invalid role: %s' % new_role)
commit_message = 'Changed role of %s from %s to %s' % (
assignee_username, old_role, new_role)
commit_cmds = [{
'cmd': CMD_CHANGE_ROLE,
'assignee_id': assignee_id,
'old_role': old_role,
'new_role': new_role
}]
_save_activity_rights(
committer_id, activity_rights, activity_type,
commit_message, commit_cmds)
_update_activity_summary(activity_type, activity_rights)
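# Sketch of a role transition handled above (hypothetical user): promoting
# an existing viewer to editor first removes them from viewer_ids, so each
# user ends up with exactly one role per activity.
#
#     # before: rights.viewer_ids == ['uid_abc']
#     # _assign_role(committer, 'uid_abc', ROLE_EDITOR, 'exp_123',
#     #              constants.ACTIVITY_TYPE_EXPLORATION)
#     # after:  rights.editor_ids == ['uid_abc'], rights.viewer_ids == []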
def _release_ownership_of_activity(committer, activity_id, activity_type):
"""Releases ownership of the given activity to the community.
Args:
committer: UserActionsInfo. UserActionsInfo object for the user who
is performing the action.
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
    Raises:
Exception. The committer does not have release rights.
"""
committer_id = committer.user_id
activity_rights = _get_activity_rights(activity_type, activity_id)
if not check_can_release_ownership(committer, activity_rights):
logging.error(
'User %s tried to release ownership of %s %s but was '
'refused permission.' % (committer_id, activity_type, activity_id))
raise Exception(
'The ownership of this %s cannot be released.' % activity_type)
activity_rights.community_owned = True
activity_rights.owner_ids = []
activity_rights.editor_ids = []
activity_rights.viewer_ids = []
commit_cmds = [{
'cmd': CMD_RELEASE_OWNERSHIP,
}]
_save_activity_rights(
committer_id, activity_rights, activity_type,
'%s ownership released to the community.' % activity_type, commit_cmds)
_update_activity_summary(activity_type, activity_rights)
def _change_activity_status(
committer_id, activity_id, activity_type, new_status, commit_message):
"""Changes the status of the given activity.
Args:
committer_id: str. ID of the user who is performing the update action.
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
new_status: str. The new status of the activity.
commit_message: str. The human-written commit message for this change.
"""
activity_rights = _get_activity_rights(activity_type, activity_id)
old_status = activity_rights.status
activity_rights.status = new_status
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
cmd_type = CMD_CHANGE_EXPLORATION_STATUS
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
cmd_type = CMD_CHANGE_COLLECTION_STATUS
commit_cmds = [{
'cmd': cmd_type,
'old_status': old_status,
'new_status': new_status
}]
if new_status != ACTIVITY_STATUS_PRIVATE:
activity_rights.viewer_ids = []
if activity_rights.first_published_msec is None:
activity_rights.first_published_msec = (
utils.get_current_time_in_millisecs())
_save_activity_rights(
committer_id, activity_rights, activity_type, commit_message,
commit_cmds)
_update_activity_summary(activity_type, activity_rights)
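# Note on the publication side effects above: leaving the private status
# clears viewer_ids (public activities need no viewer allowlist) and stamps
# first_published_msec only on the first ever publication.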
def _publish_activity(committer, activity_id, activity_type):
"""Publishes the given activity.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
Raises:
Exception. The committer does not have rights to publish the
activity.
"""
committer_id = committer.user_id
activity_rights = _get_activity_rights(activity_type, activity_id)
if not check_can_publish_activity(committer, activity_rights):
logging.error(
'User %s tried to publish %s %s but was refused '
'permission.' % (committer_id, activity_type, activity_id))
raise Exception('This %s cannot be published.' % activity_type)
_change_activity_status(
committer_id, activity_id, activity_type, ACTIVITY_STATUS_PUBLIC,
'%s published.' % activity_type)
def _unpublish_activity(committer, activity_id, activity_type):
"""Unpublishes the given activity.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
activity_id: str. ID of the activity.
activity_type: str. The type of activity. Possible values:
constants.ACTIVITY_TYPE_EXPLORATION
constants.ACTIVITY_TYPE_COLLECTION
Raises:
Exception. The committer does not have rights to unpublish the
activity.
"""
committer_id = committer.user_id
activity_rights = _get_activity_rights(activity_type, activity_id)
if not check_can_unpublish_activity(committer, activity_rights):
logging.error(
'User %s tried to unpublish %s %s but was refused '
'permission.' % (committer_id, activity_type, activity_id))
raise Exception('This %s cannot be unpublished.' % activity_type)
_change_activity_status(
committer_id, activity_id, activity_type, ACTIVITY_STATUS_PRIVATE,
'%s unpublished.' % activity_type)
activity_services.remove_featured_activity(activity_type, activity_id)
# Rights functions for activities.
def assign_role_for_exploration(
committer, exploration_id, assignee_id, new_role):
"""Assigns a user to the given role and subscribes the assignee to future
exploration updates.
The caller should ensure that assignee_id corresponds to a valid user in
the system.
Args:
committer: UserActionsInfo. The UserActionsInfo object for the
committer.
exploration_id: str. ID of the exploration.
assignee_id: str. ID of the user whose role is being changed.
new_role: str. The name of the new role: One of
ROLE_OWNER
ROLE_EDITOR
ROLE_VOICE_ARTIST
Raises:
Exception. This could potentially throw an exception from
_assign_role.
"""
_assign_role(
committer, assignee_id, new_role, exploration_id,
constants.ACTIVITY_TYPE_EXPLORATION)
if new_role in [ROLE_OWNER, ROLE_EDITOR, ROLE_VOICE_ARTIST]:
subscription_services.subscribe_to_exploration(
assignee_id, exploration_id)
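# Usage sketch (hypothetical IDs): granting an editing-capable role also
# subscribes the assignee to future exploration updates.
#
#     assign_role_for_exploration(
#         committer, 'exp_123', 'uid_abc', ROLE_EDITOR)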
def release_ownership_of_exploration(committer, exploration_id):
"""Releases ownership of the given exploration to the community.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
exploration_id: str. ID of the exploration.
Raises:
Exception. This could potentially throw an exception from
_release_ownership_of_activity.
"""
_release_ownership_of_activity(
committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION)
def set_private_viewability_of_exploration(
committer, exploration_id, viewable_if_private):
"""Sets the viewable_if_private attribute for the given exploration's rights
object.
If viewable_if_private is True, this allows a private exploration
to be viewed by anyone with the link.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
exploration_id: str. ID of the exploration.
viewable_if_private: bool. Whether the exploration should be made
viewable (by anyone with the link).
Raises:
Exception. The committer does not have the permission to perform change
action.
        Exception. The viewable_if_private property already has the given
            value.
"""
committer_id = committer.user_id
exploration_rights = get_exploration_rights(exploration_id)
# The user who can publish activity can change its private viewability.
if not check_can_publish_activity(committer, exploration_rights):
logging.error(
'User %s tried to change private viewability of exploration %s '
'but was refused permission.' % (committer_id, exploration_id))
raise Exception(
'The viewability status of this exploration cannot be changed.')
old_viewable_if_private = exploration_rights.viewable_if_private
if old_viewable_if_private == viewable_if_private:
raise Exception(
'Trying to change viewability status of this exploration to %s, '
'but that is already the current value.' % viewable_if_private)
exploration_rights.viewable_if_private = viewable_if_private
commit_cmds = [{
'cmd': CMD_CHANGE_PRIVATE_VIEWABILITY,
'old_viewable_if_private': old_viewable_if_private,
'new_viewable_if_private': viewable_if_private,
}]
commit_message = (
'Made exploration viewable to anyone with the link.'
if viewable_if_private else
'Made exploration viewable only to invited playtesters.')
_save_activity_rights(
committer_id, exploration_rights, constants.ACTIVITY_TYPE_EXPLORATION,
commit_message, commit_cmds)
_update_exploration_summary(exploration_rights)
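# Usage sketch (hypothetical IDs): sharing a private exploration with anyone
# who holds the link, without publishing it.
#
#     set_private_viewability_of_exploration(
#         committer, 'exp_123', viewable_if_private=True)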
def publish_exploration(committer, exploration_id):
"""Publishes the given exploration.
It is the responsibility of the caller to check that the exploration is
valid prior to publication.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
exploration_id: str. ID of the exploration.
Raises:
Exception. This could potentially throw an exception from
_publish_activity.
"""
_publish_activity(
committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION)
def unpublish_exploration(committer, exploration_id):
"""Unpublishes the given exploration.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
exploration_id: str. ID of the exploration.
Raises:
Exception. This could potentially throw an exception from
_unpublish_activity.
"""
_unpublish_activity(
committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION)
# Rights functions for collections.
def assign_role_for_collection(
committer, collection_id, assignee_id, new_role):
"""Assign the given user to the given role and subscribes the assignee
to future collection updates.
The caller should ensure that assignee_id corresponds to a valid user in
the system.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
collection_id: str. ID of the collection.
assignee_id: str. ID of the user whose role is being changed.
new_role: str. The name of the new role: One of
ROLE_OWNER
ROLE_EDITOR
Raises:
Exception. This could potentially throw an exception from
_assign_role.
"""
_assign_role(
committer, assignee_id, new_role, collection_id,
constants.ACTIVITY_TYPE_COLLECTION)
if new_role in [ROLE_OWNER, ROLE_EDITOR]:
subscription_services.subscribe_to_collection(
assignee_id, collection_id)
def release_ownership_of_collection(committer, collection_id):
"""Releases ownership of the given collection to the community.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
collection_id: str. ID of the collection.
Raises:
Exception. This could potentially throw an exception from
_release_ownership_of_activity.
"""
_release_ownership_of_activity(
committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION)
def publish_collection(committer, collection_id):
"""Publishes the given collection.
It is the responsibility of the caller to check that the collection is
valid prior to publication.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
collection_id: str. ID of the collection.
Raises:
Exception. This could potentially throw an exception from
_publish_activity.
"""
_publish_activity(
committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION)
def unpublish_collection(committer, collection_id):
"""Unpublishes the given collection.
Args:
committer: UserActionsInfo. UserActionsInfo object for the committer.
collection_id: str. ID of the collection.
Raises:
Exception. This could potentially throw an exception from
_unpublish_activity.
"""
_unpublish_activity(
committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION)
hexsha: f709949f002c77e7ca71cbfadd25bd0c223af1b8
size: 18637 | ext: py | lang: Python
path: vispy/visuals/line/line.py
repo: jni/vispy @ 8b61cd439076aa3f50ac5f6dacb4c0af8c1d0684
licenses: ["BSD-3-Clause"]
stars: 3 (2019-02-28T16:05:33.000Z to 2020-05-03T21:29:03.000Z)
issues: null | forks: null
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Line visual implementing Agg- and GL-based drawing modes.
"""
from __future__ import division
import numpy as np
from ... import gloo, glsl
from ...color import Color, ColorArray, get_colormap
from ...ext.six import string_types
from ..shaders import Function
from ..visual import Visual, CompoundVisual
from ...util.profiler import Profiler
from .dash_atlas import DashAtlas
vec2to4 = Function("""
vec4 vec2to4(vec2 inp) {
return vec4(inp, 0, 1);
}
""")
vec3to4 = Function("""
vec4 vec3to4(vec3 inp) {
return vec4(inp, 1);
}
""")
"""
TODO:
* Agg support is very minimal; needs attention.
* Optimization--avoid creating new buffers, avoid triggering program
recompile.
"""
joins = {'miter': 0, 'round': 1, 'bevel': 2}
caps = {'': 0, 'none': 0, '.': 0,
'round': 1, ')': 1, '(': 1, 'o': 1,
'triangle in': 2, '<': 2,
'triangle out': 3, '>': 3,
'square': 4, '=': 4, 'butt': 4,
'|': 5}
class LineVisual(CompoundVisual):
"""Line visual
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
Can also be a colormap name, or appropriate `Function`.
    width : float
        The width of the line in px. Line widths > 1px are only
        guaranteed to work when using the 'agg' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* numpy arrays specify the exact set of segment pairs to
connect.
method : str
Mode to use for drawing.
* "agg" uses anti-grain geometry to draw nicely antialiased lines
with proper joins and endcaps.
* "gl" uses OpenGL's built-in line rendering. This is much faster,
but produces much lower-quality results and is not guaranteed to
obey the requested line width or join/endcap styles.
antialias : bool
Enables or disables antialiasing.
For method='gl', this specifies whether to use GL's line smoothing,
which may be unavailable or inconsistent on some platforms.
"""
def __init__(self, pos=None, color=(0.5, 0.5, 0.5, 1), width=1,
connect='strip', method='gl', antialias=False):
self._line_visual = None
self._changed = {'pos': False, 'color': False, 'width': False,
'connect': False}
self._pos = None
self._color = None
self._width = None
self._connect = None
self._bounds = None
self._antialias = None
self._method = 'none'
CompoundVisual.__init__(self, [])
# don't call subclass set_data; these often have different
# signatures.
LineVisual.set_data(self, pos=pos, color=color, width=width,
connect=connect)
self.antialias = antialias
self.method = method
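    # Usage sketch (assumes the usual vispy embedding, e.g. adding the visual
    # to a scene; data values are illustrative):
    #
    #     pos = np.random.normal(size=(100, 2), scale=0.2).astype(np.float32)
    #     line = LineVisual(pos=pos, color='red', width=2,
    #                       connect='strip', method='gl')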
@property
def antialias(self):
return self._antialias
@antialias.setter
def antialias(self, aa):
self._antialias = bool(aa)
self.update()
@property
def method(self):
"""The current drawing method"""
return self._method
@method.setter
def method(self, method):
if method not in ('agg', 'gl'):
raise ValueError('method argument must be "agg" or "gl".')
if method == self._method:
return
self._method = method
if self._line_visual is not None:
self.remove_subvisual(self._line_visual)
if method == 'gl':
self._line_visual = _GLLineVisual(self)
elif method == 'agg':
self._line_visual = _AggLineVisual(self)
self.add_subvisual(self._line_visual)
for k in self._changed:
self._changed[k] = True
def set_data(self, pos=None, color=None, width=None, connect=None):
"""Set the data used to draw this visual.
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
        width : float
The width of the line in px. Line widths < 1 px will be rounded up
to 1 px when using the 'gl' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* int numpy arrays specify the exact set of segment pairs to
connect.
* bool numpy arrays specify which _adjacent_ pairs to connect.
"""
if pos is not None:
self._bounds = None
self._pos = pos
self._changed['pos'] = True
if color is not None:
self._color = color
self._changed['color'] = True
if width is not None:
self._width = width
self._changed['width'] = True
if connect is not None:
self._connect = connect
self._changed['connect'] = True
self.update()
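    # Sketch of incremental updates (illustrative data): only the attributes
    # flagged in self._changed are re-uploaded on the next draw.
    #
    #     line.set_data(color=(0., 1., 0., 1.))   # recolor only
    #     line.set_data(pos=new_pos, width=3)     # reposition and widen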
@property
def color(self):
return self._color
@property
def width(self):
return self._width
@property
def connect(self):
return self._connect
@property
def pos(self):
return self._pos
def _interpret_connect(self):
if isinstance(self._connect, np.ndarray):
# Convert a boolean connection array to a vertex index array
if self._connect.ndim == 1 and self._connect.dtype == bool:
index = np.empty((len(self._connect), 2), dtype=np.uint32)
index[:] = np.arange(len(self._connect))[:, np.newaxis]
index[:, 1] += 1
return index[self._connect]
elif self._connect.ndim == 2 and self._connect.shape[1] == 2:
return self._connect.astype(np.uint32)
else:
raise TypeError("Got invalid connect array of shape %r and "
"dtype %r" % (self._connect.shape,
self._connect.dtype))
else:
return self._connect
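    # Example of the boolean-to-index conversion above (assumed input): for
    # four vertices, connect=np.array([True, True, False]) yields the index
    # pairs [[0, 1], [1, 2]], i.e. the segment from vertex 2 to 3 is skipped.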
def _interpret_color(self, color_in=None):
color_in = self._color if color_in is None else color_in
colormap = None
if isinstance(color_in, string_types):
try:
colormap = get_colormap(color_in)
color = Function(colormap.glsl_map)
except KeyError:
color = Color(color_in).rgba
elif isinstance(color_in, Function):
color = Function(color_in)
else:
color = ColorArray(color_in).rgba
if len(color) == 1:
color = color[0]
return color, colormap
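    # The accepted color forms, as handled above (illustrative values): a
    # colormap name such as 'viridis' becomes a GLSL Function, a single color
    # like 'red' or (1., 0., 0., 1.) collapses to one rgba vector, and an
    # (N, 4) array is kept as per-vertex rgba.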
def _compute_bounds(self, axis, view):
"""Get the bounds
Parameters
----------
mode : str
Describes the type of boundary requested. Can be "visual", "data",
or "mouse".
axis : 0, 1, 2
The axis along which to measure the bounding values, in
x-y-z order.
"""
# Can and should we calculate bounds?
if (self._bounds is None) and self._pos is not None:
pos = self._pos
self._bounds = [(pos[:, d].min(), pos[:, d].max())
for d in range(pos.shape[1])]
# Return what we can
if self._bounds is None:
return
else:
if axis < len(self._bounds):
return self._bounds[axis]
else:
return (0, 0)
def _prepare_draw(self, view):
if self._width == 0:
return False
CompoundVisual._prepare_draw(self, view)
class _GLLineVisual(Visual):
VERTEX_SHADER = """
varying vec4 v_color;
void main(void) {
gl_Position = $transform($to_vec4($position));
v_color = $color;
}
"""
FRAGMENT_SHADER = """
varying vec4 v_color;
void main() {
gl_FragColor = v_color;
}
"""
def __init__(self, parent):
self._parent = parent
self._pos_vbo = gloo.VertexBuffer()
self._color_vbo = gloo.VertexBuffer()
self._connect_ibo = gloo.IndexBuffer()
self._connect = None
Visual.__init__(self, vcode=self.VERTEX_SHADER,
fcode=self.FRAGMENT_SHADER)
self.set_gl_state('translucent')
def _prepare_transforms(self, view):
xform = view.transforms.get_transform()
view.view_program.vert['transform'] = xform
def _prepare_draw(self, view):
prof = Profiler()
if self._parent._changed['pos']:
if self._parent._pos is None:
return False
# todo: does this result in unnecessary copies?
pos = np.ascontiguousarray(self._parent._pos.astype(np.float32))
self._pos_vbo.set_data(pos)
self._program.vert['position'] = self._pos_vbo
if pos.shape[-1] == 2:
self._program.vert['to_vec4'] = vec2to4
elif pos.shape[-1] == 3:
self._program.vert['to_vec4'] = vec3to4
else:
raise TypeError("Got bad position array shape: %r"
% (pos.shape,))
if self._parent._changed['color']:
color, cmap = self._parent._interpret_color()
# If color is not visible, just quit now
if isinstance(color, Color) and color.is_blank:
return False
if isinstance(color, Function):
# TODO: Change to the parametric coordinate once that is done
self._program.vert['color'] = color(
'(gl_Position.x + 1.0) / 2.0')
else:
if color.ndim == 1:
self._program.vert['color'] = color
else:
self._color_vbo.set_data(color)
self._program.vert['color'] = self._color_vbo
self.shared_program['texture2D_LUT'] = cmap.texture_lut() \
if (hasattr(cmap, 'texture_lut')) else None
# Do we want to use OpenGL, and can we?
GL = None
from ...app._default_app import default_app
if default_app is not None and \
default_app.backend_name != 'ipynb_webgl':
try:
import OpenGL.GL as GL
except Exception: # can be other than ImportError sometimes
pass
# Turn on line smooth and/or line width
if GL:
if self._parent._antialias:
GL.glEnable(GL.GL_LINE_SMOOTH)
else:
GL.glDisable(GL.GL_LINE_SMOOTH)
px_scale = self.transforms.pixel_scale
width = px_scale * self._parent._width
GL.glLineWidth(max(width, 1.))
if self._parent._changed['connect']:
self._connect = self._parent._interpret_connect()
if isinstance(self._connect, np.ndarray):
self._connect_ibo.set_data(self._connect)
if self._connect is None:
return False
prof('prepare')
# Draw
if isinstance(self._connect, string_types) and \
self._connect == 'strip':
self._draw_mode = 'line_strip'
self._index_buffer = None
elif isinstance(self._connect, string_types) and \
self._connect == 'segments':
self._draw_mode = 'lines'
self._index_buffer = None
elif isinstance(self._connect, np.ndarray):
self._draw_mode = 'lines'
self._index_buffer = self._connect_ibo
else:
raise ValueError("Invalid line connect mode: %r" % self._connect)
prof('draw')
class _AggLineVisual(Visual):
_agg_vtype = np.dtype([('a_position', np.float32, (2,)),
('a_tangents', np.float32, (4,)),
('a_segment', np.float32, (2,)),
('a_angles', np.float32, (2,)),
('a_texcoord', np.float32, (2,)),
('alength', np.float32),
('color', np.float32, (4,))])
VERTEX_SHADER = glsl.get('lines/agg.vert')
FRAGMENT_SHADER = glsl.get('lines/agg.frag')
def __init__(self, parent):
self._parent = parent
self._vbo = gloo.VertexBuffer()
self._pos = None
self._color = None
self._da = DashAtlas()
dash_index, dash_period = self._da['solid']
self._U = dict(dash_index=dash_index, dash_period=dash_period,
linejoin=joins['round'],
linecaps=(caps['round'], caps['round']),
dash_caps=(caps['round'], caps['round']),
antialias=1.0)
self._dash_atlas = gloo.Texture2D(self._da._data)
Visual.__init__(self, vcode=self.VERTEX_SHADER,
fcode=self.FRAGMENT_SHADER)
self._index_buffer = gloo.IndexBuffer()
self.set_gl_state('translucent', depth_test=False)
self._draw_mode = 'triangles'
def _prepare_transforms(self, view):
data_doc = view.get_transform('visual', 'document')
doc_px = view.get_transform('document', 'framebuffer')
px_ndc = view.get_transform('framebuffer', 'render')
vert = view.view_program.vert
vert['transform'] = data_doc
vert['doc_px_transform'] = doc_px
vert['px_ndc_transform'] = px_ndc
def _prepare_draw(self, view):
bake = False
if self._parent._changed['pos']:
if self._parent._pos is None:
return False
# todo: does this result in unnecessary copies?
self._pos = np.ascontiguousarray(
self._parent._pos.astype(np.float32))
bake = True
if self._parent._changed['color']:
color, cmap = self._parent._interpret_color()
self._color = color
bake = True
if self._parent._changed['connect']:
if self._parent._connect not in [None, 'strip']:
raise NotImplementedError("Only 'strip' connection mode "
"allowed for agg-method lines.")
if bake:
V, idxs = self._agg_bake(self._pos, self._color)
self._vbo.set_data(V)
self._index_buffer.set_data(idxs)
# self._program.prepare()
self.shared_program.bind(self._vbo)
uniforms = dict(closed=False, miter_limit=4.0, dash_phase=0.0,
linewidth=self._parent._width)
for n, v in uniforms.items():
self.shared_program[n] = v
for n, v in self._U.items():
self.shared_program[n] = v
self.shared_program['u_dash_atlas'] = self._dash_atlas
@classmethod
def _agg_bake(cls, vertices, color, closed=False):
"""
        Bake a list of 2D vertices for rendering them as a thick line. Each
        line segment must have its own vertices because of antialiasing (this
        means no vertex sharing between two adjacent line segments).
"""
n = len(vertices)
P = np.array(vertices).reshape(n, 2).astype(float)
idx = np.arange(n) # used to eventually tile the color array
dx, dy = P[0] - P[-1]
d = np.sqrt(dx*dx+dy*dy)
# If closed, make sure first vertex = last vertex (+/- epsilon=1e-10)
if closed and d > 1e-10:
P = np.append(P, P[0]).reshape(n+1, 2)
idx = np.append(idx, idx[-1])
n += 1
V = np.zeros(len(P), dtype=cls._agg_vtype)
V['a_position'] = P
# Tangents & norms
T = P[1:] - P[:-1]
N = np.sqrt(T[:, 0]**2 + T[:, 1]**2)
# T /= N.reshape(len(T),1)
V['a_tangents'][+1:, :2] = T
V['a_tangents'][0, :2] = T[-1] if closed else T[0]
V['a_tangents'][:-1, 2:] = T
V['a_tangents'][-1, 2:] = T[0] if closed else T[-1]
# Angles
T1 = V['a_tangents'][:, :2]
T2 = V['a_tangents'][:, 2:]
A = np.arctan2(T1[:, 0]*T2[:, 1]-T1[:, 1]*T2[:, 0],
T1[:, 0]*T2[:, 0]+T1[:, 1]*T2[:, 1])
V['a_angles'][:-1, 0] = A[:-1]
V['a_angles'][:-1, 1] = A[+1:]
# Segment
L = np.cumsum(N)
V['a_segment'][+1:, 0] = L
V['a_segment'][:-1, 1] = L
# V['a_lengths'][:,2] = L[-1]
# Step 1: A -- B -- C => A -- B, B' -- C
V = np.repeat(V, 2, axis=0)[1:-1]
V['a_segment'][1:] = V['a_segment'][:-1]
V['a_angles'][1:] = V['a_angles'][:-1]
V['a_texcoord'][0::2] = -1
V['a_texcoord'][1::2] = +1
idx = np.repeat(idx, 2)[1:-1]
# Step 2: A -- B, B' -- C -> A0/A1 -- B0/B1, B'0/B'1 -- C0/C1
V = np.repeat(V, 2, axis=0)
V['a_texcoord'][0::2, 1] = -1
V['a_texcoord'][1::2, 1] = +1
idx = np.repeat(idx, 2)
idxs = np.resize(np.array([0, 1, 2, 1, 2, 3], dtype=np.uint32),
(n-1)*(2*3))
idxs += np.repeat(4*np.arange(n-1, dtype=np.uint32), 6)
# Length
V['alength'] = L[-1] * np.ones(len(V))
# Color
if color.ndim == 1:
color = np.tile(color, (len(V), 1))
elif color.ndim == 2 and len(color) == n:
color = color[idx]
else:
raise ValueError('Color length %s does not match number of '
'vertices %s' % (len(color), n))
V['color'] = color
return V, idxs
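        # Size bookkeeping for the bake above (added): starting from n
        # vertices, step 1 doubles and trims to 2*n - 2 entries (one pair per
        # segment) and step 2 doubles again to 4*n - 4 entries (two triangles
        # per segment); idxs holds (n - 1) * 6 indices offset by 4 per
        # segment, matching the 'triangles' draw mode set in __init__.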
| 33.823956
| 78
| 0.540806
|
from __future__ import division
import numpy as np
from ... import gloo, glsl
from ...color import Color, ColorArray, get_colormap
from ...ext.six import string_types
from ..shaders import Function
from ..visual import Visual, CompoundVisual
from ...util.profiler import Profiler
from .dash_atlas import DashAtlas
vec2to4 = Function("""
vec4 vec2to4(vec2 inp) {
return vec4(inp, 0, 1);
}
""")
vec3to4 = Function("""
vec4 vec3to4(vec3 inp) {
return vec4(inp, 1);
}
""")
joins = {'miter': 0, 'round': 1, 'bevel': 2}
caps = {'': 0, 'none': 0, '.': 0,
'round': 1, ')': 1, '(': 1, 'o': 1,
'triangle in': 2, '<': 2,
'triangle out': 3, '>': 3,
'square': 4, '=': 4, 'butt': 4,
'|': 5}
class LineVisual(CompoundVisual):
def __init__(self, pos=None, color=(0.5, 0.5, 0.5, 1), width=1,
connect='strip', method='gl', antialias=False):
self._line_visual = None
self._changed = {'pos': False, 'color': False, 'width': False,
'connect': False}
self._pos = None
self._color = None
self._width = None
self._connect = None
self._bounds = None
self._antialias = None
self._method = 'none'
CompoundVisual.__init__(self, [])
        # Call LineVisual.set_data explicitly here: subclasses may override
        # set_data with different signatures.
LineVisual.set_data(self, pos=pos, color=color, width=width,
connect=connect)
self.antialias = antialias
self.method = method
@property
def antialias(self):
return self._antialias
@antialias.setter
def antialias(self, aa):
self._antialias = bool(aa)
self.update()
@property
def method(self):
return self._method
@method.setter
def method(self, method):
if method not in ('agg', 'gl'):
raise ValueError('method argument must be "agg" or "gl".')
if method == self._method:
return
self._method = method
if self._line_visual is not None:
self.remove_subvisual(self._line_visual)
if method == 'gl':
self._line_visual = _GLLineVisual(self)
elif method == 'agg':
self._line_visual = _AggLineVisual(self)
self.add_subvisual(self._line_visual)
for k in self._changed:
self._changed[k] = True
def set_data(self, pos=None, color=None, width=None, connect=None):
if pos is not None:
self._bounds = None
self._pos = pos
self._changed['pos'] = True
if color is not None:
self._color = color
self._changed['color'] = True
if width is not None:
self._width = width
self._changed['width'] = True
if connect is not None:
self._connect = connect
self._changed['connect'] = True
self.update()
@property
def color(self):
return self._color
@property
def width(self):
return self._width
@property
def connect(self):
return self._connect
@property
def pos(self):
return self._pos
def _interpret_connect(self):
if isinstance(self._connect, np.ndarray):
# Convert a boolean connection array to a vertex index array
if self._connect.ndim == 1 and self._connect.dtype == bool:
index = np.empty((len(self._connect), 2), dtype=np.uint32)
index[:] = np.arange(len(self._connect))[:, np.newaxis]
index[:, 1] += 1
return index[self._connect]
elif self._connect.ndim == 2 and self._connect.shape[1] == 2:
return self._connect.astype(np.uint32)
else:
raise TypeError("Got invalid connect array of shape %r and "
"dtype %r" % (self._connect.shape,
self._connect.dtype))
else:
return self._connect
def _interpret_color(self, color_in=None):
color_in = self._color if color_in is None else color_in
colormap = None
if isinstance(color_in, string_types):
try:
colormap = get_colormap(color_in)
color = Function(colormap.glsl_map)
except KeyError:
color = Color(color_in).rgba
elif isinstance(color_in, Function):
color = Function(color_in)
else:
color = ColorArray(color_in).rgba
if len(color) == 1:
color = color[0]
return color, colormap
def _compute_bounds(self, axis, view):
# Can and should we calculate bounds?
if (self._bounds is None) and self._pos is not None:
pos = self._pos
self._bounds = [(pos[:, d].min(), pos[:, d].max())
for d in range(pos.shape[1])]
# Return what we can
if self._bounds is None:
return
else:
if axis < len(self._bounds):
return self._bounds[axis]
else:
return (0, 0)
def _prepare_draw(self, view):
if self._width == 0:
return False
CompoundVisual._prepare_draw(self, view)
class _GLLineVisual(Visual):
VERTEX_SHADER = """
varying vec4 v_color;
void main(void) {
gl_Position = $transform($to_vec4($position));
v_color = $color;
}
"""
FRAGMENT_SHADER = """
varying vec4 v_color;
void main() {
gl_FragColor = v_color;
}
"""
def __init__(self, parent):
self._parent = parent
self._pos_vbo = gloo.VertexBuffer()
self._color_vbo = gloo.VertexBuffer()
self._connect_ibo = gloo.IndexBuffer()
self._connect = None
Visual.__init__(self, vcode=self.VERTEX_SHADER,
fcode=self.FRAGMENT_SHADER)
self.set_gl_state('translucent')
def _prepare_transforms(self, view):
xform = view.transforms.get_transform()
view.view_program.vert['transform'] = xform
def _prepare_draw(self, view):
prof = Profiler()
if self._parent._changed['pos']:
if self._parent._pos is None:
return False
# todo: does this result in unnecessary copies?
pos = np.ascontiguousarray(self._parent._pos.astype(np.float32))
self._pos_vbo.set_data(pos)
self._program.vert['position'] = self._pos_vbo
if pos.shape[-1] == 2:
self._program.vert['to_vec4'] = vec2to4
elif pos.shape[-1] == 3:
self._program.vert['to_vec4'] = vec3to4
else:
raise TypeError("Got bad position array shape: %r"
% (pos.shape,))
if self._parent._changed['color']:
color, cmap = self._parent._interpret_color()
# If color is not visible, just quit now
if isinstance(color, Color) and color.is_blank:
return False
if isinstance(color, Function):
# TODO: Change to the parametric coordinate once that is done
self._program.vert['color'] = color(
'(gl_Position.x + 1.0) / 2.0')
else:
if color.ndim == 1:
self._program.vert['color'] = color
else:
self._color_vbo.set_data(color)
self._program.vert['color'] = self._color_vbo
self.shared_program['texture2D_LUT'] = cmap.texture_lut() \
if (hasattr(cmap, 'texture_lut')) else None
# Do we want to use OpenGL, and can we?
GL = None
from ...app._default_app import default_app
if default_app is not None and \
default_app.backend_name != 'ipynb_webgl':
try:
import OpenGL.GL as GL
except Exception: # can be other than ImportError sometimes
pass
# Turn on line smooth and/or line width
if GL:
if self._parent._antialias:
GL.glEnable(GL.GL_LINE_SMOOTH)
else:
GL.glDisable(GL.GL_LINE_SMOOTH)
px_scale = self.transforms.pixel_scale
width = px_scale * self._parent._width
GL.glLineWidth(max(width, 1.))
if self._parent._changed['connect']:
self._connect = self._parent._interpret_connect()
if isinstance(self._connect, np.ndarray):
self._connect_ibo.set_data(self._connect)
if self._connect is None:
return False
prof('prepare')
# Draw
if isinstance(self._connect, string_types) and \
self._connect == 'strip':
self._draw_mode = 'line_strip'
self._index_buffer = None
elif isinstance(self._connect, string_types) and \
self._connect == 'segments':
self._draw_mode = 'lines'
self._index_buffer = None
elif isinstance(self._connect, np.ndarray):
self._draw_mode = 'lines'
self._index_buffer = self._connect_ibo
else:
raise ValueError("Invalid line connect mode: %r" % self._connect)
prof('draw')
class _AggLineVisual(Visual):
_agg_vtype = np.dtype([('a_position', np.float32, (2,)),
('a_tangents', np.float32, (4,)),
('a_segment', np.float32, (2,)),
('a_angles', np.float32, (2,)),
('a_texcoord', np.float32, (2,)),
('alength', np.float32),
('color', np.float32, (4,))])
VERTEX_SHADER = glsl.get('lines/agg.vert')
FRAGMENT_SHADER = glsl.get('lines/agg.frag')
def __init__(self, parent):
self._parent = parent
self._vbo = gloo.VertexBuffer()
self._pos = None
self._color = None
self._da = DashAtlas()
dash_index, dash_period = self._da['solid']
self._U = dict(dash_index=dash_index, dash_period=dash_period,
linejoin=joins['round'],
linecaps=(caps['round'], caps['round']),
dash_caps=(caps['round'], caps['round']),
antialias=1.0)
self._dash_atlas = gloo.Texture2D(self._da._data)
Visual.__init__(self, vcode=self.VERTEX_SHADER,
fcode=self.FRAGMENT_SHADER)
self._index_buffer = gloo.IndexBuffer()
self.set_gl_state('translucent', depth_test=False)
self._draw_mode = 'triangles'
def _prepare_transforms(self, view):
data_doc = view.get_transform('visual', 'document')
doc_px = view.get_transform('document', 'framebuffer')
px_ndc = view.get_transform('framebuffer', 'render')
vert = view.view_program.vert
vert['transform'] = data_doc
vert['doc_px_transform'] = doc_px
vert['px_ndc_transform'] = px_ndc
def _prepare_draw(self, view):
bake = False
if self._parent._changed['pos']:
if self._parent._pos is None:
return False
# todo: does this result in unnecessary copies?
self._pos = np.ascontiguousarray(
self._parent._pos.astype(np.float32))
bake = True
if self._parent._changed['color']:
color, cmap = self._parent._interpret_color()
self._color = color
bake = True
if self._parent._changed['connect']:
if self._parent._connect not in [None, 'strip']:
raise NotImplementedError("Only 'strip' connection mode "
"allowed for agg-method lines.")
if bake:
V, idxs = self._agg_bake(self._pos, self._color)
self._vbo.set_data(V)
self._index_buffer.set_data(idxs)
# self._program.prepare()
self.shared_program.bind(self._vbo)
uniforms = dict(closed=False, miter_limit=4.0, dash_phase=0.0,
linewidth=self._parent._width)
for n, v in uniforms.items():
self.shared_program[n] = v
for n, v in self._U.items():
self.shared_program[n] = v
self.shared_program['u_dash_atlas'] = self._dash_atlas
@classmethod
def _agg_bake(cls, vertices, color, closed=False):
n = len(vertices)
P = np.array(vertices).reshape(n, 2).astype(float)
idx = np.arange(n) # used to eventually tile the color array
dx, dy = P[0] - P[-1]
d = np.sqrt(dx*dx+dy*dy)
# If closed, make sure first vertex = last vertex (+/- epsilon=1e-10)
if closed and d > 1e-10:
P = np.append(P, P[0]).reshape(n+1, 2)
idx = np.append(idx, idx[-1])
n += 1
V = np.zeros(len(P), dtype=cls._agg_vtype)
V['a_position'] = P
# Tangents & norms
T = P[1:] - P[:-1]
N = np.sqrt(T[:, 0]**2 + T[:, 1]**2)
# T /= N.reshape(len(T),1)
V['a_tangents'][+1:, :2] = T
V['a_tangents'][0, :2] = T[-1] if closed else T[0]
V['a_tangents'][:-1, 2:] = T
V['a_tangents'][-1, 2:] = T[0] if closed else T[-1]
# Angles
T1 = V['a_tangents'][:, :2]
T2 = V['a_tangents'][:, 2:]
A = np.arctan2(T1[:, 0]*T2[:, 1]-T1[:, 1]*T2[:, 0],
T1[:, 0]*T2[:, 0]+T1[:, 1]*T2[:, 1])
V['a_angles'][:-1, 0] = A[:-1]
V['a_angles'][:-1, 1] = A[+1:]
# Segment
L = np.cumsum(N)
V['a_segment'][+1:, 0] = L
V['a_segment'][:-1, 1] = L
# V['a_lengths'][:,2] = L[-1]
# Step 1: A -- B -- C => A -- B, B' -- C
V = np.repeat(V, 2, axis=0)[1:-1]
V['a_segment'][1:] = V['a_segment'][:-1]
V['a_angles'][1:] = V['a_angles'][:-1]
V['a_texcoord'][0::2] = -1
V['a_texcoord'][1::2] = +1
idx = np.repeat(idx, 2)[1:-1]
V = np.repeat(V, 2, axis=0)
V['a_texcoord'][0::2, 1] = -1
V['a_texcoord'][1::2, 1] = +1
idx = np.repeat(idx, 2)
idxs = np.resize(np.array([0, 1, 2, 1, 2, 3], dtype=np.uint32),
(n-1)*(2*3))
idxs += np.repeat(4*np.arange(n-1, dtype=np.uint32), 6)
# Length
V['alength'] = L[-1] * np.ones(len(V))
# Color
if color.ndim == 1:
color = np.tile(color, (len(V), 1))
elif color.ndim == 2 and len(color) == n:
color = color[idx]
else:
raise ValueError('Color length %s does not match number of '
'vertices %s' % (len(color), n))
V['color'] = color
return V, idxs
| true
| true
|
f7099500a68960bb66d8067e80352e61f0cd79d0
| 723
|
py
|
Python
|
main.py
|
valknight/PlexDiscordPresence
|
3fcd236ab8abcef2b11e37dffe5a463b272b5881
|
[
"MIT"
] | 2
|
2019-02-19T18:43:37.000Z
|
2021-09-06T16:36:55.000Z
|
main.py
|
valknight/PlexDiscordPresence
|
3fcd236ab8abcef2b11e37dffe5a463b272b5881
|
[
"MIT"
] | 1
|
2021-09-13T18:30:22.000Z
|
2021-09-13T18:30:22.000Z
|
main.py
|
valknight/PlexDiscordPresence
|
3fcd236ab8abcef2b11e37dffe5a463b272b5881
|
[
"MIT"
] | null | null | null |
import tautulli
import config
import time
from config import client_id
from pypresence import Presence
RPC = Presence(client_id)
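# Sketch of the activity dict this script assumes tautulli returns (field
# names inferred from the usage below, not from any documented schema):
#   {'title': 'Episode name', 'grandparent_title': 'Show name', ...}
# where 'grandparent_title' is empty for media without a parent (e.g. movies).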
def main():
RPC.connect()
print("Check discord")
while True:
current_activity = tautulli.get_my_activity()
if current_activity is not None:
to_send = dict(state=current_activity['title'])
if current_activity['grandparent_title'] != "":
to_send['details'] = current_activity['grandparent_title']
RPC.update(**to_send)
else:
RPC.clear()
time.sleep(15) # rich presence is limited to once per 15 seconds
if __name__ == "__main__":
main()
# print(get_data("get_server_friendly_name"))
| 28.92
| 74
| 0.655602
|
import tautulli
import config
import time
from config import client_id
from pypresence import Presence
RPC = Presence(client_id)
def main():
RPC.connect()
print("Check discord")
while True:
current_activity = tautulli.get_my_activity()
if current_activity is not None:
to_send = dict(state=current_activity['title'])
if current_activity['grandparent_title'] != "":
to_send['details'] = current_activity['grandparent_title']
RPC.update(**to_send)
else:
RPC.clear()
time.sleep(15)
if __name__ == "__main__":
main()
| true
| true
|
f70995ff867a7704a1fe14f6760f10b07cb7ae8b
| 663
|
py
|
Python
|
tests/trestle/core/remote/__init__.py
|
degenaro/compliance-trestle
|
9feb6908c80c3873cf310079144fbbbe20002c54
|
[
"Apache-2.0"
] | null | null | null |
tests/trestle/core/remote/__init__.py
|
degenaro/compliance-trestle
|
9feb6908c80c3873cf310079144fbbbe20002c54
|
[
"Apache-2.0"
] | null | null | null |
tests/trestle/core/remote/__init__.py
|
degenaro/compliance-trestle
|
9feb6908c80c3873cf310079144fbbbe20002c54
|
[
"Apache-2.0"
] | null | null | null |
# -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trestle remote tests."""
| 39
| 74
| 0.742081
| true
| true
|
|
f7099645b764655b7a6b01b5a8b89fb66d9a99e8
| 102
|
py
|
Python
|
diffpy.pdffit2/run_test.py
|
st3107/conda-recipes
|
61a8fbefa807f43f1023397fd00310551da200a9
|
[
"BSD-3-Clause"
] | null | null | null |
diffpy.pdffit2/run_test.py
|
st3107/conda-recipes
|
61a8fbefa807f43f1023397fd00310551da200a9
|
[
"BSD-3-Clause"
] | null | null | null |
diffpy.pdffit2/run_test.py
|
st3107/conda-recipes
|
61a8fbefa807f43f1023397fd00310551da200a9
|
[
"BSD-3-Clause"
] | 1
|
2020-12-01T18:11:29.000Z
|
2020-12-01T18:11:29.000Z
|
#!/usr/bin/env python
import diffpy.pdffit2.tests
assert diffpy.pdffit2.tests.test().wasSuccessful()
| 20.4
| 50
| 0.784314
|
import diffpy.pdffit2.tests
assert diffpy.pdffit2.tests.test().wasSuccessful()
| true
| true
|
f709975689be0cff9a6ae96e30974f303f6430bd
| 2,473
|
py
|
Python
|
src/slim/nets/nets_factory_test.py
|
nghugo88/tf-pose-estimation
|
0df660feeb52957f40f4a5e18920adc317af3653
|
[
"Apache-2.0"
] | 3,326
|
2018-01-26T22:42:25.000Z
|
2022-02-16T13:16:39.000Z
|
src/slim/nets/nets_factory_test.py
|
nghugo88/tf-pose-estimation
|
0df660feeb52957f40f4a5e18920adc317af3653
|
[
"Apache-2.0"
] | 150
|
2017-08-28T14:59:36.000Z
|
2022-03-11T23:21:35.000Z
|
src/slim/nets/nets_factory_test.py
|
nghugo88/tf-pose-estimation
|
0df660feeb52957f40f4a5e18920adc317af3653
|
[
"Apache-2.0"
] | 2,580
|
2017-05-14T14:33:41.000Z
|
2022-03-31T15:04:14.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.inception."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import nets_factory
class NetworksTest(tf.test.TestCase):
def testGetNetworkFnFirstHalf(self):
batch_size = 5
num_classes = 1000
    for net in list(nets_factory.networks_map.keys())[:10]:  # list() needed on Python 3
with tf.Graph().as_default() as g, self.test_session(g):
net_fn = nets_factory.get_network_fn(net, num_classes)
# Most networks use 224 as their default_image_size
image_size = getattr(net_fn, 'default_image_size', 224)
inputs = tf.random_uniform((batch_size, image_size, image_size, 3))
logits, end_points = net_fn(inputs)
self.assertTrue(isinstance(logits, tf.Tensor))
self.assertTrue(isinstance(end_points, dict))
self.assertEqual(logits.get_shape().as_list()[0], batch_size)
self.assertEqual(logits.get_shape().as_list()[-1], num_classes)
def testGetNetworkFnSecondHalf(self):
batch_size = 5
num_classes = 1000
    for net in list(nets_factory.networks_map.keys())[10:]:  # list() needed on Python 3
with tf.Graph().as_default() as g, self.test_session(g):
net_fn = nets_factory.get_network_fn(net, num_classes)
# Most networks use 224 as their default_image_size
image_size = getattr(net_fn, 'default_image_size', 224)
inputs = tf.random_uniform((batch_size, image_size, image_size, 3))
logits, end_points = net_fn(inputs)
self.assertTrue(isinstance(logits, tf.Tensor))
self.assertTrue(isinstance(end_points, dict))
self.assertEqual(logits.get_shape().as_list()[0], batch_size)
self.assertEqual(logits.get_shape().as_list()[-1], num_classes)
if __name__ == '__main__':
tf.test.main()
| 39.887097
| 80
| 0.698342
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import nets_factory
class NetworksTest(tf.test.TestCase):
def testGetNetworkFnFirstHalf(self):
batch_size = 5
num_classes = 1000
    for net in list(nets_factory.networks_map.keys())[:10]:
with tf.Graph().as_default() as g, self.test_session(g):
net_fn = nets_factory.get_network_fn(net, num_classes)
image_size = getattr(net_fn, 'default_image_size', 224)
inputs = tf.random_uniform((batch_size, image_size, image_size, 3))
logits, end_points = net_fn(inputs)
self.assertTrue(isinstance(logits, tf.Tensor))
self.assertTrue(isinstance(end_points, dict))
self.assertEqual(logits.get_shape().as_list()[0], batch_size)
self.assertEqual(logits.get_shape().as_list()[-1], num_classes)
def testGetNetworkFnSecondHalf(self):
batch_size = 5
num_classes = 1000
    for net in list(nets_factory.networks_map.keys())[10:]:
with tf.Graph().as_default() as g, self.test_session(g):
net_fn = nets_factory.get_network_fn(net, num_classes)
image_size = getattr(net_fn, 'default_image_size', 224)
inputs = tf.random_uniform((batch_size, image_size, image_size, 3))
logits, end_points = net_fn(inputs)
self.assertTrue(isinstance(logits, tf.Tensor))
self.assertTrue(isinstance(end_points, dict))
self.assertEqual(logits.get_shape().as_list()[0], batch_size)
self.assertEqual(logits.get_shape().as_list()[-1], num_classes)
if __name__ == '__main__':
tf.test.main()
| true
| true
|
f709978d4de03e050175f1392140b62ad12c1672
| 4,168
|
py
|
Python
|
gale/classification/model/meta_arch/common.py
|
benihime91/litcv
|
1da107e1dcf1f20d6da4ac3f126e22d409a7f92e
|
[
"Apache-2.0"
] | null | null | null |
gale/classification/model/meta_arch/common.py
|
benihime91/litcv
|
1da107e1dcf1f20d6da4ac3f126e22d409a7f92e
|
[
"Apache-2.0"
] | null | null | null |
gale/classification/model/meta_arch/common.py
|
benihime91/litcv
|
1da107e1dcf1f20d6da4ac3f126e22d409a7f92e
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04b_classification.model.meta_arch.common.ipynb (unless otherwise specified).
__all__ = ['GeneralizedImageClassifier']
# Cell
import logging
from collections import namedtuple
from typing import *
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.core.memory import get_human_readable_count
from torch.nn import Module
from ..backbones import ImageClassificationBackbone
from ..build import build_backbone, build_head
from ..heads import ImageClassificationHead
from ....core_classes import BasicModule
from ....utils.shape_spec import ShapeSpec
_logger = logging.getLogger(__name__)
# Cell
class GeneralizedImageClassifier(BasicModule):
"""
    A General Image Classifier. Any model that contains the following two components:
1. Feature extractor (aka backbone)
2. Image Classification head (Pooling + Classifier)
"""
_hypers = namedtuple("hypers", field_names=["lr", "wd"])
def __init__(
self,
backbone: ImageClassificationBackbone,
head: ImageClassificationHead,
):
"""
Arguments:
        1. `backbone`: an `ImageClassificationBackbone` module; must follow gale's backbone interface
        2. `head`: a head containing the classifier and the pooling layer; must be an instance of
           `ImageClassificationHead`.
"""
super(GeneralizedImageClassifier, self).__init__()
self.backbone = backbone
assert isinstance(backbone, ImageClassificationBackbone)
self.head = head
assert isinstance(head, ImageClassificationHead)
def forward(self, batched_inputs: torch.Tensor) -> torch.Tensor:
"""
Runs the batched_inputs through `backbone` followed by the `head`.
Returns a Tensor which contains the logits for the batched_inputs.
"""
# forward pass through the backbone
out = self.backbone(batched_inputs)
# pass through the classification layer
out = self.head(out)
return out
@classmethod
def from_config_dict(cls, cfg: DictConfig):
"""
Instantiate the Meta Architecture from gale config
"""
if not hasattr(cfg.model, "backbone"):
raise ValueError("Configuration for model backbone not found")
if not hasattr(cfg.model, "head"):
raise ValueError("Configuration for model head not found")
input_shape = ShapeSpec(cfg.input.channels, cfg.input.height, cfg.input.width)
_logger.debug(f"Inputs: {input_shape}")
backbone = build_backbone(cfg, input_shape=input_shape)
param_count = get_human_readable_count(
sum([m.numel() for m in backbone.parameters()])
)
_logger.debug(
"Backbone {} created, param count: {}.".format(
cfg.model.backbone.name, param_count
)
)
head = build_head(cfg, backbone.output_shape())
param_count = get_human_readable_count(
sum([m.numel() for m in head.parameters()])
)
_logger.debug(
"Head {} created, param count: {}.".format(cfg.model.head.name, param_count)
)
kwds = {"backbone": backbone, "head": head}
instance = cls(**kwds)
instance.input_shape = input_shape
param_count = get_human_readable_count(
sum([m.numel() for m in instance.parameters()])
)
_logger.info("Model created, param count: {}.".format(param_count))
return instance
def build_param_dicts(self):
"""
        Builds up the parameter dicts for optimization
"""
backbone_params = self.backbone.build_param_dicts()
head_params = self.head.build_param_dicts()
return backbone_params + head_params
@property
def hypers(self) -> Tuple:
"""
        Returns the lists of hyperparameters (`lr` and `wd`),
        one entry per param group
"""
lrs = []
wds = []
for p in self.build_param_dicts():
lrs.append(p["lr"])
wds.append(p["weight_decay"])
return self._hypers(lrs, wds)
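    # A minimal usage sketch (added; assumes a gale config file with the
    # model.backbone, model.head and input.* keys that from_config_dict
    # checks for; "config.yaml" is a hypothetical path):
    #
    #   cfg = OmegaConf.load("config.yaml")
    #   model = GeneralizedImageClassifier.from_config_dict(cfg)
    #   logits = model(torch.rand(8, cfg.input.channels,
    #                             cfg.input.height, cfg.input.width))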
| 33.344
| 125
| 0.648992
|
__all__ = ['GeneralizedImageClassifier']
import logging
from collections import namedtuple
from typing import *
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.core.memory import get_human_readable_count
from torch.nn import Module
from ..backbones import ImageClassificationBackbone
from ..build import build_backbone, build_head
from ..heads import ImageClassificationHead
from ....core_classes import BasicModule
from ....utils.shape_spec import ShapeSpec
_logger = logging.getLogger(__name__)
class GeneralizedImageClassifier(BasicModule):
_hypers = namedtuple("hypers", field_names=["lr", "wd"])
def __init__(
self,
backbone: ImageClassificationBackbone,
head: ImageClassificationHead,
):
super(GeneralizedImageClassifier, self).__init__()
self.backbone = backbone
assert isinstance(backbone, ImageClassificationBackbone)
self.head = head
assert isinstance(head, ImageClassificationHead)
def forward(self, batched_inputs: torch.Tensor) -> torch.Tensor:
out = self.backbone(batched_inputs)
out = self.head(out)
return out
@classmethod
def from_config_dict(cls, cfg: DictConfig):
if not hasattr(cfg.model, "backbone"):
raise ValueError("Configuration for model backbone not found")
if not hasattr(cfg.model, "head"):
raise ValueError("Configuration for model head not found")
input_shape = ShapeSpec(cfg.input.channels, cfg.input.height, cfg.input.width)
_logger.debug(f"Inputs: {input_shape}")
backbone = build_backbone(cfg, input_shape=input_shape)
param_count = get_human_readable_count(
sum([m.numel() for m in backbone.parameters()])
)
_logger.debug(
"Backbone {} created, param count: {}.".format(
cfg.model.backbone.name, param_count
)
)
head = build_head(cfg, backbone.output_shape())
param_count = get_human_readable_count(
sum([m.numel() for m in head.parameters()])
)
_logger.debug(
"Head {} created, param count: {}.".format(cfg.model.head.name, param_count)
)
kwds = {"backbone": backbone, "head": head}
instance = cls(**kwds)
instance.input_shape = input_shape
param_count = get_human_readable_count(
sum([m.numel() for m in instance.parameters()])
)
_logger.info("Model created, param count: {}.".format(param_count))
return instance
def build_param_dicts(self):
backbone_params = self.backbone.build_param_dicts()
head_params = self.head.build_param_dicts()
return backbone_params + head_params
@property
def hypers(self) -> Tuple:
lrs = []
wds = []
for p in self.build_param_dicts():
lrs.append(p["lr"])
wds.append(p["weight_decay"])
return self._hypers(lrs, wds)
| true
| true
|
f709983851a949a8e91eea102571610f8c22f66c
| 1,811
|
py
|
Python
|
RSA/multi_power.py
|
dev-alberto/Computational-Number-Theory
|
89644a4d69553bc726409b1f85d5bc897e8491ec
|
[
"MIT"
] | 1
|
2019-02-21T20:48:01.000Z
|
2019-02-21T20:48:01.000Z
|
RSA/multi_power.py
|
dev-alberto/Computational-Number-Theory
|
89644a4d69553bc726409b1f85d5bc897e8491ec
|
[
"MIT"
] | null | null | null |
RSA/multi_power.py
|
dev-alberto/Computational-Number-Theory
|
89644a4d69553bc726409b1f85d5bc897e8491ec
|
[
"MIT"
] | null | null | null |
from util import getPrime, inv, gcd
from random import randrange
from time import time
from datetime import timedelta
def gen_keys():
p = getPrime(512)
q = getPrime(512)
p_s = p ** 2
n = p_s * q
phi = (p_s - p) * (q - 1)
e = randrange(1, phi)
g = gcd(e, phi)
while g != 1:
e = randrange(1, phi)
g = gcd(e, phi)
e = 41
d = inv(e, phi)
dp = d % (p - 1)
dq = d % (q - 1)
p2_inv_q = inv(p_s, q)
e_inv_p = inv(e, p)
#public, private
return [(n, e), (p, q, dp, dq, p2_inv_q, e_inv_p), d]
def encrypt(public, m):
return pow(m, public[1], public[0])
def hensel(cp, dp, p, e_inv_p, e, c):
p_s = p**2
m_p = pow(cp, dp-1, p)
K0 = m_p * cp % p
A = -pow(K0, e, p_s)
A = (A + c) % p_s
m_p = m_p * A % p_s
m_p = m_p * e_inv_p % p_s
m_p = (m_p + K0) % p_s
return m_p
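# Why the lift above works (added note): K0 = c^dp mod p is the message
# mod p. Writing m = K0 + t*p and expanding m^e = c (mod p^2) gives
# t*p = (c - K0^e) * inv(e * K0^(e-1)) (mod p^2); since K0^e = c (mod p),
# inv(e * K0^(e-1)) = inv(e) * c^(dp-1) (mod p), which is exactly the
# e_inv_p and pow(cp, dp-1, p) factors used in the function body.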
def decrypt(c, privk, pub):
p, q, dp, dq, p2_inv_q, e_inv_p = privk
n, e = pub
p_s = p**2
c_p = c % p_s
c_q = c % q
m_p = hensel(c_p, dp, p, e_inv_p, e, c)
m_q = pow(c_q, dq, q)
V = (m_q - m_p) % q
V = V * p2_inv_q % q
M = V * p_s % n
M = (M + m_p) % n
return M
def classic_decrypt(c, d, n):
return pow(c, d, n)
if __name__ == '__main__':
m_ = 65
public, private, d = gen_keys()
#print(public)
c = encrypt(public, m_)
start_hensel = time()
dec = decrypt(c, private, public)
elapsed = time() - start_hensel
print(str(timedelta(seconds=elapsed)))
delta1 = timedelta(seconds=elapsed)
print(dec)
start_normal = time()
dec_ = classic_decrypt(c, d, public[0])
elapsed_ = time() - start_normal
print(str(timedelta(seconds=elapsed_)))
delta2 = timedelta(seconds=elapsed_)
print(dec_)
print(delta2/delta1)
| 19.684783
| 57
| 0.539481
|
from util import getPrime, inv, gcd
from random import randrange
from time import time
from datetime import timedelta
def gen_keys():
p = getPrime(512)
q = getPrime(512)
p_s = p ** 2
n = p_s * q
phi = (p_s - p) * (q - 1)
e = randrange(1, phi)
g = gcd(e, phi)
while g != 1:
e = randrange(1, phi)
g = gcd(e, phi)
e = 41
d = inv(e, phi)
dp = d % (p - 1)
dq = d % (q - 1)
p2_inv_q = inv(p_s, q)
e_inv_p = inv(e, p)
return [(n, e), (p, q, dp, dq, p2_inv_q, e_inv_p), d]
def encrypt(public, m):
return pow(m, public[1], public[0])
def hensel(cp, dp, p, e_inv_p, e, c):
p_s = p**2
m_p = pow(cp, dp-1, p)
K0 = m_p * cp % p
A = -pow(K0, e, p_s)
A = (A + c) % p_s
m_p = m_p * A % p_s
m_p = m_p * e_inv_p % p_s
m_p = (m_p + K0) % p_s
return m_p
def decrypt(c, privk, pub):
p, q, dp, dq, p2_inv_q, e_inv_p = privk
n, e = pub
p_s = p**2
c_p = c % p_s
c_q = c % q
m_p = hensel(c_p, dp, p, e_inv_p, e, c)
m_q = pow(c_q, dq, q)
V = (m_q - m_p) % q
V = V * p2_inv_q % q
M = V * p_s % n
M = (M + m_p) % n
return M
def classic_decrypt(c, d, n):
return pow(c, d, n)
if __name__ == '__main__':
m_ = 65
public, private, d = gen_keys()
c = encrypt(public, m_)
start_hensel = time()
dec = decrypt(c, private, public)
elapsed = time() - start_hensel
print(str(timedelta(seconds=elapsed)))
delta1 = timedelta(seconds=elapsed)
print(dec)
start_normal = time()
dec_ = classic_decrypt(c, d, public[0])
elapsed_ = time() - start_normal
print(str(timedelta(seconds=elapsed_)))
delta2 = timedelta(seconds=elapsed_)
print(dec_)
print(delta2/delta1)
| true
| true
|
f709998f3e0fc54cc0e672fbc903c33dd1f1b011
| 4,504
|
py
|
Python
|
handbook_tools/commands/toc.py
|
uribench/software-engineering-handbook-tools
|
30b48ed0b48aabbec451be0ef6e2519b3c54cefa
|
[
"Unlicense"
] | 2
|
2018-06-27T07:59:12.000Z
|
2021-04-29T00:22:08.000Z
|
handbook_tools/commands/toc.py
|
uribench/software-engineering-handbook-tools
|
30b48ed0b48aabbec451be0ef6e2519b3c54cefa
|
[
"Unlicense"
] | 11
|
2018-06-18T06:55:46.000Z
|
2020-07-19T10:33:42.000Z
|
handbook_tools/commands/toc.py
|
uribench/software-engineering-handbook-tools
|
30b48ed0b48aabbec451be0ef6e2519b3c54cefa
|
[
"Unlicense"
] | 1
|
2019-07-05T13:07:11.000Z
|
2019-07-05T13:07:11.000Z
|
"""
'toc' sub-command of the 'handbook' command.
This module composes a TOC for the Handbook from configuration files.
"""
import os
import sys
from urllib.request import pathname2url
from handbook_tools.lib.command_base import CommandBase
from handbook_tools.lib.navigation_tree import NavigationTree
__version__ = '0.6.8'
class Toc(CommandBase):
"""
Compose a TOC of the Handbook from configuration.
Usage:
toc [options]
Options:
-h, --help Show this help message and exit
--version Show the version and exit
-o, --output=FILE Specify output TOC file relative to site root
-d, --depth=LEVEL Max depth of the generated TOC tree [default: 8]
--no-stop Ignore 'stop' tags to scan the entire tree
--no-prefix Do not include item prefix for the TOC items
--no-index Do not include index numbers for the TOC items
--no-link Do not include links for the TOC items
--header Include HTML header for the TOC file
Examples:
handbook toc -h
handbook toc --version
handbook toc
handbook --root=tests/fixtures/site toc
handbook toc -d 3
handbook toc --depth=3 --no-index
        handbook toc -d 2 --no-index --no-link -o toc2.md
handbook toc --no-stop -o toc.md
"""
def __init__(self, command_args=None, global_args=None):
""""""
super().__init__(command_args, global_args, version=__version__)
# kill bullets of unordered list (not supported by GitHub)
self.toc_header = '<style>ul { list-style-type: none; }</style>\n\n'
self.toc_title = '# Table of Contents\n\n'
self.markdown_ul = '-'
self._process_args()
self.toc_file = self._init_output_file(self.output_filename)
try:
if self.include_toc_header:
self.toc_file.write(self.toc_header)
self.toc_file.write(self.toc_title)
except IOError as err:
print('Error: Operation failed: {}'.format(err.strerror))
self.depth = 0
self.index = []
self.navigation_tree = None
def execute(self):
"""Entry point for the execution of this sub-command"""
self.navigation_tree = NavigationTree(self.site_root, self.verbose, self.no_stop)
self.navigation_tree.scan(self.node_performer)
if self.toc_file is not sys.stdout:
self.toc_file.close()
def node_performer(self, root_path, *_):
"""Custom performer executed for each visited node"""
name = os.path.basename(root_path)
link = root_path.replace(self.site_root, '')
self._update_index_counter(link)
# skip handbook root and too deep TOC items
if self.depth > 1 and (self.depth - 1) <= self.max_depth:
self.toc_file.write(self._format_toc(name, link))
def _process_args(self):
"""Process command_args"""
# default values not set by docopt were set in CommandBase
self.output_filename = self.args['--output']
self.max_depth = int(self.args['--depth'])
self.no_stop = self.args['--no-stop']
self.include_prefix = not self.args['--no-prefix']
self.include_index = not self.args['--no-index']
self.include_link = not self.args['--no-link']
self.include_toc_header = self.args['--header']
def _update_index_counter(self, link):
""""""
depth = len(link.split(os.sep)) - 1
if depth > len(self.index):
self.index += [1]
if depth <= self.depth:
self.index[depth-1] += 1
self.index = self.index[:depth]
self.depth = depth
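    # Worked example for the counter above (added): visiting the links '/a',
    # '/a/b', '/a/c', '/d' in that order yields self.index == [1], [1, 1],
    # [1, 2], [2] respectively; _format_toc() only prints index[1:depth], so
    # the root counter at position 0 never shows up in the TOC.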
def _format_toc(self, name, link):
""""""
# compose indent string
indent = ' ' * 2 * (self.depth - 2)
# compose optional item prefix string
prefix = ''
if self.include_prefix:
prefix = self.markdown_ul + ' '
# compose optional index string
index_string = ''
if self.include_index:
index_string = '.'.join(str(e) for e in self.index[1:self.depth])
index_string += ' '
# compose item string with optional link
toc_item = name
if self.include_link:
link_url = pathname2url(link)
toc_item = '[' + name + '](' + link_url + ')'
return '{}{}{}{}\n'.format(indent, prefix, index_string, toc_item)
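    # Example output (added): at depth 3 with index [1, 2, 3] and all options
    # on, this returns '  - 2.3 [Name](/Guides/Name)\n', i.e. two spaces of
    # indent, a '-' bullet, the index without the root counter, and a
    # markdown link ('/Guides/Name' is a hypothetical link).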
| 36.322581
| 89
| 0.60524
|
import os
import sys
from urllib.request import pathname2url
from handbook_tools.lib.command_base import CommandBase
from handbook_tools.lib.navigation_tree import NavigationTree
__version__ = '0.6.8'
class Toc(CommandBase):
def __init__(self, command_args=None, global_args=None):
super().__init__(command_args, global_args, version=__version__)
self.toc_header = '<style>ul { list-style-type: none; }</style>\n\n'
self.toc_title = '# Table of Contents\n\n'
self.markdown_ul = '-'
self._process_args()
self.toc_file = self._init_output_file(self.output_filename)
try:
if self.include_toc_header:
self.toc_file.write(self.toc_header)
self.toc_file.write(self.toc_title)
except IOError as err:
print('Error: Operation failed: {}'.format(err.strerror))
self.depth = 0
self.index = []
self.navigation_tree = None
def execute(self):
self.navigation_tree = NavigationTree(self.site_root, self.verbose, self.no_stop)
self.navigation_tree.scan(self.node_performer)
if self.toc_file is not sys.stdout:
self.toc_file.close()
def node_performer(self, root_path, *_):
name = os.path.basename(root_path)
link = root_path.replace(self.site_root, '')
self._update_index_counter(link)
if self.depth > 1 and (self.depth - 1) <= self.max_depth:
self.toc_file.write(self._format_toc(name, link))
def _process_args(self):
self.output_filename = self.args['--output']
self.max_depth = int(self.args['--depth'])
self.no_stop = self.args['--no-stop']
self.include_prefix = not self.args['--no-prefix']
self.include_index = not self.args['--no-index']
self.include_link = not self.args['--no-link']
self.include_toc_header = self.args['--header']
def _update_index_counter(self, link):
depth = len(link.split(os.sep)) - 1
if depth > len(self.index):
self.index += [1]
if depth <= self.depth:
self.index[depth-1] += 1
self.index = self.index[:depth]
self.depth = depth
def _format_toc(self, name, link):
indent = ' ' * 2 * (self.depth - 2)
prefix = ''
if self.include_prefix:
prefix = self.markdown_ul + ' '
index_string = ''
if self.include_index:
index_string = '.'.join(str(e) for e in self.index[1:self.depth])
index_string += ' '
toc_item = name
if self.include_link:
link_url = pathname2url(link)
toc_item = '[' + name + '](' + link_url + ')'
return '{}{}{}{}\n'.format(indent, prefix, index_string, toc_item)
| true
| true
|
f7099a27cae14071fac72802c976debd492c90a6
| 4,060
|
py
|
Python
|
translate/cloud-client/beta_snippets_test.py
|
Cuciu/python-test
|
baee855ce20a2a1344ffb208a40ebc20014fba5f
|
[
"Apache-2.0"
] | 1
|
2022-02-06T00:04:04.000Z
|
2022-02-06T00:04:04.000Z
|
translate/cloud-client/beta_snippets_test.py
|
Cuciu/python-test
|
baee855ce20a2a1344ffb208a40ebc20014fba5f
|
[
"Apache-2.0"
] | 1
|
2021-03-25T22:38:27.000Z
|
2021-03-25T22:38:27.000Z
|
translate/cloud-client/beta_snippets_test.py
|
Cuciu/python-test
|
baee855ce20a2a1344ffb208a40ebc20014fba5f
|
[
"Apache-2.0"
] | 1
|
2020-02-17T03:55:51.000Z
|
2020-02-17T03:55:51.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import uuid
import beta_snippets
from google.cloud import storage
PROJECT_ID = os.environ['GCLOUD_PROJECT']
@pytest.fixture(scope='function')
def bucket():
"""Create a temporary bucket to store annotation output."""
bucket_name = str(uuid.uuid1())
storage_client = storage.Client()
bucket = storage_client.create_bucket(bucket_name)
yield bucket
bucket.delete(force=True)
@pytest.fixture(scope='session')
def glossary():
"""Get the ID of a glossary available to session (do not mutate/delete)."""
glossary_id = 'must-start-with-letters-' + str(uuid.uuid1())
beta_snippets.create_glossary(PROJECT_ID, glossary_id)
yield glossary_id
try:
beta_snippets.delete_glossary(PROJECT_ID, glossary_id)
except Exception:
pass
@pytest.fixture(scope='function')
def unique_glossary_id():
"""Get a unique ID. Attempts to delete glossary with this ID after test."""
glossary_id = 'must-start-with-letters-' + str(uuid.uuid1())
yield glossary_id
try:
beta_snippets.delete_glossary(PROJECT_ID, glossary_id)
except Exception:
pass
def test_translate_text(capsys):
beta_snippets.translate_text(PROJECT_ID, 'Hello world')
out, _ = capsys.readouterr()
assert 'Zdravo svet' in out
def test_batch_translate_text(capsys, bucket):
beta_snippets.batch_translate_text(
PROJECT_ID,
'gs://cloud-samples-data/translation/text.txt',
'gs://{}/translation/BATCH_TRANSLATION_OUTPUT/'.format(bucket.name))
out, _ = capsys.readouterr()
assert 'Total Characters: 13' in out
assert 'Translated Characters: 13' in out
def test_detect_language(capsys):
beta_snippets.detect_language(PROJECT_ID, 'Hæ sæta')
out, _ = capsys.readouterr()
assert 'is' in out
def test_list_languages(capsys):
beta_snippets.list_languages(PROJECT_ID)
out, _ = capsys.readouterr()
assert 'zh-CN' in out
def test_list_languages_with_target(capsys):
beta_snippets.list_languages_with_target(PROJECT_ID, 'is')
out, _ = capsys.readouterr()
assert u'Language Code: sq' in out
assert u'Display Name: albanska' in out
def test_create_glossary(capsys, unique_glossary_id):
beta_snippets.create_glossary(PROJECT_ID, unique_glossary_id)
out, _ = capsys.readouterr()
assert 'Created' in out
assert PROJECT_ID in out
assert unique_glossary_id in out
assert 'gs://cloud-samples-data/translation/glossary.csv' in out
def test_get_glossary(capsys, glossary):
beta_snippets.get_glossary(PROJECT_ID, glossary)
out, _ = capsys.readouterr()
assert glossary in out
assert 'gs://cloud-samples-data/translation/glossary.csv' in out
def test_list_glossary(capsys, glossary):
beta_snippets.list_glossaries(PROJECT_ID)
out, _ = capsys.readouterr()
assert glossary in out
assert 'gs://cloud-samples-data/translation/glossary.csv' in out
def test_translate_text_with_glossary(capsys, glossary):
beta_snippets.translate_text_with_glossary(
PROJECT_ID, glossary, 'directions')
out, _ = capsys.readouterr()
assert 'direcciones' in out
def test_delete_glossary(capsys, unique_glossary_id):
beta_snippets.create_glossary(PROJECT_ID, unique_glossary_id)
beta_snippets.delete_glossary(PROJECT_ID, unique_glossary_id)
out, _ = capsys.readouterr()
assert PROJECT_ID in out
assert 'us-central1' in out
assert unique_glossary_id in out
| 30.074074
| 79
| 0.734236
|
import os
import pytest
import uuid
import beta_snippets
from google.cloud import storage
PROJECT_ID = os.environ['GCLOUD_PROJECT']
@pytest.fixture(scope='function')
def bucket():
bucket_name = str(uuid.uuid1())
storage_client = storage.Client()
bucket = storage_client.create_bucket(bucket_name)
yield bucket
bucket.delete(force=True)
@pytest.fixture(scope='session')
def glossary():
glossary_id = 'must-start-with-letters-' + str(uuid.uuid1())
beta_snippets.create_glossary(PROJECT_ID, glossary_id)
yield glossary_id
try:
beta_snippets.delete_glossary(PROJECT_ID, glossary_id)
except Exception:
pass
@pytest.fixture(scope='function')
def unique_glossary_id():
glossary_id = 'must-start-with-letters-' + str(uuid.uuid1())
yield glossary_id
try:
beta_snippets.delete_glossary(PROJECT_ID, glossary_id)
except Exception:
pass
def test_translate_text(capsys):
beta_snippets.translate_text(PROJECT_ID, 'Hello world')
out, _ = capsys.readouterr()
assert 'Zdravo svet' in out
def test_batch_translate_text(capsys, bucket):
beta_snippets.batch_translate_text(
PROJECT_ID,
'gs://cloud-samples-data/translation/text.txt',
'gs://{}/translation/BATCH_TRANSLATION_OUTPUT/'.format(bucket.name))
out, _ = capsys.readouterr()
assert 'Total Characters: 13' in out
assert 'Translated Characters: 13' in out
def test_detect_language(capsys):
beta_snippets.detect_language(PROJECT_ID, 'Hæ sæta')
out, _ = capsys.readouterr()
assert 'is' in out
def test_list_languages(capsys):
beta_snippets.list_languages(PROJECT_ID)
out, _ = capsys.readouterr()
assert 'zh-CN' in out
def test_list_languages_with_target(capsys):
beta_snippets.list_languages_with_target(PROJECT_ID, 'is')
out, _ = capsys.readouterr()
assert u'Language Code: sq' in out
assert u'Display Name: albanska' in out
def test_create_glossary(capsys, unique_glossary_id):
beta_snippets.create_glossary(PROJECT_ID, unique_glossary_id)
out, _ = capsys.readouterr()
assert 'Created' in out
assert PROJECT_ID in out
assert unique_glossary_id in out
assert 'gs://cloud-samples-data/translation/glossary.csv' in out
def test_get_glossary(capsys, glossary):
beta_snippets.get_glossary(PROJECT_ID, glossary)
out, _ = capsys.readouterr()
assert glossary in out
assert 'gs://cloud-samples-data/translation/glossary.csv' in out
def test_list_glossary(capsys, glossary):
beta_snippets.list_glossaries(PROJECT_ID)
out, _ = capsys.readouterr()
assert glossary in out
assert 'gs://cloud-samples-data/translation/glossary.csv' in out
def test_translate_text_with_glossary(capsys, glossary):
beta_snippets.translate_text_with_glossary(
PROJECT_ID, glossary, 'directions')
out, _ = capsys.readouterr()
assert 'direcciones' in out
def test_delete_glossary(capsys, unique_glossary_id):
beta_snippets.create_glossary(PROJECT_ID, unique_glossary_id)
beta_snippets.delete_glossary(PROJECT_ID, unique_glossary_id)
out, _ = capsys.readouterr()
assert PROJECT_ID in out
assert 'us-central1' in out
assert unique_glossary_id in out
| true
| true
|
f7099a55a30bdd3116b8fe67a61658fa8e908227
| 6,145
|
py
|
Python
|
Snake.py
|
JoaoSantos2007/jogoCobrinha
|
d4a09a339d929d7a19984a45f27127153b009bb3
|
[
"MIT"
] | null | null | null |
Snake.py
|
JoaoSantos2007/jogoCobrinha
|
d4a09a339d929d7a19984a45f27127153b009bb3
|
[
"MIT"
] | null | null | null |
Snake.py
|
JoaoSantos2007/jogoCobrinha
|
d4a09a339d929d7a19984a45f27127153b009bb3
|
[
"MIT"
] | null | null | null |
import pygame # importa a biblioteca Pygame
import random # importa a biblioteca Random
from audioplayer import AudioPlayer
inicio = False
source = "/home/joao/Arquivos/jogoCobrinha/"
# Start a match
def iniciar(inicio, tela, fonte, texto):
texto = fonte.render("Pressione T para iniciar: ", True, cor_pontos)
tela.blit(imagem, [0, 263])
tela.blit(texto, [150, 150])
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_t:
inicio = True
if event.type == pygame.QUIT:
            raise Exception
return inicio
while True:
status = True
pygame.init()
player = AudioPlayer(source+"supermario.mp3")
comer = AudioPlayer(source+"comer.mp3")
erro = AudioPlayer(source+"Erro.mp3")
player.play()
# pygame.mixer.init()
# pygame.mixer.music.load('supermario.mp3')
# pygame.mixer.music.play()
    # Define colors
    cor_inicio = (64, 193, 255)
    cor_fundo = (150, 255, 159)  # background color
    cor_cobra = (255, 0, 0)  # snake color
    cor_comida = (138, 0, 0)  # food color 128,60,60
    cor_pontos = (0, 0, 0)  # score color
cor_inicio = (64, 193, 255)
cor_fim = (255, 255, 110)
#########
dimensoes = (600, 600)
fim = ""
    # Initial values
pontuação = ""
texto = ""
tempo = 9.0
direcao_x = "Liberado"
direcao_y = "Liberado"
x = 300
y = 300
d = 20
dx = 0
dy = 0
x_comida = round(random.randrange(0, 600 - d)/20)*20
y_comida = round(random.randrange(0, 600 - d)/20)*20
fonte = pygame.font.SysFont("hack", 35)
fonte2 = pygame.font.SysFont("hack", 100)
lista_cobra = [[x, y]]
tela = pygame.display.set_mode((dimensoes))
pygame.display.set_caption("Snake")
tela.fill(cor_inicio)
imagem = pygame.image.load(source+"cobrinha.png")
estatico = imagem.get_rect()
clock = pygame.time.Clock()
if inicio == False:
while inicio == False:
pygame.display.update()
inicio = iniciar(inicio, tela, fonte, texto)
def desenha_cobra(lista_cobra):
tela.fill(cor_fundo)
for unidade in lista_cobra:
pygame.draw.rect(tela, cor_cobra, [unidade[0], unidade[1], d, d])
tela.fill(cor_fundo)
def mover_cobra(dx, dy, lista_cobra, direcao_x, direcao_y):
delta_x = 0
delta_y = 0
for event in pygame.event.get():
if event.type == pygame.QUIT:
raise Exception
if event.type == pygame.KEYDOWN:
if direcao_x == "Liberado":
if event.key == pygame.K_LEFT or event.key == pygame.K_a:
dx = -d
dy = 0
direcao_x = "Ocupado"
direcao_y = "Liberado"
elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:
dx = d
dy = 0
direcao_x = "Ocupado"
direcao_y = "Liberado"
if direcao_y == "Liberado":
if event.key == pygame.K_UP or event.key == pygame.K_w:
dx = 0
dy = -d
direcao_y = "Ocupado"
direcao_x = "Liberado"
elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
dx = 0
dy = d
direcao_y = "Ocupado"
direcao_x = "Liberado"
if event.key == pygame.K_ESCAPE:
raise Exception
x_novo = lista_cobra[-1][0] + dx
y_novo = lista_cobra[-1][1] + dy
lista_cobra.append([x_novo, y_novo])
del lista_cobra[0]
# x = x + delta_x
# y = y + delta_y
return dx, dy, lista_cobra, direcao_x, direcao_y
def verifica_comida(dx, dy, x_comida, y_comida, lista_cobra, tempo):
head = lista_cobra[-1]
x_novo = head[0] + dx
y_novo = head[1] + dy
if head[0] == x_comida and head[1] == y_comida:
comer.play()
lista_cobra.append([x_novo, y_novo])
tempo = tempo + 0.5
x_comida = round(random.randrange(0, 600 - d)/20)*20
y_comida = round(random.randrange(0, 600 - d)/20)*20
pygame.draw.rect(tela, cor_comida, [x_comida, y_comida, d, d])
return x_comida, y_comida, lista_cobra, tempo
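    # Note (added): tempo is the value later passed to clock.tick(), i.e. the
    # frame rate, so each piece of food makes the snake move 0.5 frames per
    # second faster.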
def verifica_parede(lista_cobra, status):
head = lista_cobra[-1]
x = head[0]
y = head[1]
if x not in range(600) or y not in range(600):
status = False
return status
def verifica_mordeu_cobra(lista_cobra, status):
head = lista_cobra[-1]
corpo = lista_cobra.copy()
del corpo[-1]
for x, y in corpo:
if x == head[0] and y == head[1]:
status = False
return status
def atualizar_pontos(lista_cobra):
pontos = str(len(lista_cobra))
score = fonte.render("Scores: " + pontos, True, cor_pontos)
tela.blit(score, [0, 0])
return pontos
while status == True:
pygame.display.update()
desenha_cobra(lista_cobra)
dx, dy, lista_cobra, direcao_x, direcao_y = mover_cobra(
dx, dy, lista_cobra, direcao_x, direcao_y)
x_comida, y_comida, lista_cobra, tempo = verifica_comida(
dx, dy, x_comida, y_comida, lista_cobra, tempo)
# print(lista_cobra)
status = verifica_parede(lista_cobra, status)
status = verifica_mordeu_cobra(lista_cobra, status)
pontuação = atualizar_pontos(lista_cobra)
clock.tick(tempo)
erro.play()
pygame.display.update()
tela.fill(cor_fim)
fim = fonte2.render("Gamer Over: ", True, cor_pontos)
tela.blit(fim, [100, 50])
pontuação = fonte2.render("Pontos: " + pontuação, True, cor_pontos)
tela.blit(pontuação, [100, 200])
pygame.display.update()
clock.tick(0.3)
| 29.261905
| 80
| 0.552482
|
import pygame
import random
from audioplayer import AudioPlayer
inicio = False
source = "/home/joao/Arquivos/jogoCobrinha/"
def iniciar(inicio, tela, fonte, texto):
texto = fonte.render("Pressione T para iniciar: ", True, cor_pontos)
tela.blit(imagem, [0, 263])
tela.blit(texto, [150, 150])
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_t:
inicio = True
if event.type == pygame.QUIT:
            raise Exception
return inicio
while True:
status = True
pygame.init()
player = AudioPlayer(source+"supermario.mp3")
comer = AudioPlayer(source+"comer.mp3")
erro = AudioPlayer(source+"Erro.mp3")
player.play()
cor_inicio = (64, 193, 255)
cor_fundo = (150, 255, 159)
cor_cobra = (255, 0, 0)
cor_comida = (138, 0, 0)
cor_pontos = (0, 0, 0)
cor_inicio = (64, 193, 255)
cor_fim = (255, 255, 110)
m = ""
pontuação = ""
texto = ""
tempo = 9.0
direcao_x = "Liberado"
direcao_y = "Liberado"
x = 300
y = 300
d = 20
dx = 0
dy = 0
x_comida = round(random.randrange(0, 600 - d)/20)*20
y_comida = round(random.randrange(0, 600 - d)/20)*20
fonte = pygame.font.SysFont("hack", 35)
fonte2 = pygame.font.SysFont("hack", 100)
lista_cobra = [[x, y]]
tela = pygame.display.set_mode((dimensoes))
pygame.display.set_caption("Snake")
tela.fill(cor_inicio)
imagem = pygame.image.load(source+"cobrinha.png")
estatico = imagem.get_rect()
clock = pygame.time.Clock()
if inicio == False:
while inicio == False:
pygame.display.update()
inicio = iniciar(inicio, tela, fonte, texto)
def desenha_cobra(lista_cobra):
tela.fill(cor_fundo)
for unidade in lista_cobra:
pygame.draw.rect(tela, cor_cobra, [unidade[0], unidade[1], d, d])
tela.fill(cor_fundo)
def mover_cobra(dx, dy, lista_cobra, direcao_x, direcao_y):
for event in pygame.event.get():
if event.type == pygame.QUIT:
raise Exception
if event.type == pygame.KEYDOWN:
if direcao_x == "Liberado":
if event.key == pygame.K_LEFT or event.key == pygame.K_a:
dx = -d
dy = 0
direcao_x = "Ocupado"
direcao_y = "Liberado"
elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:
dx = d
dy = 0
direcao_x = "Ocupado"
direcao_y = "Liberado"
if direcao_y == "Liberado":
if event.key == pygame.K_UP or event.key == pygame.K_w:
dx = 0
dy = -d
direcao_y = "Ocupado"
direcao_x = "Liberado"
elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
dx = 0
dy = d
direcao_y = "Ocupado"
direcao_x = "Liberado"
if event.key == pygame.K_ESCAPE:
raise Exception
x_novo = lista_cobra[-1][0] + dx
y_novo = lista_cobra[-1][1] + dy
lista_cobra.append([x_novo, y_novo])
del lista_cobra[0]
return dx, dy, lista_cobra, direcao_x, direcao_y
def verifica_comida(dx, dy, x_comida, y_comida, lista_cobra, tempo):
head = lista_cobra[-1]
x_novo = head[0] + dx
y_novo = head[1] + dy
if head[0] == x_comida and head[1] == y_comida:
comer.play()
lista_cobra.append([x_novo, y_novo])
tempo = tempo + 0.5
x_comida = round(random.randrange(0, 600 - d)/20)*20
y_comida = round(random.randrange(0, 600 - d)/20)*20
pygame.draw.rect(tela, cor_comida, [x_comida, y_comida, d, d])
return x_comida, y_comida, lista_cobra, tempo
def verifica_parede(lista_cobra, status):
head = lista_cobra[-1]
x = head[0]
y = head[1]
if x not in range(600) or y not in range(600):
status = False
return status
def verifica_mordeu_cobra(lista_cobra, status):
head = lista_cobra[-1]
corpo = lista_cobra.copy()
del corpo[-1]
for x, y in corpo:
if x == head[0] and y == head[1]:
status = False
return status
def atualizar_pontos(lista_cobra):
pontos = str(len(lista_cobra))
score = fonte.render("Scores: " + pontos, True, cor_pontos)
tela.blit(score, [0, 0])
return pontos
while status == True:
pygame.display.update()
desenha_cobra(lista_cobra)
dx, dy, lista_cobra, direcao_x, direcao_y = mover_cobra(
dx, dy, lista_cobra, direcao_x, direcao_y)
x_comida, y_comida, lista_cobra, tempo = verifica_comida(
dx, dy, x_comida, y_comida, lista_cobra, tempo)
status = verifica_parede(lista_cobra, status)
status = verifica_mordeu_cobra(lista_cobra, status)
pontuação = atualizar_pontos(lista_cobra)
clock.tick(tempo)
erro.play()
pygame.display.update()
tela.fill(cor_fim)
fim = fonte2.render("Game Over: ", True, cor_pontos)
tela.blit(fim, [100, 50])
pontuação = fonte2.render("Pontos: " + pontuação, True, cor_pontos)
tela.blit(pontuação, [100, 200])
pygame.display.update()
clock.tick(0.3)
| true
| true
|
f7099a6152e7bc1aa632fc5d51076615ce91b95a
| 608
|
py
|
Python
|
app/model.py
|
SayAkhan/testkakao
|
f1753733ce6f9c62829ac9f33eea4fec4c8ba03a
|
[
"MIT"
] | null | null | null |
app/model.py
|
SayAkhan/testkakao
|
f1753733ce6f9c62829ac9f33eea4fec4c8ba03a
|
[
"MIT"
] | null | null | null |
app/model.py
|
SayAkhan/testkakao
|
f1753733ce6f9c62829ac9f33eea4fec4c8ba03a
|
[
"MIT"
] | null | null | null |
#from app import db
from datetime import datetime, timedelta
#class User(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# user_key = db.Column(db.String(32), index=True, unique=True)
# join_date = db.Column(db.String())
# last_active_date = db.Column(db.String())
# def __init__(self, user_key):
# self.user_key = user_key
# self.join_date = datetime.strftime(
# datetime.utcnow() + timedelta(hours=9),
# "%Y.%m.%d %H:%M:%S")
# self.last_active_date = self.join_date
# def __repr__(self):
# return "<User %r>" % (self.user_key)
| 30.4
| 65
| 0.625
|
from datetime import datetime, timedelta
| true
| true
|
f7099a668e8105f9d5648b4e92033cd303825672
| 26,111
|
py
|
Python
|
InnerEye/ML/pipelines/inference.py
|
JacopoTeneggi/InnerEye-DeepLearning
|
988d9fa318a19cfd435370248970d976ee2e78b0
|
[
"MIT"
] | 402
|
2020-09-22T16:38:16.000Z
|
2022-03-30T09:56:03.000Z
|
InnerEye/ML/pipelines/inference.py
|
JacopoTeneggi/InnerEye-DeepLearning
|
988d9fa318a19cfd435370248970d976ee2e78b0
|
[
"MIT"
] | 259
|
2020-09-23T09:32:33.000Z
|
2022-03-30T18:15:01.000Z
|
InnerEye/ML/pipelines/inference.py
|
JacopoTeneggi/InnerEye-DeepLearning
|
988d9fa318a19cfd435370248970d976ee2e78b0
|
[
"MIT"
] | 112
|
2020-09-23T00:12:58.000Z
|
2022-03-31T07:39:55.000Z
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from __future__ import annotations
import logging
from enum import Enum
from pathlib import Path
from typing import Any, Dict, Optional
import numpy as np
import torch
from radio import CTImagesMaskedBatch
from radio.batchflow import Dataset, action, inbatch_parallel
from InnerEye.Common.type_annotations import TupleFloat3
from InnerEye.ML import config
from InnerEye.ML.common import ModelExecutionMode
from InnerEye.ML.config import SegmentationModelBase
from InnerEye.ML.lightning_helpers import load_from_checkpoint_and_adjust_for_inference
from InnerEye.ML.lightning_models import SegmentationLightning
from InnerEye.ML.model_config_base import ModelConfigBase
from InnerEye.ML.models.architectures.base_model import BaseSegmentationModel
from InnerEye.ML.utils import image_util, ml_util
from InnerEye.ML.utils.image_util import compute_uncertainty_map_from_posteriors, gaussian_smooth_posteriors, \
posteriors_to_segmentation
class InferencePipelineBase:
"""Base class for all inference pipelines."""
def __init__(self, model_config: ModelConfigBase):
self.model_config = model_config
class FullImageInferencePipelineBase(InferencePipelineBase):
"""
Base Class for full image inference intended to be inherited by inference pipelines
that can perform full image prediction
"""
def __init__(self, model_config: SegmentationModelBase):
super().__init__(model_config)
def predict_and_post_process_whole_image(self, image_channels: np.ndarray,
voxel_spacing_mm: TupleFloat3,
mask: np.ndarray = None,
patient_id: int = 0) -> InferencePipeline.Result:
return self.post_process(self.predict_whole_image(image_channels, voxel_spacing_mm, mask, patient_id))
def predict_whole_image(self, image_channels: np.ndarray,
voxel_spacing_mm: TupleFloat3,
mask: np.ndarray = None,
patient_id: int = 0) -> InferencePipeline.Result:
raise NotImplementedError("Full image inference capability must be implemented by concrete classes")
def post_process(self, results: InferencePipeline.Result) -> InferencePipeline.Result:
"""
Perform post-processing on the inference results: posterior smoothing, summed
probability rules, largest-connected-component extraction and slice exclusion
rules, each applied only if enabled in the model configuration.
:param results: inference results to post-process
:return: post-processed version of results
"""
if self.model_config.posterior_smoothing_mm:
posteriors = gaussian_smooth_posteriors(
posteriors=results.posteriors,
kernel_size_mm=self.model_config.posterior_smoothing_mm,
voxel_spacing_mm=results.voxel_spacing_mm
)
results = InferencePipeline.Result(
patient_id=results.patient_id,
posteriors=posteriors,
segmentation=posteriors_to_segmentation(posteriors),
voxel_spacing_mm=results.voxel_spacing_mm
)
if self.model_config.summed_probability_rules and not self.model_config.disable_extra_postprocessing:
assert isinstance(self.model_config, SegmentationModelBase)
results = results.with_new_segmentation(
image_util.apply_summed_probability_rules(self.model_config, results.posteriors, results.segmentation))
if self.model_config.largest_connected_component_foreground_classes is not None:
# get indices for classes to restrict
restrict_class_indices_and_thresholds = []
for name, idx in self.model_config.class_and_index_with_background().items():
for name2, threshold in self.model_config.largest_connected_component_foreground_classes:
if name2 == name:
restrict_class_indices_and_thresholds.append((idx, threshold))
results = results.with_new_segmentation(
image_util.extract_largest_foreground_connected_component(
multi_label_array=results.segmentation,
# mypy gets confused below because List is invariant. Sequence is covariant
# but does not allow "append".
restrictions=restrict_class_indices_and_thresholds)) # type: ignore
if self.model_config.slice_exclusion_rules and not self.model_config.disable_extra_postprocessing:
results = results.with_new_segmentation(
image_util.apply_slice_exclusion_rules(self.model_config, results.segmentation))
return results
class InferencePipeline(FullImageInferencePipelineBase):
"""
Pipeline class for model for whole image inference on ct-images.
"""
# the model output is expected to be a valid probability distribution
MODEL_OUTPUT_POSTERIOR_RANGE = (0, 1)
class Variables(Enum):
"""
Variables associated with the inference pipeline
"""
# an instantiated model to use for inference.
Model = 'model'
# the configuration associated with the model.
ModelConfig = 'model_config'
# the shape of the image required as output from the pipeline.
OutputImageShape = 'output_image_shape'
# A Tuple[int,int,int] with the crop size that should be used. For large images, this will be
# the test_crop_size from the model config, but for smaller images, it will be the componentwise
# minimum of test_crop_size and image_size
CropSize = 'crop_size'
# The stride size to use, possibly adjusted for small images (see above for crop_size)
Stride = 'stride'
# The size of the output tensor that the model will produce when fed with an input tensor that
# has the given crop_size.
OutputSize = 'output_size'
class Result:
"""
Contains the inference results from a single pass of the inference pipeline
"""
def __init__(self,
patient_id: int,
segmentation: np.ndarray,
posteriors: np.ndarray,
voxel_spacing_mm: TupleFloat3):
"""
:param patient_id: The id of the patient instance for which inference is being performed.
:param segmentation: Z x Y x X (argmaxed over the posteriors in the class dimension)
:param voxel_spacing_mm: Voxel spacing to use for each dimension in (Z x Y x X) order
:param posteriors: Class x Z x Y x X
"""
self.patient_id = patient_id
self.segmentation = segmentation
self.posteriors = posteriors
self.voxel_spacing_mm = voxel_spacing_mm
if len(self.voxel_spacing_mm) != 3:
raise ValueError(f"voxel_spacing_mm must have length 3, found: {voxel_spacing_mm}")
if any(np.array(self.voxel_spacing_mm) <= 0):
raise ValueError(f"voxel_spacing_mm must have values > 0 in each dimension, found: {voxel_spacing_mm}")
ml_util.check_size_matches(self.segmentation,
self.posteriors,
dim1=3,
dim2=4,
matching_dimensions=[-3, -2, -1],
arg1_name="segmentation",
arg2_name="posteriors")
segmentation_value_range = np.unique(self.segmentation)
if not np.all([x in range(self.posteriors.shape[0]) for x in segmentation_value_range]):
raise Exception("values in the segmentation map must be in range [0, classes), "
"found classes:{}, segmentation range:{}"
.format(self.posteriors.shape[0], segmentation_value_range))
self._uncertainty = compute_uncertainty_map_from_posteriors(self.posteriors)
@property
def uncertainty(self) -> np.ndarray:
return self._uncertainty
def with_new_segmentation(self, segmentation: np.ndarray) -> InferencePipeline.Result:
if segmentation.shape != self.segmentation.shape:
raise ValueError(f"Attempt to replace segmentation of shape {self.segmentation.shape} "
f"with one of shape {segmentation.shape}")
return InferencePipeline.Result(
patient_id=self.patient_id,
segmentation=segmentation,
posteriors=self.posteriors,
voxel_spacing_mm=self.voxel_spacing_mm)
def __init__(self, model: SegmentationLightning, model_config: config.SegmentationModelBase,
pipeline_id: int = 0):
super().__init__(model_config)
self.model = model
self.model.model.eval()
self.pipeline_id = pipeline_id
@staticmethod
def create_from_checkpoint(path_to_checkpoint: Path,
model_config: SegmentationModelBase,
pipeline_id: int = 0) -> Optional[InferencePipeline]:
"""
Creates an instance of the inference pipeline for a given epoch from a stored checkpoint.
After loading, the model parameters are checked for NaN and Infinity values.
If there is no checkpoint file for the given epoch, return None.
:param path_to_checkpoint: The path to the checkpoint that we want to load,
typically located under model_config.checkpoint_folder.
:param model_config: Model related configurations.
:param pipeline_id: Numeric identifier for the pipeline (useful for logging when ensembling)
:return InferencePipeline: an instantiated inference pipeline instance, or None if there was no checkpoint
file for this epoch.
"""
if not path_to_checkpoint.is_file():
# not raising a value error here: This is used to create individual pipelines for ensembles,
# possibly one model cannot be created while others can
logging.warning(f"Could not recover model from checkpoint path {path_to_checkpoint}")
return None
lightning_model = load_from_checkpoint_and_adjust_for_inference(model_config, path_to_checkpoint)
assert isinstance(lightning_model, SegmentationLightning)
return InferencePipeline(model=lightning_model, model_config=model_config, pipeline_id=pipeline_id)
def predict_whole_image(self, image_channels: np.ndarray,
voxel_spacing_mm: TupleFloat3,
mask: np.ndarray = None,
patient_id: int = 0) -> InferencePipeline.Result:
"""
Performs a single inference pass through the pipeline for the provided image
:param image_channels: The input image channels to perform inference on in format: Channels x Z x Y x X.
:param voxel_spacing_mm: Voxel spacing to use for each dimension in (Z x Y x X) order
:param mask: A binary image used to ignore results outside it in format: Z x Y x X.
:param patient_id: The identifier of the patient this image belongs to (defaults to 0 if None provided).
:return InferenceResult: that contains Segmentation for each of the classes and their posterior probabilities.
"""
if image_channels is None:
raise Exception("image_channels cannot be None")
if image_channels.ndim != 4:
raise NotImplementedError("image_channels must be in shape: Channels x Z x Y x X, "
"found image_channels shape: {}".format(image_channels.shape))
if mask is not None:
ml_util.check_size_matches(image_channels, mask, 4, 3, [-1, -2, -3])
self.model.eval()
# create the dataset for the batch
batch_dataset = Dataset(index=[patient_id], batch_class=InferenceBatch)
# setup the pipeline
pipeline = (batch_dataset.p
# define pipeline variables
.init_variables([InferencePipeline.Variables.Model,
InferencePipeline.Variables.ModelConfig,
InferencePipeline.Variables.CropSize,
InferencePipeline.Variables.OutputSize,
InferencePipeline.Variables.OutputImageShape,
InferencePipeline.Variables.Stride])
# update the variables for the batch actions
.update_variable(name=InferencePipeline.Variables.Model, value=self.model)
.update_variable(name=InferencePipeline.Variables.ModelConfig, value=self.model_config)
# perform cascaded batch actions
.load(image_channels=image_channels, mask=mask)
.pre_process()
.predict()
.post_process()
)
# run the batch through the pipeline
logging.info(f"Inference pipeline ({self.pipeline_id}), Predicting patient: {patient_id}")
processed_batch: InferenceBatch = pipeline.next_batch(batch_size=1)
posteriors = processed_batch.get_component(InferenceBatch.Components.Posteriors)
image_util.check_array_range(posteriors, error_prefix="Whole image posteriors")
# prepare pipeline results from the processed batch
return InferencePipeline.Result(
patient_id=patient_id,
segmentation=processed_batch.get_component(InferenceBatch.Components.Segmentation),
posteriors=posteriors,
voxel_spacing_mm=voxel_spacing_mm
)
class InferenceBatch(CTImagesMaskedBatch):
"""
Batch class for IO with the inference pipeline. One instance of a batch will load the image
into the 'images' component of the pipeline, and store the results of the full pass
of the pipeline into the 'segmentation' and 'posteriors' components.
"""
class Components(Enum):
"""
Components associated with the inference batch class
"""
# the input image channels in Channels x Z x Y x X format.
ImageChannels = 'channels'
# a set of 2D image slices (ie: a 3D image channel), stacked in Z x Y x X format.
Images = 'images'
# a binary mask used to ignore predictions in Z x Y x X format.
Mask = 'mask'
# a numpy.ndarray in Z x Y x X format with class labels for each voxel in the original image.
Segmentation = 'segmentation'
# a numpy.ndarray with the first dimension indexing each class in C x Z x Y x X format
# with each Z x Y x X being the same shape as the Images component, and consisting of
# [0, 1] values representing the model confidence for each voxel.
Posteriors = 'posteriors'
def __init__(self, index: int, *args: Any, **kwargs: Any):
super().__init__(index, *args, **kwargs)
self.components = [x.value for x in InferenceBatch.Components]
@action
def load(self, image_channels: np.ndarray, mask: np.ndarray) -> InferenceBatch:
"""
Load image channels and mask into their respective pipeline components.
"""
self.set_component(component=InferenceBatch.Components.ImageChannels, data=image_channels)
model_config = self.get_configs()
if model_config is None:
raise ValueError("model_config is None")
if model_config.test_crop_size is None:
raise ValueError("model_config.test_crop_size is None")
if model_config.inference_stride_size is None:
raise ValueError("model_config.inference_stride_size is None")
# fetch the image channels from the batch
image_channels = self.get_component(InferenceBatch.Components.ImageChannels)
self.pipeline.set_variable(name=InferencePipeline.Variables.OutputImageShape, value=image_channels[0].shape)
# There may be cases where the test image is smaller than the test_crop_size. Adjust crop_size
# to always fit into image. If test_crop_size is smaller than the image, crop will remain unchanged.
image_size = image_channels.shape[1:]
model: BaseSegmentationModel = self.pipeline.get_variable(InferencePipeline.Variables.Model).model
effective_crop, effective_stride = \
model.crop_size_constraints.restrict_crop_size_to_image(image_size,
model_config.test_crop_size,
model_config.inference_stride_size)
self.pipeline.set_variable(name=InferencePipeline.Variables.CropSize, value=effective_crop)
self.pipeline.set_variable(name=InferencePipeline.Variables.Stride, value=effective_stride)
logging.debug(
f"Inference on image size {image_size} will run "
f"with crop size {effective_crop} and stride {effective_stride}")
# In most cases, we will be able to read the output size from the pre-computed values
# via get_output_size. Only if we have a non-standard (smaller) crop size do we re-compute the output size.
output_size = model_config.get_output_size(execution_mode=ModelExecutionMode.TEST)
if effective_crop != model_config.test_crop_size:
output_size = model.get_output_shape(input_shape=effective_crop) # type: ignore
self.pipeline.set_variable(name=InferencePipeline.Variables.OutputSize, value=output_size)
if mask is not None:
self.set_component(component=InferenceBatch.Components.Mask, data=mask)
return self
@action
def pre_process(self) -> InferenceBatch:
"""
Prepare the input components of the batch for further processing.
"""
model_config = self.get_configs()
# fetch the image channels from the batch
image_channels = self.get_component(InferenceBatch.Components.ImageChannels)
crop_size = self.pipeline.get_variable(InferencePipeline.Variables.CropSize)
output_size = self.pipeline.get_variable(InferencePipeline.Variables.OutputSize)
image_channels = image_util.pad_images_for_inference(
images=image_channels,
crop_size=crop_size,
output_size=output_size,
padding_mode=model_config.padding_mode
)
# update the post-processed components
self.set_component(component=InferenceBatch.Components.ImageChannels, data=image_channels)
return self
@action
def predict(self) -> InferenceBatch:
"""
Perform a forward pass of the model on the provided image, this generates
a set of posterior maps for each class, as well as a segmentation output
stored in the respective 'posteriors' and 'segmentation' components.
"""
model_config = self.get_configs()
# extract patches for each image channel: Num patches x Channels x Z x Y x X
patches = self._extract_patches_for_image_channels()
# split the generated patches into batches and perform forward passes
predictions = []
batch_size = model_config.inference_batch_size
for batch_idx in range(0, len(patches), batch_size):
# slice over the batches to prepare batch
batch = torch.tensor(patches[batch_idx: batch_idx + batch_size, ...]).float()
if model_config.use_gpu:
batch = batch.cuda()
# perform the forward pass
batch_predictions = self._model_fn(batch).detach().cpu().numpy()
# collect the predictions over each of the batches
predictions.append(batch_predictions)
# map the batched predictions to the original batch shape
# of shape but with an added class dimension: Num patches x Class x Z x Y x X
predictions = np.concatenate(predictions, axis=0)
# create posterior output for each class with the shape: Class x Z x Y x X. We use float32 as these
# arrays can be big.
output_image_shape = self.pipeline.get_variable(InferencePipeline.Variables.OutputImageShape)
posteriors = np.zeros(shape=[model_config.number_of_classes] + list(output_image_shape), dtype=np.float32)
stride = self.pipeline.get_variable(InferencePipeline.Variables.Stride)
for c in range(len(posteriors)):
# stitch the patches for each posterior class
self.load_from_patches(predictions[:, c, ...], # type: ignore
stride=stride,
scan_shape=output_image_shape,
data_attr=InferenceBatch.Components.Posteriors.value)
# extract computed output from the component so the pipeline buffer can be reused
posteriors[c] = self.get_component(InferenceBatch.Components.Posteriors)
# store the stitched up results for the batch
self.set_component(component=InferenceBatch.Components.Posteriors, data=posteriors)
return self
@action
def post_process(self) -> InferenceBatch:
"""
Perform post-processing on the computed outputs of a single pass of the pipeline.
Currently the following operations are performed:
-------------------------------------------------------------------------------------
1) the mask is applied to the posteriors (if required).
2) the final posteriors are used to perform an argmax to generate a multi-label segmentation.
3) extract the largest foreground connected component in the segmentation if required
"""
mask = self.get_component(InferenceBatch.Components.Mask)
posteriors = self.get_component(InferenceBatch.Components.Posteriors)
if mask is not None:
posteriors = image_util.apply_mask_to_posteriors(posteriors=posteriors, mask=mask)
# create segmentation using an argmax over the posterior probabilities
segmentation = image_util.posteriors_to_segmentation(posteriors)
# update the post-processed posteriors and save the segmentation
self.set_component(component=InferenceBatch.Components.Posteriors, data=posteriors)
self.set_component(component=InferenceBatch.Components.Segmentation, data=segmentation)
return self
def get_configs(self) -> config.SegmentationModelBase:
return self.pipeline.get_variable(InferencePipeline.Variables.ModelConfig)
def get_component(self, component: InferenceBatch.Components) -> np.ndarray:
return getattr(self, component.value) if hasattr(self, component.value) else None
@inbatch_parallel(init='indices', post='_post_custom_components', target='threads')
def set_component(self, batch_idx: int, component: InferenceBatch.Components, data: np.ndarray) \
-> Dict[str, Any]:
logging.debug("Updated data in pipeline component: {}, for batch: {}.".format(component.value, batch_idx))
return {
component.value: {'type': component.value, 'data': data}
}
def _extract_patches_for_image_channels(self) -> np.ndarray:
"""
Deterministically extracts patches from each image channel.
:return: Patches for each image channel in format: Num patches x Channels x Z x Y x X
"""
model_config = self.get_configs()
image_channels = self.get_component(InferenceBatch.Components.ImageChannels)
# There may be cases where the test image is smaller than the test_crop_size. Adjust crop_size
# to always fit into image, and adjust stride accordingly. If test_crop_size is smaller than the
# image, crop and stride will remain unchanged.
crop_size = self.pipeline.get_variable(InferencePipeline.Variables.CropSize)
stride = self.pipeline.get_variable(InferencePipeline.Variables.Stride)
patches = []
for channel_index, channel in enumerate(image_channels):
# set the current image channel component to process
self.set_component(component=InferenceBatch.Components.Images, data=channel)
channel_patches = self.get_patches(patch_shape=crop_size,
stride=stride,
padding=model_config.padding_mode.value,
data_attr=InferenceBatch.Components.Images.value)
logging.debug(
f"Image channel {channel_index}: Tensor with extracted patches has size {channel_patches.shape}")
patches.append(channel_patches)
# reset the images component
self.set_component(component=InferenceBatch.Components.Images, data=[])
return np.stack(patches, axis=1)
def _model_fn(self, patches: torch.Tensor) -> torch.Tensor:
"""
Wrapper function to handle the model forward pass
:param patches: Image patches to be passed to the model in format Patches x Channels x Z x Y x X
:return posteriors: Confidence maps [0,1] for each patch per class
in format: Patches x Channels x Class x Z x Y x X
"""
model = self.pipeline.get_variable(InferencePipeline.Variables.Model)
# Model forward pass returns posteriors
with torch.no_grad():
return model(patches)
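# Editor's note (not part of the original module): a minimal sketch of how this
# pipeline is typically driven end to end. The checkpoint path, config object
# and image array below are placeholders, not values from this repository.
#
#     pipeline = InferencePipeline.create_from_checkpoint(
#         path_to_checkpoint=Path("outputs/checkpoints/best.ckpt"),  # hypothetical path
#         model_config=segmentation_config)  # a SegmentationModelBase instance
#     if pipeline is not None:
#         result = pipeline.predict_and_post_process_whole_image(
#             image_channels=image,              # np.ndarray, Channels x Z x Y x X
#             voxel_spacing_mm=(3.0, 1.0, 1.0),  # (Z, Y, X) spacing in mm
#             mask=None)
#         segmentation = result.segmentation     # Z x Y x X label map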
| 52.537223
| 119
| 0.655547
|
from __future__ import annotations
import logging
from enum import Enum
from pathlib import Path
from typing import Any, Dict, Optional
import numpy as np
import torch
from radio import CTImagesMaskedBatch
from radio.batchflow import Dataset, action, inbatch_parallel
from InnerEye.Common.type_annotations import TupleFloat3
from InnerEye.ML import config
from InnerEye.ML.common import ModelExecutionMode
from InnerEye.ML.config import SegmentationModelBase
from InnerEye.ML.lightning_helpers import load_from_checkpoint_and_adjust_for_inference
from InnerEye.ML.lightning_models import SegmentationLightning
from InnerEye.ML.model_config_base import ModelConfigBase
from InnerEye.ML.models.architectures.base_model import BaseSegmentationModel
from InnerEye.ML.utils import image_util, ml_util
from InnerEye.ML.utils.image_util import compute_uncertainty_map_from_posteriors, gaussian_smooth_posteriors, \
posteriors_to_segmentation
class InferencePipelineBase:
def __init__(self, model_config: ModelConfigBase):
self.model_config = model_config
class FullImageInferencePipelineBase(InferencePipelineBase):
def __init__(self, model_config: SegmentationModelBase):
super().__init__(model_config)
def predict_and_post_process_whole_image(self, image_channels: np.ndarray,
voxel_spacing_mm: TupleFloat3,
mask: np.ndarray = None,
patient_id: int = 0) -> InferencePipeline.Result:
return self.post_process(self.predict_whole_image(image_channels, voxel_spacing_mm, mask, patient_id))
def predict_whole_image(self, image_channels: np.ndarray,
voxel_spacing_mm: TupleFloat3,
mask: np.ndarray = None,
patient_id: int = 0) -> InferencePipeline.Result:
raise NotImplementedError("Full image inference capability must be implemented by concrete classes")
def post_process(self, results: InferencePipeline.Result) -> InferencePipeline.Result:
if self.model_config.posterior_smoothing_mm:
posteriors = gaussian_smooth_posteriors(
posteriors=results.posteriors,
kernel_size_mm=self.model_config.posterior_smoothing_mm,
voxel_spacing_mm=results.voxel_spacing_mm
)
results = InferencePipeline.Result(
patient_id=results.patient_id,
posteriors=posteriors,
segmentation=posteriors_to_segmentation(posteriors),
voxel_spacing_mm=results.voxel_spacing_mm
)
if self.model_config.summed_probability_rules and not self.model_config.disable_extra_postprocessing:
assert isinstance(self.model_config, SegmentationModelBase)
results = results.with_new_segmentation(
image_util.apply_summed_probability_rules(self.model_config, results.posteriors, results.segmentation))
if self.model_config.largest_connected_component_foreground_classes is not None:
restrict_class_indices_and_thresholds = []
for name, idx in self.model_config.class_and_index_with_background().items():
for name2, threshold in self.model_config.largest_connected_component_foreground_classes:
if name2 == name:
restrict_class_indices_and_thresholds.append((idx, threshold))
results = results.with_new_segmentation(
image_util.extract_largest_foreground_connected_component(
multi_label_array=results.segmentation,
restrictions=restrict_class_indices_and_thresholds))
if self.model_config.slice_exclusion_rules and not self.model_config.disable_extra_postprocessing:
results = results.with_new_segmentation(
image_util.apply_slice_exclusion_rules(self.model_config, results.segmentation))
return results
class InferencePipeline(FullImageInferencePipelineBase):
MODEL_OUTPUT_POSTERIOR_RANGE = (0, 1)
class Variables(Enum):
Model = 'model'
ModelConfig = 'model_config'
OutputImageShape = 'output_image_shape'
CropSize = 'crop_size'
Stride = 'stride'
OutputSize = 'output_size'
class Result:
def __init__(self,
patient_id: int,
segmentation: np.ndarray,
posteriors: np.ndarray,
voxel_spacing_mm: TupleFloat3):
self.patient_id = patient_id
self.segmentation = segmentation
self.posteriors = posteriors
self.voxel_spacing_mm = voxel_spacing_mm
if len(self.voxel_spacing_mm) != 3:
raise ValueError(f"voxel_spacing_mm must have length 3, found: {voxel_spacing_mm}")
if any(np.array(self.voxel_spacing_mm) <= 0):
raise ValueError(f"voxel_spacing_mm must have values > 0 in each dimension, found: {voxel_spacing_mm}")
ml_util.check_size_matches(self.segmentation,
self.posteriors,
dim1=3,
dim2=4,
matching_dimensions=[-3, -2, -1],
arg1_name="segmentation",
arg2_name="posteriors")
segmentation_value_range = np.unique(self.segmentation)
if not np.all([x in range(self.posteriors.shape[0]) for x in segmentation_value_range]):
raise Exception("values in the segmentation map must be in range [0, classes), "
"found classes:{}, segmentation range:{}"
.format(self.posteriors.shape[0], segmentation_value_range))
self._uncertainty = compute_uncertainty_map_from_posteriors(self.posteriors)
@property
def uncertainty(self) -> np.ndarray:
return self._uncertainty
def with_new_segmentation(self, segmentation: np.ndarray) -> InferencePipeline.Result:
if segmentation.shape != self.segmentation.shape:
raise ValueError(f"Attempt to replace segmentation of shape {self.segmentation.shape} "
f"with one of shape {segmentation.shape}")
return InferencePipeline.Result(
patient_id=self.patient_id,
segmentation=segmentation,
posteriors=self.posteriors,
voxel_spacing_mm=self.voxel_spacing_mm)
def __init__(self, model: SegmentationLightning, model_config: config.SegmentationModelBase,
pipeline_id: int = 0):
super().__init__(model_config)
self.model = model
self.model.model.eval()
self.pipeline_id = pipeline_id
@staticmethod
def create_from_checkpoint(path_to_checkpoint: Path,
model_config: SegmentationModelBase,
pipeline_id: int = 0) -> Optional[InferencePipeline]:
if not path_to_checkpoint.is_file():
logging.warning(f"Could not recover model from checkpoint path {path_to_checkpoint}")
return None
lightning_model = load_from_checkpoint_and_adjust_for_inference(model_config, path_to_checkpoint)
assert isinstance(lightning_model, SegmentationLightning)
return InferencePipeline(model=lightning_model, model_config=model_config, pipeline_id=pipeline_id)
def predict_whole_image(self, image_channels: np.ndarray,
voxel_spacing_mm: TupleFloat3,
mask: np.ndarray = None,
patient_id: int = 0) -> InferencePipeline.Result:
if image_channels is None:
raise Exception("image_channels cannot be None")
if image_channels.ndim != 4:
raise NotImplementedError("image_channels must be in shape: Channels x Z x Y x X, "
"found image_channels shape: {}".format(image_channels.shape))
if mask is not None:
ml_util.check_size_matches(image_channels, mask, 4, 3, [-1, -2, -3])
self.model.eval()
batch_dataset = Dataset(index=[patient_id], batch_class=InferenceBatch)
pipeline = (batch_dataset.p
.init_variables([InferencePipeline.Variables.Model,
InferencePipeline.Variables.ModelConfig,
InferencePipeline.Variables.CropSize,
InferencePipeline.Variables.OutputSize,
InferencePipeline.Variables.OutputImageShape,
InferencePipeline.Variables.Stride])
.update_variable(name=InferencePipeline.Variables.Model, value=self.model)
.update_variable(name=InferencePipeline.Variables.ModelConfig, value=self.model_config)
.load(image_channels=image_channels, mask=mask)
.pre_process()
.predict()
.post_process()
)
logging.info(f"Inference pipeline ({self.pipeline_id}), Predicting patient: {patient_id}")
processed_batch: InferenceBatch = pipeline.next_batch(batch_size=1)
posteriors = processed_batch.get_component(InferenceBatch.Components.Posteriors)
image_util.check_array_range(posteriors, error_prefix="Whole image posteriors")
return InferencePipeline.Result(
patient_id=patient_id,
segmentation=processed_batch.get_component(InferenceBatch.Components.Segmentation),
posteriors=posteriors,
voxel_spacing_mm=voxel_spacing_mm
)
class InferenceBatch(CTImagesMaskedBatch):
class Components(Enum):
ImageChannels = 'channels'
Images = 'images'
Mask = 'mask'
Segmentation = 'segmentation'
Posteriors = 'posteriors'
def __init__(self, index: int, *args: Any, **kwargs: Any):
super().__init__(index, *args, **kwargs)
self.components = [x.value for x in InferenceBatch.Components]
@action
def load(self, image_channels: np.ndarray, mask: np.ndarray) -> InferenceBatch:
self.set_component(component=InferenceBatch.Components.ImageChannels, data=image_channels)
model_config = self.get_configs()
if model_config is None:
raise ValueError("model_config is None")
if model_config.test_crop_size is None:
raise ValueError("model_config.test_crop_size is None")
if model_config.inference_stride_size is None:
raise ValueError("model_config.inference_stride_size is None")
image_channels = self.get_component(InferenceBatch.Components.ImageChannels)
self.pipeline.set_variable(name=InferencePipeline.Variables.OutputImageShape, value=image_channels[0].shape)
image_size = image_channels.shape[1:]
model: BaseSegmentationModel = self.pipeline.get_variable(InferencePipeline.Variables.Model).model
effective_crop, effective_stride = \
model.crop_size_constraints.restrict_crop_size_to_image(image_size,
model_config.test_crop_size,
model_config.inference_stride_size)
self.pipeline.set_variable(name=InferencePipeline.Variables.CropSize, value=effective_crop)
self.pipeline.set_variable(name=InferencePipeline.Variables.Stride, value=effective_stride)
logging.debug(
f"Inference on image size {image_size} will run "
f"with crop size {effective_crop} and stride {effective_stride}")
output_size = model_config.get_output_size(execution_mode=ModelExecutionMode.TEST)
if effective_crop != model_config.test_crop_size:
output_size = model.get_output_shape(input_shape=effective_crop)
self.pipeline.set_variable(name=InferencePipeline.Variables.OutputSize, value=output_size)
if mask is not None:
self.set_component(component=InferenceBatch.Components.Mask, data=mask)
return self
@action
def pre_process(self) -> InferenceBatch:
model_config = self.get_configs()
image_channels = self.get_component(InferenceBatch.Components.ImageChannels)
crop_size = self.pipeline.get_variable(InferencePipeline.Variables.CropSize)
output_size = self.pipeline.get_variable(InferencePipeline.Variables.OutputSize)
image_channels = image_util.pad_images_for_inference(
images=image_channels,
crop_size=crop_size,
output_size=output_size,
padding_mode=model_config.padding_mode
)
self.set_component(component=InferenceBatch.Components.ImageChannels, data=image_channels)
return self
@action
def predict(self) -> InferenceBatch:
model_config = self.get_configs()
patches = self._extract_patches_for_image_channels()
predictions = []
batch_size = model_config.inference_batch_size
for batch_idx in range(0, len(patches), batch_size):
batch = torch.tensor(patches[batch_idx: batch_idx + batch_size, ...]).float()
if model_config.use_gpu:
batch = batch.cuda()
batch_predictions = self._model_fn(batch).detach().cpu().numpy()
predictions.append(batch_predictions)
predictions = np.concatenate(predictions, axis=0)
output_image_shape = self.pipeline.get_variable(InferencePipeline.Variables.OutputImageShape)
posteriors = np.zeros(shape=[model_config.number_of_classes] + list(output_image_shape), dtype=np.float32)
stride = self.pipeline.get_variable(InferencePipeline.Variables.Stride)
for c in range(len(posteriors)):
self.load_from_patches(predictions[:, c, ...],
stride=stride,
scan_shape=output_image_shape,
data_attr=InferenceBatch.Components.Posteriors.value)
posteriors[c] = self.get_component(InferenceBatch.Components.Posteriors)
self.set_component(component=InferenceBatch.Components.Posteriors, data=posteriors)
return self
@action
def post_process(self) -> InferenceBatch:
mask = self.get_component(InferenceBatch.Components.Mask)
posteriors = self.get_component(InferenceBatch.Components.Posteriors)
if mask is not None:
posteriors = image_util.apply_mask_to_posteriors(posteriors=posteriors, mask=mask)
segmentation = image_util.posteriors_to_segmentation(posteriors)
self.set_component(component=InferenceBatch.Components.Posteriors, data=posteriors)
self.set_component(component=InferenceBatch.Components.Segmentation, data=segmentation)
return self
def get_configs(self) -> config.SegmentationModelBase:
return self.pipeline.get_variable(InferencePipeline.Variables.ModelConfig)
def get_component(self, component: InferenceBatch.Components) -> np.ndarray:
return getattr(self, component.value) if hasattr(self, component.value) else None
@inbatch_parallel(init='indices', post='_post_custom_components', target='threads')
def set_component(self, batch_idx: int, component: InferenceBatch.Components, data: np.ndarray) \
-> Dict[str, Any]:
logging.debug("Updated data in pipeline component: {}, for batch: {}.".format(component.value, batch_idx))
return {
component.value: {'type': component.value, 'data': data}
}
def _extract_patches_for_image_channels(self) -> np.ndarray:
model_config = self.get_configs()
image_channels = self.get_component(InferenceBatch.Components.ImageChannels)
crop_size = self.pipeline.get_variable(InferencePipeline.Variables.CropSize)
stride = self.pipeline.get_variable(InferencePipeline.Variables.Stride)
patches = []
for channel_index, channel in enumerate(image_channels):
self.set_component(component=InferenceBatch.Components.Images, data=channel)
channel_patches = self.get_patches(patch_shape=crop_size,
stride=stride,
padding=model_config.padding_mode.value,
data_attr=InferenceBatch.Components.Images.value)
logging.debug(
f"Image channel {channel_index}: Tensor with extracted patches has size {channel_patches.shape}")
patches.append(channel_patches)
self.set_component(component=InferenceBatch.Components.Images, data=[])
return np.stack(patches, axis=1)
def _model_fn(self, patches: torch.Tensor) -> torch.Tensor:
model = self.pipeline.get_variable(InferencePipeline.Variables.Model)
with torch.no_grad():
return model(patches)
| true
| true
|
f7099b9af35d02543cd8c55b0aed90da402ab7ff
| 6,986
|
py
|
Python
|
qualcoder/GUI/ui_dialog_report_compare_coder_file.py
|
ericbrasiln/QualCoder
|
46108a0e43034bdeed77319bb09dc1a3227a8c3a
|
[
"MIT"
] | null | null | null |
qualcoder/GUI/ui_dialog_report_compare_coder_file.py
|
ericbrasiln/QualCoder
|
46108a0e43034bdeed77319bb09dc1a3227a8c3a
|
[
"MIT"
] | null | null | null |
qualcoder/GUI/ui_dialog_report_compare_coder_file.py
|
ericbrasiln/QualCoder
|
46108a0e43034bdeed77319bb09dc1a3227a8c3a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_dialog_report_compare_coder_file.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog_reportCompareCoderFile(object):
def setupUi(self, Dialog_reportCompareCoderFile):
Dialog_reportCompareCoderFile.setObjectName("Dialog_reportCompareCoderFile")
Dialog_reportCompareCoderFile.setWindowModality(QtCore.Qt.NonModal)
Dialog_reportCompareCoderFile.resize(989, 580)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog_reportCompareCoderFile)
self.verticalLayout.setContentsMargins(1, 1, 1, 1)
self.verticalLayout.setSpacing(1)
self.verticalLayout.setObjectName("verticalLayout")
self.groupBox = QtWidgets.QGroupBox(Dialog_reportCompareCoderFile)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setMinimumSize(QtCore.QSize(0, 120))
self.groupBox.setMaximumSize(QtCore.QSize(16777215, 120))
self.groupBox.setTitle("")
self.groupBox.setObjectName("groupBox")
self.label_2 = QtWidgets.QLabel(self.groupBox)
self.label_2.setGeometry(QtCore.QRect(10, 20, 101, 22))
self.label_2.setObjectName("label_2")
self.comboBox_coders = QtWidgets.QComboBox(self.groupBox)
self.comboBox_coders.setGeometry(QtCore.QRect(112, 20, 211, 28))
self.comboBox_coders.setObjectName("comboBox_coders")
self.label_title = QtWidgets.QLabel(self.groupBox)
self.label_title.setGeometry(QtCore.QRect(10, -2, 291, 22))
self.label_title.setObjectName("label_title")
self.label_matrix = QtWidgets.QLabel(self.groupBox)
self.label_matrix.setGeometry(QtCore.QRect(600, 20, 30, 30))
self.label_matrix.setText("")
self.label_matrix.setObjectName("label_matrix")
self.label_memos = QtWidgets.QLabel(self.groupBox)
self.label_memos.setGeometry(QtCore.QRect(600, 70, 30, 30))
self.label_memos.setText("")
self.label_memos.setObjectName("label_memos")
self.label_selections = QtWidgets.QLabel(self.groupBox)
self.label_selections.setGeometry(QtCore.QRect(330, 20, 611, 28))
self.label_selections.setObjectName("label_selections")
self.pushButton_clear = QtWidgets.QPushButton(self.groupBox)
self.pushButton_clear.setGeometry(QtCore.QRect(50, 60, 32, 32))
self.pushButton_clear.setText("")
self.pushButton_clear.setObjectName("pushButton_clear")
self.pushButton_export_odt = QtWidgets.QPushButton(self.groupBox)
self.pushButton_export_odt.setGeometry(QtCore.QRect(90, 60, 32, 32))
self.pushButton_export_odt.setText("")
self.pushButton_export_odt.setObjectName("pushButton_export_odt")
self.pushButton_run = QtWidgets.QPushButton(self.groupBox)
self.pushButton_run.setGeometry(QtCore.QRect(10, 60, 32, 32))
self.pushButton_run.setText("")
self.pushButton_run.setObjectName("pushButton_run")
self.pushButton_help1 = QtWidgets.QPushButton(self.groupBox)
self.pushButton_help1.setGeometry(QtCore.QRect(130, 60, 32, 32))
self.pushButton_help1.setText("")
self.pushButton_help1.setObjectName("pushButton_help1")
self.verticalLayout.addWidget(self.groupBox)
self.groupBox_2 = QtWidgets.QGroupBox(Dialog_reportCompareCoderFile)
self.groupBox_2.setTitle("")
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.splitter = QtWidgets.QSplitter(self.groupBox_2)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.splitter_vert = QtWidgets.QSplitter(self.splitter)
self.splitter_vert.setOrientation(QtCore.Qt.Vertical)
self.splitter_vert.setObjectName("splitter_vert")
self.listWidget_files = QtWidgets.QListWidget(self.splitter_vert)
self.listWidget_files.setObjectName("listWidget_files")
self.treeWidget = QtWidgets.QTreeWidget(self.splitter_vert)
self.treeWidget.setObjectName("treeWidget")
self.treeWidget.headerItem().setText(0, "Code Tree")
self.textEdit = QtWidgets.QTextEdit(self.splitter)
self.textEdit.setObjectName("textEdit")
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.groupBox_2)
self.retranslateUi(Dialog_reportCompareCoderFile)
QtCore.QMetaObject.connectSlotsByName(Dialog_reportCompareCoderFile)
Dialog_reportCompareCoderFile.setTabOrder(self.comboBox_coders, self.treeWidget)
Dialog_reportCompareCoderFile.setTabOrder(self.treeWidget, self.textEdit)
def retranslateUi(self, Dialog_reportCompareCoderFile):
_translate = QtCore.QCoreApplication.translate
Dialog_reportCompareCoderFile.setWindowTitle(_translate("Dialog_reportCompareCoderFile", "Reports"))
self.label_2.setText(_translate("Dialog_reportCompareCoderFile", "Coders:"))
self.label_title.setToolTip(_translate("Dialog_reportCompareCoderFile", "To compare coding.\n"
"Select two coders, one file, one code."))
self.label_title.setText(_translate("Dialog_reportCompareCoderFile", "Coder comparisons by file"))
self.label_matrix.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Matrix options</p></body></html>"))
self.label_memos.setToolTip(_translate("Dialog_reportCompareCoderFile", "Memo reporting options"))
self.label_selections.setText(_translate("Dialog_reportCompareCoderFile", "Coders selected"))
self.pushButton_clear.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Clear selection</p></body></html>"))
self.pushButton_export_odt.setToolTip(_translate("Dialog_reportCompareCoderFile", "Export ODT file"))
self.pushButton_run.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Run comparison</p></body></html>"))
self.pushButton_help1.setToolTip(_translate("Dialog_reportCompareCoderFile", "Statistics explanation"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog_reportCompareCoderFile = QtWidgets.QDialog()
ui = Ui_Dialog_reportCompareCoderFile()
ui.setupUi(Dialog_reportCompareCoderFile)
Dialog_reportCompareCoderFile.show()
sys.exit(app.exec_())
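# Editor's note (not part of the generated file): pyuic5 output like this is
# normally regenerated from the Designer .ui source rather than edited by hand,
# e.g. (assumed invocation):
#
#     pyuic5 ui_dialog_report_compare_coder_file.ui -o ui_dialog_report_compare_coder_file.py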
| 57.735537
| 144
| 0.73862
|
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog_reportCompareCoderFile(object):
def setupUi(self, Dialog_reportCompareCoderFile):
Dialog_reportCompareCoderFile.setObjectName("Dialog_reportCompareCoderFile")
Dialog_reportCompareCoderFile.setWindowModality(QtCore.Qt.NonModal)
Dialog_reportCompareCoderFile.resize(989, 580)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog_reportCompareCoderFile)
self.verticalLayout.setContentsMargins(1, 1, 1, 1)
self.verticalLayout.setSpacing(1)
self.verticalLayout.setObjectName("verticalLayout")
self.groupBox = QtWidgets.QGroupBox(Dialog_reportCompareCoderFile)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setMinimumSize(QtCore.QSize(0, 120))
self.groupBox.setMaximumSize(QtCore.QSize(16777215, 120))
self.groupBox.setTitle("")
self.groupBox.setObjectName("groupBox")
self.label_2 = QtWidgets.QLabel(self.groupBox)
self.label_2.setGeometry(QtCore.QRect(10, 20, 101, 22))
self.label_2.setObjectName("label_2")
self.comboBox_coders = QtWidgets.QComboBox(self.groupBox)
self.comboBox_coders.setGeometry(QtCore.QRect(112, 20, 211, 28))
self.comboBox_coders.setObjectName("comboBox_coders")
self.label_title = QtWidgets.QLabel(self.groupBox)
self.label_title.setGeometry(QtCore.QRect(10, -2, 291, 22))
self.label_title.setObjectName("label_title")
self.label_matrix = QtWidgets.QLabel(self.groupBox)
self.label_matrix.setGeometry(QtCore.QRect(600, 20, 30, 30))
self.label_matrix.setText("")
self.label_matrix.setObjectName("label_matrix")
self.label_memos = QtWidgets.QLabel(self.groupBox)
self.label_memos.setGeometry(QtCore.QRect(600, 70, 30, 30))
self.label_memos.setText("")
self.label_memos.setObjectName("label_memos")
self.label_selections = QtWidgets.QLabel(self.groupBox)
self.label_selections.setGeometry(QtCore.QRect(330, 20, 611, 28))
self.label_selections.setObjectName("label_selections")
self.pushButton_clear = QtWidgets.QPushButton(self.groupBox)
self.pushButton_clear.setGeometry(QtCore.QRect(50, 60, 32, 32))
self.pushButton_clear.setText("")
self.pushButton_clear.setObjectName("pushButton_clear")
self.pushButton_export_odt = QtWidgets.QPushButton(self.groupBox)
self.pushButton_export_odt.setGeometry(QtCore.QRect(90, 60, 32, 32))
self.pushButton_export_odt.setText("")
self.pushButton_export_odt.setObjectName("pushButton_export_odt")
self.pushButton_run = QtWidgets.QPushButton(self.groupBox)
self.pushButton_run.setGeometry(QtCore.QRect(10, 60, 32, 32))
self.pushButton_run.setText("")
self.pushButton_run.setObjectName("pushButton_run")
self.pushButton_help1 = QtWidgets.QPushButton(self.groupBox)
self.pushButton_help1.setGeometry(QtCore.QRect(130, 60, 32, 32))
self.pushButton_help1.setText("")
self.pushButton_help1.setObjectName("pushButton_help1")
self.verticalLayout.addWidget(self.groupBox)
self.groupBox_2 = QtWidgets.QGroupBox(Dialog_reportCompareCoderFile)
self.groupBox_2.setTitle("")
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.splitter = QtWidgets.QSplitter(self.groupBox_2)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.splitter_vert = QtWidgets.QSplitter(self.splitter)
self.splitter_vert.setOrientation(QtCore.Qt.Vertical)
self.splitter_vert.setObjectName("splitter_vert")
self.listWidget_files = QtWidgets.QListWidget(self.splitter_vert)
self.listWidget_files.setObjectName("listWidget_files")
self.treeWidget = QtWidgets.QTreeWidget(self.splitter_vert)
self.treeWidget.setObjectName("treeWidget")
self.treeWidget.headerItem().setText(0, "Code Tree")
self.textEdit = QtWidgets.QTextEdit(self.splitter)
self.textEdit.setObjectName("textEdit")
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.groupBox_2)
self.retranslateUi(Dialog_reportCompareCoderFile)
QtCore.QMetaObject.connectSlotsByName(Dialog_reportCompareCoderFile)
Dialog_reportCompareCoderFile.setTabOrder(self.comboBox_coders, self.treeWidget)
Dialog_reportCompareCoderFile.setTabOrder(self.treeWidget, self.textEdit)
def retranslateUi(self, Dialog_reportCompareCoderFile):
_translate = QtCore.QCoreApplication.translate
Dialog_reportCompareCoderFile.setWindowTitle(_translate("Dialog_reportCompareCoderFile", "Reports"))
self.label_2.setText(_translate("Dialog_reportCompareCoderFile", "Coders:"))
self.label_title.setToolTip(_translate("Dialog_reportCompareCoderFile", "To compare coding.\n"
"Select two coders, one file, one code."))
self.label_title.setText(_translate("Dialog_reportCompareCoderFile", "Coder comparisons by file"))
self.label_matrix.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Matrix options</p></body></html>"))
self.label_memos.setToolTip(_translate("Dialog_reportCompareCoderFile", "Memo reporting options"))
self.label_selections.setText(_translate("Dialog_reportCompareCoderFile", "Coders selected"))
self.pushButton_clear.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Clear selection</p></body></html>"))
self.pushButton_export_odt.setToolTip(_translate("Dialog_reportCompareCoderFile", "Export ODT file"))
self.pushButton_run.setToolTip(_translate("Dialog_reportCompareCoderFile", "<html><head/><body><p>Run comparison</p></body></html>"))
self.pushButton_help1.setToolTip(_translate("Dialog_reportCompareCoderFile", "Statistics explanation"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog_reportCompareCoderFile = QtWidgets.QDialog()
ui = Ui_Dialog_reportCompareCoderFile()
ui.setupUi(Dialog_reportCompareCoderFile)
Dialog_reportCompareCoderFile.show()
sys.exit(app.exec_())
| true
| true
|
f7099beab38b1d30053511639c2dc6a1ef187290
| 1,884
|
py
|
Python
|
app/misc/modular.py
|
Cicadadenis/999
|
f1de12723c89d77fc4e020ba9343289665330776
|
[
"MIT"
] | null | null | null |
app/misc/modular.py
|
Cicadadenis/999
|
f1de12723c89d77fc4e020ba9343289665330776
|
[
"MIT"
] | null | null | null |
app/misc/modular.py
|
Cicadadenis/999
|
f1de12723c89d77fc4e020ba9343289665330776
|
[
"MIT"
] | null | null | null |
import glob
import logging
from importlib import import_module
from os.path import basename, isdir, isfile
from pathlib import Path
from aiogram import Dispatcher
class ModuleManager:
def __init__(self, dp: Dispatcher):
self.dp = dp
self.root = Path(__file__).parent.parent
def load_path(self, path: str):
mod_paths = glob.glob(f"{self.root}/{path}/*.py")
all_modules = [
basename(module)[:-3]
for module in mod_paths
if isfile(module) and module.endswith(".py")
]
for module in all_modules:
self.load(path.replace("/", ".") + f".{module}")
def load(self, module: str):
try:
imp_module = import_module("app." + module)
except ModuleNotFoundError:
logging.error(f"Module <{module}> was not found.")
raise SystemExit()
if not hasattr(imp_module, "setup"):
logging.error(f"Module <{module}> doesn't have <setup>.")
raise SystemExit()
if not callable(imp_module.setup):
logging.error(f"Module <{module}> doesn't have callable <setup>.")
raise SystemExit()
try:
imp_module.setup(self.dp)
except Exception as error:
logging.exception(f"An error occured in <{module}>: {error}")
raise SystemExit()
logging.debug(f"Module <{module}> was loaded.")
return module
def load_all(self, modules: list):
"""
Iterates through modules and loads them.
"""
for module in modules:
# Shortcut for %module%.__init__
if module.startswith("$"):
self.load(f"{module[1:]}.__init__")
elif isdir(f"{self.root}/{module}/"):
self.load_path(module)
else:
self.load(module)
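# Editor's note (not part of the original file): a minimal sketch of how this
# manager is typically wired up; the Dispatcher and module names below are
# placeholders.
#
#     dp = Dispatcher(bot)           # an aiogram Dispatcher
#     mm = ModuleManager(dp)
#     mm.load_all([
#         "$handlers",       # "$pkg" loads app.handlers.__init__
#         "middlewares",     # a directory: every .py file inside is loaded
#         "filters.admin",   # a single module: imported as app.filters.admin
#     ])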
| 26.914286
| 78
| 0.565817
|
import glob
import logging
from importlib import import_module
from os.path import basename, isdir, isfile
from pathlib import Path
from aiogram import Dispatcher
class ModuleManager:
def __init__(self, dp: Dispatcher):
self.dp = dp
self.root = Path(__file__).parent.parent
def load_path(self, path: str):
mod_paths = glob.glob(f"{self.root}/{path}/*.py")
all_modules = [
basename(module)[:-3]
for module in mod_paths
if isfile(module) and module.endswith(".py")
]
for module in all_modules:
self.load(path.replace("/", ".") + f".{module}")
def load(self, module: str):
try:
imp_module = import_module("app." + module)
except ModuleNotFoundError:
logging.error(f"Module <{module}> was not found.")
raise SystemExit()
if not hasattr(imp_module, "setup"):
logging.error(f"Module <{module}> doesn't have <setup>.")
raise SystemExit()
if not callable(imp_module.setup):
logging.error(f"Module <{module}> doesn't have callable <setup>.")
raise SystemExit()
try:
imp_module.setup(self.dp)
except Exception as error:
logging.exception(f"An error occured in <{module}>: {error}")
raise SystemExit()
logging.debug(f"Module <{module}> was loaded.")
return module
def load_all(self, modules: list):
for module in modules:
if module.startswith("$"):
self.load(f"{module[1:]}.__init__")
elif isdir(f"{self.root}/{module}/"):
self.load_path(module)
else:
self.load(module)
| true
| true
|
f7099c4ecec5f13d588cfea1db3c144e28b6d645
| 1,887
|
py
|
Python
|
entities/entity-processor.py
|
surma-dump/html-build
|
eeeeae3624cc7ee6733a0c5f9d077546a8b81e90
|
[
"CC-BY-4.0"
] | 69
|
2015-09-06T14:33:32.000Z
|
2022-02-16T03:17:39.000Z
|
entities/entity-processor.py
|
surma-dump/html-build
|
eeeeae3624cc7ee6733a0c5f9d077546a8b81e90
|
[
"CC-BY-4.0"
] | 186
|
2015-08-31T08:10:56.000Z
|
2022-03-16T17:11:57.000Z
|
entities/entity-processor.py
|
surma-dump/html-build
|
eeeeae3624cc7ee6733a0c5f9d077546a8b81e90
|
[
"CC-BY-4.0"
] | 72
|
2015-08-28T03:36:52.000Z
|
2022-03-13T21:27:13.000Z
|
import xml.dom.minidom
import sys
# this uses 658 MB
document = xml.dom.minidom.parse(sys.stdin)
sets = []
entities = {}
for group in document.getElementsByTagName('group'):
if (group.getAttribute('name') == 'html5' or group.getAttribute('name') == 'mathml'):
for set in group.getElementsByTagName('set'):
sets.append(set.getAttribute('name'))
for entity in document.getElementsByTagName('entity'):
assert entity.parentNode.tagName == 'character'
assert entity.hasAttribute('set')
set = entity.getAttribute('set')
if (set in sets):
assert entity.hasAttribute('id')
name = entity.getAttribute('id')
assert len(name) > 0
assert entity.parentNode.hasAttribute('id')
value = entity.parentNode.getAttribute('id')
assert name not in entities or entities[name] == value, '(name: ' + name + ' old value: ' + entities[name] + ' new value: ' + value + ')'
if (name not in entities):
entities[name] = value
if ('-' in value):
            value1 = value[1:6]
            value2 = value[7:]
glyph = '<span data-x="" class="glyph compound">&#x' + value1 + ';&#x' + value2 + ';</span>'
print(' <tr id="entity-' + name + '"> <td> <code data-x="">' + name + ';</code> </td> <td> U+' + value1 + ' U+' + value2 + ' </td> <td> ' + glyph + ' </td> </tr>');
else:
            if (value[1:] in ['020DC', '00311', '020DB']):
glyph = '<span data-x="" class="glyph composition">◌' + '&#x' + value[1:] + ';</span>'
elif ('00000' < value[1:] < '00020'):
glyph = '<span data-x="" class="glyph control">$' + value[4:] + ';</span>'
else:
glyph = '<span data-x="" class="glyph">&#x' + value[1:] + ';</span>'
print(' <tr id="entity-' + name + '"> <td> <code data-x="">' + name + ';</code> </td> <td> U+' + value[1:] + ' </td> <td> ' + glyph + ' </td> </tr>');
| 46.02439
| 176
| 0.559618
|
import xml.dom.minidom
import sys
document = xml.dom.minidom.parse(sys.stdin)
sets = []
entities = {}
for group in document.getElementsByTagName('group'):
if (group.getAttribute('name') == 'html5' or group.getAttribute('name') == 'mathml'):
for set in group.getElementsByTagName('set'):
sets.append(set.getAttribute('name'))
for entity in document.getElementsByTagName('entity'):
assert entity.parentNode.tagName == 'character'
assert entity.hasAttribute('set')
set = entity.getAttribute('set')
if (set in sets):
assert entity.hasAttribute('id')
name = entity.getAttribute('id')
assert len(name) > 0
assert entity.parentNode.hasAttribute('id')
value = entity.parentNode.getAttribute('id')
assert name not in entities or entities[name] == value, '(name: ' + name + ' old value: ' + entities[name] + ' new value: ' + value + ')'
if (name not in entities):
entities[name] = value
if ('-' in value):
            value1 = value[1:6]
            value2 = value[7:]
glyph = '<span data-x="" class="glyph compound">&#x' + value1 + ';&#x' + value2 + ';</span>'
print(' <tr id="entity-' + name + '"> <td> <code data-x="">' + name + ';</code> </td> <td> U+' + value1 + ' U+' + value2 + ' </td> <td> ' + glyph + ' </td> </tr>');
else:
            if (value[1:] in ['020DC', '00311', '020DB']):
glyph = '<span data-x="" class="glyph composition">◌' + '&#x' + value[1:] + ';</span>'
elif ('00000' < value[1:] < '00020'):
glyph = '<span data-x="" class="glyph control">$' + value[4:] + ';</span>'
else:
glyph = '<span data-x="" class="glyph">&#x' + value[1:] + ';</span>'
print(' <tr id="entity-' + name + '"> <td> <code data-x="">' + name + ';</code> </td> <td> U+' + value[1:] + ' </td> <td> ' + glyph + ' </td> </tr>');
| true
| true
|
f7099c93ad2bd1e3a69de77ee2572adef4df10e2
| 1,154
|
py
|
Python
|
util/metric.py
|
smartdolphin/variational-autoencoder
|
999e00c1f630d1e3b6b433c965f87d236ba18668
|
[
"MIT"
] | 3
|
2018-05-31T08:30:30.000Z
|
2018-09-02T09:07:51.000Z
|
util/metric.py
|
smartdolphin/variational-autoencoder
|
999e00c1f630d1e3b6b433c965f87d236ba18668
|
[
"MIT"
] | null | null | null |
util/metric.py
|
smartdolphin/variational-autoencoder
|
999e00c1f630d1e3b6b433c965f87d236ba18668
|
[
"MIT"
] | 1
|
2018-09-02T09:07:53.000Z
|
2018-09-02T09:07:53.000Z
|
from collections import Counter
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix
def __majority(arr):
counter = Counter(arr)
value, _ = counter.most_common(1)[0]
return value
def clustering_accuracy(y_true, y_clustering):
clustering_labels = list(set(y_clustering))
new_labels = np.zeros_like(y_clustering)
for clustering_label in clustering_labels:
locator = y_clustering == clustering_label
locations = np.argwhere(locator)
real_labels = y_true[locations].ravel()
major_label = __majority(real_labels)
new_labels[locator] = major_label
return accuracy_score(y_true, new_labels)
def confusion_matrix_majority(y_true, y_clustering):
clustering_labels = list(set(y_clustering))
new_labels = np.zeros_like(y_clustering)
for clustering_label in clustering_labels:
locator = y_clustering == clustering_label
locations = np.argwhere(locator)
real_labels = y_true[locations].ravel()
major_label = __majority(real_labels)
new_labels[locator] = major_label
return confusion_matrix(y_true, new_labels)
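# --- usage sketch (added; toy data, not part of the original module) ---
if __name__ == '__main__':
    # Cluster ids (0/1) need not match the true labels (10/20): each cluster
    # is first mapped to the majority true label found inside it.
    y_true = np.array([10, 10, 10, 20, 20, 20])
    y_clustering = np.array([0, 0, 1, 1, 1, 1])
    print(clustering_accuracy(y_true, y_clustering))        # 5/6 correct -> 0.8333...
    print(confusion_matrix_majority(y_true, y_clustering))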
| 33.941176
| 60
| 0.733969
|
from collections import Counter
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix
def __majority(arr):
counter = Counter(arr)
value, _ = counter.most_common(1)[0]
return value
def clustering_accuracy(y_true, y_clustering):
clustering_labels = list(set(y_clustering))
new_labels = np.zeros_like(y_clustering)
for clustering_label in clustering_labels:
locator = y_clustering == clustering_label
locations = np.argwhere(locator)
real_labels = y_true[locations].ravel()
major_label = __majority(real_labels)
new_labels[locator] = major_label
return accuracy_score(y_true, new_labels)
def confusion_matrix_majority(y_true, y_clustering):
clustering_labels = list(set(y_clustering))
new_labels = np.zeros_like(y_clustering)
for clustering_label in clustering_labels:
locator = y_clustering == clustering_label
locations = np.argwhere(locator)
real_labels = y_true[locations].ravel()
major_label = __majority(real_labels)
new_labels[locator] = major_label
return confusion_matrix(y_true, new_labels)
| true
| true
|
f7099df0b81adf9edb8587839dbb5f4204a4277b
| 430
|
py
|
Python
|
app/core/migrations/0006_recipe_image.py
|
Plachey/recipe-app-api
|
226317d0af02e3add2239ea46eeeff45ce55d151
|
[
"MIT"
] | null | null | null |
app/core/migrations/0006_recipe_image.py
|
Plachey/recipe-app-api
|
226317d0af02e3add2239ea46eeeff45ce55d151
|
[
"MIT"
] | null | null | null |
app/core/migrations/0006_recipe_image.py
|
Plachey/recipe-app-api
|
226317d0af02e3add2239ea46eeeff45ce55d151
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-04-14 14:07
import core.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_recipe'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='image',
field=models.ImageField(null=True, upload_to=core.models.recipe_image_file_path),
),
]
| 21.5
| 93
| 0.62093
|
import core.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_recipe'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='image',
field=models.ImageField(null=True, upload_to=core.models.recipe_image_file_path),
),
]
| true
| true
|
f7099e207c4fe793f5a10e86c872264015378602
| 4,347
|
py
|
Python
|
xrpl/core/binarycodec/types/currency.py
|
antonyggvzvmnxxcx/xrpl-py
|
fda7ce2a28807374b40324478e42e17d97a063d7
|
[
"ISC"
] | null | null | null |
xrpl/core/binarycodec/types/currency.py
|
antonyggvzvmnxxcx/xrpl-py
|
fda7ce2a28807374b40324478e42e17d97a063d7
|
[
"ISC"
] | 2
|
2022-02-23T22:57:46.000Z
|
2022-02-24T11:41:49.000Z
|
xrpl/core/binarycodec/types/currency.py
|
antonyggvzvmnxxcx/xrpl-py
|
fda7ce2a28807374b40324478e42e17d97a063d7
|
[
"ISC"
] | 1
|
2022-02-21T07:36:36.000Z
|
2022-02-21T07:36:36.000Z
|
"""Codec for currency property inside an XRPL issued currency amount json."""
from __future__ import annotations # Requires Python 3.7+
from typing import Optional, Type
from typing_extensions import Final
from xrpl.constants import HEX_CURRENCY_REGEX, ISO_CURRENCY_REGEX
from xrpl.core.binarycodec.exceptions import XRPLBinaryCodecException
from xrpl.core.binarycodec.types.hash160 import Hash160
_CURRENCY_CODE_LENGTH: Final[int] = 20 # bytes
def _is_iso_code(value: str) -> bool:
"""Tests if value is a valid 3-char iso code."""
return bool(ISO_CURRENCY_REGEX.fullmatch(value))
def _iso_code_from_hex(value: bytes) -> Optional[str]:
candidate_iso = value.decode("ascii")
if candidate_iso == "XRP":
raise XRPLBinaryCodecException(
"Disallowed currency code: to indicate the currency "
"XRP you must use 20 bytes of 0s"
)
if _is_iso_code(candidate_iso):
return candidate_iso
return None
def _is_hex(value: str) -> bool:
"""Tests if value is a valid 40-char hex string."""
return bool(HEX_CURRENCY_REGEX.fullmatch(value))
def _iso_to_bytes(iso: str) -> bytes:
"""
Convert an ISO code to a 160-bit (20 byte) encoded representation.
See "Currency codes" subheading in
`Amount Fields <https://xrpl.org/serialization.html#amount-fields>`_
"""
if not _is_iso_code(iso):
raise XRPLBinaryCodecException(f"Invalid ISO code: {iso}")
if iso == "XRP":
# This code (160 bit all zeroes) is used to indicate XRP in
# rare cases where a field must specify a currency code for XRP.
return bytes(_CURRENCY_CODE_LENGTH)
iso_bytes = iso.encode("ASCII")
# Currency Codes: https://xrpl.org/currency-formats.html#standard-currency-codes
# 160 total bits:
# 8 bits type code (0x00)
# 88 bits reserved (0's)
# 24 bits ASCII
# 16 bits version (0x00)
# 24 bits reserved (0's)
return bytes(12) + iso_bytes + bytes(5)
class Currency(Hash160):
"""
Codec for serializing and deserializing currency codes in issued currency amounts.
`Amount fields <https://xrpl.org/serialization.html#amount-fields>`_
Attributes:
buffer: The byte encoding of this currency.
_iso: The three-character ISO currency code if standard format, else None.
"""
LENGTH: Final[int] = 20
_iso: Optional[str] = None
def __init__(self: Currency, buffer: Optional[bytes] = None) -> None:
"""Construct a Currency."""
if buffer is not None:
super().__init__(buffer)
else:
super().__init__(bytes(self.LENGTH))
code_bytes = self.buffer[12:15]
# Determine whether this currency code is in standard or nonstandard format:
# https://xrpl.org/currency-formats.html#nonstandard-currency-codes
if self.buffer[0] != 0:
# non-standard currency
self._iso = None
elif self.buffer.hex() == "0" * 40: # all 0s
# the special case for literal XRP
self._iso = "XRP"
else:
self._iso = _iso_code_from_hex(code_bytes)
@classmethod
def from_value(cls: Type[Currency], value: str) -> Currency:
"""
Construct a Currency object from a string representation of a currency.
Args:
value: The string to construct a Currency object from.
Returns:
A Currency object constructed from value.
Raises:
XRPLBinaryCodecException: If the Currency representation is invalid.
"""
if not isinstance(value, str):
raise XRPLBinaryCodecException(
"Invalid type to construct a Currency: expected str,"
f" received {value.__class__.__name__}."
)
if _is_iso_code(value):
return Currency(_iso_to_bytes(value))
if _is_hex(value):
return cls(bytes.fromhex(value))
raise XRPLBinaryCodecException("Unsupported Currency representation: {value}")
def to_json(self: Currency) -> str:
"""
Returns the JSON representation of a currency.
Returns:
The JSON representation of a Currency.
"""
if self._iso is not None:
return self._iso
return self.buffer.hex().upper()
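# --- usage sketch (added; minimal checks against the class defined above) ---
if __name__ == "__main__":
    # A standard three-character ISO code round-trips through the 20-byte form.
    assert Currency.from_value("USD").to_json() == "USD"
    # "XRP" is encoded as 20 zero bytes and decoded back as the special case.
    assert Currency.from_value("XRP").buffer == bytes(20)
    assert Currency.from_value("XRP").to_json() == "XRP"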
| 33.183206
| 86
| 0.645503
|
from __future__ import annotations
from typing import Optional, Type
from typing_extensions import Final
from xrpl.constants import HEX_CURRENCY_REGEX, ISO_CURRENCY_REGEX
from xrpl.core.binarycodec.exceptions import XRPLBinaryCodecException
from xrpl.core.binarycodec.types.hash160 import Hash160
_CURRENCY_CODE_LENGTH: Final[int] = 20
def _is_iso_code(value: str) -> bool:
return bool(ISO_CURRENCY_REGEX.fullmatch(value))
def _iso_code_from_hex(value: bytes) -> Optional[str]:
candidate_iso = value.decode("ascii")
if candidate_iso == "XRP":
raise XRPLBinaryCodecException(
"Disallowed currency code: to indicate the currency "
"XRP you must use 20 bytes of 0s"
)
if _is_iso_code(candidate_iso):
return candidate_iso
return None
def _is_hex(value: str) -> bool:
return bool(HEX_CURRENCY_REGEX.fullmatch(value))
def _iso_to_bytes(iso: str) -> bytes:
if not _is_iso_code(iso):
raise XRPLBinaryCodecException(f"Invalid ISO code: {iso}")
if iso == "XRP":
return bytes(_CURRENCY_CODE_LENGTH)
iso_bytes = iso.encode("ASCII")
return bytes(12) + iso_bytes + bytes(5)
class Currency(Hash160):
LENGTH: Final[int] = 20
_iso: Optional[str] = None
def __init__(self: Currency, buffer: Optional[bytes] = None) -> None:
if buffer is not None:
super().__init__(buffer)
else:
super().__init__(bytes(self.LENGTH))
code_bytes = self.buffer[12:15]
        if self.buffer[0] != 0:
self._iso = None
elif self.buffer.hex() == "0" * 40:
self._iso = "XRP"
else:
self._iso = _iso_code_from_hex(code_bytes)
@classmethod
def from_value(cls: Type[Currency], value: str) -> Currency:
if not isinstance(value, str):
raise XRPLBinaryCodecException(
"Invalid type to construct a Currency: expected str,"
f" received {value.__class__.__name__}."
)
if _is_iso_code(value):
return Currency(_iso_to_bytes(value))
if _is_hex(value):
return cls(bytes.fromhex(value))
raise XRPLBinaryCodecException("Unsupported Currency representation: {value}")
def to_json(self: Currency) -> str:
if self._iso is not None:
return self._iso
return self.buffer.hex().upper()
| true
| true
|
f7099e28f13d0d0ebf689edd59f647a14c4109a6
| 833
|
py
|
Python
|
python/functions.py
|
felipesud/side-projects
|
82ce8559cd64ce726eeebe5c8f7f5f07228ac44a
|
[
"MIT"
] | null | null | null |
python/functions.py
|
felipesud/side-projects
|
82ce8559cd64ce726eeebe5c8f7f5f07228ac44a
|
[
"MIT"
] | null | null | null |
python/functions.py
|
felipesud/side-projects
|
82ce8559cd64ce726eeebe5c8f7f5f07228ac44a
|
[
"MIT"
] | null | null | null |
#We’ve already seen a few Python functions such as print and input, but now we’re going to dive into writing our own functions. To get started, we’ll write a function that takes in a number and squares it:
def square(x):
return x * x
#Notice how we use the def keyword to indicate we’re defining a function, that we’re taking in a single input called x and that we use the return keyword to indicate what the function’s output should be.
#We can then “call” this function just as we’ve called other ones: using parentheses:
for i in range(10):
print(f"The square of {i} is {square(i)}")
""" Output:
The square of 0 is 0
The square of 1 is 1
The square of 2 is 4
The square of 3 is 9
The square of 4 is 16
The square of 5 is 25
The square of 6 is 36
The square of 7 is 49
The square of 8 is 64
The square of 9 is 81
"""
| 32.038462
| 205
| 0.728691
|
def square(x):
return x * x
for i in range(10):
print(f"The square of {i} is {square(i)}")
| true
| true
|
f7099e513b3ccd63804c178e2f0d32173e3f1c4e
| 55
|
py
|
Python
|
pre_commit_hooks/loaderon_hooks/tests/testing_samples/check_model_name_samples/error.py
|
alvaroscelza/pre-commit-hooks
|
fc9a7a376dc733a1e3cc00b5ed35936bcb3c3b3b
|
[
"MIT"
] | null | null | null |
pre_commit_hooks/loaderon_hooks/tests/testing_samples/check_model_name_samples/error.py
|
alvaroscelza/pre-commit-hooks
|
fc9a7a376dc733a1e3cc00b5ed35936bcb3c3b3b
|
[
"MIT"
] | null | null | null |
pre_commit_hooks/loaderon_hooks/tests/testing_samples/check_model_name_samples/error.py
|
alvaroscelza/pre-commit-hooks
|
fc9a7a376dc733a1e3cc00b5ed35936bcb3c3b3b
|
[
"MIT"
] | null | null | null |
class SomeClass(object):
_name = "some.model.name"
| 18.333333
| 29
| 0.690909
|
class SomeClass(object):
_name = "some.model.name"
| true
| true
|
f709a042ad90eee2b99eba0e4d74e9980bc62785
| 8,682
|
py
|
Python
|
apps/news/views.py
|
dawang-youy/Django-blog
|
529e7ef16d65170dc56cd628c34c5c9806138eed
|
[
"Apache-2.0"
] | null | null | null |
apps/news/views.py
|
dawang-youy/Django-blog
|
529e7ef16d65170dc56cd628c34c5c9806138eed
|
[
"Apache-2.0"
] | null | null | null |
apps/news/views.py
|
dawang-youy/Django-blog
|
529e7ef16d65170dc56cd628c34c5c9806138eed
|
[
"Apache-2.0"
] | null | null | null |
import logging
import json
from django.shortcuts import render,HttpResponse
from django.http import Http404
from django.views import View
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
from . import models
from . import constants
from utils.json_fun import to_json_data
from utils.res_code import Code,error_map
from myblog import settings
# Create your views here.
# Get the logger
logger = logging.getLogger('django')
# def index(request):
# """
# index page
# :param request:
# :return:
# """
# return render(request,'news/index.html')
# def detail(request):
# return render(request,'news/news_detail.html')
# def search(request):
# return render(request,'news/search.html')
class IndexView(View):
"""
create news view
render tags hot_news
"""
def get(self, request):
"""
create index page view
"""
tags = models.Tag.objects.only('id', 'name').filter(is_delete=False)
hot_news = models.HotNews.objects.select_related('news').only('news__title', 'news__image_url',
'news__id').filter(is_delete=False).order_by(
'priority', '-news__clicks')[0:constants.SHOW_HOTNEWS_COUNT]
context = {
'tags':tags,
'hot_news':hot_news,
'navId' : 0
}
navId = 0
return render(request, 'news/index.html', locals())
#1. Create the class-based view
#2. Validate the parameters
#3. Query the news list from the database
#4. Serialize the data
#5. Return it to the front end
class NewsListView(View):
"""
create news list view
route :/news/
"""
def get(self, request):
print(request)
try:
tag_id = int(request.GET.get('tag_id', 0))
except Exception as e:
logger.error("标签错误:\n{}".format(e))
tag_id = 0
try:
page = int(request.GET.get('page', 1))
except Exception as e:
logger.error("当前页数错误:\n{}".format(e))
page = 1
news_queryset = models.News.objects.select_related('tag', 'author'). \
only('id','title', 'digest', 'image_url', 'update_time', 'tag__name', 'author__username')
# if models.Tag.objects.only('id').filter(is_delete=False, id=tag_id).exists():
# news = news_queryset.filter(is_delete=False, tag_id=tag_id)
# else:
# news = news_queryset.filter(is_delete=False)
news = news_queryset.filter(is_delete=False, tag_id=tag_id) or \
news_queryset.filter(is_delete=False)
paginator = Paginator(news, constants.PER_PAGE_NEWS_COUNT)
try:
news_info = paginator.page(page)
except EmptyPage:
            # If the requested page exceeds the actual page count, return the last page
            logging.info("Requested page exceeds the total page count.")
news_info = paginator.page(paginator.num_pages)
        # 4. Serialize the output
news_info_list = []
for n in news_info:
news_info_list.append({
'id': n.id,
'title': n.title,
'digest': n.digest,
'image_url': n.image_url,
'tag_name': n.tag.name,
'author': n.author.username,
'update_time': n.update_time.strftime('%Y年%m月%d日 %H:%M'),
})
        # 5. Build the data returned to the front end
data = {
'total_pages': paginator.num_pages,
'news': news_info_list
}
# print(data)
return to_json_data(data=data)
class NewsBanner(View):
"""
create news banner model
router:/news/banners/
"""
def get(self, request):
banners = models.Banner.objects.select_related('news').only('image_url', 'news__id', 'news__title').\
filter(is_delete=False)[0:constants.SHOW_BANNER_COUNT]
        # Serialize the output
banners_info_list = []
for b in banners:
banners_info_list.append({
'image_url': b.image_url,
'news_id': b.news.id,
'news_title': b.news.title,
})
        # Build the data returned to the front end
data = {
'banners': banners_info_list
}
return to_json_data(data=data)
class NewsDetailView(View):
"""
create news detail view
router:/news/<int:news_id>/
"""
    # /* Styles for the article content */
    # Add the following to the templates/news1/news_detail.html file:
# .news-content p {
# font-size: 16px;
# line-height: 26px;
# text-align: justify;
# word-wrap: break-word;
# padding: 3px 0
# }
def get(self, request, news_id):
news = models.News.objects.select_related('tag', 'author'). \
only('title', 'content', 'update_time', 'tag__name', 'author__username').\
filter(is_delete=False, id=news_id).first()
if news:
comments = models.Comments.objects.select_related('author', 'parents').\
only('content', 'author__username', 'update_time',
'parents__author__username', 'parents__content', 'parents__update_time').\
filter(is_delete=False, news_id=news_id)
            # Serialize the output
comments_list = []
            # Iterating the queryset triggers the actual database query
for comm in comments:
comments_list.append(comm.to_dict_data())
comments_count = len(comments_list)
return render(request, 'news/news_detail.html', locals())
else:
raise Http404("<新闻{}>不存在😢".format(news_id))
# return Http404('<h1>Page not found</h1>')
#return HttpResponseNotFound('<h1>Page not found</h1>')
class NewsCommentView(View):
"""
create newscomments detail view
router:news/<int:news_id>/comments/
"""
# print('2222')
def post(self, request, news_id):
# print('111111', request)
if not request.user.is_authenticated:
return to_json_data(errno=Code.SESSIONERR, errmsg=error_map[Code.SESSIONERR])
if not models.News.objects.only('id').filter(is_delete=False, id=news_id).exists():
return to_json_data(errno=Code.PARAMERR, errmsg="新闻不存在!")
        # Get the parameters from the front end
try:
json_data = request.body
# print('111111',json_data)
if not json_data:
return to_json_data(errno=Code.PARAMERR, errmsg="参数为空,请重新输入!")
# 将json转化为dict
dict_data = json.loads(json_data.decode('utf8'))
except Exception as e:
            logger.info('Error info:\n{}'.format(e))
return to_json_data(errno=Code.UNKOWNERR,errmsg=error_map[Code.UNKOWNERR])
content = dict_data.get('content')
if not content:
return to_json_data(errno=Code.PARAMERR, errmsg="评论内容不能为空!")
parents_id = dict_data.get('parents_id')
try:
if parents_id:
parent_id = int(parents_id)
if not models.Comments.objects.only('id'). \
filter(is_delete=False, id=parents_id, news_id=news_id).exists():
return to_json_data(errno=Code.PARAMERR, errmsg=error_map[Code.PARAMERR])
except Exception as e:
logging.info("前端传过来的parents_id异常:\n{}".format(e))
return to_json_data(errno=Code.PARAMERR, errmsg="未知异常")
        # Save to the database
new_content = models.Comments()
new_content.content = content
new_content.news_id = news_id
new_content.author = request.user
new_content.parents_id = parents_id if parents_id else None
new_content.save()
return to_json_data(data=new_content.to_dict_data())
from haystack.views import SearchView as _SearchView
class SearchView(_SearchView):
    # Template file
    template = 'news/search.html'
    # Override the response: if the query parameter q is empty, return hot news from the News model; otherwise search with q
def create_response(self):
kw = self.request.GET.get('q', '')
if not kw:
show_all = True
hot_news = models.HotNews.objects.select_related('news'). \
only('news__title', 'news__image_url', 'news__id'). \
filter(is_delete=False).order_by('priority', '-news__clicks')
paginator = Paginator(hot_news, settings.HAYSTACK_SEARCH_RESULTS_PER_PAGE)
try:
page = paginator.page(int(self.request.GET.get('page', 1)))
except PageNotAnInteger:
                # If the page parameter is not an integer, return the first page
page = paginator.page(1)
except EmptyPage:
                # If the requested page exceeds the actual page count, return the last page
page = paginator.page(paginator.num_pages)
navId = 3
return render(self.request, self.template, locals())
else:
show_all = False
qs = super(SearchView, self).create_response()
return qs
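# --- routing sketch (added; the urls.py layout is an assumption) ---
# The docstrings above name the route each view serves; a matching urls.py
# would look roughly like:
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.IndexView.as_view()),
#         path('news/', views.NewsListView.as_view()),
#         path('news/banners/', views.NewsBanner.as_view()),
#         path('news/<int:news_id>/', views.NewsDetailView.as_view()),
#         path('news/<int:news_id>/comments/', views.NewsCommentView.as_view()),
#     ]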
| 35.008065
| 115
| 0.585695
|
import logging
import json
from django.shortcuts import render,HttpResponse
from django.http import Http404
from django.views import View
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
from . import models
from . import constants
from utils.json_fun import to_json_data
from utils.res_code import Code,error_map
from myblog import settings
logger = logging.getLogger('django')
class IndexView(View):
def get(self, request):
tags = models.Tag.objects.only('id', 'name').filter(is_delete=False)
hot_news = models.HotNews.objects.select_related('news').only('news__title', 'news__image_url',
'news__id').filter(is_delete=False).order_by(
'priority', '-news__clicks')[0:constants.SHOW_HOTNEWS_COUNT]
context = {
'tags':tags,
'hot_news':hot_news,
'navId' : 0
}
navId = 0
return render(request, 'news/index.html', locals())
class NewsListView(View):
def get(self, request):
print(request)
try:
tag_id = int(request.GET.get('tag_id', 0))
except Exception as e:
logger.error("标签错误:\n{}".format(e))
tag_id = 0
try:
page = int(request.GET.get('page', 1))
except Exception as e:
logger.error("当前页数错误:\n{}".format(e))
page = 1
news_queryset = models.News.objects.select_related('tag', 'author'). \
only('id','title', 'digest', 'image_url', 'update_time', 'tag__name', 'author__username')
news = news_queryset.filter(is_delete=False, tag_id=tag_id) or \
news_queryset.filter(is_delete=False)
paginator = Paginator(news, constants.PER_PAGE_NEWS_COUNT)
try:
news_info = paginator.page(page)
except EmptyPage:
logging.info("用户访问的页数大于总页数。")
news_info = paginator.page(paginator.num_pages)
news_info_list = []
for n in news_info:
news_info_list.append({
'id': n.id,
'title': n.title,
'digest': n.digest,
'image_url': n.image_url,
'tag_name': n.tag.name,
'author': n.author.username,
'update_time': n.update_time.strftime('%Y年%m月%d日 %H:%M'),
})
data = {
'total_pages': paginator.num_pages,
'news': news_info_list
}
return to_json_data(data=data)
class NewsBanner(View):
def get(self, request):
banners = models.Banner.objects.select_related('news').only('image_url', 'news__id', 'news__title').\
filter(is_delete=False)[0:constants.SHOW_BANNER_COUNT]
banners_info_list = []
for b in banners:
banners_info_list.append({
'image_url': b.image_url,
'news_id': b.news.id,
'news_title': b.news.title,
})
data = {
'banners': banners_info_list
}
return to_json_data(data=data)
class NewsDetailView(View):
def get(self, request, news_id):
news = models.News.objects.select_related('tag', 'author'). \
only('title', 'content', 'update_time', 'tag__name', 'author__username').\
filter(is_delete=False, id=news_id).first()
if news:
comments = models.Comments.objects.select_related('author', 'parents').\
only('content', 'author__username', 'update_time',
'parents__author__username', 'parents__content', 'parents__update_time').\
filter(is_delete=False, news_id=news_id)
comments_list = []
for comm in comments:
comments_list.append(comm.to_dict_data())
comments_count = len(comments_list)
return render(request, 'news/news_detail.html', locals())
else:
raise Http404("<新闻{}>不存在😢".format(news_id))
class NewsCommentView(View):
def post(self, request, news_id):
if not request.user.is_authenticated:
return to_json_data(errno=Code.SESSIONERR, errmsg=error_map[Code.SESSIONERR])
if not models.News.objects.only('id').filter(is_delete=False, id=news_id).exists():
return to_json_data(errno=Code.PARAMERR, errmsg="新闻不存在!")
try:
json_data = request.body
if not json_data:
return to_json_data(errno=Code.PARAMERR, errmsg="参数为空,请重新输入!")
dict_data = json.loads(json_data.decode('utf8'))
except Exception as e:
            logger.info('Error info:\n{}'.format(e))
return to_json_data(errno=Code.UNKOWNERR,errmsg=error_map[Code.UNKOWNERR])
content = dict_data.get('content')
if not content:
return to_json_data(errno=Code.PARAMERR, errmsg="评论内容不能为空!")
parents_id = dict_data.get('parents_id')
try:
if parents_id:
parent_id = int(parents_id)
if not models.Comments.objects.only('id'). \
filter(is_delete=False, id=parents_id, news_id=news_id).exists():
return to_json_data(errno=Code.PARAMERR, errmsg=error_map[Code.PARAMERR])
except Exception as e:
logging.info("前端传过来的parents_id异常:\n{}".format(e))
return to_json_data(errno=Code.PARAMERR, errmsg="未知异常")
new_content = models.Comments()
new_content.content = content
new_content.news_id = news_id
new_content.author = request.user
new_content.parents_id = parents_id if parents_id else None
new_content.save()
return to_json_data(data=new_content.to_dict_data())
from haystack.views import SearchView as _SearchView
class SearchView(_SearchView):
template = 'news/search.html'
def create_response(self):
kw = self.request.GET.get('q', '')
if not kw:
show_all = True
hot_news = models.HotNews.objects.select_related('news'). \
only('news__title', 'news__image_url', 'news__id'). \
filter(is_delete=False).order_by('priority', '-news__clicks')
paginator = Paginator(hot_news, settings.HAYSTACK_SEARCH_RESULTS_PER_PAGE)
try:
page = paginator.page(int(self.request.GET.get('page', 1)))
except PageNotAnInteger:
page = paginator.page(1)
except EmptyPage:
page = paginator.page(paginator.num_pages)
navId = 3
return render(self.request, self.template, locals())
else:
show_all = False
qs = super(SearchView, self).create_response()
return qs
| true
| true
|
f709a16aaacdecfd6b39728f922d2694addb5ca8
| 3,463
|
py
|
Python
|
vsts/vsts/feature_management/v4_0/models/contributed_feature.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
vsts/vsts/feature_management/v4_0/models/contributed_feature.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
vsts/vsts/feature_management/v4_0/models/contributed_feature.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ContributedFeature(Model):
"""ContributedFeature.
:param _links: Named links describing the feature
:type _links: :class:`ReferenceLinks <feature-management.v4_0.models.ReferenceLinks>`
:param default_state: If true, the feature is enabled unless overridden at some scope
:type default_state: bool
:param default_value_rules: Rules for setting the default value if not specified by any setting/scope. Evaluated in order until a rule returns an Enabled or Disabled state (not Undefined)
:type default_value_rules: list of :class:`ContributedFeatureValueRule <feature-management.v4_0.models.ContributedFeatureValueRule>`
:param description: The description of the feature
:type description: str
:param id: The full contribution id of the feature
:type id: str
:param name: The friendly name of the feature
:type name: str
:param override_rules: Rules for overriding a feature value. These rules are run before explicit user/host state values are checked. They are evaluated in order until a rule returns an Enabled or Disabled state (not Undefined)
:type override_rules: list of :class:`ContributedFeatureValueRule <feature-management.v4_0.models.ContributedFeatureValueRule>`
:param scopes: The scopes/levels at which settings can set the enabled/disabled state of this feature
:type scopes: list of :class:`ContributedFeatureSettingScope <feature-management.v4_0.models.ContributedFeatureSettingScope>`
:param service_instance_type: The service instance id of the service that owns this feature
:type service_instance_type: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'default_state': {'key': 'defaultState', 'type': 'bool'},
'default_value_rules': {'key': 'defaultValueRules', 'type': '[ContributedFeatureValueRule]'},
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'override_rules': {'key': 'overrideRules', 'type': '[ContributedFeatureValueRule]'},
'scopes': {'key': 'scopes', 'type': '[ContributedFeatureSettingScope]'},
'service_instance_type': {'key': 'serviceInstanceType', 'type': 'str'}
}
def __init__(self, _links=None, default_state=None, default_value_rules=None, description=None, id=None, name=None, override_rules=None, scopes=None, service_instance_type=None):
super(ContributedFeature, self).__init__()
self._links = _links
self.default_state = default_state
self.default_value_rules = default_value_rules
self.description = description
self.id = id
self.name = name
self.override_rules = override_rules
self.scopes = scopes
self.service_instance_type = service_instance_type
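# --- usage sketch (added; the field values below are illustrative only) ---
feature = ContributedFeature(
    id='ms.vss-example.my-feature',  # hypothetical contribution id
    name='My feature',
    description='Toggles a hypothetical UI element',
    default_state=False)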
| 59.706897
| 230
| 0.663298
|
from msrest.serialization import Model
class ContributedFeature(Model):
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'default_state': {'key': 'defaultState', 'type': 'bool'},
'default_value_rules': {'key': 'defaultValueRules', 'type': '[ContributedFeatureValueRule]'},
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'override_rules': {'key': 'overrideRules', 'type': '[ContributedFeatureValueRule]'},
'scopes': {'key': 'scopes', 'type': '[ContributedFeatureSettingScope]'},
'service_instance_type': {'key': 'serviceInstanceType', 'type': 'str'}
}
def __init__(self, _links=None, default_state=None, default_value_rules=None, description=None, id=None, name=None, override_rules=None, scopes=None, service_instance_type=None):
super(ContributedFeature, self).__init__()
self._links = _links
self.default_state = default_state
self.default_value_rules = default_value_rules
self.description = description
self.id = id
self.name = name
self.override_rules = override_rules
self.scopes = scopes
self.service_instance_type = service_instance_type
| true
| true
|
f709a1ceea0ffc10aa2fbd792d7b6518d912934a
| 1,709
|
py
|
Python
|
numpy-arrays/code.py
|
patelshival/ga-dsmp
|
c355d28daf50c51b1610930f963dcd17b770e17a
|
[
"MIT"
] | null | null | null |
numpy-arrays/code.py
|
patelshival/ga-dsmp
|
c355d28daf50c51b1610930f963dcd17b770e17a
|
[
"MIT"
] | null | null | null |
numpy-arrays/code.py
|
patelshival/ga-dsmp
|
c355d28daf50c51b1610930f963dcd17b770e17a
|
[
"MIT"
] | null | null | null |
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print("\nData: \n\n", data)
print("\nType of data: \n\n", type(data))
census = np.concatenate((data, new_record), axis=0)
print(census)
#Code starts here
# --------------
#Code starts here
age=census[:,0]
print(age)
max_age = np.max(age)
min_age = np.min(age)
age_mean = np.mean(age)
age_std = np.std(age)
print("max of age : ", max_age)
print("min of age : ", min_age)
print("mean of age : ", age_mean)
print("standard deviation of age : ", age_std)
# --------------
#Code starts here
race_0 = census[census[:,2] == 0]
race_1 = census[census[:,2] == 1]
race_2 = census[census[:,2] == 2]
race_3 = census[census[:,2] == 3]
race_4 = census[census[:,2] == 4]
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
minority_race = 3
print(race_0)
# --------------
#Code starts here
senior_citizens = census[census[:, 0] > 60]
working_hours = senior_citizens[:,6]
working_hours_sum = working_hours.sum()
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum / senior_citizens_len
print(avg_working_hours)
# --------------
#Code starts here
high = census[census[:,1] > 10]
low = census[census[:,1] <= 10]
avg_pay_high = high[:, 7].mean()
avg_pay_low = low[:, 7].mean()
if avg_pay_high > avg_pay_low:
print("Better education leads to better pay")
else:
print("Better education does not lead to better pay")
| 20.590361
| 61
| 0.626682
|
import numpy as np
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print("\nData: \n\n", data)
print("\nType of data: \n\n", type(data))
census = np.concatenate((data, new_record), axis=0)
print(census)
age=census[:,0]
print(age)
max_age = np.max(age)
min_age = np.min(age)
age_mean = np.mean(age)
age_std = np.std(age)
print("max of age : ", max_age)
print("min of age : ", min_age)
print("mean of age : ", age_mean)
print("standard deviation of age : ", age_std)
race_0 = census[census[:,2] == 0]
race_1 = census[census[:,2] == 1]
race_2 = census[census[:,2] == 2]
race_3 = census[census[:,2] == 3]
race_4 = census[census[:,2] == 4]
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
minority_race = 3
print(race_0)
senior_citizens = census[census[:, 0] > 60]
working_hours = senior_citizens[:,6]
working_hours_sum = working_hours.sum()
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum / senior_citizens_len
print(avg_working_hours)
high = census[census[:,1] > 10]
low = census[census[:,1] <= 10]
avg_pay_high = high[:, 7].mean()
avg_pay_low = low[:, 7].mean()
if avg_pay_high > avg_pay_low:
print("Better education leads to better pay")
else:
print("Better education does not lead to better pay")
| true
| true
|
f709a1ff552f62232b89d6d9363ed677443a46fd
| 3,345
|
py
|
Python
|
RecoTracker/TrackProducer/test/refitFromMINIAOD.py
|
malbouis/cmssw
|
16173a30d3f0c9ecc5419c474bb4d272c58b65c8
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
RecoTracker/TrackProducer/test/refitFromMINIAOD.py
|
malbouis/cmssw
|
16173a30d3f0c9ecc5419c474bb4d272c58b65c8
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
RecoTracker/TrackProducer/test/refitFromMINIAOD.py
|
malbouis/cmssw
|
16173a30d3f0c9ecc5419c474bb4d272c58b65c8
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
from Configuration.Eras.Era_Run2_2016_cff import Run2_2016
process = cms.Process('RECO2',Run2_2016)
options = VarParsing.VarParsing('analysis')
options.register('globalTag',
"auto:run2_mc", # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"input file name")
options.parseArguments()
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(options.inputFiles),
secondaryFileNames = cms.untracked.vstring()
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(options.maxEvents)
)
process.options = cms.untracked.PSet()
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step1 nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.RECOSIMoutput = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string(''),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('step1_RECO.root'),
outputCommands = process.RECOSIMEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
process.RECOSIMoutput.outputCommands = cms.untracked.vstring("keep *_myRefittedTracks_*_*")
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, options.globalTag, '')
import RecoTracker.TrackProducer.trackProducerFromPatMuons_cfi
process.tracksFromMuons = RecoTracker.TrackProducer.trackProducerFromPatMuons_cfi.trackProducerFromPatMuons.clone(
src = "slimmedMuons",
innerTrackOnly = True
)
import RecoTracker.TrackProducer.TrackRefitter_cfi
process.myRefittedTracks = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone(
src = 'tracksFromMuons',
NavigationSchool = '',
Fitter = 'FlexibleKFFittingSmoother'
)
# Path and EndPath definitions
process.reconstruction_step = cms.Path(process.tracksFromMuons*process.myRefittedTracks)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RECOSIMoutput_step = cms.EndPath(process.RECOSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.reconstruction_step,process.endjob_step,process.RECOSIMoutput_step)
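# Usage sketch (added; the input file name is a placeholder): VarParsing picks
# up key=value pairs from the cmsRun command line, e.g.
#
#   cmsRun refitFromMINIAOD.py inputFiles=file:step0_MINIAOD.root maxEvents=10 globalTag=auto:run2_mc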
| 39.821429
| 115
| 0.75994
|
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
from Configuration.Eras.Era_Run2_2016_cff import Run2_2016
process = cms.Process('RECO2',Run2_2016)
options = VarParsing.VarParsing('analysis')
options.register('globalTag',
"auto:run2_mc",
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"input file name")
options.parseArguments()
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(options.inputFiles),
secondaryFileNames = cms.untracked.vstring()
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(options.maxEvents)
)
process.options = cms.untracked.PSet()
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step1 nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
process.RECOSIMoutput = cms.OutputModule("PoolOutputModule",
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string(''),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('step1_RECO.root'),
outputCommands = process.RECOSIMEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
process.RECOSIMoutput.outputCommands = cms.untracked.vstring("keep *_myRefittedTracks_*_*")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, options.globalTag, '')
import RecoTracker.TrackProducer.trackProducerFromPatMuons_cfi
process.tracksFromMuons = RecoTracker.TrackProducer.trackProducerFromPatMuons_cfi.trackProducerFromPatMuons.clone(
src = "slimmedMuons",
innerTrackOnly = True
)
import RecoTracker.TrackProducer.TrackRefitter_cfi
process.myRefittedTracks = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone(
src = 'tracksFromMuons',
NavigationSchool = '',
Fitter = 'FlexibleKFFittingSmoother'
)
process.reconstruction_step = cms.Path(process.tracksFromMuons*process.myRefittedTracks)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RECOSIMoutput_step = cms.EndPath(process.RECOSIMoutput)
process.schedule = cms.Schedule(process.reconstruction_step,process.endjob_step,process.RECOSIMoutput_step)
| true
| true
|
f709a34963612148e07d6218f601266cbad70915
| 1,709
|
py
|
Python
|
domains/explore/problems/training/problem346_EE.py
|
patras91/rae_release
|
0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30
|
[
"BSD-3-Clause"
] | 1
|
2021-09-28T12:56:56.000Z
|
2021-09-28T12:56:56.000Z
|
domains/explore/problems/training/problem346_EE.py
|
patras91/rae_release
|
0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30
|
[
"BSD-3-Clause"
] | null | null | null |
domains/explore/problems/training/problem346_EE.py
|
patras91/rae_release
|
0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30
|
[
"BSD-3-Clause"
] | 1
|
2022-03-31T16:30:39.000Z
|
2022-03-31T16:30:39.000Z
|
__author__ = 'patras'
from domain_exploreEnv import *
from timer import DURATION
from state import state, rv
DURATION.TIME = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
DURATION.COUNTER = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
rv.TYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.EQUIPMENT = {'survey': 'e1', 'monitor': 'e2', 'screen': 'e3', 'sample': 'e4', 'process': 'e5'}
rv.EQUIPMENTTYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.LOCATIONS = ['base', 'z1', 'z2', 'z3', 'z4']
rv.EDGES = {'base': {'z1': 20, 'z2': 50, 'z3': 20, 'z4': 50}, 'z1': {'base': 20, 'z2': 30, 'z4': 50}, 'z2': {'base': 50, 'z1': 30, 'z3': 30}, 'z3': {'base': 20, 'z2': 30, 'z4': 30}, 'z4': {'base': 50, 'z3': 30, 'z1': 50}}
def ResetState():
state.loc = {'r1': 'base', 'r2': 'base', 'UAV': 'base'}
state.charge = { 'UAV': 80, 'r1': 50, 'r2': 50}
state.data = { 'UAV': 1, 'r1': 3, 'r2': 3}
state.pos = {'c1': 'base', 'e1': 'base', 'e2': 'base', 'e3': 'base', 'e4': 'base', 'e5': 'base', 'o1': 'UAV'}
state.load = {'r1': NIL, 'r2': NIL, 'UAV': 'o1'}
state.storm = {'active': False}
tasks = {
5: [['doActivities', 'UAV', [['survey', 'z2'], ['survey', 'z4'], ['survey', 'base']]]],
}
eventsEnv = {
}
| 29.465517
| 221
| 0.486834
|
__author__ = 'patras'
from domain_exploreEnv import *
from timer import DURATION
from state import state, rv
DURATION.TIME = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
DURATION.COUNTER = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
rv.TYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.EQUIPMENT = {'survey': 'e1', 'monitor': 'e2', 'screen': 'e3', 'sample': 'e4', 'process': 'e5'}
rv.EQUIPMENTTYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.LOCATIONS = ['base', 'z1', 'z2', 'z3', 'z4']
rv.EDGES = {'base': {'z1': 20, 'z2': 50, 'z3': 20, 'z4': 50}, 'z1': {'base': 20, 'z2': 30, 'z4': 50}, 'z2': {'base': 50, 'z1': 30, 'z3': 30}, 'z3': {'base': 20, 'z2': 30, 'z4': 30}, 'z4': {'base': 50, 'z3': 30, 'z1': 50}}
def ResetState():
state.loc = {'r1': 'base', 'r2': 'base', 'UAV': 'base'}
state.charge = { 'UAV': 80, 'r1': 50, 'r2': 50}
state.data = { 'UAV': 1, 'r1': 3, 'r2': 3}
state.pos = {'c1': 'base', 'e1': 'base', 'e2': 'base', 'e3': 'base', 'e4': 'base', 'e5': 'base', 'o1': 'UAV'}
state.load = {'r1': NIL, 'r2': NIL, 'UAV': 'o1'}
state.storm = {'active': False}
tasks = {
5: [['doActivities', 'UAV', [['survey', 'z2'], ['survey', 'z4'], ['survey', 'base']]]],
}
eventsEnv = {
}
| true
| true
|
f709a3cce919ff24eb1d2474804b1ef4b4319607
| 2,597
|
py
|
Python
|
Lib/importlib/test/benchmark.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 1
|
2020-11-26T18:53:46.000Z
|
2020-11-26T18:53:46.000Z
|
Lib/importlib/test/benchmark.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | null | null | null |
Lib/importlib/test/benchmark.py
|
ystk/debian-python3.1
|
6241444a6994140621d1b143a2d6b311b184366a
|
[
"PSF-2.0"
] | 2
|
2018-08-06T04:37:38.000Z
|
2022-02-27T18:07:12.000Z
|
from . import util
from .source import util as source_util
import gc
import decimal
import imp
import importlib
import sys
import timeit
def bench_cache(import_, repeat, number):
"""Measure the time it takes to pull from sys.modules."""
name = '<benchmark import>'
with util.uncache(name):
module = imp.new_module(name)
sys.modules[name] = module
runs = []
for x in range(repeat):
start_time = timeit.default_timer()
for y in range(number):
import_(name)
end_time = timeit.default_timer()
runs.append(end_time - start_time)
return min(runs)
def bench_importing_source(import_, repeat, number, loc=100000):
"""Measure importing source from disk.
For worst-case scenario, the line endings are \\r\\n and thus require
universal newline translation.
"""
name = '__benchmark'
with source_util.create_modules(name) as mapping:
with open(mapping[name], 'w') as file:
for x in range(loc):
file.write("{0}\r\n".format(x))
with util.import_state(path=[mapping['.root']]):
runs = []
for x in range(repeat):
start_time = timeit.default_timer()
for y in range(number):
try:
import_(name)
finally:
del sys.modules[name]
end_time = timeit.default_timer()
runs.append(end_time - start_time)
return min(runs)
def main(import_):
args = [('sys.modules', bench_cache, 5, 500000),
('source', bench_importing_source, 5, 10000)]
test_msg = "{test}, {number} times (best of {repeat}):"
result_msg = "{result:.2f} secs"
gc.disable()
try:
for name, meth, repeat, number in args:
result = meth(import_, repeat, number)
print(test_msg.format(test=name, repeat=repeat,
number=number).ljust(40),
result_msg.format(result=result).rjust(10))
finally:
gc.enable()
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser()
parser.add_option('-b', '--builtin', dest='builtin', action='store_true',
default=False, help="use the built-in __import__")
options, args = parser.parse_args()
if args:
raise RuntimeError("unrecognized args: {0}".format(args))
import_ = __import__
if not options.builtin:
import_ = importlib.__import__
main(import_)
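# Usage sketch (added): run the benchmark as a module; -b switches from
# importlib.__import__ to the built-in __import__:
#
#   python -m importlib.test.benchmark
#   python -m importlib.test.benchmark -b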
| 31.289157
| 77
| 0.58298
|
from . import util
from .source import util as source_util
import gc
import decimal
import imp
import importlib
import sys
import timeit
def bench_cache(import_, repeat, number):
name = '<benchmark import>'
with util.uncache(name):
module = imp.new_module(name)
sys.modules[name] = module
runs = []
for x in range(repeat):
start_time = timeit.default_timer()
for y in range(number):
import_(name)
end_time = timeit.default_timer()
runs.append(end_time - start_time)
return min(runs)
def bench_importing_source(import_, repeat, number, loc=100000):
name = '__benchmark'
with source_util.create_modules(name) as mapping:
with open(mapping[name], 'w') as file:
for x in range(loc):
file.write("{0}\r\n".format(x))
with util.import_state(path=[mapping['.root']]):
runs = []
for x in range(repeat):
start_time = timeit.default_timer()
for y in range(number):
try:
import_(name)
finally:
del sys.modules[name]
end_time = timeit.default_timer()
runs.append(end_time - start_time)
return min(runs)
def main(import_):
args = [('sys.modules', bench_cache, 5, 500000),
('source', bench_importing_source, 5, 10000)]
test_msg = "{test}, {number} times (best of {repeat}):"
result_msg = "{result:.2f} secs"
gc.disable()
try:
for name, meth, repeat, number in args:
result = meth(import_, repeat, number)
print(test_msg.format(test=name, repeat=repeat,
number=number).ljust(40),
result_msg.format(result=result).rjust(10))
finally:
gc.enable()
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser()
parser.add_option('-b', '--builtin', dest='builtin', action='store_true',
default=False, help="use the built-in __import__")
options, args = parser.parse_args()
if args:
raise RuntimeError("unrecognized args: {0}".format(args))
import_ = __import__
if not options.builtin:
import_ = importlib.__import__
main(import_)
| true
| true
|
f709a461b1f54fd83eb4c7764434dd937ee90766
| 13,269
|
py
|
Python
|
Server/ChatBot/venv/Lib/site-packages/tensorflow/core/framework/tensor_pb2.py
|
sozuer53/BBC
|
31bb128cb1e1a19db955fd673d67cf0e92bac3a4
|
[
"Apache-2.0"
] | 3
|
2018-11-27T06:30:23.000Z
|
2021-05-30T15:56:32.000Z
|
Server/ChatBot/venv/Lib/site-packages/tensorflow/core/framework/tensor_pb2.py
|
sozuer53/BBC
|
31bb128cb1e1a19db955fd673d67cf0e92bac3a4
|
[
"Apache-2.0"
] | 3
|
2020-09-26T01:09:47.000Z
|
2022-02-10T02:12:08.000Z
|
Server/ChatBot/venv/Lib/site-packages/tensorflow/core/framework/tensor_pb2.py
|
sozuer53/BBC
|
31bb128cb1e1a19db955fd673d67cf0e92bac3a4
|
[
"Apache-2.0"
] | 6
|
2020-04-13T15:33:30.000Z
|
2020-06-21T19:26:55.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/tensor.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import resource_handle_pb2 as tensorflow_dot_core_dot_framework_dot_resource__handle__pb2
from tensorflow.core.framework import tensor_shape_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2
from tensorflow.core.framework import types_pb2 as tensorflow_dot_core_dot_framework_dot_types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/tensor.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n&tensorflow/core/framework/tensor.proto\x12\ntensorflow\x1a/tensorflow/core/framework/resource_handle.proto\x1a,tensorflow/core/framework/tensor_shape.proto\x1a%tensorflow/core/framework/types.proto\"\xdc\x03\n\x0bTensorProto\x12#\n\x05\x64type\x18\x01 \x01(\x0e\x32\x14.tensorflow.DataType\x12\x32\n\x0ctensor_shape\x18\x02 \x01(\x0b\x32\x1c.tensorflow.TensorShapeProto\x12\x16\n\x0eversion_number\x18\x03 \x01(\x05\x12\x16\n\x0etensor_content\x18\x04 \x01(\x0c\x12\x14\n\x08half_val\x18\r \x03(\x05\x42\x02\x10\x01\x12\x15\n\tfloat_val\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x16\n\ndouble_val\x18\x06 \x03(\x01\x42\x02\x10\x01\x12\x13\n\x07int_val\x18\x07 \x03(\x05\x42\x02\x10\x01\x12\x12\n\nstring_val\x18\x08 \x03(\x0c\x12\x18\n\x0cscomplex_val\x18\t \x03(\x02\x42\x02\x10\x01\x12\x15\n\tint64_val\x18\n \x03(\x03\x42\x02\x10\x01\x12\x14\n\x08\x62ool_val\x18\x0b \x03(\x08\x42\x02\x10\x01\x12\x18\n\x0c\x64\x63omplex_val\x18\x0c \x03(\x01\x42\x02\x10\x01\x12<\n\x13resource_handle_val\x18\x0e \x03(\x0b\x32\x1f.tensorflow.ResourceHandleProto\x12\x37\n\x0bvariant_val\x18\x0f \x03(\x0b\x32\".tensorflow.VariantTensorDataProto\"g\n\x16VariantTensorDataProto\x12\x11\n\ttype_name\x18\x01 \x01(\t\x12\x10\n\x08metadata\x18\x02 \x01(\x0c\x12(\n\x07tensors\x18\x03 \x03(\x0b\x32\x17.tensorflow.TensorProtoB-\n\x18org.tensorflow.frameworkB\x0cTensorProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_resource__handle__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_types__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_TENSORPROTO = _descriptor.Descriptor(
name='TensorProto',
full_name='tensorflow.TensorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dtype', full_name='tensorflow.TensorProto.dtype', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensor_shape', full_name='tensorflow.TensorProto.tensor_shape', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version_number', full_name='tensorflow.TensorProto.version_number', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensor_content', full_name='tensorflow.TensorProto.tensor_content', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='half_val', full_name='tensorflow.TensorProto.half_val', index=4,
number=13, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='float_val', full_name='tensorflow.TensorProto.float_val', index=5,
number=5, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='double_val', full_name='tensorflow.TensorProto.double_val', index=6,
number=6, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='int_val', full_name='tensorflow.TensorProto.int_val', index=7,
number=7, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='string_val', full_name='tensorflow.TensorProto.string_val', index=8,
number=8, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scomplex_val', full_name='tensorflow.TensorProto.scomplex_val', index=9,
number=9, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='int64_val', full_name='tensorflow.TensorProto.int64_val', index=10,
number=10, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='bool_val', full_name='tensorflow.TensorProto.bool_val', index=11,
number=11, type=8, cpp_type=7, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='dcomplex_val', full_name='tensorflow.TensorProto.dcomplex_val', index=12,
number=12, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='resource_handle_val', full_name='tensorflow.TensorProto.resource_handle_val', index=13,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='variant_val', full_name='tensorflow.TensorProto.variant_val', index=14,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=189,
serialized_end=665,
)
_VARIANTTENSORDATAPROTO = _descriptor.Descriptor(
name='VariantTensorDataProto',
full_name='tensorflow.VariantTensorDataProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type_name', full_name='tensorflow.VariantTensorDataProto.type_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='metadata', full_name='tensorflow.VariantTensorDataProto.metadata', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensors', full_name='tensorflow.VariantTensorDataProto.tensors', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=667,
serialized_end=770,
)
_TENSORPROTO.fields_by_name['dtype'].enum_type = tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
_TENSORPROTO.fields_by_name['tensor_shape'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_TENSORPROTO.fields_by_name['resource_handle_val'].message_type = tensorflow_dot_core_dot_framework_dot_resource__handle__pb2._RESOURCEHANDLEPROTO
_TENSORPROTO.fields_by_name['variant_val'].message_type = _VARIANTTENSORDATAPROTO
_VARIANTTENSORDATAPROTO.fields_by_name['tensors'].message_type = _TENSORPROTO
DESCRIPTOR.message_types_by_name['TensorProto'] = _TENSORPROTO
DESCRIPTOR.message_types_by_name['VariantTensorDataProto'] = _VARIANTTENSORDATAPROTO
TensorProto = _reflection.GeneratedProtocolMessageType('TensorProto', (_message.Message,), dict(
DESCRIPTOR = _TENSORPROTO,
__module__ = 'tensorflow.core.framework.tensor_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TensorProto)
))
_sym_db.RegisterMessage(TensorProto)
VariantTensorDataProto = _reflection.GeneratedProtocolMessageType('VariantTensorDataProto', (_message.Message,), dict(
DESCRIPTOR = _VARIANTTENSORDATAPROTO,
__module__ = 'tensorflow.core.framework.tensor_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.VariantTensorDataProto)
))
_sym_db.RegisterMessage(VariantTensorDataProto)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\014TensorProtosP\001\370\001\001'))
_TENSORPROTO.fields_by_name['half_val'].has_options = True
_TENSORPROTO.fields_by_name['half_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['float_val'].has_options = True
_TENSORPROTO.fields_by_name['float_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['double_val'].has_options = True
_TENSORPROTO.fields_by_name['double_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['int_val'].has_options = True
_TENSORPROTO.fields_by_name['int_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['scomplex_val'].has_options = True
_TENSORPROTO.fields_by_name['scomplex_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['int64_val'].has_options = True
_TENSORPROTO.fields_by_name['int64_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['bool_val'].has_options = True
_TENSORPROTO.fields_by_name['bool_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['dcomplex_val'].has_options = True
_TENSORPROTO.fields_by_name['dcomplex_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
# @@protoc_insertion_point(module_scope)
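A minimal usage sketch (not part of the generated module; it assumes the file is importable as tensorflow.core.framework.tensor_pb2 and that DataType DT_FLOAT has enum value 1) showing how the generated classes round-trip through the protobuf wire format:

from tensorflow.core.framework import tensor_pb2

proto = tensor_pb2.TensorProto()
proto.dtype = 1                       # DT_FLOAT (assumed enum value, see above)
proto.tensor_shape.dim.add(size=2)    # declare a 1-D shape: [2]
proto.float_val.extend([1.5, -2.0])   # packed repeated float payload
blob = proto.SerializeToString()      # serialize to wire-format bytes
roundtrip = tensor_pb2.TensorProto.FromString(blob)
assert list(roundtrip.float_val) == [1.5, -2.0]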
| 53.504032
| 1,407
| 0.768332
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import resource_handle_pb2 as tensorflow_dot_core_dot_framework_dot_resource__handle__pb2
from tensorflow.core.framework import tensor_shape_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2
from tensorflow.core.framework import types_pb2 as tensorflow_dot_core_dot_framework_dot_types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/tensor.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n&tensorflow/core/framework/tensor.proto\x12\ntensorflow\x1a/tensorflow/core/framework/resource_handle.proto\x1a,tensorflow/core/framework/tensor_shape.proto\x1a%tensorflow/core/framework/types.proto\"\xdc\x03\n\x0bTensorProto\x12#\n\x05\x64type\x18\x01 \x01(\x0e\x32\x14.tensorflow.DataType\x12\x32\n\x0ctensor_shape\x18\x02 \x01(\x0b\x32\x1c.tensorflow.TensorShapeProto\x12\x16\n\x0eversion_number\x18\x03 \x01(\x05\x12\x16\n\x0etensor_content\x18\x04 \x01(\x0c\x12\x14\n\x08half_val\x18\r \x03(\x05\x42\x02\x10\x01\x12\x15\n\tfloat_val\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x16\n\ndouble_val\x18\x06 \x03(\x01\x42\x02\x10\x01\x12\x13\n\x07int_val\x18\x07 \x03(\x05\x42\x02\x10\x01\x12\x12\n\nstring_val\x18\x08 \x03(\x0c\x12\x18\n\x0cscomplex_val\x18\t \x03(\x02\x42\x02\x10\x01\x12\x15\n\tint64_val\x18\n \x03(\x03\x42\x02\x10\x01\x12\x14\n\x08\x62ool_val\x18\x0b \x03(\x08\x42\x02\x10\x01\x12\x18\n\x0c\x64\x63omplex_val\x18\x0c \x03(\x01\x42\x02\x10\x01\x12<\n\x13resource_handle_val\x18\x0e \x03(\x0b\x32\x1f.tensorflow.ResourceHandleProto\x12\x37\n\x0bvariant_val\x18\x0f \x03(\x0b\x32\".tensorflow.VariantTensorDataProto\"g\n\x16VariantTensorDataProto\x12\x11\n\ttype_name\x18\x01 \x01(\t\x12\x10\n\x08metadata\x18\x02 \x01(\x0c\x12(\n\x07tensors\x18\x03 \x03(\x0b\x32\x17.tensorflow.TensorProtoB-\n\x18org.tensorflow.frameworkB\x0cTensorProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_resource__handle__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_types__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_TENSORPROTO = _descriptor.Descriptor(
name='TensorProto',
full_name='tensorflow.TensorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dtype', full_name='tensorflow.TensorProto.dtype', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensor_shape', full_name='tensorflow.TensorProto.tensor_shape', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version_number', full_name='tensorflow.TensorProto.version_number', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensor_content', full_name='tensorflow.TensorProto.tensor_content', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='half_val', full_name='tensorflow.TensorProto.half_val', index=4,
number=13, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='float_val', full_name='tensorflow.TensorProto.float_val', index=5,
number=5, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='double_val', full_name='tensorflow.TensorProto.double_val', index=6,
number=6, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='int_val', full_name='tensorflow.TensorProto.int_val', index=7,
number=7, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='string_val', full_name='tensorflow.TensorProto.string_val', index=8,
number=8, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scomplex_val', full_name='tensorflow.TensorProto.scomplex_val', index=9,
number=9, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='int64_val', full_name='tensorflow.TensorProto.int64_val', index=10,
number=10, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='bool_val', full_name='tensorflow.TensorProto.bool_val', index=11,
number=11, type=8, cpp_type=7, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='dcomplex_val', full_name='tensorflow.TensorProto.dcomplex_val', index=12,
number=12, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='resource_handle_val', full_name='tensorflow.TensorProto.resource_handle_val', index=13,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='variant_val', full_name='tensorflow.TensorProto.variant_val', index=14,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=189,
serialized_end=665,
)
_VARIANTTENSORDATAPROTO = _descriptor.Descriptor(
name='VariantTensorDataProto',
full_name='tensorflow.VariantTensorDataProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type_name', full_name='tensorflow.VariantTensorDataProto.type_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='metadata', full_name='tensorflow.VariantTensorDataProto.metadata', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensors', full_name='tensorflow.VariantTensorDataProto.tensors', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=667,
serialized_end=770,
)
_TENSORPROTO.fields_by_name['dtype'].enum_type = tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
_TENSORPROTO.fields_by_name['tensor_shape'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_TENSORPROTO.fields_by_name['resource_handle_val'].message_type = tensorflow_dot_core_dot_framework_dot_resource__handle__pb2._RESOURCEHANDLEPROTO
_TENSORPROTO.fields_by_name['variant_val'].message_type = _VARIANTTENSORDATAPROTO
_VARIANTTENSORDATAPROTO.fields_by_name['tensors'].message_type = _TENSORPROTO
DESCRIPTOR.message_types_by_name['TensorProto'] = _TENSORPROTO
DESCRIPTOR.message_types_by_name['VariantTensorDataProto'] = _VARIANTTENSORDATAPROTO
TensorProto = _reflection.GeneratedProtocolMessageType('TensorProto', (_message.Message,), dict(
DESCRIPTOR = _TENSORPROTO,
__module__ = 'tensorflow.core.framework.tensor_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TensorProto)
))
_sym_db.RegisterMessage(TensorProto)
VariantTensorDataProto = _reflection.GeneratedProtocolMessageType('VariantTensorDataProto', (_message.Message,), dict(
DESCRIPTOR = _VARIANTTENSORDATAPROTO,
__module__ = 'tensorflow.core.framework.tensor_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.VariantTensorDataProto)
))
_sym_db.RegisterMessage(VariantTensorDataProto)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\014TensorProtosP\001\370\001\001'))
_TENSORPROTO.fields_by_name['half_val'].has_options = True
_TENSORPROTO.fields_by_name['half_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['float_val'].has_options = True
_TENSORPROTO.fields_by_name['float_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['double_val'].has_options = True
_TENSORPROTO.fields_by_name['double_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['int_val'].has_options = True
_TENSORPROTO.fields_by_name['int_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['scomplex_val'].has_options = True
_TENSORPROTO.fields_by_name['scomplex_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['int64_val'].has_options = True
_TENSORPROTO.fields_by_name['int64_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['bool_val'].has_options = True
_TENSORPROTO.fields_by_name['bool_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['dcomplex_val'].has_options = True
_TENSORPROTO.fields_by_name['dcomplex_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
# @@protoc_insertion_point(module_scope)
| true
| true
|
f709a4c39c96d3a1e8421aefec3136f85f663333
| 4,568
|
py
|
Python
|
readthedocs/search/documents.py
|
srijan-deepsource/readthedocs.org
|
ec45216d9ce946a486ef472a8ae3e243742d3aed
|
[
"MIT"
] | null | null | null |
readthedocs/search/documents.py
|
srijan-deepsource/readthedocs.org
|
ec45216d9ce946a486ef472a8ae3e243742d3aed
|
[
"MIT"
] | null | null | null |
readthedocs/search/documents.py
|
srijan-deepsource/readthedocs.org
|
ec45216d9ce946a486ef472a8ae3e243742d3aed
|
[
"MIT"
] | 1
|
2020-09-17T08:38:30.000Z
|
2020-09-17T08:38:30.000Z
|
import logging
from django.conf import settings
from django_elasticsearch_dsl import DocType, Index, fields
from elasticsearch import Elasticsearch
from readthedocs.projects.models import HTMLFile, Project
project_conf = settings.ES_INDEXES['project']
project_index = Index(project_conf['name'])
project_index.settings(**project_conf['settings'])
page_conf = settings.ES_INDEXES['page']
page_index = Index(page_conf['name'])
page_index.settings(**page_conf['settings'])
log = logging.getLogger(__name__)
class RTDDocTypeMixin:
def update(self, *args, **kwargs):
# Hack a fix to our broken connection pooling
# This creates a new connection on every request,
# but actually works :)
log.info('Hacking Elastic indexing to fix connection pooling')
self.using = Elasticsearch(**settings.ELASTICSEARCH_DSL['default'])
super().update(*args, **kwargs)
@project_index.doc_type
class ProjectDocument(RTDDocTypeMixin, DocType):
# Metadata
url = fields.TextField(attr='get_absolute_url')
users = fields.NestedField(
properties={
'username': fields.TextField(),
'id': fields.IntegerField(),
}
)
language = fields.KeywordField()
modified_model_field = 'modified_date'
class Meta:
model = Project
fields = ('name', 'slug', 'description')
ignore_signals = True
@page_index.doc_type
class PageDocument(RTDDocTypeMixin, DocType):
# Metadata
project = fields.KeywordField(attr='project.slug')
version = fields.KeywordField(attr='version.slug')
path = fields.KeywordField(attr='processed_json.path')
full_path = fields.KeywordField(attr='path')
rank = fields.IntegerField()
# Searchable content
title = fields.TextField(attr='processed_json.title')
sections = fields.NestedField(
attr='processed_json.sections',
properties={
'id': fields.KeywordField(),
'title': fields.TextField(),
'content': fields.TextField(),
}
)
domains = fields.NestedField(
properties={
'role_name': fields.KeywordField(),
# For linking to the URL
'anchor': fields.KeywordField(),
# For showing in the search result
'type_display': fields.TextField(),
'docstrings': fields.TextField(),
# Simple analyzer breaks on `.`,
# otherwise search results are too strict for this use case
'name': fields.TextField(analyzer='simple'),
}
)
modified_model_field = 'modified_date'
class Meta:
model = HTMLFile
fields = ('commit', 'build')
ignore_signals = True
def prepare_rank(self, html_file):
if not (-10 <= html_file.rank <= 10):
return 0
return html_file.rank
def prepare_domains(self, html_file):
"""Prepares and returns the values for domains field."""
if not html_file.version.is_sphinx_type:
return []
all_domains = []
try:
domains_qs = html_file.sphinx_domains.exclude(
domain='std',
type__in=['doc', 'label']
).iterator()
all_domains = [
{
'role_name': domain.role_name,
'anchor': domain.anchor,
'type_display': domain.type_display,
'docstrings': html_file.processed_json.get(
'domain_data', {}
).get(domain.anchor, ''),
'name': domain.name,
}
for domain in domains_qs
]
log.debug(
"[%s] [%s] Total domains for file %s are: %s",
html_file.project.slug,
html_file.version.slug,
html_file.path,
len(all_domains)
)
except Exception:
log.exception(
"[%s] [%s] Error preparing domain data for file %s",
html_file.project.slug,
html_file.version.slug,
html_file.path
)
return all_domains
def get_queryset(self):
"""
Ignore certain files from indexing.
- Files from external versions
- Ignored files
"""
queryset = super().get_queryset()
queryset = (
queryset
.internal()
.exclude(ignore=True)
)
return queryset
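To make the rank handling above concrete, here is a standalone restatement (a sketch, not Read the Docs code; FakeHTMLFile is a hypothetical stand-in) of the clamp in PageDocument.prepare_rank, where ranks outside [-10, 10] are indexed as the neutral value 0:

class FakeHTMLFile:
    def __init__(self, rank):
        self.rank = rank

# prepare_rank does not use `self`, so it can be exercised unbound:
assert PageDocument.prepare_rank(None, FakeHTMLFile(7)) == 7
assert PageDocument.prepare_rank(None, FakeHTMLFile(42)) == 0   # out of range
assert PageDocument.prepare_rank(None, FakeHTMLFile(-11)) == 0  # out of range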
| 28.911392
| 75
| 0.577496
|
import logging
from django.conf import settings
from django_elasticsearch_dsl import DocType, Index, fields
from elasticsearch import Elasticsearch
from readthedocs.projects.models import HTMLFile, Project
project_conf = settings.ES_INDEXES['project']
project_index = Index(project_conf['name'])
project_index.settings(**project_conf['settings'])
page_conf = settings.ES_INDEXES['page']
page_index = Index(page_conf['name'])
page_index.settings(**page_conf['settings'])
log = logging.getLogger(__name__)
class RTDDocTypeMixin:
def update(self, *args, **kwargs):
log.info('Hacking Elastic indexing to fix connection pooling')
self.using = Elasticsearch(**settings.ELASTICSEARCH_DSL['default'])
super().update(*args, **kwargs)
@project_index.doc_type
class ProjectDocument(RTDDocTypeMixin, DocType):
url = fields.TextField(attr='get_absolute_url')
users = fields.NestedField(
properties={
'username': fields.TextField(),
'id': fields.IntegerField(),
}
)
language = fields.KeywordField()
modified_model_field = 'modified_date'
class Meta:
model = Project
fields = ('name', 'slug', 'description')
ignore_signals = True
@page_index.doc_type
class PageDocument(RTDDocTypeMixin, DocType):
project = fields.KeywordField(attr='project.slug')
version = fields.KeywordField(attr='version.slug')
path = fields.KeywordField(attr='processed_json.path')
full_path = fields.KeywordField(attr='path')
rank = fields.IntegerField()
title = fields.TextField(attr='processed_json.title')
sections = fields.NestedField(
attr='processed_json.sections',
properties={
'id': fields.KeywordField(),
'title': fields.TextField(),
'content': fields.TextField(),
}
)
domains = fields.NestedField(
properties={
'role_name': fields.KeywordField(),
'anchor': fields.KeywordField(),
'type_display': fields.TextField(),
'docstrings': fields.TextField(),
'name': fields.TextField(analyzer='simple'),
}
)
modified_model_field = 'modified_date'
class Meta:
model = HTMLFile
fields = ('commit', 'build')
ignore_signals = True
def prepare_rank(self, html_file):
if not (-10 <= html_file.rank <= 10):
return 0
return html_file.rank
def prepare_domains(self, html_file):
if not html_file.version.is_sphinx_type:
return []
all_domains = []
try:
domains_qs = html_file.sphinx_domains.exclude(
domain='std',
type__in=['doc', 'label']
).iterator()
all_domains = [
{
'role_name': domain.role_name,
'anchor': domain.anchor,
'type_display': domain.type_display,
'docstrings': html_file.processed_json.get(
'domain_data', {}
).get(domain.anchor, ''),
'name': domain.name,
}
for domain in domains_qs
]
log.debug(
"[%s] [%s] Total domains for file %s are: %s",
html_file.project.slug,
html_file.version.slug,
html_file.path,
len(all_domains)
)
except Exception:
log.exception(
"[%s] [%s] Error preparing domain data for file %s",
html_file.project.slug,
html_file.version.slug,
html_file.path
)
return all_domains
def get_queryset(self):
queryset = super().get_queryset()
queryset = (
queryset
.internal()
.exclude(ignore=True)
)
return queryset
| true
| true
|
f709a519386f92fbdb79f8035b8677fa2a7251b5
| 2,773
|
py
|
Python
|
pdm/cli/commands/show.py
|
julie777/pdm
|
a6029ca02105d79da4841c701edf73f7315f74eb
|
[
"MIT"
] | 1
|
2022-03-02T19:43:46.000Z
|
2022-03-02T19:43:46.000Z
|
pdm/cli/commands/show.py
|
julie777/pdm
|
a6029ca02105d79da4841c701edf73f7315f74eb
|
[
"MIT"
] | 1
|
2022-03-20T07:36:27.000Z
|
2022-03-20T07:36:27.000Z
|
pdm/cli/commands/show.py
|
julie777/pdm
|
a6029ca02105d79da4841c701edf73f7315f74eb
|
[
"MIT"
] | null | null | null |
import argparse
from packaging.version import Version
from pdm import termui
from pdm.cli.commands.base import BaseCommand
from pdm.exceptions import PdmUsageError
from pdm.models.candidates import Candidate
from pdm.models.project_info import ProjectInfo
from pdm.models.requirements import parse_requirement
from pdm.project import Project
from pdm.utils import normalize_name
def filter_stable(candidate: Candidate) -> bool:
assert candidate.version
return not Version(candidate.version).is_prerelease
class Command(BaseCommand):
"""Show the package information"""
metadata_keys = ["name", "version", "summary", "license", "platform", "keywords"]
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"package",
type=normalize_name,
nargs=argparse.OPTIONAL,
help="Specify the package name, or show this package if not given",
)
for option in self.metadata_keys:
parser.add_argument(
f"--{option}", action="store_true", help=f"Show {option}"
)
def handle(self, project: Project, options: argparse.Namespace) -> None:
package = options.package
if package:
req = parse_requirement(package)
repository = project.get_repository()
            # reverse the result so that the latest version comes first.
matches = repository.find_candidates(req, True, True)
latest = next(iter(matches), None)
if not latest:
project.core.ui.echo(
termui.yellow(f"No match found for the package {package!r}"),
err=True,
)
return
latest_stable = next(filter(filter_stable, matches), None)
metadata = latest.prepare(project.environment).metadata
else:
if not project.meta.name:
raise PdmUsageError("This project is not a package")
metadata = project.meta
package = normalize_name(metadata.name)
latest_stable = None
assert metadata
project_info = ProjectInfo(metadata)
if any(getattr(options, key, None) for key in self.metadata_keys):
for key in self.metadata_keys:
if getattr(options, key, None):
project.core.ui.echo(project_info[key])
return
installed = project.environment.get_working_set().get(package)
if latest_stable:
project_info.latest_stable_version = str(latest_stable.version)
if installed:
project_info.installed_version = str(installed.version)
project.core.ui.display_columns(list(project_info.generate_rows()))
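A small sketch of filter_stable in isolation (FakeCandidate is a hypothetical stand-in, not a pdm class): pre-release versions are rejected, so the first stable match wins.

from packaging.version import Version

class FakeCandidate:
    def __init__(self, version):
        self.version = version

matches = [FakeCandidate("2.0.0rc1"), FakeCandidate("1.9.3")]
assert Version("2.0.0rc1").is_prerelease           # release candidates are pre-releases
assert next(filter(filter_stable, matches)).version == "1.9.3"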
| 37.986301
| 85
| 0.639019
|
import argparse
from packaging.version import Version
from pdm import termui
from pdm.cli.commands.base import BaseCommand
from pdm.exceptions import PdmUsageError
from pdm.models.candidates import Candidate
from pdm.models.project_info import ProjectInfo
from pdm.models.requirements import parse_requirement
from pdm.project import Project
from pdm.utils import normalize_name
def filter_stable(candidate: Candidate) -> bool:
assert candidate.version
return not Version(candidate.version).is_prerelease
class Command(BaseCommand):
metadata_keys = ["name", "version", "summary", "license", "platform", "keywords"]
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"package",
type=normalize_name,
nargs=argparse.OPTIONAL,
help="Specify the package name, or show this package if not given",
)
for option in self.metadata_keys:
parser.add_argument(
f"--{option}", action="store_true", help=f"Show {option}"
)
def handle(self, project: Project, options: argparse.Namespace) -> None:
package = options.package
if package:
req = parse_requirement(package)
repository = project.get_repository()
matches = repository.find_candidates(req, True, True)
latest = next(iter(matches), None)
if not latest:
project.core.ui.echo(
termui.yellow(f"No match found for the package {package!r}"),
err=True,
)
return
latest_stable = next(filter(filter_stable, matches), None)
metadata = latest.prepare(project.environment).metadata
else:
if not project.meta.name:
raise PdmUsageError("This project is not a package")
metadata = project.meta
package = normalize_name(metadata.name)
latest_stable = None
assert metadata
project_info = ProjectInfo(metadata)
if any(getattr(options, key, None) for key in self.metadata_keys):
for key in self.metadata_keys:
if getattr(options, key, None):
project.core.ui.echo(project_info[key])
return
installed = project.environment.get_working_set().get(package)
if latest_stable:
project_info.latest_stable_version = str(latest_stable.version)
if installed:
project_info.installed_version = str(installed.version)
project.core.ui.display_columns(list(project_info.generate_rows()))
| true
| true
|
f709a58c0695a29b53bac9a2a62d67edf3e465a0
| 124
|
py
|
Python
|
thirdweb/types/collection/__init__.py
|
princetonwong/python-sdk
|
f35181d97620e29d055498fca75f3702f3bb2449
|
[
"Apache-2.0"
] | 1
|
2022-02-18T16:59:12.000Z
|
2022-02-18T16:59:12.000Z
|
thirdweb/types/collection/__init__.py
|
princetonwong/python-sdk
|
f35181d97620e29d055498fca75f3702f3bb2449
|
[
"Apache-2.0"
] | null | null | null |
thirdweb/types/collection/__init__.py
|
princetonwong/python-sdk
|
f35181d97620e29d055498fca75f3702f3bb2449
|
[
"Apache-2.0"
] | null | null | null |
"""
Deprecated. Use types.bundle instead.
"""
from .types import CreateCollectionArg, CollectionMetadata, MintCollectionArg
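One way such a deprecated shim module could steer users away (a sketch, not present in the original file) is to emit a DeprecationWarning at import time:

import warnings

warnings.warn(
    "thirdweb.types.collection is deprecated; use thirdweb.types.bundle instead.",
    DeprecationWarning,
    stacklevel=2,
)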
| 24.8
| 77
| 0.806452
|
from .types import CreateCollectionArg, CollectionMetadata, MintCollectionArg
| true
| true
|
f709a61da35f0ca43aa59d98bda805127bea4373
| 622
|
py
|
Python
|
examples/dataversity.py
|
ettoreleandrotognoli/etto-robot
|
602b6c00ac925ccdbf33e60f06feb5835c246d31
|
[
"Apache-2.0"
] | null | null | null |
examples/dataversity.py
|
ettoreleandrotognoli/etto-robot
|
602b6c00ac925ccdbf33e60f06feb5835c246d31
|
[
"Apache-2.0"
] | 6
|
2020-12-17T10:19:15.000Z
|
2021-03-31T23:23:19.000Z
|
examples/dataversity.py
|
ettoreleandrotognoli/etto-robot
|
602b6c00ac925ccdbf33e60f06feb5835c246d31
|
[
"Apache-2.0"
] | 1
|
2021-08-30T20:38:00.000Z
|
2021-08-30T20:38:00.000Z
|
from robot import Robot
from robot.collector.shortcut import *
collector = pipe(
const('http://www.dataversity.net/category/education/daily-data/'),
get(),
css('#primary article'),
foreach(dict(
pipe(
css('a[href]'), attr('href'), any(), url(),
get(),
dict(
body=pipe(css('.entry-content p'), as_text())
)
),
title=pipe(css('.entry-title'), as_text()),
url=pipe(css('a[href]'), attr('href'), any(), url()),
))
)
with Robot() as robot:
result = robot.sync_run(collector)
for r in result:
print(r)
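For readers unfamiliar with the collector DSL, the pipeline above is roughly equivalent to this imperative sketch (requests and BeautifulSoup are assumptions for illustration; the original example uses only the robot package):

import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup

base = 'http://www.dataversity.net/category/education/daily-data/'
listing = BeautifulSoup(requests.get(base).text, 'html.parser')
for article in listing.select('#primary article'):
    href = urljoin(base, article.select_one('a[href]')['href'])
    title = article.select_one('.entry-title').get_text(strip=True)
    detail = BeautifulSoup(requests.get(href).text, 'html.parser')
    body = ' '.join(p.get_text(' ', strip=True)
                    for p in detail.select('.entry-content p'))
    print({'title': title, 'url': href, 'body': body})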
| 24.88
| 71
| 0.533762
|
from robot import Robot
from robot.collector.shortcut import *
collector = pipe(
const('http://www.dataversity.net/category/education/daily-data/'),
get(),
css('#primary article'),
foreach(dict(
pipe(
css('a[href]'), attr('href'), any(), url(),
get(),
dict(
body=pipe(css('.entry-content p'), as_text())
)
),
title=pipe(css('.entry-title'), as_text()),
url=pipe(css('a[href]'), attr('href'), any(), url()),
))
)
with Robot() as robot:
result = robot.sync_run(collector)
for r in result:
print(r)
| true
| true
|
f709a66c3ccd87f772fd79dba1cc07610dc2d391
| 216
|
py
|
Python
|
Unidad 2/Ejercicios Plataforma/Ejercicio3.py
|
angelxehg/utzac-ppy
|
fb88bcc661518bb35c08a102a67c20d0659f71db
|
[
"MIT"
] | null | null | null |
Unidad 2/Ejercicios Plataforma/Ejercicio3.py
|
angelxehg/utzac-ppy
|
fb88bcc661518bb35c08a102a67c20d0659f71db
|
[
"MIT"
] | null | null | null |
Unidad 2/Ejercicios Plataforma/Ejercicio3.py
|
angelxehg/utzac-ppy
|
fb88bcc661518bb35c08a102a67c20d0659f71db
|
[
"MIT"
] | null | null | null |
cadena = input("\33[0mIngrese la cadena a separar: \33[34m")  # "Enter the string to split"
separador = input("\33[0mIngrese el carácter espaciador: \33[34m")[0]  # "Enter the separator character"
print("\33[0m")
print("Resultado:\33[33m", cadena.replace(' ', separador), "\33[0m")  # "Resultado" = "Result"
| 43.2
| 69
| 0.685185
|
cadena = input("\33[0mIngrese la cadena a separar: \33[34m")
separador = input("\33[0mIngrese el carácter espaciador: \33[34m")[0]
print("\33[0m")
print("Resultado:\33[33m", cadena.replace(' ', separador), "\33[0m")
| true
| true
|
f709a7666ac46a954430541cfb50b7e737579f2e
| 10,407
|
py
|
Python
|
high_order_layers_torch/FunctionalConvolution.py
|
jloveric/high-order-layers-torch
|
a50ccf0cf82c21fdda4c20c671e7d233a0b6f793
|
[
"MIT"
] | 4
|
2021-12-05T11:09:51.000Z
|
2021-12-11T20:07:37.000Z
|
high_order_layers_torch/FunctionalConvolution.py
|
jloveric/high-order-layers-torch
|
a50ccf0cf82c21fdda4c20c671e7d233a0b6f793
|
[
"MIT"
] | 1
|
2022-03-12T01:03:58.000Z
|
2022-03-12T01:03:58.000Z
|
high_order_layers_torch/FunctionalConvolution.py
|
jloveric/high-order-layers-torch
|
a50ccf0cf82c21fdda4c20c671e7d233a0b6f793
|
[
"MIT"
] | null | null | null |
from .LagrangePolynomial import LagrangeExpand
from pytorch_lightning import LightningModule, Trainer
from high_order_layers_torch.PolynomialLayers import *
from torch.nn import Conv2d
import torch.nn as nn
import torch
from .utils import *
def conv2d_wrapper(
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
padding_mode: str = 'zeros',
weight_magnitude: float = 1.0,
rescale_output: bool = False,
verbose: bool = False,
** kwargs
):
"""
    Inputs need to be an exact clone of those in torch conv2d, including
    defaults. The function also accepts extra keyword arguments without
    breaking conv2d.
"""
conv = Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
# Bias should always be false as the bias is already included in these methods.
bias=False,
padding_mode=padding_mode,
)
in_features = in_channels*kernel_size*kernel_size
if verbose is True:
print('in_channels', in_channels, 'out_channels', out_channels)
print('conv.weight.shape', conv.weight.shape)
# We don't want to use the standard conv initialization
# since this is a bit different.
if rescale_output is False:
conv.weight.data.uniform_(-weight_magnitude/in_features,
weight_magnitude/in_features)
elif rescale_output is True:
conv.weight.data.uniform_(-weight_magnitude, weight_magnitude)
else:
print('Using kaiming for weight initialization')
return conv
class Expansion2d(nn.Module):
def __init__(self, basis=None):
"""
Expand an input by a function defined by basis.
Args :
- basis: function to expand input by.
"""
super().__init__()
        if basis is None:
            raise Exception(
                'You must define the basis function in Expansion2d')
self.basis = basis
def build(self, input_shape):
pass
def __call__(self, inputs):
"""
Expand input
Args :
inputs : Tensor of shape [batches, channels, height, width]
Return :
Tensor of shape [batches, channels*(basis size), height, width]
"""
res = self.basis(
inputs) # outputs [basis_size, batches, channels, height, width]
res = res.permute(1, 3, 4, 2, 0)
res = torch.reshape(
res, [res.shape[0], res.shape[1],
res.shape[2], res.shape[3]*res.shape[4]]
)
res = res.permute(0, 3, 1, 2)
return res
class Expansion1d(nn.Module):
def __init__(self, basis=None):
"""
Expand an input by a function defined by basis.
Args :
- basis: function to expand input by.
"""
super().__init__()
        if basis is None:
            raise Exception(
                'You must define the basis function in Expansion1d')
self.basis = basis
def build(self, input_shape):
pass
def __call__(self, inputs):
"""
Expand input
Args :
inputs : Tensor of shape [batches, channels, width]
Return :
Tensor of shape [batches, channels*(basis size), width]
"""
res = self.basis(
inputs) # outputs [basis_size, batches, channels, width]
res = res.permute(1, 3, 2, 0)
res = torch.reshape(
res, [res.shape[0], res.shape[1], res.shape[2]*res.shape[3]]
)
res = res.permute(0, 2, 1) # batches, basis_size*channels, width
return res
class FourierConvolution2d(nn.Module):
def __init__(self, n: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output=False, *args, **kwargs):
"""
Fourier series convolutional layer.
Args :
            - n : number of Fourier series components. n=1 is a constant; n=3 contains both the first sine and cosine components.
- in_channels : number of input channels
- kernel_size : size of the kernel
- length : Range of the polynomial interpolation points. length = 2 implies [-1, 1] so the interpolation points
are in that range. Anything outside that range could grow.
- rescale_output: If rescale output is True then the output is divided by the number of inputs for each output,
            in effect taking the average. This is generally not necessary for the Fourier series.
"""
super().__init__()
self.poly = Expansion2d(FourierExpand(n, length))
self._channels = n*in_channels
self.conv = conv2d_wrapper(in_channels=self._channels,
kernel_size=kernel_size, **kwargs)
self._total_in = in_channels*kernel_size*kernel_size
self._rescale = 1.0
if rescale_output is True:
self._rescale = 1.0/self._total_in
def forward(self, x):
x = self.poly(x)
out = self.conv(x)
return out*self._rescale
class PolynomialConvolution2d(nn.Module):
def __init__(self, n: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output=False, periodicity: float = None, *args, **kwargs):
"""
Polynomial convolutional layer.
Args :
- n : number of weights or nodes. Polynomial order is n-1 so quadratic would be n=3.
- in_channels : number of input channels
- kernel_size : size of the kernel
- length : Range of the polynomial interpolation points. length = 2 implies [-1, 1] so the interpolation points
are in that range. Anything outside that range could grow.
- rescale_output: If rescale output is True then the output is divided by the number of inputs for each output,
in effect taking the average.
"""
super().__init__()
self.poly = Expansion2d(LagrangeExpand(n, length=length))
self._channels = n*in_channels
self.periodicity = periodicity
self.conv = conv2d_wrapper(in_channels=self._channels,
kernel_size=kernel_size, **kwargs)
self._total_in = in_channels*kernel_size*kernel_size
self._rescale = 1.0
if rescale_output is True:
self._rescale = 1.0/self._total_in
def forward(self, x):
periodicity = self.periodicity
if periodicity is not None:
x = make_periodic(x, periodicity)
x = self.poly(x)
out = self.conv(x)
return out*self._rescale
class PiecewisePolynomialConvolution2d(nn.Module):
def __init__(self, n: int, segments: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output: bool = False, periodicity: float = None, *args, **kwargs):
"""
        Piecewise continuous polynomial convolutional layer. The boundaries between adjacent polynomial segments are continuous.
Args :
- n : number of weights or nodes. Polynomial order is n-1 so quadratic would be n=3.
- segments: The number of segments in the piecewise polynomial.
- in_channels : number of input channels
- kernel_size : size of the kernel
- length : Range of the piecewise polynomial interpolation points. length = 2 implies [-1, 1] so the interpolation points
are in that range.
- rescale_output: If rescale output is True then the output is divided by the number of inputs for each output,
in effect taking the average.
"""
super().__init__()
self.poly = Expansion2d(
PiecewisePolynomialExpand(n=n, segments=segments, length=length))
self._channels = ((n-1)*segments+1)*in_channels
self.periodicity = periodicity
self.conv = conv2d_wrapper(in_channels=self._channels,
kernel_size=kernel_size, **kwargs)
self._total_in = in_channels*kernel_size*kernel_size
self._rescale = 1.0
if rescale_output is True:
self._rescale = 1.0/self._total_in
def forward(self, x):
periodicity = self.periodicity
if periodicity is not None:
x = make_periodic(x, periodicity)
x = self.poly(x)
out = self.conv(x)
return out*self._rescale
class PiecewiseDiscontinuousPolynomialConvolution2d(nn.Module):
def __init__(self, n: int, segments: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output: bool = False, periodicity: float = None, *args, **kwargs):
"""
        Discontinuous piecewise polynomial convolutional layer. The boundaries between adjacent polynomial segments may be discontinuous.
Args :
- n : number of weights or nodes. Polynomial order is n-1 so quadratic would be n=3.
- segments: The number of segments in the piecewise polynomial.
- in_channels : number of input channels
- kernel_size : size of the kernel
- length : Range of the piecewise polynomial interpolation points. length = 2 implies [-1, 1] so the interpolation points
are in that range.
- rescale_output: If rescale output is True then the output is divided by the number of inputs for each output,
in effect taking the average.
"""
super().__init__()
self.poly = Expansion2d(
PiecewiseDiscontinuousPolynomialExpand(n=n, segments=segments, length=length))
self._channels = n*segments*in_channels
self.periodicity = periodicity
self.conv = conv2d_wrapper(in_channels=self._channels,
kernel_size=kernel_size, **kwargs)
self._total_in = in_channels*kernel_size*kernel_size
self._rescale = 1.0
if rescale_output is True:
self._rescale = 1.0/self._total_in
def forward(self, x):
periodicity = self.periodicity
if periodicity is not None:
x = make_periodic(x, periodicity)
x = self.poly(x)
out = self.conv(x)
return out*self._rescale
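The shape bookkeeping in Expansion2d.__call__ is the subtle part; this standalone sketch (synthetic tensors, no basis function) reproduces it with plain torch, turning a [basis, batches, channels, H, W] expansion into [batches, channels*basis, H, W]:

import torch

basis_size, B, C, H, W = 3, 2, 4, 8, 8
res = torch.randn(basis_size, B, C, H, W)            # what self.basis(inputs) returns
res = res.permute(1, 3, 4, 2, 0)                     # [B, H, W, C, basis]
res = torch.reshape(res, [B, H, W, C * basis_size])  # merge channel and basis axes
res = res.permute(0, 3, 1, 2)                        # [B, C*basis, H, W]
assert res.shape == (B, C * basis_size, H, W)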
| 38.977528
| 178
| 0.618142
|
from .LagrangePolynomial import LagrangeExpand
from pytorch_lightning import LightningModule, Trainer
from high_order_layers_torch.PolynomialLayers import *
from torch.nn import Conv2d
import torch.nn as nn
import torch
from .utils import *
def conv2d_wrapper(
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
padding_mode: str = 'zeros',
weight_magnitude: float = 1.0,
rescale_output: bool = False,
verbose: bool = False,
** kwargs
):
conv = Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=False,
padding_mode=padding_mode,
)
in_features = in_channels*kernel_size*kernel_size
if verbose is True:
print('in_channels', in_channels, 'out_channels', out_channels)
print('conv.weight.shape', conv.weight.shape)
# since this is a bit different.
if rescale_output is False:
conv.weight.data.uniform_(-weight_magnitude/in_features,
weight_magnitude/in_features)
elif rescale_output is True:
conv.weight.data.uniform_(-weight_magnitude, weight_magnitude)
else:
print('Using kaiming for weight initialization')
return conv
class Expansion2d(nn.Module):
def __init__(self, basis=None):
super().__init__()
        if basis is None:
            raise Exception(
                'You must define the basis function in Expansion2d')
self.basis = basis
def build(self, input_shape):
pass
def __call__(self, inputs):
res = self.basis(
inputs) # outputs [basis_size, batches, channels, height, width]
res = res.permute(1, 3, 4, 2, 0)
res = torch.reshape(
res, [res.shape[0], res.shape[1],
res.shape[2], res.shape[3]*res.shape[4]]
)
res = res.permute(0, 3, 1, 2)
return res
class Expansion1d(nn.Module):
def __init__(self, basis=None):
super().__init__()
        if basis is None:
            raise Exception(
                'You must define the basis function in Expansion1d')
self.basis = basis
def build(self, input_shape):
pass
def __call__(self, inputs):
res = self.basis(
inputs) # outputs [basis_size, batches, channels, width]
res = res.permute(1, 3, 2, 0)
res = torch.reshape(
res, [res.shape[0], res.shape[1], res.shape[2]*res.shape[3]]
)
res = res.permute(0, 2, 1) # batches, basis_size*channels, width
return res
class FourierConvolution2d(nn.Module):
def __init__(self, n: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output=False, *args, **kwargs):
super().__init__()
self.poly = Expansion2d(FourierExpand(n, length))
self._channels = n*in_channels
self.conv = conv2d_wrapper(in_channels=self._channels,
kernel_size=kernel_size, **kwargs)
self._total_in = in_channels*kernel_size*kernel_size
self._rescale = 1.0
if rescale_output is True:
self._rescale = 1.0/self._total_in
def forward(self, x):
x = self.poly(x)
out = self.conv(x)
return out*self._rescale
class PolynomialConvolution2d(nn.Module):
def __init__(self, n: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output=False, periodicity: float = None, *args, **kwargs):
super().__init__()
self.poly = Expansion2d(LagrangeExpand(n, length=length))
self._channels = n*in_channels
self.periodicity = periodicity
self.conv = conv2d_wrapper(in_channels=self._channels,
kernel_size=kernel_size, **kwargs)
self._total_in = in_channels*kernel_size*kernel_size
self._rescale = 1.0
if rescale_output is True:
self._rescale = 1.0/self._total_in
def forward(self, x):
periodicity = self.periodicity
if periodicity is not None:
x = make_periodic(x, periodicity)
x = self.poly(x)
out = self.conv(x)
return out*self._rescale
class PiecewisePolynomialConvolution2d(nn.Module):
def __init__(self, n: int, segments: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output: bool = False, periodicity: float = None, *args, **kwargs):
super().__init__()
self.poly = Expansion2d(
PiecewisePolynomialExpand(n=n, segments=segments, length=length))
self._channels = ((n-1)*segments+1)*in_channels
self.periodicity = periodicity
self.conv = conv2d_wrapper(in_channels=self._channels,
kernel_size=kernel_size, **kwargs)
self._total_in = in_channels*kernel_size*kernel_size
self._rescale = 1.0
if rescale_output is True:
self._rescale = 1.0/self._total_in
def forward(self, x):
periodicity = self.periodicity
if periodicity is not None:
x = make_periodic(x, periodicity)
x = self.poly(x)
out = self.conv(x)
return out*self._rescale
class PiecewiseDiscontinuousPolynomialConvolution2d(nn.Module):
def __init__(self, n: int, segments: int, in_channels: int, kernel_size: int, length: float = 2.0, rescale_output: bool = False, periodicity: float = None, *args, **kwargs):
super().__init__()
self.poly = Expansion2d(
PiecewiseDiscontinuousPolynomialExpand(n=n, segments=segments, length=length))
self._channels = n*segments*in_channels
self.periodicity = periodicity
self.conv = conv2d_wrapper(in_channels=self._channels,
kernel_size=kernel_size, **kwargs)
self._total_in = in_channels*kernel_size*kernel_size
self._rescale = 1.0
if rescale_output is True:
self._rescale = 1.0/self._total_in
def forward(self, x):
periodicity = self.periodicity
if periodicity is not None:
x = make_periodic(x, periodicity)
x = self.poly(x)
out = self.conv(x)
return out*self._rescale
| true
| true
|
f709a78792f34be38d389105354669425719c2f6
| 459
|
py
|
Python
|
packages/python/plotly/plotly/validators/parcats/line/colorbar/title/font/_color.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/parcats/line/colorbar/title/font/_color.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/parcats/line/colorbar/title/font/_color.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="color",
parent_name="parcats.line.colorbar.title.font",
**kwargs,
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
| 27
| 66
| 0.623094
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="color",
parent_name="parcats.line.colorbar.title.font",
**kwargs,
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
| true
| true
|
f709a7abeea7c161af86f8de2d2b4ca3e795964c
| 3,261
|
py
|
Python
|
python/ccxt/test/test_trade.py
|
DavidFelsen/ccxt
|
6497b7da13d2ea3a9e56207b46b8691938b07839
|
[
"MIT"
] | 500
|
2017-06-28T14:43:21.000Z
|
2022-03-11T14:19:00.000Z
|
python/ccxt/test/test_trade.py
|
DavidFelsen/ccxt
|
6497b7da13d2ea3a9e56207b46b8691938b07839
|
[
"MIT"
] | 214
|
2017-07-03T17:39:26.000Z
|
2017-09-20T02:41:20.000Z
|
python/ccxt/test/test_trade.py
|
DavidFelsen/ccxt
|
6497b7da13d2ea3a9e56207b46b8691938b07839
|
[
"MIT"
] | 127
|
2017-06-30T05:49:24.000Z
|
2021-11-05T22:39:56.000Z
|
import numbers # noqa: E402
try:
basestring # basestring was removed in Python 3
except NameError:
basestring = str
def test_trade(exchange, trade, symbol, now):
assert trade
sampleTrade = {
'info': {'a': 1, 'b': 2, 'c': 3}, # the original decoded JSON as is
'id': '12345-67890:09876/54321', # string trade id
'timestamp': 1502962946216, # Unix timestamp in milliseconds
'datetime': '2017-08-17 12:42:48.000', # ISO8601 datetime with milliseconds
'symbol': 'ETH/BTC', # symbol
'order': '12345-67890:09876/54321', # string order id or None/None/null
'type': 'limit', # order type, 'market', 'limit' or None/None/null
'side': 'buy', # direction of the trade, 'buy' or 'sell'
'takerOrMaker': 'taker', # string, 'taker' or 'maker'
'price': 0.06917684, # float price in quote currency
'amount': 1.5, # amount of base currency
'cost': 0.10376526, # total cost(including fees), `price * amount`
}
keys = list(sampleTrade.keys())
for i in range(0, len(keys)):
key = keys[i]
assert key in trade
fee = trade['fee'] if ('fee' in trade) else None
fees = trade['fees'] if ('fees' in trade) else None
# logical XOR
if fee or fees:
assert not (fee and fees)
if fee:
assert('cost' in fee) and ('currency' in fee)
if fees:
assert isinstance(fees, list)
for i in range(0, len(fees)):
fee = fees[i]
assert('cost' in fee) and ('currency' in fee)
id = trade['id']
assert(id is None) or (isinstance(id, basestring))
timestamp = trade['timestamp']
assert isinstance(timestamp, numbers.Real) or timestamp is None
if timestamp:
assert timestamp > 1230940800000 # 03 Jan 2009 - first block
assert timestamp < 2147483648000 # 19 Jan 2038 - int32 overflows
adjustedNow = now + 60000
assert timestamp < adjustedNow, 'trade.timestamp is greater than or equal to current time: trade: ' + exchange.iso8601(timestamp) + ' now: ' + exchange.iso8601(now)
assert trade['datetime'] == exchange.iso8601(timestamp)
assert trade['symbol'] == symbol, 'trade symbol is not equal to requested symbol: trade: ' + trade['symbol'] + ' requested: ' + symbol
assert trade['type'] is None or isinstance(trade['type'], basestring)
assert trade['side'] is None or trade['side'] == 'buy' or trade['side'] == 'sell', 'unexpected trade side ' + trade['side']
assert trade['order'] is None or isinstance(trade['order'], basestring)
assert isinstance(trade['price'], numbers.Real), 'trade.price is not a number'
assert trade['price'] > 0
assert isinstance(trade['amount'], numbers.Real), 'trade.amount is not a number'
assert trade['amount'] >= 0
assert trade['cost'] is None or isinstance(trade['cost'], numbers.Real), 'trade.cost is not a number'
assert trade['cost'] is None or trade['cost'] >= 0
takerOrMaker = trade['takerOrMaker']
assert takerOrMaker is None or takerOrMaker == 'taker' or takerOrMaker == 'maker'
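A minimal end-to-end sketch (StubExchange and the trade dict are hypothetical fixtures, not ccxt objects) showing the contract this validator enforces:

from datetime import datetime, timezone

class StubExchange:
    def iso8601(self, ts):
        dt = datetime.fromtimestamp(ts / 1000, tz=timezone.utc)
        return dt.strftime('%Y-%m-%dT%H:%M:%S') + '.%03dZ' % (ts % 1000)

exchange = StubExchange()
ts = 1502962946216
trade = {
    'info': {}, 'id': 't-1', 'timestamp': ts,
    'datetime': exchange.iso8601(ts), 'symbol': 'ETH/BTC',
    'order': None, 'type': 'limit', 'side': 'buy', 'takerOrMaker': 'taker',
    'price': 0.06917684, 'amount': 1.5, 'cost': 0.10376526,
}
test_trade(exchange, trade, 'ETH/BTC', now=ts)  # passes: no assertion fires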
| 48.671642
| 172
| 0.601349
|
import numbers
try:
basestring
except NameError:
basestring = str
def test_trade(exchange, trade, symbol, now):
assert trade
sampleTrade = {
'info': {'a': 1, 'b': 2, 'c': 3},
'id': '12345-67890:09876/54321',
'timestamp': 1502962946216,
'datetime': '2017-08-17 12:42:48.000',
'symbol': 'ETH/BTC',
'order': '12345-67890:09876/54321',
'type': 'limit',
'side': 'buy',
'takerOrMaker': 'taker',
'price': 0.06917684,
'amount': 1.5,
'cost': 0.10376526,
}
keys = list(sampleTrade.keys())
for i in range(0, len(keys)):
key = keys[i]
assert key in trade
fee = trade['fee'] if ('fee' in trade) else None
fees = trade['fees'] if ('fees' in trade) else None
if fee or fees:
assert not (fee and fees)
if fee:
assert('cost' in fee) and ('currency' in fee)
if fees:
assert isinstance(fees, list)
for i in range(0, len(fees)):
fee = fees[i]
assert('cost' in fee) and ('currency' in fee)
id = trade['id']
assert(id is None) or (isinstance(id, basestring))
timestamp = trade['timestamp']
assert isinstance(timestamp, numbers.Real) or timestamp is None
if timestamp:
assert timestamp > 1230940800000
assert timestamp < 2147483648000
adjustedNow = now + 60000
assert timestamp < adjustedNow, 'trade.timestamp is greater than or equal to current time: trade: ' + exchange.iso8601(timestamp) + ' now: ' + exchange.iso8601(now)
assert trade['datetime'] == exchange.iso8601(timestamp)
assert trade['symbol'] == symbol, 'trade symbol is not equal to requested symbol: trade: ' + trade['symbol'] + ' requested: ' + symbol
assert trade['type'] is None or isinstance(trade['type'], basestring)
assert trade['side'] is None or trade['side'] == 'buy' or trade['side'] == 'sell', 'unexpected trade side ' + trade['side']
assert trade['order'] is None or isinstance(trade['order'], basestring)
assert isinstance(trade['price'], numbers.Real), 'trade.price is not a number'
assert trade['price'] > 0
assert isinstance(trade['amount'], numbers.Real), 'trade.amount is not a number'
assert trade['amount'] >= 0
assert trade['cost'] is None or isinstance(trade['cost'], numbers.Real), 'trade.cost is not a number'
assert trade['cost'] is None or trade['cost'] >= 0
takerOrMaker = trade['takerOrMaker']
assert takerOrMaker is None or takerOrMaker == 'taker' or takerOrMaker == 'maker'
| true
| true
|
f709a7ecd6bb9ade98ca43b8a364ed1073609efa
| 322
|
py
|
Python
|
simple_app.py
|
lykius/hesiod
|
091ba1b06cfa870133415fc1df6efdd8e50a2cfe
|
[
"MIT"
] | 19
|
2020-12-11T15:40:55.000Z
|
2022-01-17T16:55:13.000Z
|
simple_app.py
|
lykius/hesiod
|
091ba1b06cfa870133415fc1df6efdd8e50a2cfe
|
[
"MIT"
] | null | null | null |
simple_app.py
|
lykius/hesiod
|
091ba1b06cfa870133415fc1df6efdd8e50a2cfe
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from pprint import pprint
from hesiod import get_cfg_copy, hmain
template_file = Path("tests/configs/templates/complex.yaml")
base_cfg_dir = Path("tests/configs/bases")
@hmain(base_cfg_dir, template_cfg_file=template_file)
def test() -> None:
cfg = get_cfg_copy()
pprint(cfg)
test()
| 18.941176
| 60
| 0.757764
|
from pathlib import Path
from pprint import pprint
from hesiod import get_cfg_copy, hmain
template_file = Path("tests/configs/templates/complex.yaml")
base_cfg_dir = Path("tests/configs/bases")
@hmain(base_cfg_dir, template_cfg_file=template_file)
def test() -> None:
cfg = get_cfg_copy()
pprint(cfg)
test()
| true
| true
|
f709a7eefbbbaab83c4f1985daeb1cbebd252f53
| 6,119
|
py
|
Python
|
gita/utils.py
|
CD3/gita
|
9881cf81d46a41ab05ae558e7dcc7dd846a8ce2d
|
[
"MIT"
] | null | null | null |
gita/utils.py
|
CD3/gita
|
9881cf81d46a41ab05ae558e7dcc7dd846a8ce2d
|
[
"MIT"
] | null | null | null |
gita/utils.py
|
CD3/gita
|
9881cf81d46a41ab05ae558e7dcc7dd846a8ce2d
|
[
"MIT"
] | null | null | null |
import os
import yaml
import asyncio
import platform
from functools import lru_cache
from typing import List, Dict, Coroutine, Iterator, Union
from . import info
from . import common
def get_path_fname() -> str:
"""
Return the file name that stores the repo locations.
"""
root = common.get_config_dir()
return os.path.join(root, 'repo_path')
@lru_cache()
def get_repos() -> Dict[str, str]:
"""
Return a `dict` of repo name to repo absolute path
"""
path_file = get_path_fname()
repos = {}
# Each line is a repo path and repo name separated by ,
if os.path.isfile(path_file) and os.stat(path_file).st_size > 0:
with open(path_file) as f:
for line in f:
line = line.rstrip()
if not line: # blank line
continue
path, name = line.split(',')
if not is_git(path):
continue
if name not in repos:
repos[name] = path
else: # repo name collision for different paths: include parent path name
par_name = os.path.basename(os.path.dirname(path))
repos[os.path.join(par_name, name)] = path
return repos
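# Example repo_path file contents (one "path,name" pair per line); these two
# entries are hypothetical. A duplicate name is disambiguated at load time by
# prefixing the parent directory, e.g. key 'parent/name' in the returned dict:
#   /home/user/code/gita,gita
#   /home/user/code/demo,demo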
def get_choices() -> List[Union[str, None]]:
"""
Return all repo names and an additional empty list. This is a workaround of
argparse's problem with coexisting nargs='*' and choices.
See https://utcc.utoronto.ca/~cks/space/blog/python/ArgparseNargsChoicesLimitation
and
https://bugs.python.org/issue27227
"""
repos = list(get_repos())
repos.append([])
return repos
def is_git(path: str) -> bool:
"""
Return True if the path is a git repo.
"""
# An alternative is to call `git rev-parse --is-inside-work-tree`
# I don't see why that one is better yet.
# For a regular git repo, .git is a folder, for a worktree repo, .git is a file.
# However, git submodule repo also has .git as a file.
# A more reliable way to differentiable regular and worktree repos is to
# compare the result of `git rev-parse --git-dir` and
# `git rev-parse --git-common-dir`
loc = os.path.join(path, '.git')
# TODO: we can display the worktree repos in a different font.
return os.path.exists(loc)
def rename_repo(repos: Dict[str, str], repo: str, new_name: str):
"""
Write new repo name to file
"""
path = repos[repo]
del repos[repo]
repos[new_name] = path
write_to_repo_file(repos, 'w')
def write_to_repo_file(repos: Dict[str, str], mode: str):
"""
"""
data = ''.join(f'{path},{name}\n' for name, path in repos.items())
fname = get_path_fname()
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, mode) as f:
f.write(data)
def add_repos(repos: Dict[str, str], new_paths: List[str]):
"""
Write new repo paths to file
"""
existing_paths = set(repos.values())
new_paths = set(os.path.abspath(p) for p in new_paths if is_git(p))
new_paths = new_paths - existing_paths
if new_paths:
print(f"Found {len(new_paths)} new repo(s).")
new_repos = {
os.path.basename(os.path.normpath(path)): path
for path in new_paths}
write_to_repo_file(new_repos, 'a+')
else:
print('No new repos found!')
async def run_async(repo_name: str, path: str, cmds: List[str]) -> Union[None, str]:
"""
Run `cmds` asynchronously in `path` directory. Return the `path` if
execution fails.
"""
process = await asyncio.create_subprocess_exec(
*cmds,
stdin=asyncio.subprocess.DEVNULL,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
start_new_session=True,
cwd=path)
stdout, stderr = await process.communicate()
for pipe in (stdout, stderr):
if pipe:
print(format_output(pipe.decode(), f'{repo_name}: '))
# The existence of stderr is not good indicator since git sometimes write
# to stderr even if the execution is successful, e.g. git fetch
if process.returncode != 0:
return path
def format_output(s: str, prefix: str):
"""
Prepends every line in given string with the given prefix.
"""
return ''.join([f'{prefix}{line}' for line in s.splitlines(keepends=True)])
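# e.g. format_output('on branch master\nclean\n', 'repo1: ')
# returns 'repo1: on branch master\nrepo1: clean\n'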
def exec_async_tasks(tasks: List[Coroutine]) -> List[Union[None, str]]:
"""
Execute tasks asynchronously
"""
# TODO: asyncio API is nicer in python 3.7
if platform.system() == 'Windows':
loop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
try:
errors = loop.run_until_complete(asyncio.gather(*tasks))
finally:
loop.close()
return errors
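# Hypothetical usage sketch combining the two helpers above: fan out
# `git fetch` across all registered repos and collect the paths that failed.
# repos = get_repos()
# tasks = [run_async(name, path, ['git', 'fetch']) for name, path in repos.items()]
# failed = [path for path in exec_async_tasks(tasks) if path]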
def describe(repos: Dict[str, str]) -> Iterator[str]:
"""
Return the status of all repos
"""
if repos:
name_width = max(len(n) for n in repos) + 1
funcs = info.get_info_funcs()
for name in sorted(repos):
path = repos[name]
display_items = ' '.join(f(path) for f in funcs)
yield f'{name:<{name_width}}{display_items}'
def get_cmds_from_files() -> Dict[str, Dict[str, str]]:
"""
Parse delegated git commands from default config file
and custom config file.
Example return
{
'branch': {'help': 'show local branches'},
'clean': {'cmd': 'clean -dfx',
'help': 'remove all untracked files/folders'},
}
"""
# default config file
fname = os.path.join(os.path.dirname(__file__), "cmds.yml")
with open(fname, 'r') as stream:
cmds = yaml.load(stream, Loader=yaml.FullLoader)
# custom config file
root = common.get_config_dir()
fname = os.path.join(root, 'cmds.yml')
custom_cmds = {}
if os.path.isfile(fname) and os.path.getsize(fname):
with open(fname, 'r') as stream:
custom_cmds = yaml.load(stream, Loader=yaml.FullLoader)
# custom commands shadow default ones
cmds.update(custom_cmds)
return cmds
| 30.748744
| 90
| 0.618565
|
import os
import yaml
import asyncio
import platform
from functools import lru_cache
from typing import List, Dict, Coroutine, Iterator, Union
from . import info
from . import common
def get_path_fname() -> str:
root = common.get_config_dir()
return os.path.join(root, 'repo_path')
@lru_cache()
def get_repos() -> Dict[str, str]:
path_file = get_path_fname()
repos = {}
if os.path.isfile(path_file) and os.stat(path_file).st_size > 0:
with open(path_file) as f:
for line in f:
line = line.rstrip()
if not line:
continue
path, name = line.split(',')
if not is_git(path):
continue
if name not in repos:
repos[name] = path
else:
par_name = os.path.basename(os.path.dirname(path))
repos[os.path.join(par_name, name)] = path
return repos
def get_choices() -> List[Union[str, None]]:
repos = list(get_repos())
repos.append([])
return repos
def is_git(path: str) -> bool:
# For a regular git repo, .git is a folder, for a worktree repo, .git is a file.
# However, git submodule repo also has .git as a file.
# A more reliable way to differentiable regular and worktree repos is to
# compare the result of `git rev-parse --git-dir` and
# `git rev-parse --git-common-dir`
loc = os.path.join(path, '.git')
# TODO: we can display the worktree repos in a different font.
return os.path.exists(loc)
def rename_repo(repos: Dict[str, str], repo: str, new_name: str):
path = repos[repo]
del repos[repo]
repos[new_name] = path
write_to_repo_file(repos, 'w')
def write_to_repo_file(repos: Dict[str, str], mode: str):
data = ''.join(f'{path},{name}\n' for name, path in repos.items())
fname = get_path_fname()
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, mode) as f:
f.write(data)
def add_repos(repos: Dict[str, str], new_paths: List[str]):
existing_paths = set(repos.values())
new_paths = set(os.path.abspath(p) for p in new_paths if is_git(p))
new_paths = new_paths - existing_paths
if new_paths:
print(f"Found {len(new_paths)} new repo(s).")
new_repos = {
os.path.basename(os.path.normpath(path)): path
for path in new_paths}
write_to_repo_file(new_repos, 'a+')
else:
print('No new repos found!')
async def run_async(repo_name: str, path: str, cmds: List[str]) -> Union[None, str]:
process = await asyncio.create_subprocess_exec(
*cmds,
stdin=asyncio.subprocess.DEVNULL,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
start_new_session=True,
cwd=path)
stdout, stderr = await process.communicate()
for pipe in (stdout, stderr):
if pipe:
print(format_output(pipe.decode(), f'{repo_name}: '))
# The existence of stderr is not good indicator since git sometimes write
# to stderr even if the execution is successful, e.g. git fetch
if process.returncode != 0:
return path
def format_output(s: str, prefix: str):
return ''.join([f'{prefix}{line}' for line in s.splitlines(keepends=True)])
def exec_async_tasks(tasks: List[Coroutine]) -> List[Union[None, str]]:
# TODO: asyncio API is nicer in python 3.7
if platform.system() == 'Windows':
loop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
try:
errors = loop.run_until_complete(asyncio.gather(*tasks))
finally:
loop.close()
return errors
def describe(repos: Dict[str, str]) -> Iterator[str]:
if repos:
name_width = max(len(n) for n in repos) + 1
funcs = info.get_info_funcs()
for name in sorted(repos):
path = repos[name]
display_items = ' '.join(f(path) for f in funcs)
yield f'{name:<{name_width}}{display_items}'
def get_cmds_from_files() -> Dict[str, Dict[str, str]]:
# default config file
fname = os.path.join(os.path.dirname(__file__), "cmds.yml")
with open(fname, 'r') as stream:
cmds = yaml.load(stream, Loader=yaml.FullLoader)
# custom config file
root = common.get_config_dir()
fname = os.path.join(root, 'cmds.yml')
custom_cmds = {}
if os.path.isfile(fname) and os.path.getsize(fname):
with open(fname, 'r') as stream:
custom_cmds = yaml.load(stream, Loader=yaml.FullLoader)
# custom commands shadow default ones
cmds.update(custom_cmds)
return cmds
| true
| true
|
f709a7fc512ab2f029c40750148094564f225011
| 4,364
|
py
|
Python
|
Speech Recognition/Jarvis.py
|
KALVS/RandomStuff
|
a347d73ee3621597c6efa731b36194d1743ef36c
|
[
"MIT"
] | null | null | null |
Speech Recognition/Jarvis.py
|
KALVS/RandomStuff
|
a347d73ee3621597c6efa731b36194d1743ef36c
|
[
"MIT"
] | null | null | null |
Speech Recognition/Jarvis.py
|
KALVS/RandomStuff
|
a347d73ee3621597c6efa731b36194d1743ef36c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Requires PyAudio and PySpeech and more.
import speech_recognition as sr
from time import ctime
import time
import os
from gtts import gTTS
import random
from pygame import mixer
from pyicloud import PyiCloudService
from datetime import date
import re
from re import findall, finditer
from urllib.request import urlopen
#iCloud stuff. You gotta add you icloud login details here.
iCloudService = PyiCloudService('icloudemail.com', 'icloudPassword')
#Speech recognition recogniser used to call recognise audio google
r = sr.Recognizer()
##A phrase used to awaken Oswald
awaken = ["Jarvis"]
awake = False
#mixer is used to play the saved audio file which is Jarvis 'speaking'
mixer.init()
##Opening phrases
welcome_phrases = ['What can I do for you?', 'What\'s up?', 'How can I be of assistance?']
greeting = random.randint(0, len(welcome_phrases)-1)
def speak(audioString):
print(audioString)
tts = gTTS(text=audioString, lang='en')
tts.save("audio.mp3")
os.system("mpg321 audio.mp3")
mixer.music.load('audio.mp3')
mixer.music.play()
def recordAudio():
# Record Audio
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
# Speech recognition using Google Speech Recognition
data = ""
try:
# Uses the default API key
# To use another API key: `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
data = r.recognize_google(audio)
print("You said: " + data)
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
return data
def awakenAlarm():
    global awake  # write the module-level wake flag, not a function-local
    # Record Audio
with sr.Microphone() as source:
audio = r.listen(source)
# Speech recognition using Google Speech Recognition
data = ""
try:
# Uses the default API key
# To use another API key: `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
speak('Processing')
data = r.recognize_google(audio)
print("You said: " + data)
for i in range(0, len(awaken)):
if awaken[i] in data:
awake = True
speak(welcome_phrases[greeting])
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
return data
def jarvis(data):
if "weather" in data:
weather = 'http://api.openweathermap.org/data/2.5/weather?q=Brisbane,AU&appid=eccbc53293f9233984b66fc892ee71fe'
weather_data = urlopen(weather).read()
weather_data = str(weather_data)
minimal_temp = findall('"temp_min":(.*),"temp_max"', weather_data)
minimal_temp = float(minimal_temp[0])
maximum_temp = findall('"temp_max":(.*)},"vis', weather_data)
maximum_temp = float(maximum_temp[0])
minimal_temp = minimal_temp - 273.15
maximum_temp = maximum_temp - 273.15
avg_temp = (minimal_temp + maximum_temp) / 2
speak(str(avg_temp))
if "events for today" in data:
from_dt = date.today()
to_dt = date.today()
iCalEvents = iCloudService.calendar.events(from_dt, to_dt)
iCalEvents = str(iCalEvents)
iCalEvent_titles = findall("'title': '(.*)', 'location", iCalEvents)
iCalEvent_location = findall("'location': (.*), 'startDate", iCalEvents)
#iCalEvent = str(iCalEvents[0])
#iCaltitle = findall("'title': '([ A-Za-z]*)'", iCalEvent)
print(iCalEvents)
for i in iCalEvent_titles:
print(iCalEvent_titles)
print(iCalEvent_location)
if "how are you" in data:
speak("I am fine")
if "what time is it" in data:
speak(ctime())
if "where is" in data:
data = data.split(" ")
location = data[2]
speak("Hold on Frank, I will show you where " + location + " is.")
os.system("chromium-browser https://www.google.nl/maps/place/" + location + "/&")
# initialization
#while(awake == False):
# data = awakenAlarm()
while 1:
data = recordAudio()
jarvis(data)
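# Optional robustness tweak (an assumption, not part of the original script):
# calibrate the recognizer for ambient noise once before the loop, e.g.
# with sr.Microphone() as source:
#     r.adjust_for_ambient_noise(source, duration=1)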
| 31.623188
| 119
| 0.656279
|
import speech_recognition as sr
from time import ctime
import time
import os
from gtts import gTTS
import random
from pygame import mixer
from pyicloud import PyiCloudService
from datetime import date
import re
from re import findall, finditer
from urllib.request import urlopen
iCloudService = PyiCloudService('icloudemail.com', 'icloudPassword')
r = sr.Recognizer()
awaken = ["Jarvis"]
awake = False
mixer.init()
welcome_phrases = ['What can I do for you?', 'What\'s up?', 'How can I be of assistance?']
greeting = random.randint(0, len(welcome_phrases)-1)
def speak(audioString):
print(audioString)
tts = gTTS(text=audioString, lang='en')
tts.save("audio.mp3")
os.system("mpg321 audio.mp3")
mixer.music.load('audio.mp3')
mixer.music.play()
def recordAudio():
# Record Audio
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
# Speech recognition using Google Speech Recognition
data = ""
try:
# Uses the default API key
# To use another API key: `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
data = r.recognize_google(audio)
print("You said: " + data)
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
return data
def awakenAlarm():
    global awake  # write the module-level wake flag, not a function-local
    # Record Audio
with sr.Microphone() as source:
audio = r.listen(source)
# Speech recognition using Google Speech Recognition
data = ""
try:
# Uses the default API key
# To use another API key: `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
speak('Processing')
data = r.recognize_google(audio)
print("You said: " + data)
for i in range(0, len(awaken)):
if awaken[i] in data:
awake = True
speak(welcome_phrases[greeting])
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
return data
def jarvis(data):
if "weather" in data:
weather = 'http://api.openweathermap.org/data/2.5/weather?q=Brisbane,AU&appid=eccbc53293f9233984b66fc892ee71fe'
weather_data = urlopen(weather).read()
weather_data = str(weather_data)
minimal_temp = findall('"temp_min":(.*),"temp_max"', weather_data)
minimal_temp = float(minimal_temp[0])
maximum_temp = findall('"temp_max":(.*)},"vis', weather_data)
maximum_temp = float(maximum_temp[0])
minimal_temp = minimal_temp - 273.15
maximum_temp = maximum_temp - 273.15
avg_temp = (minimal_temp + maximum_temp) / 2
speak(str(avg_temp))
if "events for today" in data:
from_dt = date.today()
to_dt = date.today()
iCalEvents = iCloudService.calendar.events(from_dt, to_dt)
iCalEvents = str(iCalEvents)
iCalEvent_titles = findall("'title': '(.*)', 'location", iCalEvents)
iCalEvent_location = findall("'location': (.*), 'startDate", iCalEvents)
#iCalEvent = str(iCalEvents[0])
#iCaltitle = findall("'title': '([ A-Za-z]*)'", iCalEvent)
print(iCalEvents)
for i in iCalEvent_titles:
print(iCalEvent_titles)
print(iCalEvent_location)
if "how are you" in data:
speak("I am fine")
if "what time is it" in data:
speak(ctime())
if "where is" in data:
data = data.split(" ")
location = data[2]
speak("Hold on Frank, I will show you where " + location + " is.")
os.system("chromium-browser https://www.google.nl/maps/place/" + location + "/&")
# initialization
#while(awake == False):
# data = awakenAlarm()
while 1:
data = recordAudio()
jarvis(data)
| true
| true
|
f709a87041d5c05d76449fc6fb9f3500d01c2824
| 57,267
|
py
|
Python
|
timm/models/byobnet.py
|
KnockerPulsar/pytorch-image-models
|
893f5dde27ae6b17389f738bd6e37160e2868c72
|
[
"Apache-2.0"
] | null | null | null |
timm/models/byobnet.py
|
KnockerPulsar/pytorch-image-models
|
893f5dde27ae6b17389f738bd6e37160e2868c72
|
[
"Apache-2.0"
] | null | null | null |
timm/models/byobnet.py
|
KnockerPulsar/pytorch-image-models
|
893f5dde27ae6b17389f738bd6e37160e2868c72
|
[
"Apache-2.0"
] | null | null | null |
""" Bring-Your-Own-Blocks Network
A flexible network w/ dataclass based config for stacking those NN blocks.
This model is currently used to implement the following networks:
GPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)).
Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
Code and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0
RepVGG - repvgg_*
Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
Code and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT
In all cases the models have been modified to fit within the design of ByobNet. I've remapped
the original weights and verified accuracies.
For GPU Efficient nets, I used the original names for the blocks since they were for the most part
the same as original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note also some
changes introduced in RegNet were also present in the stem and bottleneck blocks for this model.
A significant number of different network archs can be implemented here, including variants of the
above nets that include attention.
Hacked together by / copyright Ross Wightman, 2021.
"""
import math
from dataclasses import dataclass, field, replace
from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg, named_apply
from .layers import ClassifierHead, ConvBnAct, BatchNormAct2d, DropPath, AvgPool2dSame, \
create_conv2d, get_act_layer, convert_norm_act, get_attn, make_divisible, to_2tuple
from .registry import register_model
__all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block']
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = {
# GPU-Efficient (ResNet) weights
'gernet_s': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_s-756b4751.pth'),
'gernet_m': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_m-0873c53a.pth'),
'gernet_l': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_l-f31e2e8d.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
# RepVGG weights
'repvgg_a2': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_a2-c1ee6d2b.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b0': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b0-80ac3f1b.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b1': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1-77ca2989.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b1g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1g4-abde5d92.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b2': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2-25b7494e.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b2g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2g4-165a85f2.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b3': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3-199bc50d.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b3g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3g4-73c370bf.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
# experimental configs
'resnet51q': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth',
first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8),
test_input_size=(3, 288, 288), crop_pct=1.0),
'resnet61q': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8),
test_input_size=(3, 288, 288), crop_pct=1.0, interpolation='bicubic'),
'resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'seresnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'eca_resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'bat_resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic',
min_input_size=(3, 256, 256)),
'resnet32ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'resnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'seresnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'eca_resnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnet50t': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnext50ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
}
@dataclass
class ByoBlockCfg:
type: Union[str, nn.Module]
d: int # block depth (number of block repeats in stage)
c: int # number of output channels for each block in stage
s: int = 2 # stride of stage (first block)
gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1
br: float = 1. # bottleneck-ratio of blocks in stage
# NOTE: these config items override the model cfgs that are applied to all blocks by default
attn_layer: Optional[str] = None
attn_kwargs: Optional[Dict[str, Any]] = None
self_attn_layer: Optional[str] = None
self_attn_kwargs: Optional[Dict[str, Any]] = None
block_kwargs: Optional[Dict[str, Any]] = None
@dataclass
class ByoModelCfg:
blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...]
downsample: str = 'conv1x1'
stem_type: str = '3x3'
stem_pool: Optional[str] = 'maxpool'
stem_chs: int = 32
width_factor: float = 1.0
num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0
zero_init_last: bool = True # zero init last weight (usually bn) in residual path
fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation
act_layer: str = 'relu'
norm_layer: str = 'batchnorm'
# NOTE: these config items will be overridden by the block cfg (per-block) if they are set there
attn_layer: Optional[str] = None
attn_kwargs: dict = field(default_factory=lambda: dict())
self_attn_layer: Optional[str] = None
self_attn_kwargs: dict = field(default_factory=lambda: dict())
block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict())
def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0):
c = (64, 128, 256, 512)
group_size = 0
if groups > 0:
group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0
bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)])
return bcfg
def interleave_blocks(
types: Tuple[str, str], d, every: Union[int, List[int]] = 1, first: bool = False, **kwargs
) -> Tuple[ByoBlockCfg]:
""" interleave 2 block types in stack
"""
assert len(types) == 2
if isinstance(every, int):
every = list(range(0 if first else every, d, every + 1))
if not every:
every = [d - 1]
    every = set(every)  # dedupe indices and speed up the membership checks below
blocks = []
for i in range(d):
block_type = types[1] if i in every else types[0]
blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)]
return tuple(blocks)
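# e.g. interleave_blocks(('bottle', 'self_attn'), d=4, every=1, c=256, s=2)
# expands every=1 to indices [1, 3], yielding block types
# ('bottle', 'self_attn', 'bottle', 'self_attn') -- a minimal sketch; the
# c/s kwargs here are illustrative.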
model_cfgs = dict(
gernet_l=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.),
),
stem_chs=32,
stem_pool=None,
num_features=2560,
),
gernet_m=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.),
),
stem_chs=32,
stem_pool=None,
num_features=2560,
),
gernet_s=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.),
),
stem_chs=13,
stem_pool=None,
num_features=1920,
),
repvgg_a2=ByoModelCfg(
blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)),
stem_type='rep',
stem_chs=64,
),
repvgg_b0=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)),
stem_type='rep',
stem_chs=64,
),
repvgg_b1=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b1g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_b2=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b2g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_b3=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b3g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4),
stem_type='rep',
stem_chs=64,
),
# 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks
# DW convs in last block, 2048 pre-FC, silu act
resnet51q=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),
),
stem_chs=128,
stem_type='quad2',
stem_pool=None,
num_features=2048,
act_layer='silu',
),
# 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks
# DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act
resnet61q=ByoModelCfg(
blocks=(
ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),
),
stem_chs=128,
stem_type='quad',
stem_pool=None,
num_features=2048,
act_layer='silu',
block_kwargs=dict(extra_conv=True),
),
# A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act,
# and a tiered stem w/ maxpool
resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
),
gcresnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='gca',
),
seresnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='se',
),
eca_resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='eca',
),
bat_resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='bat',
attn_kwargs=dict(block_size=8)
),
# ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool
resnet32ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=0,
act_layer='silu',
),
# ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool
resnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
),
# A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat
# and a tiered stem w/ no maxpool
gcresnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='gca',
),
seresnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='se',
),
eca_resnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='eca',
),
gcresnet50t=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
attn_layer='gca',
),
gcresnext50ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
# stem_pool=None,
act_layer='silu',
attn_layer='gca',
),
)
@register_model
def gernet_l(pretrained=False, **kwargs):
""" GEResNet-Large (GENet-Large from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs)
@register_model
def gernet_m(pretrained=False, **kwargs):
""" GEResNet-Medium (GENet-Normal from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs)
@register_model
def gernet_s(pretrained=False, **kwargs):
""" EResNet-Small (GENet-Small from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs)
@register_model
def repvgg_a2(pretrained=False, **kwargs):
""" RepVGG-A2
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b0(pretrained=False, **kwargs):
""" RepVGG-B0
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b1(pretrained=False, **kwargs):
""" RepVGG-B1
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b1g4(pretrained=False, **kwargs):
""" RepVGG-B1g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b2(pretrained=False, **kwargs):
""" RepVGG-B2
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b2g4(pretrained=False, **kwargs):
""" RepVGG-B2g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b3(pretrained=False, **kwargs):
""" RepVGG-B3
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b3g4(pretrained=False, **kwargs):
""" RepVGG-B3g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs)
@register_model
def resnet51q(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs)
@register_model
def resnet61q(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs)
@register_model
def resnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs)
@register_model
def seresnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs)
@register_model
def eca_resnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def bat_resnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def resnet32ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs)
@register_model
def resnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs)
@register_model
def seresnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs)
@register_model
def eca_resnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnet50t(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs)
@register_model
def gcresnext50ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs)
def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]:
if not isinstance(stage_blocks_cfg, Sequence):
stage_blocks_cfg = (stage_blocks_cfg,)
block_cfgs = []
for i, cfg in enumerate(stage_blocks_cfg):
block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)]
return block_cfgs
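# e.g. expand_blocks_cfg(ByoBlockCfg(type='bottle', d=3, c=256)) returns three
# per-block cfgs, each a copy with d=1 and c=256.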
def num_groups(group_size, channels):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
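# e.g. num_groups(32, 256) -> 8 (grouped conv), num_groups(1, 256) -> 256
# (depthwise conv), num_groups(0, 256) -> 1 (normal conv)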
@dataclass
class LayerFn:
conv_norm_act: Callable = ConvBnAct
norm_act: Callable = BatchNormAct2d
act: Callable = nn.ReLU
attn: Optional[Callable] = None
self_attn: Optional[Callable] = None
class DownsampleAvg(nn.Module):
def __init__(self, in_chs, out_chs, stride=1, dilation=1, apply_act=False, layers: LayerFn = None):
""" AvgPool Downsampling as in 'D' ResNet variants."""
super(DownsampleAvg, self).__init__()
layers = layers or LayerFn()
avg_stride = stride if dilation == 1 else 1
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
else:
self.pool = nn.Identity()
self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act)
def forward(self, x):
return self.conv(self.pool(x))
def create_downsample(downsample_type, layers: LayerFn, **kwargs):
if downsample_type == 'avg':
return DownsampleAvg(**kwargs)
else:
return layers.conv_norm_act(kwargs.pop('in_chs'), kwargs.pop('out_chs'), kernel_size=1, **kwargs)
class BasicBlock(nn.Module):
""" ResNet Basic Block - kxk + kxk
"""
def __init__(
self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), group_size=None, bottle_ratio=1.0,
downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None,
drop_path_rate=0.):
super(BasicBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0])
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_kxk.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
# residual path
x = self.conv1_kxk(x)
x = self.conv2_kxk(x)
x = self.attn(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class BottleneckBlock(nn.Module):
""" ResNet-like Bottleneck Block - 1x1 - kxk - 1x1
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None,
downsample='avg', attn_last=False, linear_out=False, extra_conv=False, layers: LayerFn = None,
drop_block=None, drop_path_rate=0.):
super(BottleneckBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
if extra_conv:
self.conv2b_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block)
else:
self.conv2b_kxk = nn.Identity()
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv3_1x1.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.conv2_kxk(x)
x = self.conv2b_kxk(x)
x = self.attn(x)
x = self.conv3_1x1(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class DarkBlock(nn.Module):
""" DarkNet-like (1x1 + 3x3 w/ stride) block
The GE-Net impl included a 1x1 + 3x3 block in their search space. It was not used in the feature models.
This block is pretty much a DarkNet block (also DenseNet) hence the name. Neither DarkNet or DenseNet
uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats).
If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 /w stride + 1x1)
for more optimal compute.
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None,
drop_path_rate=0.):
super(DarkBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_kxk.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.attn(x)
x = self.conv2_kxk(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class EdgeBlock(nn.Module):
""" EdgeResidual-like (3x3 + 1x1) block
A two layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed.
    Very similar to the EfficientNet Edge-Residual block, but this block ends with activations, is
intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs.
FIXME is there a more common 3x3 + 1x1 conv block to name this after?
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='avg', attn_last=False, linear_out=False, layers: LayerFn = None,
drop_block=None, drop_path_rate=0.):
super(EdgeBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_kxk = layers.conv_norm_act(
in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_1x1.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_kxk(x)
x = self.attn(x)
x = self.conv2_1x1(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class RepVggBlock(nn.Module):
""" RepVGG Block.
Adapted from impl at https://github.com/DingXiaoH/RepVGG
This version does not currently support the deploy optimization. It is currently fixed in 'train' mode.
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='', layers: LayerFn = None, drop_block=None, drop_path_rate=0.):
super(RepVggBlock, self).__init__()
layers = layers or LayerFn()
groups = num_groups(group_size, in_chs)
use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1]
self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None
self.conv_kxk = layers.conv_norm_act(
in_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block, apply_act=False)
self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False)
self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity()
self.act = layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
# NOTE this init overrides that base model init with specific changes for the block type
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
nn.init.normal_(m.weight, .1, .1)
nn.init.normal_(m.bias, 0, .1)
if hasattr(self.attn, 'reset_parameters'):
self.attn.reset_parameters()
def forward(self, x):
if self.identity is None:
x = self.conv_1x1(x) + self.conv_kxk(x)
else:
identity = self.identity(x)
x = self.conv_1x1(x) + self.conv_kxk(x)
x = self.drop_path(x) # not in the paper / official impl, experimental
x = x + identity
x = self.attn(x) # no attn in the paper / official impl, experimental
x = self.act(x)
return x
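# NOTE deploy-time fusion is not implemented here; as a sketch only: each branch
# is conv+BN, so for inference the block can be folded into a single kxk conv
# with W = fold_bn(conv_kxk) + pad_to_kxk(fold_bn(conv_1x1)) + identity_bn_as_kxk,
# and the biases summed likewise, per the RepVGG paper.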
class SelfAttnBlock(nn.Module):
""" ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None,
downsample='avg', extra_conv=False, linear_out=False, post_attn_na=True, feat_size=None,
layers: LayerFn = None, drop_block=None, drop_path_rate=0.):
super(SelfAttnBlock, self).__init__()
assert layers is not None
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
if extra_conv:
self.conv2_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
stride = 1 # striding done via conv if enabled
else:
self.conv2_kxk = nn.Identity()
opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size)
# FIXME need to dilate self attn to have dilated network support, moop moop
self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs)
self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity()
self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv3_1x1.bn.weight)
if hasattr(self.self_attn, 'reset_parameters'):
self.self_attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.conv2_kxk(x)
x = self.self_attn(x)
x = self.post_attn(x)
x = self.conv3_1x1(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
_block_registry = dict(
basic=BasicBlock,
bottle=BottleneckBlock,
dark=DarkBlock,
edge=EdgeBlock,
rep=RepVggBlock,
self_attn=SelfAttnBlock,
)
def register_block(block_type:str, block_fn: nn.Module):
_block_registry[block_type] = block_fn
def create_block(block: Union[str, nn.Module], **kwargs):
if isinstance(block, (nn.Module, partial)):
return block(**kwargs)
    assert block in _block_registry, f'Unknown block type ({block})'
return _block_registry[block](**kwargs)
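# e.g. register_block('my_block', MyBlock) makes ByoBlockCfg(type='my_block', ...)
# resolvable by create_block; MyBlock is a hypothetical nn.Module subclass here.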
class Stem(nn.Sequential):
def __init__(self, in_chs, out_chs, kernel_size=3, stride=4, pool='maxpool',
num_rep=3, num_act=None, chs_decay=0.5, layers: LayerFn = None):
super().__init__()
assert stride in (2, 4)
layers = layers or LayerFn()
if isinstance(out_chs, (list, tuple)):
num_rep = len(out_chs)
stem_chs = out_chs
else:
stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1]
self.stride = stride
self.feature_info = [] # track intermediate features
prev_feat = ''
stem_strides = [2] + [1] * (num_rep - 1)
if stride == 4 and not pool:
# set last conv in stack to be strided if stride == 4 and no pooling layer
stem_strides[-1] = 2
num_act = num_rep if num_act is None else num_act
# if num_act < num_rep, first convs in stack won't have bn + act
stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act
prev_chs = in_chs
curr_stride = 1
for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)):
layer_fn = layers.conv_norm_act if na else create_conv2d
conv_name = f'conv{i + 1}'
if i > 0 and s > 1:
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s))
prev_chs = ch
curr_stride *= s
prev_feat = conv_name
if pool and 'max' in pool.lower():
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
self.add_module('pool', nn.MaxPool2d(3, 2, 1))
curr_stride *= 2
prev_feat = 'pool'
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
assert curr_stride == stride
def create_byob_stem(in_chs, out_chs, stem_type='', pool_type='', feat_prefix='stem', layers: LayerFn = None):
layers = layers or LayerFn()
assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', '7x7', '3x3')
if 'quad' in stem_type:
# based on NFNet stem, stack of 4 3x3 convs
num_act = 2 if 'quad2' in stem_type else None
stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers)
elif 'tiered' in stem_type:
# 3x3 stack of 3 convs as in my ResNet-T
stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers)
elif 'deep' in stem_type:
# 3x3 stack of 3 convs as in ResNet-D
stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers)
elif 'rep' in stem_type:
stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers)
elif '7x7' in stem_type:
# 7x7 stem conv as in ResNet
if pool_type:
stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers)
else:
stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2)
else:
# 3x3 stem conv as in RegNet is the default
if pool_type:
stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers)
else:
stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2)
if isinstance(stem, Stem):
feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info]
else:
feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix)]
return stem, feature_info
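# Usage sketch (assumed channel values, not from the original source): the 'tiered'
# variant derives a 3-conv channel ramp from `out_chs`:
#
#   stem, feat = create_byob_stem(in_chs=3, out_chs=64, stem_type='tiered', pool_type='maxpool')
#   # builds convs with channels (24, 32, 64) -- i.e. (3*64//8, 64//2, 64) -- then max pooling;
#   # `feat` records the intermediate channel counts and reductions for feature extraction.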
def reduce_feat_size(feat_size, stride=2):
return None if feat_size is None else tuple([s // stride for s in feat_size])
def override_kwargs(block_kwargs, model_kwargs):
""" Override model level attn/self-attn/block kwargs w/ block level
NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs
for the block if set to anything that isn't None.
i.e. an empty block_kwargs dict will remove kwargs set at model level for that block
"""
out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs
return out_kwargs or {} # make sure None isn't returned
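# Minimal illustration of the override semantics described above (hypothetical values):
#
#   override_kwargs(None, dict(rd_ratio=0.25))                 # -> {'rd_ratio': 0.25}, model kwargs kept
#   override_kwargs({}, dict(rd_ratio=0.25))                   # -> {}, block explicitly clears model kwargs
#   override_kwargs(dict(rd_ratio=0.5), dict(rd_ratio=0.25))   # -> {'rd_ratio': 0.5}, full replacement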
def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg, ):
layer_fns = block_kwargs['layers']
# override attn layer / args with block local config
attn_set = block_cfg.attn_layer is not None
if attn_set or block_cfg.attn_kwargs is not None:
# override attn layer config
if attn_set and not block_cfg.attn_layer:
# empty string for attn_layer type will disable attn for this block
attn_layer = None
else:
attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs)
attn_layer = block_cfg.attn_layer or model_cfg.attn_layer
attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None
layer_fns = replace(layer_fns, attn=attn_layer)
# override self-attn layer / args with block local cfg
self_attn_set = block_cfg.self_attn_layer is not None
if self_attn_set or block_cfg.self_attn_kwargs is not None:
# override attn layer config
if self_attn_set and not block_cfg.self_attn_layer: # attn_layer == ''
# empty string for self_attn_layer type will disable attn for this block
self_attn_layer = None
else:
self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs)
self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer
self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \
if self_attn_layer is not None else None
layer_fns = replace(layer_fns, self_attn=self_attn_layer)
block_kwargs['layers'] = layer_fns
# add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set
block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs))
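# Per-block override sketch (illustrative cfg values): with a model-wide SE attention,
# a single block opts out by setting an empty attn_layer string, which the logic above
# maps to `attn=None` for that block only:
#
#   model_cfg = ByoModelCfg(blocks=(...,), attn_layer='se', attn_kwargs=dict(rd_ratio=0.25))
#   block_cfg = ByoBlockCfg(type='bottle', d=1, c=256, attn_layer='')  # disables attn here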
def create_byob_stages(
cfg: ByoModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, Any],
feat_size: Optional[int] = None,
layers: Optional[LayerFn] = None,
block_kwargs_fn: Optional[Callable] = update_block_kwargs):
layers = layers or LayerFn()
feature_info = []
block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks]
depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs]
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
dilation = 1
net_stride = stem_feat['reduction']
prev_chs = stem_feat['num_chs']
prev_feat = stem_feat
stages = []
for stage_idx, stage_block_cfgs in enumerate(block_cfgs):
stride = stage_block_cfgs[0].s
if stride != 1 and prev_feat:
feature_info.append(prev_feat)
if net_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
net_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
blocks = []
for block_idx, block_cfg in enumerate(stage_block_cfgs):
out_chs = make_divisible(block_cfg.c * cfg.width_factor)
group_size = block_cfg.gs
if isinstance(group_size, Callable):
group_size = group_size(out_chs, block_idx)
block_kwargs = dict( # Blocks used in this model must accept these arguments
in_chs=prev_chs,
out_chs=out_chs,
stride=stride if block_idx == 0 else 1,
dilation=(first_dilation, dilation),
group_size=group_size,
bottle_ratio=block_cfg.br,
downsample=cfg.downsample,
drop_path_rate=dpr[stage_idx][block_idx],
layers=layers,
)
if block_cfg.type in ('self_attn',):
# add feat_size arg for blocks that support/need it
block_kwargs['feat_size'] = feat_size
block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg)
blocks += [create_block(block_cfg.type, **block_kwargs)]
first_dilation = dilation
prev_chs = out_chs
if stride > 1 and block_idx == 0:
feat_size = reduce_feat_size(feat_size, stride)
stages += [nn.Sequential(*blocks)]
prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')
feature_info.append(prev_feat)
return nn.Sequential(*stages), feature_info
def get_layer_fns(cfg: ByoModelCfg):
act = get_act_layer(cfg.act_layer)
norm_act = convert_norm_act(norm_layer=cfg.norm_layer, act_layer=act)
conv_norm_act = partial(ConvBnAct, norm_layer=cfg.norm_layer, act_layer=act)
attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None
self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None
layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn)
return layer_fn
class ByobNet(nn.Module):
""" 'Bring-your-own-blocks' Net
A flexible network backbone that allows building model stem + blocks via
dataclass cfg definition w/ factory functions for module instantiation.
Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act).
"""
def __init__(self, cfg: ByoModelCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32,
zero_init_last=True, img_size=None, drop_rate=0., drop_path_rate=0.):
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
layers = get_layer_fns(cfg)
if cfg.fixed_input_size:
assert img_size is not None, 'img_size argument is required for fixed input size model'
feat_size = to_2tuple(img_size) if img_size is not None else None
self.feature_info = []
stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor))
self.stem, stem_feat = create_byob_stem(in_chans, stem_chs, cfg.stem_type, cfg.stem_pool, layers=layers)
self.feature_info.extend(stem_feat[:-1])
feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction'])
self.stages, stage_feat = create_byob_stages(
cfg, drop_path_rate, output_stride, stem_feat[-1], layers=layers, feat_size=feat_size)
self.feature_info.extend(stage_feat[:-1])
prev_chs = stage_feat[-1]['num_chs']
if cfg.num_features:
self.num_features = int(round(cfg.width_factor * cfg.num_features))
self.final_conv = layers.conv_norm_act(prev_chs, self.num_features, 1)
else:
self.num_features = prev_chs
self.final_conv = nn.Identity()
self.feature_info += [
dict(num_chs=self.num_features, reduction=stage_feat[-1]['reduction'], module='final_conv')]
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
# init weights
named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.final_conv(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
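# Minimal construction sketch (illustrative; `model_cfgs` is the dict of configs
# defined earlier in this file, and instantiation only works once the whole module,
# including `_init_weights` below, has been imported):
#
#   model = ByobNet(model_cfgs['gernet_s'], num_classes=10)
#   logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 10)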
def _init_weights(module, name='', zero_init_last=False):
if isinstance(module, nn.Conv2d):
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, mean=0.0, std=0.01)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.BatchNorm2d):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights(zero_init_last=zero_init_last)
def _create_byobnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ByobNet, variant, pretrained,
default_cfg=default_cfgs[variant],
model_cfg=model_cfgs[variant],
feature_cfg=dict(flatten_sequential=True),
**kwargs)
| 42.170103
| 134
| 0.626364
|
import math
from dataclasses import dataclass, field, replace
from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg, named_apply
from .layers import ClassifierHead, ConvBnAct, BatchNormAct2d, DropPath, AvgPool2dSame, \
create_conv2d, get_act_layer, convert_norm_act, get_attn, make_divisible, to_2tuple
from .registry import register_model
__all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block']
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = {
'gernet_s': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_s-756b4751.pth'),
'gernet_m': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_m-0873c53a.pth'),
'gernet_l': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_l-f31e2e8d.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
'repvgg_a2': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_a2-c1ee6d2b.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b0': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b0-80ac3f1b.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b1': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1-77ca2989.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b1g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1g4-abde5d92.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b2': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2-25b7494e.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b2g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2g4-165a85f2.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b3': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3-199bc50d.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b3g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3g4-73c370bf.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'resnet51q': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth',
first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8),
test_input_size=(3, 288, 288), crop_pct=1.0),
'resnet61q': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8),
test_input_size=(3, 288, 288), crop_pct=1.0, interpolation='bicubic'),
'resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'seresnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'eca_resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'bat_resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic',
min_input_size=(3, 256, 256)),
'resnet32ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'resnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'seresnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'eca_resnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnet50t': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnext50ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
}
@dataclass
class ByoBlockCfg:
type: Union[str, nn.Module]
d: int
c: int
s: int = 2
gs: Optional[Union[int, Callable]] = None
br: float = 1.
attn_layer: Optional[str] = None
attn_kwargs: Optional[Dict[str, Any]] = None
self_attn_layer: Optional[str] = None
self_attn_kwargs: Optional[Dict[str, Any]] = None
block_kwargs: Optional[Dict[str, Any]] = None
@dataclass
class ByoModelCfg:
blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...]
downsample: str = 'conv1x1'
stem_type: str = '3x3'
stem_pool: Optional[str] = 'maxpool'
stem_chs: int = 32
width_factor: float = 1.0
num_features: int = 0
zero_init_last: bool = True
fixed_input_size: bool = False
act_layer: str = 'relu'
norm_layer: str = 'batchnorm'
attn_layer: Optional[str] = None
attn_kwargs: dict = field(default_factory=lambda: dict())
self_attn_layer: Optional[str] = None
self_attn_kwargs: dict = field(default_factory=lambda: dict())
block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict())
def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0):
c = (64, 128, 256, 512)
group_size = 0
if groups > 0:
group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0
bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)])
return bcfg
def interleave_blocks(
types: Tuple[str, str], d, every: Union[int, List[int]] = 1, first: bool = False, **kwargs
) -> Tuple[ByoBlockCfg, ...]:
assert len(types) == 2
if isinstance(every, int):
every = list(range(0 if first else every, d, every + 1))
if not every:
every = [d - 1]
    every = set(every)  # set membership for the index lookups below
blocks = []
for i in range(d):
block_type = types[1] if i in every else types[0]
blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)]
return tuple(blocks)
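# Illustration (hypothetical arguments): interleave_blocks(('bottle', 'self_attn'), d=4, every=1)
# computes substitution indices [1, 3] and yields the per-block type pattern
# bottle, self_attn, bottle, self_attn.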
model_cfgs = dict(
gernet_l=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.),
),
stem_chs=32,
stem_pool=None,
num_features=2560,
),
gernet_m=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.),
),
stem_chs=32,
stem_pool=None,
num_features=2560,
),
gernet_s=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.),
),
stem_chs=13,
stem_pool=None,
num_features=1920,
),
repvgg_a2=ByoModelCfg(
blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)),
stem_type='rep',
stem_chs=64,
),
repvgg_b0=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)),
stem_type='rep',
stem_chs=64,
),
repvgg_b1=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b1g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_b2=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b2g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_b3=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b3g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4),
stem_type='rep',
stem_chs=64,
),
resnet51q=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),
),
stem_chs=128,
stem_type='quad2',
stem_pool=None,
num_features=2048,
act_layer='silu',
),
resnet61q=ByoModelCfg(
blocks=(
ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),
),
stem_chs=128,
stem_type='quad',
stem_pool=None,
num_features=2048,
act_layer='silu',
block_kwargs=dict(extra_conv=True),
),
resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
),
gcresnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='gca',
),
seresnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='se',
),
eca_resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='eca',
),
bat_resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='bat',
attn_kwargs=dict(block_size=8)
),
resnet32ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=0,
act_layer='silu',
),
resnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
),
gcresnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='gca',
),
seresnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='se',
),
eca_resnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='eca',
),
gcresnet50t=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
attn_layer='gca',
),
gcresnext50ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='gca',
),
)
@register_model
def gernet_l(pretrained=False, **kwargs):
return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs)
@register_model
def gernet_m(pretrained=False, **kwargs):
return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs)
@register_model
def gernet_s(pretrained=False, **kwargs):
return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs)
@register_model
def repvgg_a2(pretrained=False, **kwargs):
return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b0(pretrained=False, **kwargs):
return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b1(pretrained=False, **kwargs):
return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b1g4(pretrained=False, **kwargs):
return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b2(pretrained=False, **kwargs):
return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b2g4(pretrained=False, **kwargs):
return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b3(pretrained=False, **kwargs):
return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b3g4(pretrained=False, **kwargs):
return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs)
@register_model
def resnet51q(pretrained=False, **kwargs):
return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs)
@register_model
def resnet61q(pretrained=False, **kwargs):
return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs)
@register_model
def resnext26ts(pretrained=False, **kwargs):
return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnext26ts(pretrained=False, **kwargs):
return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs)
@register_model
def seresnext26ts(pretrained=False, **kwargs):
return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs)
@register_model
def eca_resnext26ts(pretrained=False, **kwargs):
return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def bat_resnext26ts(pretrained=False, **kwargs):
return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def resnet32ts(pretrained=False, **kwargs):
return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs)
@register_model
def resnet33ts(pretrained=False, **kwargs):
return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnet33ts(pretrained=False, **kwargs):
return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs)
@register_model
def seresnet33ts(pretrained=False, **kwargs):
return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs)
@register_model
def eca_resnet33ts(pretrained=False, **kwargs):
return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnet50t(pretrained=False, **kwargs):
return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs)
@register_model
def gcresnext50ts(pretrained=False, **kwargs):
return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs)
def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]:
if not isinstance(stage_blocks_cfg, Sequence):
stage_blocks_cfg = (stage_blocks_cfg,)
block_cfgs = []
for i, cfg in enumerate(stage_blocks_cfg):
block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)]
return block_cfgs
def num_groups(group_size, channels):
if not group_size:
return 1
else:
assert channels % group_size == 0
return channels // group_size
@dataclass
class LayerFn:
conv_norm_act: Callable = ConvBnAct
norm_act: Callable = BatchNormAct2d
act: Callable = nn.ReLU
attn: Optional[Callable] = None
self_attn: Optional[Callable] = None
class DownsampleAvg(nn.Module):
def __init__(self, in_chs, out_chs, stride=1, dilation=1, apply_act=False, layers: LayerFn = None):
super(DownsampleAvg, self).__init__()
layers = layers or LayerFn()
avg_stride = stride if dilation == 1 else 1
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
else:
self.pool = nn.Identity()
self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act)
def forward(self, x):
return self.conv(self.pool(x))
def create_downsample(downsample_type, layers: LayerFn, **kwargs):
if downsample_type == 'avg':
return DownsampleAvg(**kwargs)
else:
return layers.conv_norm_act(kwargs.pop('in_chs'), kwargs.pop('out_chs'), kernel_size=1, **kwargs)
class BasicBlock(nn.Module):
def __init__(
self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), group_size=None, bottle_ratio=1.0,
downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None,
drop_path_rate=0.):
super(BasicBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0])
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_kxk.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_kxk(x)
x = self.conv2_kxk(x)
x = self.attn(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class BottleneckBlock(nn.Module):
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None,
downsample='avg', attn_last=False, linear_out=False, extra_conv=False, layers: LayerFn = None,
drop_block=None, drop_path_rate=0.):
super(BottleneckBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
if extra_conv:
self.conv2b_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block)
else:
self.conv2b_kxk = nn.Identity()
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv3_1x1.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.conv2_kxk(x)
x = self.conv2b_kxk(x)
x = self.attn(x)
x = self.conv3_1x1(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class DarkBlock(nn.Module):
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None,
drop_path_rate=0.):
super(DarkBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_kxk.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.attn(x)
x = self.conv2_kxk(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class EdgeBlock(nn.Module):
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='avg', attn_last=False, linear_out=False, layers: LayerFn = None,
drop_block=None, drop_path_rate=0.):
super(EdgeBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_kxk = layers.conv_norm_act(
in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_1x1.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_kxk(x)
x = self.attn(x)
x = self.conv2_1x1(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class RepVggBlock(nn.Module):
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='', layers: LayerFn = None, drop_block=None, drop_path_rate=0.):
super(RepVggBlock, self).__init__()
layers = layers or LayerFn()
groups = num_groups(group_size, in_chs)
use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1]
self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None
self.conv_kxk = layers.conv_norm_act(
in_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block, apply_act=False)
self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False)
self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity()
self.act = layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
nn.init.normal_(m.weight, .1, .1)
nn.init.normal_(m.bias, 0, .1)
if hasattr(self.attn, 'reset_parameters'):
self.attn.reset_parameters()
def forward(self, x):
if self.identity is None:
x = self.conv_1x1(x) + self.conv_kxk(x)
else:
identity = self.identity(x)
x = self.conv_1x1(x) + self.conv_kxk(x)
x = self.drop_path(x)
x = x + identity
x = self.attn(x)
x = self.act(x)
return x
class SelfAttnBlock(nn.Module):
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None,
downsample='avg', extra_conv=False, linear_out=False, post_attn_na=True, feat_size=None,
layers: LayerFn = None, drop_block=None, drop_path_rate=0.):
super(SelfAttnBlock, self).__init__()
assert layers is not None
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
if extra_conv:
self.conv2_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
stride = 1
else:
self.conv2_kxk = nn.Identity()
opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size)
self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs)
self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity()
self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv3_1x1.bn.weight)
if hasattr(self.self_attn, 'reset_parameters'):
self.self_attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.conv2_kxk(x)
x = self.self_attn(x)
x = self.post_attn(x)
x = self.conv3_1x1(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
_block_registry = dict(
basic=BasicBlock,
bottle=BottleneckBlock,
dark=DarkBlock,
edge=EdgeBlock,
rep=RepVggBlock,
self_attn=SelfAttnBlock,
)
def register_block(block_type: str, block_fn: nn.Module):
_block_registry[block_type] = block_fn
def create_block(block: Union[str, nn.Module], **kwargs):
if isinstance(block, (nn.Module, partial)):
return block(**kwargs)
    assert block in _block_registry, f'Unknown block type ({block})'
return _block_registry[block](**kwargs)
class Stem(nn.Sequential):
def __init__(self, in_chs, out_chs, kernel_size=3, stride=4, pool='maxpool',
num_rep=3, num_act=None, chs_decay=0.5, layers: LayerFn = None):
super().__init__()
assert stride in (2, 4)
layers = layers or LayerFn()
if isinstance(out_chs, (list, tuple)):
num_rep = len(out_chs)
stem_chs = out_chs
else:
stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1]
self.stride = stride
self.feature_info = []
prev_feat = ''
stem_strides = [2] + [1] * (num_rep - 1)
if stride == 4 and not pool:
stem_strides[-1] = 2
num_act = num_rep if num_act is None else num_act
stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act
prev_chs = in_chs
curr_stride = 1
for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)):
layer_fn = layers.conv_norm_act if na else create_conv2d
conv_name = f'conv{i + 1}'
if i > 0 and s > 1:
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s))
prev_chs = ch
curr_stride *= s
prev_feat = conv_name
if pool and 'max' in pool.lower():
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
self.add_module('pool', nn.MaxPool2d(3, 2, 1))
curr_stride *= 2
prev_feat = 'pool'
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
assert curr_stride == stride
def create_byob_stem(in_chs, out_chs, stem_type='', pool_type='', feat_prefix='stem', layers: LayerFn = None):
layers = layers or LayerFn()
assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', '7x7', '3x3')
if 'quad' in stem_type:
# based on NFNet stem, stack of 4 3x3 convs
num_act = 2 if 'quad2' in stem_type else None
stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers)
elif 'tiered' in stem_type:
# 3x3 stack of 3 convs as in my ResNet-T
stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers)
elif 'deep' in stem_type:
# 3x3 stack of 3 convs as in ResNet-D
stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers)
elif 'rep' in stem_type:
stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers)
elif '7x7' in stem_type:
# 7x7 stem conv as in ResNet
if pool_type:
stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers)
else:
stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2)
else:
# 3x3 stem conv as in RegNet is the default
if pool_type:
stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers)
else:
stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2)
if isinstance(stem, Stem):
feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info]
else:
feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix)]
return stem, feature_info
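# Illustrative feature_info for the default stem with pooling (values derived from the
# logic above, not taken from the original source):
#
#   stem, feat = create_byob_stem(3, 64, stem_type='', pool_type='maxpool')
#   # feat == [{'num_chs': 64, 'reduction': 2, 'module': 'stem.conv1'},
#   #          {'num_chs': 64, 'reduction': 4, 'module': 'stem.pool'}]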
def reduce_feat_size(feat_size, stride=2):
return None if feat_size is None else tuple([s // stride for s in feat_size])
def override_kwargs(block_kwargs, model_kwargs):
out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs
return out_kwargs or {} # make sure None isn't returned
def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg, ):
layer_fns = block_kwargs['layers']
attn_set = block_cfg.attn_layer is not None
if attn_set or block_cfg.attn_kwargs is not None:
if attn_set and not block_cfg.attn_layer:
attn_layer = None
else:
attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs)
attn_layer = block_cfg.attn_layer or model_cfg.attn_layer
attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None
layer_fns = replace(layer_fns, attn=attn_layer)
self_attn_set = block_cfg.self_attn_layer is not None
if self_attn_set or block_cfg.self_attn_kwargs is not None:
if self_attn_set and not block_cfg.self_attn_layer:
self_attn_layer = None
else:
self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs)
self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer
self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \
if self_attn_layer is not None else None
layer_fns = replace(layer_fns, self_attn=self_attn_layer)
block_kwargs['layers'] = layer_fns
block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs))
def create_byob_stages(
cfg: ByoModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, Any],
feat_size: Optional[int] = None,
layers: Optional[LayerFn] = None,
block_kwargs_fn: Optional[Callable] = update_block_kwargs):
layers = layers or LayerFn()
feature_info = []
block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks]
depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs]
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
dilation = 1
net_stride = stem_feat['reduction']
prev_chs = stem_feat['num_chs']
prev_feat = stem_feat
stages = []
for stage_idx, stage_block_cfgs in enumerate(block_cfgs):
stride = stage_block_cfgs[0].s
if stride != 1 and prev_feat:
feature_info.append(prev_feat)
if net_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
net_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
blocks = []
for block_idx, block_cfg in enumerate(stage_block_cfgs):
out_chs = make_divisible(block_cfg.c * cfg.width_factor)
group_size = block_cfg.gs
if isinstance(group_size, Callable):
group_size = group_size(out_chs, block_idx)
block_kwargs = dict(
in_chs=prev_chs,
out_chs=out_chs,
stride=stride if block_idx == 0 else 1,
dilation=(first_dilation, dilation),
group_size=group_size,
bottle_ratio=block_cfg.br,
downsample=cfg.downsample,
drop_path_rate=dpr[stage_idx][block_idx],
layers=layers,
)
if block_cfg.type in ('self_attn',):
block_kwargs['feat_size'] = feat_size
block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg)
blocks += [create_block(block_cfg.type, **block_kwargs)]
first_dilation = dilation
prev_chs = out_chs
if stride > 1 and block_idx == 0:
feat_size = reduce_feat_size(feat_size, stride)
stages += [nn.Sequential(*blocks)]
prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')
feature_info.append(prev_feat)
return nn.Sequential(*stages), feature_info
def get_layer_fns(cfg: ByoModelCfg):
act = get_act_layer(cfg.act_layer)
norm_act = convert_norm_act(norm_layer=cfg.norm_layer, act_layer=act)
conv_norm_act = partial(ConvBnAct, norm_layer=cfg.norm_layer, act_layer=act)
attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None
self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None
layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn)
return layer_fn
class ByobNet(nn.Module):
def __init__(self, cfg: ByoModelCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32,
zero_init_last=True, img_size=None, drop_rate=0., drop_path_rate=0.):
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
layers = get_layer_fns(cfg)
if cfg.fixed_input_size:
assert img_size is not None, 'img_size argument is required for fixed input size model'
feat_size = to_2tuple(img_size) if img_size is not None else None
self.feature_info = []
stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor))
self.stem, stem_feat = create_byob_stem(in_chans, stem_chs, cfg.stem_type, cfg.stem_pool, layers=layers)
self.feature_info.extend(stem_feat[:-1])
feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction'])
self.stages, stage_feat = create_byob_stages(
cfg, drop_path_rate, output_stride, stem_feat[-1], layers=layers, feat_size=feat_size)
self.feature_info.extend(stage_feat[:-1])
prev_chs = stage_feat[-1]['num_chs']
if cfg.num_features:
self.num_features = int(round(cfg.width_factor * cfg.num_features))
self.final_conv = layers.conv_norm_act(prev_chs, self.num_features, 1)
else:
self.num_features = prev_chs
self.final_conv = nn.Identity()
self.feature_info += [
dict(num_chs=self.num_features, reduction=stage_feat[-1]['reduction'], module='final_conv')]
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.final_conv(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def _init_weights(module, name='', zero_init_last=False):
if isinstance(module, nn.Conv2d):
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, mean=0.0, std=0.01)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.BatchNorm2d):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights(zero_init_last=zero_init_last)
def _create_byobnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ByobNet, variant, pretrained,
default_cfg=default_cfgs[variant],
model_cfg=model_cfgs[variant],
feature_cfg=dict(flatten_sequential=True),
**kwargs)
| true
| true
|
f709a8d7e31687d2292fa97fcf708ceecab9433b
| 285
|
py
|
Python
|
src/14/14682.py
|
youngdaLee/Baekjoon
|
7d858d557dbbde6603fe4e8af2891c2b0e1940c0
|
[
"MIT"
] | 11
|
2020-09-20T15:17:11.000Z
|
2022-03-17T12:43:33.000Z
|
src/14/14682.py
|
youngdaLee/Baekjoon
|
7d858d557dbbde6603fe4e8af2891c2b0e1940c0
|
[
"MIT"
] | 3
|
2021-10-30T07:51:36.000Z
|
2022-03-09T05:19:23.000Z
|
src/14/14682.py
|
youngdaLee/Baekjoon
|
7d858d557dbbde6603fe4e8af2891c2b0e1940c0
|
[
"MIT"
] | 13
|
2021-01-21T03:19:08.000Z
|
2022-03-28T10:44:58.000Z
|
"""
14682. Shifty Sum
작성자: xCrypt0r
언어: Python 3
사용 메모리: 29,380 KB
소요 시간: 60 ms
해결 날짜: 2020년 9월 20일
"""
def main():
N, k = [int(input()) for _ in range(2)]
res = N
for _ in range(k):
N *= 10
res += N
print(res)
if __name__ == '__main__':
main()
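# Worked example: for N = 12 and k = 3 the loop accumulates
# 12 + 120 + 1200 + 12000 = 13332, the "shifty sum" of 12 shifted 3 times.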
| 12.954545
| 43
| 0.526316
|
def main():
N, k = [int(input()) for _ in range(2)]
res = N
for _ in range(k):
N *= 10
res += N
print(res)
if __name__ == '__main__':
main()
| true
| true
|
f709a967b1234667309531a39b0693c8f8ce9bc0
| 5,348
|
py
|
Python
|
sarpy/io/general/nitf_elements/graphics.py
|
pressler-vsc/sarpy
|
fa6c951c42b9a7d9df2edfa53c771494cb0246fb
|
[
"MIT"
] | 1
|
2021-02-04T08:44:18.000Z
|
2021-02-04T08:44:18.000Z
|
sarpy/io/general/nitf_elements/graphics.py
|
pressler-vsc/sarpy
|
fa6c951c42b9a7d9df2edfa53c771494cb0246fb
|
[
"MIT"
] | null | null | null |
sarpy/io/general/nitf_elements/graphics.py
|
pressler-vsc/sarpy
|
fa6c951c42b9a7d9df2edfa53c771494cb0246fb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
The graphics header element definition.
"""
from .base import NITFElement, UserHeaderType, _IntegerDescriptor,\
_StringDescriptor, _StringEnumDescriptor, _NITFElementDescriptor
from .security import NITFSecurityTags
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
class GraphicsSegmentHeader(NITFElement):
"""
Graphics segment subheader - see standards document MIL-STD-2500C for more
information.
"""
_ordering = (
'SY', 'SID', 'SNAME', 'Security', 'ENCRYP', 'SFMT',
'SSTRUCT', 'SDLVL', 'SALVL', 'SLOC', 'SBND1',
'SCOLOR', 'SBND2', 'SRES2', 'UserHeader')
_lengths = {
'SY': 2, 'SID': 10, 'SNAME': 20, 'ENCRYP': 1,
'SFMT': 1, 'SSTRUCT': 13, 'SDLVL': 3, 'SALVL': 3,
'SLOC': 10, 'SBND1': 10, 'SCOLOR': 1, 'SBND2': 10,
'SRES2': 2}
SY = _StringEnumDescriptor(
'SY', True, 2, {'SY', }, default_value='SY',
docstring='File part type.') # type: str
SID = _StringDescriptor(
'SID', True, 10, default_value='',
docstring='Graphic Identifier. This field shall contain a valid alphanumeric identification code '
'associated with the graphic. The valid codes are determined by the application.') # type: str
SNAME = _StringDescriptor(
'SNAME', True, 20, default_value='',
docstring='Graphic name. This field shall contain an alphanumeric name for the graphic.') # type: str
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags, default_args={},
docstring='The security tags.') # type: NITFSecurityTags
ENCRYP = _StringEnumDescriptor(
'ENCRYP', True, 1, {'0'}, default_value='0',
docstring='Encryption.') # type: str
SFMT = _StringDescriptor(
'SFMT', True, 1, default_value='C',
docstring='Graphic Type. This field shall contain a valid indicator of the '
'representation type of the graphic.') # type: str
SSTRUCT = _IntegerDescriptor(
'SSTRUCT', True, 13, default_value=0,
docstring='Reserved for Future Use.') # type: int
SDLVL = _IntegerDescriptor(
'SDLVL', True, 3, default_value=1,
docstring='Graphic Display Level. This field shall contain a valid value that indicates '
'the graphic display level of the graphic relative to other displayed file '
'components in a composite display. The valid values are :code:`1-999`. '
'The display level of each displayable file component (image or graphic) '
'within a file shall be unique.') # type: int
SALVL = _IntegerDescriptor(
'SALVL', True, 3, default_value=0,
docstring='Graphic Attachment Level. This field shall contain a valid value '
'that indicates the attachment level of the graphic. Valid values for '
'this field are 0 and the display level value of any other '
'image or graphic in the file.') # type: int
SLOC = _IntegerDescriptor(
'SLOC', True, 10, default_value=0,
docstring='Graphic Location. The graphics location is specified by providing the location '
                  'of the graphic’s origin point relative to the position (location) of the CCS, image, '
'or graphic to which it is attached. This field shall contain the graphic location '
'offset from the `ILOC` or `SLOC` value of the CCS, image, or graphic to which the graphic '
'is attached or from the origin of the CCS when the graphic is unattached (`SALVL = 0`). '
'A row and column value of :code:`0` indicates no offset. Positive row and column values indicate '
'offsets down and to the right, while negative row and column values indicate '
'offsets up and to the left.') # type: int
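    # Illustration (per MIL-STD-2500C; not part of the original source): the 10-digit
    # location encodes a row/column pair as RRRRRCCCCC, so an SLOC of 0005000100 places
    # the graphic origin 50 rows down and 100 columns right of its attachment point.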
SBND1 = _IntegerDescriptor(
'SBND1', True, 10, default_value=0,
docstring='First Graphic Bound Location. This field shall contain an ordered pair of '
'integers defining a location in Cartesian coordinates for use with CGM graphics. It is '
'the upper left corner of the bounding box for the CGM graphic.') # type: int
SCOLOR = _StringEnumDescriptor(
'SCOLOR', True, 1, {'C', 'M'}, default_value='M',
docstring='Graphic Color. If `SFMT = C`, this field shall contain a :code:`C` if the CGM contains any '
'color pieces or an :code:`M` if it is monochrome (i.e., black, '
'white, or levels of grey).') # type: str
SBND2 = _IntegerDescriptor(
'SBND2', True, 10, default_value=0,
docstring='Second Graphic Bound Location. This field shall contain an ordered pair of '
'integers defining a location in Cartesian coordinates for use with CGM graphics. '
'It is the lower right corner of the bounding box for the CGM graphic.') # type: int
SRES2 = _IntegerDescriptor(
'SRES2', True, 2, default_value=0,
docstring='Reserved for Future Use.') # type: int
UserHeader = _NITFElementDescriptor(
'UserHeader', True, UserHeaderType, default_args={},
docstring='User defined header.') # type: UserHeaderType
| 56.294737
| 117
| 0.631077
|
from .base import NITFElement, UserHeaderType, _IntegerDescriptor,\
_StringDescriptor, _StringEnumDescriptor, _NITFElementDescriptor
from .security import NITFSecurityTags
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
class GraphicsSegmentHeader(NITFElement):
_ordering = (
'SY', 'SID', 'SNAME', 'Security', 'ENCRYP', 'SFMT',
'SSTRUCT', 'SDLVL', 'SALVL', 'SLOC', 'SBND1',
'SCOLOR', 'SBND2', 'SRES2', 'UserHeader')
_lengths = {
'SY': 2, 'SID': 10, 'SNAME': 20, 'ENCRYP': 1,
'SFMT': 1, 'SSTRUCT': 13, 'SDLVL': 3, 'SALVL': 3,
'SLOC': 10, 'SBND1': 10, 'SCOLOR': 1, 'SBND2': 10,
'SRES2': 2}
SY = _StringEnumDescriptor(
'SY', True, 2, {'SY', }, default_value='SY',
docstring='File part type.')
SID = _StringDescriptor(
'SID', True, 10, default_value='',
docstring='Graphic Identifier. This field shall contain a valid alphanumeric identification code '
'associated with the graphic. The valid codes are determined by the application.')
SNAME = _StringDescriptor(
'SNAME', True, 20, default_value='',
docstring='Graphic name. This field shall contain an alphanumeric name for the graphic.')
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags, default_args={},
docstring='The security tags.')
ENCRYP = _StringEnumDescriptor(
'ENCRYP', True, 1, {'0'}, default_value='0',
docstring='Encryption.')
SFMT = _StringDescriptor(
'SFMT', True, 1, default_value='C',
docstring='Graphic Type. This field shall contain a valid indicator of the '
'representation type of the graphic.')
SSTRUCT = _IntegerDescriptor(
'SSTRUCT', True, 13, default_value=0,
docstring='Reserved for Future Use.')
SDLVL = _IntegerDescriptor(
'SDLVL', True, 3, default_value=1,
docstring='Graphic Display Level. This field shall contain a valid value that indicates '
'the graphic display level of the graphic relative to other displayed file '
'components in a composite display. The valid values are :code:`1-999`. '
'The display level of each displayable file component (image or graphic) '
'within a file shall be unique.')
SALVL = _IntegerDescriptor(
'SALVL', True, 3, default_value=0,
docstring='Graphic Attachment Level. This field shall contain a valid value '
'that indicates the attachment level of the graphic. Valid values for '
'this field are 0 and the display level value of any other '
'image or graphic in the file.')
SLOC = _IntegerDescriptor(
'SLOC', True, 10, default_value=0,
docstring='Graphic Location. The graphics location is specified by providing the location '
                  'of the graphic’s origin point relative to the position (location) of the CCS, image, '
'or graphic to which it is attached. This field shall contain the graphic location '
'offset from the `ILOC` or `SLOC` value of the CCS, image, or graphic to which the graphic '
'is attached or from the origin of the CCS when the graphic is unattached (`SALVL = 0`). '
'A row and column value of :code:`0` indicates no offset. Positive row and column values indicate '
'offsets down and to the right, while negative row and column values indicate '
'offsets up and to the left.')
SBND1 = _IntegerDescriptor(
'SBND1', True, 10, default_value=0,
docstring='First Graphic Bound Location. This field shall contain an ordered pair of '
'integers defining a location in Cartesian coordinates for use with CGM graphics. It is '
'the upper left corner of the bounding box for the CGM graphic.')
SCOLOR = _StringEnumDescriptor(
'SCOLOR', True, 1, {'C', 'M'}, default_value='M',
docstring='Graphic Color. If `SFMT = C`, this field shall contain a :code:`C` if the CGM contains any '
'color pieces or an :code:`M` if it is monochrome (i.e., black, '
'white, or levels of grey).')
SBND2 = _IntegerDescriptor(
'SBND2', True, 10, default_value=0,
docstring='Second Graphic Bound Location. This field shall contain an ordered pair of '
'integers defining a location in Cartesian coordinates for use with CGM graphics. '
'It is the lower right corner of the bounding box for the CGM graphic.')
SRES2 = _IntegerDescriptor(
'SRES2', True, 2, default_value=0,
docstring='Reserved for Future Use.')
UserHeader = _NITFElementDescriptor(
'UserHeader', True, UserHeaderType, default_args={},
docstring='User defined header.')
| true
| true
|
f709a9ab548efdde5cce699085047d8dc56830d2
| 7,366
|
py
|
Python
|
pyrseas/dbobject/column.py
|
andreypopp/Pyrseas
|
5fadc91bfd1e3e430e8f53d434df18b9abea3cb0
|
[
"BSD-3-Clause"
] | 1
|
2015-03-16T09:10:47.000Z
|
2015-03-16T09:10:47.000Z
|
pyrseas/dbobject/column.py
|
andreypopp/Pyrseas
|
5fadc91bfd1e3e430e8f53d434df18b9abea3cb0
|
[
"BSD-3-Clause"
] | null | null | null |
pyrseas/dbobject/column.py
|
andreypopp/Pyrseas
|
5fadc91bfd1e3e430e8f53d434df18b9abea3cb0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
pyrseas.column
~~~~~~~~~~~~~~
This module defines two classes: Column derived from
DbSchemaObject and ColumnDict derived from DbObjectDict.
"""
from pyrseas.dbobject import DbObjectDict, DbSchemaObject, quote_id
class Column(DbSchemaObject):
"A table column definition"
keylist = ['schema', 'table']
def to_map(self):
"""Convert a column to a YAML-suitable format
:return: dictionary
"""
if hasattr(self, 'dropped'):
return None
dct = self._base_map()
del dct['number'], dct['name'], dct['_table']
if hasattr(self, 'inherited'):
dct['inherited'] = (self.inherited != 0)
return {self.name: dct}
def add(self):
"""Return a string to specify the column in a CREATE or ALTER TABLE
:return: partial SQL statement
"""
stmt = "%s %s" % (quote_id(self.name), self.type)
if hasattr(self, 'not_null'):
stmt += ' NOT NULL'
if hasattr(self, 'default'):
if not self.default.startswith('nextval'):
stmt += ' DEFAULT ' + self.default
return (stmt, '' if not hasattr(self, 'description')
else self.comment())
def comment(self):
"""Return a SQL COMMENT statement for the column
:return: SQL statement
"""
return "COMMENT ON COLUMN %s.%s IS %s" % (
self._table.qualname(), self.name, self._comment_text())
def drop(self):
"""Return string to drop the column via ALTER TABLE
:return: SQL statement
"""
if hasattr(self, 'dropped'):
return ""
if hasattr(self, '_table'):
(comptype, objtype) = (self._table.objtype, 'COLUMN')
compname = self._table.qualname()
else:
# TODO: this is only a PG 9.1 feature, so more is required
(comptype, objtype) = ('TYPE', 'ATTRIBUTE')
compname = self.table
return "ALTER %s %s DROP %s %s" % (comptype, compname, objtype,
self.name)
def rename(self, newname):
"""Return SQL statement to RENAME the column
:param newname: the new name of the object
:return: SQL statement
"""
stmt = "ALTER TABLE %s RENAME COLUMN %s TO %s" % (
self._table.qualname(), self.name, newname)
self.name = newname
return stmt
def set_sequence_default(self):
"""Return SQL statements to set a nextval() DEFAULT
:return: list of SQL statements
"""
stmts = []
pth = self.set_search_path()
if pth:
stmts.append(pth)
stmts.append("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT %s" % (
quote_id(self.table), quote_id(self.name), self.default))
return stmts
def diff_map(self, incol):
"""Generate SQL to transform an existing column
        :param incol: a YAML map defining the new column
:return: list of partial SQL statements
Compares the column to an input column and generates partial
SQL statements to transform it into the one represented by the
input.
"""
stmts = []
base = "ALTER COLUMN %s " % self.name
# check NOT NULL
if not hasattr(self, 'not_null') and hasattr(incol, 'not_null'):
stmts.append(base + "SET NOT NULL")
if hasattr(self, 'not_null') and not hasattr(incol, 'not_null'):
stmts.append(base + "DROP NOT NULL")
# check data types
if not hasattr(self, 'type'):
raise ValueError("Column '%s' missing datatype" % self.name)
if not hasattr(incol, 'type'):
raise ValueError("Input column '%s' missing datatype" % incol.name)
if self.type != incol.type:
# validate type conversion?
stmts.append(base + "TYPE %s" % incol.type)
# check DEFAULTs
if not hasattr(self, 'default') and hasattr(incol, 'default'):
stmts.append(base + "SET DEFAULT %s" % incol.default)
if hasattr(self, 'default') and not hasattr(incol, 'default'):
stmts.append(base + "DROP DEFAULT")
return (", ".join(stmts), self.diff_description(incol))
class ColumnDict(DbObjectDict):
"The collection of columns in tables in a database"
cls = Column
query = \
"""SELECT nspname AS schema, relname AS table, attname AS name,
attnum AS number, format_type(atttypid, atttypmod) AS type,
attnotnull AS not_null, attinhcount AS inherited,
pg_get_expr(adbin, adrelid) AS default,
attisdropped AS dropped,
col_description(c.oid, attnum) AS description
FROM pg_attribute JOIN pg_class c ON (attrelid = c.oid)
JOIN pg_namespace ON (relnamespace = pg_namespace.oid)
LEFT JOIN pg_attrdef ON (attrelid = pg_attrdef.adrelid
AND attnum = pg_attrdef.adnum)
WHERE relkind in ('c', 'r', 'f')
AND (nspname != 'pg_catalog'
AND nspname != 'information_schema')
AND attnum > 0
ORDER BY nspname, relname, attnum"""
def _from_catalog(self):
"""Initialize the dictionary of columns by querying the catalogs"""
for col in self.fetch():
sch, tbl = col.key()
if (sch, tbl) not in self:
self[(sch, tbl)] = []
self[(sch, tbl)].append(col)
def from_map(self, table, incols):
"""Initialize the dictionary of columns by converting the input list
:param table: table or type owning the columns/attributes
:param incols: YAML list defining the columns
"""
if not incols:
raise ValueError("Table '%s' has no columns" % table.name)
cols = self[(table.schema, table.name)] = []
for col in incols:
for key in list(col.keys()):
if isinstance(col[key], dict):
arg = col[key]
else:
arg = {'type': col[key]}
cols.append(Column(schema=table.schema, table=table.name,
name=key, **arg))
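    # Illustrative input (added): from_map consumes a YAML-derived list where
    # each entry maps a column name either to a bare type string or to a dict
    # of attributes, e.g.
    #   incols = [{'id': 'integer'},
    #             {'name': {'type': 'text', 'not_null': True}}]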
def diff_map(self, incols):
"""Generate SQL to transform existing columns
:param incols: a YAML map defining the new columns
:return: list of SQL statements
Compares the existing column definitions, as fetched from the
catalogs, to the input map and generates SQL statements to
transform the columns accordingly.
This takes care of dropping columns that are not present in
the input map. It's separate so that it can be done last,
after other table, constraint and index changes.
"""
stmts = []
if not incols or not self:
return stmts
for (sch, tbl) in list(incols.keys()):
if (sch, tbl) in list(self.keys()):
for col in self[(sch, tbl)]:
if col.name not in [c.name for c in incols[(sch, tbl)]] \
and not hasattr(col, 'dropped'):
stmts.append(col.drop())
return stmts
| 36.83
| 79
| 0.562449
|
from pyrseas.dbobject import DbObjectDict, DbSchemaObject, quote_id
class Column(DbSchemaObject):
keylist = ['schema', 'table']
def to_map(self):
if hasattr(self, 'dropped'):
return None
dct = self._base_map()
del dct['number'], dct['name'], dct['_table']
if hasattr(self, 'inherited'):
dct['inherited'] = (self.inherited != 0)
return {self.name: dct}
def add(self):
stmt = "%s %s" % (quote_id(self.name), self.type)
if hasattr(self, 'not_null'):
stmt += ' NOT NULL'
if hasattr(self, 'default'):
if not self.default.startswith('nextval'):
stmt += ' DEFAULT ' + self.default
return (stmt, '' if not hasattr(self, 'description')
else self.comment())
def comment(self):
return "COMMENT ON COLUMN %s.%s IS %s" % (
self._table.qualname(), self.name, self._comment_text())
def drop(self):
if hasattr(self, 'dropped'):
return ""
if hasattr(self, '_table'):
(comptype, objtype) = (self._table.objtype, 'COLUMN')
compname = self._table.qualname()
else:
(comptype, objtype) = ('TYPE', 'ATTRIBUTE')
compname = self.table
return "ALTER %s %s DROP %s %s" % (comptype, compname, objtype,
self.name)
def rename(self, newname):
stmt = "ALTER TABLE %s RENAME COLUMN %s TO %s" % (
self._table.qualname(), self.name, newname)
self.name = newname
return stmt
def set_sequence_default(self):
stmts = []
pth = self.set_search_path()
if pth:
stmts.append(pth)
stmts.append("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT %s" % (
quote_id(self.table), quote_id(self.name), self.default))
return stmts
def diff_map(self, incol):
stmts = []
base = "ALTER COLUMN %s " % self.name
if not hasattr(self, 'not_null') and hasattr(incol, 'not_null'):
stmts.append(base + "SET NOT NULL")
if hasattr(self, 'not_null') and not hasattr(incol, 'not_null'):
stmts.append(base + "DROP NOT NULL")
if not hasattr(self, 'type'):
raise ValueError("Column '%s' missing datatype" % self.name)
if not hasattr(incol, 'type'):
raise ValueError("Input column '%s' missing datatype" % incol.name)
if self.type != incol.type:
stmts.append(base + "TYPE %s" % incol.type)
if not hasattr(self, 'default') and hasattr(incol, 'default'):
stmts.append(base + "SET DEFAULT %s" % incol.default)
if hasattr(self, 'default') and not hasattr(incol, 'default'):
stmts.append(base + "DROP DEFAULT")
return (", ".join(stmts), self.diff_description(incol))
class ColumnDict(DbObjectDict):
cls = Column
query = \
"""SELECT nspname AS schema, relname AS table, attname AS name,
attnum AS number, format_type(atttypid, atttypmod) AS type,
attnotnull AS not_null, attinhcount AS inherited,
pg_get_expr(adbin, adrelid) AS default,
attisdropped AS dropped,
col_description(c.oid, attnum) AS description
FROM pg_attribute JOIN pg_class c ON (attrelid = c.oid)
JOIN pg_namespace ON (relnamespace = pg_namespace.oid)
LEFT JOIN pg_attrdef ON (attrelid = pg_attrdef.adrelid
AND attnum = pg_attrdef.adnum)
WHERE relkind in ('c', 'r', 'f')
AND (nspname != 'pg_catalog'
AND nspname != 'information_schema')
AND attnum > 0
ORDER BY nspname, relname, attnum"""
def _from_catalog(self):
for col in self.fetch():
sch, tbl = col.key()
if (sch, tbl) not in self:
self[(sch, tbl)] = []
self[(sch, tbl)].append(col)
def from_map(self, table, incols):
if not incols:
raise ValueError("Table '%s' has no columns" % table.name)
cols = self[(table.schema, table.name)] = []
for col in incols:
for key in list(col.keys()):
if isinstance(col[key], dict):
arg = col[key]
else:
arg = {'type': col[key]}
cols.append(Column(schema=table.schema, table=table.name,
name=key, **arg))
def diff_map(self, incols):
stmts = []
if not incols or not self:
return stmts
for (sch, tbl) in list(incols.keys()):
if (sch, tbl) in list(self.keys()):
for col in self[(sch, tbl)]:
if col.name not in [c.name for c in incols[(sch, tbl)]] \
and not hasattr(col, 'dropped'):
stmts.append(col.drop())
return stmts
| true
| true
|
f709aa2bed50b695798989b4ab08357aee9cbc57
| 47,691
|
py
|
Python
|
tests/python/relay/test_op_level3.py
|
whn09/incubator-tvm
|
657a6fa6554cc8402eca225f80e1b2cc2803c71a
|
[
"Apache-2.0"
] | null | null | null |
tests/python/relay/test_op_level3.py
|
whn09/incubator-tvm
|
657a6fa6554cc8402eca225f80e1b2cc2803c71a
|
[
"Apache-2.0"
] | null | null | null |
tests/python/relay/test_op_level3.py
|
whn09/incubator-tvm
|
657a6fa6554cc8402eca225f80e1b2cc2803c71a
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level3 operator test cases.
"""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.error import TVMError
from tvm.relay import create_executor, transform
from tvm.relay.testing import check_grad, run_infer_type
import tvm.testing
def test_zeros_ones():
for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
y = op(shape=(124, 50), dtype="float64")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((124, 50), "float64")
intrp = create_executor()
intrp_res = intrp.evaluate(y).asnumpy()
np.testing.assert_allclose(intrp_res, ref((124, 50), 'float64'))
def test_unary_identity():
for op, ref in [(relay.zeros_like, np.zeros_like),
(relay.ones_like, np.ones_like),
(relay.ceil, np.ceil),
(relay.floor, np.floor),
(relay.trunc, np.trunc),
(relay.round, np.round),
(relay.abs, np.abs),
(relay.copy, None), # np.copy
(relay.negative, np.negative),
(relay.sign, np.sign)]:
shape = (8, 9, 4)
x = relay.var("x", relay.TensorType(shape, "float32"))
y = op(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, "float32")
if ref is not None:
data = np.random.rand(*shape).astype('float32')
intrp = create_executor()
op_res = intrp.evaluate(y, { x: relay.const(data) })
ref_res = ref(data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
def test_cast():
x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
y = x.astype("int32")
yy = run_infer_type(y)
assert "dtype=" in yy.astext()
assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")
x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
y = relay.cast(x, "int32")
yy = run_infer_type(y)
assert "dtype=" in yy.astext()
assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")
def test_clip():
a = relay.var("a", relay.TensorType((10, 4), "float32"))
y = relay.clip(a, 1., 4.)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((10, 4), "float32")
data = np.random.rand(10, 4).astype('float32')
intrp = create_executor()
op_res = intrp.evaluate(y, { a: relay.const(data) })
ref_res = np.clip(data, 1., 4.)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
def test_fixed_point_multiply():
    # Test 23 * 1/16:
    # frexp(1/16) gives [m, s] = [0.5, -3], so M = m * 2**31 = 1073741824
    # and the shift argument is s = -3.
a = relay.var("a", relay.TensorType((10, 4), "int32"))
y = relay.fixed_point_multiply(a, 1073741824, -3)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((10, 4), "int32")
data = 23*np.ones((10, 4)).astype('int32')
intrp = create_executor()
op_res = intrp.evaluate(y, { a: relay.const(data) })
ref_res = np.ones((10, 4)).astype('int32')
np.testing.assert_allclose(op_res.asnumpy(), ref_res, atol=1)
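# Hedged sketch (added for illustration; not part of the original tests): how
# the (M, s) constants used above can be derived for any float multiplier.
# The helper name is hypothetical.
def _fixed_point_params(multiplier):
    import math
    m, s = math.frexp(multiplier)   # multiplier == m * 2**s, with 0.5 <= m < 1
    M = int(round(m * (1 << 31)))   # scale the mantissa to signed 31-bit fixed point
    return M, s                     # _fixed_point_params(1 / 16) -> (1073741824, -3)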
def test_reinterpret():
a = relay.var("a", relay.TensorType((1000, 4), "float32"))
y = relay.reinterpret(a, "int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000, 4), "int32")
data = np.random.randn(1000, 4).astype('float32') * 1000
intrp = create_executor()
op_res = intrp.evaluate(y, {a: relay.const(data)})
ref_res = data.view("int32")
np.testing.assert_equal(op_res.asnumpy(), ref_res)
def test_approximate_transcendental():
def C(x):
return relay.expr.const(x, "float32")
def approx_exp(x):
# An approximation derived from Opus,
# https://github.com/xiph/opus/blob/c1c247/celt/mathops.h#L147-L165
x = relay.minimum(relay.maximum(x, C(-88.0)), C(88.0))
x = C(127.0) + x * C(1.44269504)
xf = relay.floor(x)
i = relay.cast(xf, "int32")
x = x - xf
Y = C(0.99992522) + x * (C(0.69583354) + x * (C(0.22606716) + x * C(0.078024523)))
exponent = relay.left_shift(i, relay.expr.const(23, "int32"))
exponent = relay.reinterpret(exponent, "float32")
return exponent * Y
def approximate_sigmoid(x):
y = approx_exp(x)
return y / (y + C(1.0))
def approximate_tanh(x):
x = x * C(2.0)
y = approx_exp(x)
return (y - C(1.0)) / (y + C(1.0))
a = relay.var("a", relay.TensorType((1000,), "float32"))
y = approximate_sigmoid(a)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000,), "float32")
data = np.linspace(-5, 5, 1000).astype("float32")
intrp = create_executor()
op_res = intrp.evaluate(y, {a: relay.const(data)})
def reference_sigmoid(x):
return np.exp(-np.logaddexp(0, -x))
np.testing.assert_allclose(op_res.asnumpy(), reference_sigmoid(data), atol=2e-5, rtol=1e-9)
y = approximate_tanh(a)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000,), "float32")
data = np.linspace(-5, 5, 1000).astype("float32")
intrp = create_executor()
op_res = intrp.evaluate(y, {a: relay.const(data)})
def reference_tanh(x):
return np.tanh(x)
np.testing.assert_allclose(op_res.asnumpy(), reference_tanh(data), atol=4e-5, rtol=1e-9)
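# Added note on approx_exp above: it evaluates exp(x) as 2**(x * log2(e)) by
# splitting x * log2(e) + 127 into integer and fractional parts; the integer
# part is shifted into the IEEE-754 exponent bits (left shift by 23) and the
# fractional remainder is covered by the cubic polynomial Y.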
def test_squeeze():
def verify_squeeze(shape, dtype, axis):
x = relay.var("x", relay.TensorType(shape, dtype))
squeeze = relay.squeeze(x, axis=axis)
np_axis = tuple(axis) if axis is not None else None
data = np.random.random_sample(shape).astype(dtype)
intrp = create_executor()
op_res = intrp.evaluate(squeeze, { x : relay.const(data) })
ref_res = np.squeeze(data, axis=np_axis)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
verify_squeeze((1, 3, 2, 5), "float32", None)
verify_squeeze((1, 3, 1), "float32", [0])
verify_squeeze((1, 2, 1, 2, 1), "float32", [0, 2])
def test_transpose_infer_type():
n, t, d = te.size_var("n"), te.size_var("t"), 100
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.transpose(x, axes=(1, 0, 2))
assert "axes=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(t, n, 100), "float32")
y = relay.transpose(x)
assert "axes=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(100, t, n), "float32")
@tvm.testing.uses_gpu
def test_transpose():
def verify_transpose(dshape, axes):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.transpose(x, axes=axes)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.transpose(x_data, axes=axes)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_transpose((2, 3, 4), (0, 2, 1))
def test_squeeze_infer_type():
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x, axis=(2,))
assert "axis=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(1, 4), "float32")
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x)
assert "axis=" not in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(4,), "float32")
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_squeeze_bad_axes_infer_type():
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x, axis=(1,))
yy = run_infer_type(y)
def test_reshape_infer_type():
n, t, d1, d2 = 10, 20, 100, 20
x = relay.var("x", relay.TensorType((n, t, d1, d2), "float32"))
y = relay.reshape(x, newshape=(n, t, 2000))
assert "newshape=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, t, 2000), "float32")
@tvm.testing.uses_gpu
def test_reshape():
def verify_reshape(shape, newshape, oshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
z = relay.reshape(x, newshape=newshape)
zz = run_infer_type(z)
assert "newshape=" in z.astext()
assert zz.checked_type == relay.ty.TensorType(oshape, "float32")
func = relay.Function([x], z)
check_grad(func)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
ref_res = np.reshape(x_data, oshape)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_reshape((2, 3, 4), (8, 3), (8, 3))
verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))
verify_reshape((2, 3, 4), (4, 0, 2), (4, 3, 2))
verify_reshape((2, 3, 4), (2, 0, 0), (2, 3, 4))
verify_reshape((2, 3, 4), (0, -1), (2, 12))
verify_reshape((2, 3, 4), (-1, 0), (8, 3))
verify_reshape((2, 3, 4), (2, -2), (2, 3, 4))
verify_reshape((2, 3, 4), (-2, 1, 1), (2, 3, 4, 1, 1))
verify_reshape((2, 3, 4), (-3, 4), (6, 4))
verify_reshape((2, 3, 4, 5), (-3, -3), (6, 20))
verify_reshape((2, 3, 4), (0, -3), (2, 12))
verify_reshape((2, 3, 4), (-3, -2), (6, 4))
verify_reshape((2, 3, 4), (-4, 1, 2, -2), (1, 2, 3, 4))
verify_reshape((2, 3, 4), (2, -4, -1, 3, -2), (2, 1, 3, 4))
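# Added note: the special newshape codes exercised above follow the MXNet-style
# convention TVM implements: 0 copies the matching input dim, -1 infers one dim
# from the remainder, -2 copies all remaining dims, -3 merges two consecutive
# dims, and -4 splits one dim into the next two entries (which may include -1).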
def test_reshape_fail():
with pytest.raises(TVMError) as reshape_err:
x = relay.var("x", relay.TensorType([2,3], "float32"))
z = relay.reshape(x, [7])
zz = run_infer_type(z)
def test_reshape_like_infer_type():
# concrete shape
x = relay.var("x", relay.TensorType((1, 2, 3), "float32"))
y = relay.var("y", relay.TensorType((1,6), "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((1, 6), "float32")
# symbolic shape
n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.var("y", relay.TensorType((1, 8, 8), "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((1, 8, 8), "float32")
@tvm.testing.uses_gpu
def test_reshape_like():
def verify_reshape_like(shape, oshape):
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
y_data = np.random.uniform(low=-1, high=1, size=oshape).astype("float32")
ref_res = np.reshape(x_data, y_data.shape)
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("x", relay.TensorType(oshape, "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(ref_res.shape, "float32")
func = relay.Function([x, y], z)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_reshape_like((2, 3, 4), (1, 8, 3))
verify_reshape_like((4, 7), (2, 7, 2))
def test_take_infer_type():
def verify_take(dshape, indices_shape, oshape, axis=None):
x = relay.var("x", relay.TensorType(dshape, "float32"))
indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
y = relay.take(x, indices, axis=axis)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(oshape, "float32")
d1, d2, d3 = te.var("d1"), te.var("d2"), te.var("d3")
d4, d5, d6 = te.var("d4"), te.var("d5"), te.var("d6")
verify_take((d1,), (1,), (1,), 0)
verify_take((4,), (d1, d2), (d1, d2))
verify_take((3, 3, 3), (1, d2), (1, d2))
verify_take((d1, d2), (d3, d4, d5), (d3, d4, d5, d2), 0)
verify_take((d1, d2), (d3, d4, d5), (d1, d3, d4, d5), 1)
verify_take((d1, d2, d3, d4), (d5, d6), (d1, d2, d5, d6, d4), -2)
@tvm.testing.uses_gpu
def test_take():
def verify_take(src_shape, indices_src, axis=None, mode="clip"):
src_dtype = "float32"
indices_dtype = "int32"
indices_src = np.array(indices_src, dtype=indices_dtype)
x = relay.var("x", relay.TensorType(src_shape, src_dtype))
indices = relay.var("indices", relay.TensorType(indices_src.shape, indices_dtype))
z = relay.take(x, indices, axis=axis, mode=mode)
func = relay.Function([x, indices], z)
x_data = np.random.uniform(low=-1, high=1, size=src_shape).astype(src_dtype)
np_mode = "raise" if mode == "fast" else mode
ref_res = np.take(x_data, indices=indices_src, axis=axis, mode=np_mode)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, indices_src)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_take((4,), [1])
verify_take((4,), [[0,1,2,3]])
verify_take((3,3,3), [[11,25]])
verify_take((4,), [[0,1],[2,3]])
verify_take((4,), [1], 0)
verify_take((2,2), [[[1,0],[0,1]]], 0)
verify_take((2,2), [[[1,0],[0,1]]], 1)
verify_take((4,3,5,6), [[2,1,0,0]], -2)
verify_take((3,4), [-5, 20])
verify_take((3,4), [-5, 20], mode="wrap")
verify_take((3,4), [-1, 2], axis=0)
verify_take((3,4), [-1, 2], axis=0, mode="wrap")
verify_take((3,4), [-1, 2], axis=1)
verify_take((3,4), [-1, 2], axis=1, mode="wrap")
verify_take((3,3,3), [[11,25]], mode="fast")
verify_take((3,4), [0, 2], axis=0, mode="fast")
verify_take((3,4), [0, 2], axis=1, mode="fast")
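# Added illustration of the out-of-range handling exercised above: for a
# length-4 axis, mode="clip" (the default) clamps indices into range
# (-5 -> 0, 20 -> 3), mode="wrap" reduces them modulo the axis length
# (-5 -> 3, 20 -> 0), and mode="fast" assumes indices are already in bounds.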
def test_split_infer_type():
def verify_split(dshape, indices_or_sections, ret_type, axis=None):
x = relay.var("x", relay.ty.TensorType(dshape, "float32"))
y = relay.split(x, indices_or_sections, axis=axis)
yy = run_infer_type(y.astuple())
assert yy.checked_type == ret_type
idxd = tvm.tir.indexdiv
d1, d2, d3, d4 = te.var("d1"), te.var("d2"), te.var("d3"), te.var("d4")
axis = te.var("axis")
verify_split((5, 5, 2, 2), 5,
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32")])),
axis=1)
verify_split((5, 5, 2, 2), 5,
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32")])),
axis=0)
verify_split((d1, d2, d3, d4), 4,
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32")])),
axis=2)
verify_split((d1, d2, d3, d4), 2,
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32"),
relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32")])),
axis=0)
verify_split((d1, d2, d3, d4), (2, 4, 7),
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 3, d3, d4), "float32"),
relay.ty.TensorType((d1, (d2-7), d3, d4), "float32")])),
axis=1)
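# Added note: an integer indices_or_sections splits the axis into that many
# equal parts, while a tuple gives split points, so (2, 4, 7) on an axis of
# extent d2 yields sections of width 2, 2, 3 and d2 - 7, matching the tuple
# type asserted above.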
def test_full_infer_type():
# default settings: match input dtype
x = relay.var("x", relay.TensorType((), "int8"))
y = relay.full(x, ())
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((), "int8")
# change the shape and dtype
x = relay.var("x", relay.TensorType((), "float32"))
y = relay.full(x, (1, 2), "int8")
"shape=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 2), "int8")
@tvm.testing.uses_gpu
def test_full():
def verify_full(fill_value, src_shape, dtype):
x = relay.var("x", relay.scalar_type(dtype))
z = relay.full(x, src_shape, dtype)
func = relay.Function([x], z)
ref_res = np.full(src_shape, fill_value)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(np.array(fill_value, dtype))
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_full(4, (1, 3, 4, 4), "int32")
    # verify_full(4, (1, 3, 4, 4), "int64")  # This does not pass: the Python int fill value is not upcast to int64; unclear how to fix it.
verify_full(4.0, (1, 4), "float32")
def test_full_like_infer_type():
# concrete shape
base = relay.var("base", relay.TensorType((1, 2, 3), "float32"))
fill = relay.var("fill", relay.TensorType((), "float32"))
y = relay.full_like(base, fill)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 2, 3), "float32")
# symbolic shape
n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
base = relay.var("base", relay.TensorType((n, c, h, w), "float32"))
fill = relay.var("fill", relay.TensorType((), "float32"))
y = relay.full_like(base, fill)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")
@tvm.testing.uses_gpu
def test_full_like():
def verify_full_like(base, fill_value, dtype):
x_data = np.random.uniform(low=-1, high=1, size=base).astype(dtype)
x = relay.var("x", relay.TensorType(base, dtype))
y = relay.var("y", relay.scalar_type(dtype))
z = relay.full_like(x, y)
func = relay.Function([x, y], z)
ref_res = np.full_like(x_data, fill_value)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, np.array(fill_value, dtype))
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_full_like((1, 3, 4, 4), 4, "int32")
verify_full_like((1, 1), 44.0, "float32")
@tvm.testing.uses_gpu
def test_infer_type_leaky_relu():
n, c , h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.leaky_relu(x, alpha=0.1)
"alpha=0.1" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")
shape = (1, 5, 10, 10)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
z = relay.nn.leaky_relu(x, alpha=0.1)
assert "alpha=0.1" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = np.where(x_data > 0, x_data, x_data * 0.1)
for target, ctx in tvm.testing.enabled_targets():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"):
x = relay.var("data", relay.TensorType(data, dtype))
if alpha:
y = relay.var("alpha", relay.TensorType(alpha, dtype))
else:
y = relay.var("alpha", relay.IncompleteType())
z = relay.nn.prelu(x, y, axis=axis)
zz = run_infer_type(z)
if axis != 1:
assert "axis" in z.astext()
assert zz.checked_type == relay.ty.TensorType(output, dtype)
if not alpha:
axis = axis if axis else 1
alpha_shape = (data[axis],)
assert zz.args[1].checked_type == relay.TensorType(alpha_shape, "float32")
    if all(isinstance(v, tvm.tir.Var) for v in data) or not alpha:
return
func = relay.Function([x, y], z)
x_data = np.random.uniform(low=-1, high=1, size=data).astype(dtype)
a_data = np.random.uniform(low=-1, high=1, size=alpha).astype(dtype)
if axis == 1:
ref_res = (x_data < 0) * (x_data * a_data.reshape(3, 1, 1)) + (x_data>=0) * x_data
else:
ref_res = (x_data < 0) * (x_data * a_data.reshape(1, 1, 3)) + (x_data>=0) * x_data
for target, ctx in tvm.testing.enabled_targets():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data, a_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data, a_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_infer_type_prelu():
n, c , h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
verify_infer_type_prelu((n, c, h, w), (c,), 1, (n, c, h, w))
verify_infer_type_prelu((n, h, w, c), (c,), 3, (n, h, w, c))
verify_infer_type_prelu((n, c, h, w), None, 1, (n, c, h, w))
verify_infer_type_prelu((n, h, w, c), None, 3, (n, h, w, c))
verify_infer_type_prelu((1, 3, 2, 2), (3,), 1, (1, 3, 2, 2))
verify_infer_type_prelu((1, 2, 2, 3), (3,), 3, (1, 2, 2, 3))
verify_infer_type_prelu((1, 3, 2, 2), None, 1, (1, 3, 2, 2))
verify_infer_type_prelu((1, 2, 2, 3), None, 3, (1, 2, 2, 3))
@tvm.testing.uses_gpu
def test_arange():
def verify_arange(start, stop, step):
dtype = "float32"
if start is None and step is None:
x = relay.arange(relay.const(stop, dtype=dtype))
ref_res = np.arange(stop).astype(dtype)
elif start is None:
x = relay.arange(relay.const(stop, dtype=dtype), step=relay.const(step, dtype=dtype))
ref_res = np.arange(stop, step=step).astype(dtype)
elif step is None:
x = relay.arange(relay.const(start, dtype=dtype), relay.const(stop, dtype=dtype))
ref_res = np.arange(start, stop).astype(dtype)
else:
x = relay.arange(
relay.const(start, dtype=dtype),
relay.const(stop, dtype=dtype),
relay.const(step, dtype=dtype))
ref_res = np.arange(start, stop, step).astype(dtype)
func = relay.Function([], x)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)()
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_arange(None, 20, None)
verify_arange(None, 20, 2)
verify_arange(1, 20, None)
verify_arange(1, 20, 2)
    # arange doesn't support a floating-point step right now; see the type relation
    # verify_arange(1, 20, 1.5)
verify_arange(1, 20.5, None)
verify_arange(1, 20, 3)
verify_arange(20, 1, -1)
    # arange doesn't support a floating-point step right now; see the type relation
    # verify_arange(20, 1, -1.5)
@tvm.testing.uses_gpu
def test_meshgrid():
def verify_meshgrid(lengths, indexing="ij"):
input_vars = []
input_data = []
for i, length in enumerate(lengths):
input_name = "x_{}".format(i)
if length == 0:
# Scalar
input_vars.append(relay.var(input_name, relay.scalar_type("float32")))
input_data.append(np.array(1, "float32"))
else:
input_vars.append(relay.var(input_name, relay.TensorType((length,), "float32")))
input_data.append(np.arange(length).astype("float32"))
z = relay.meshgrid(input_vars, indexing=indexing).astuple()
func = relay.Function(input_vars, z)
# Get ref
ref_res = np.meshgrid(*input_data, indexing=indexing)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(*input_data)
assert len(op_res) == len(ref_res)
for i in range(len(op_res)):
tvm.testing.assert_allclose(op_res[i].asnumpy(), ref_res[i], rtol=1e-5)
verify_meshgrid([3, 5])
verify_meshgrid([4, 2], indexing="xy")
verify_meshgrid([3, 5, 2])
verify_meshgrid([3, 1, 5], indexing="xy")
# Length 0 signifies scalar.
verify_meshgrid([3, 5, 0])
@tvm.testing.uses_gpu
def test_tile():
def verify_tile(dshape, reps):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.tile(x, reps=reps)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.tile(x_data, reps=reps)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_tile((2, 3, 4), (3, 2, 1))
verify_tile((2, 3, 4), (1, 2))
verify_tile((2, 3), (3, 2, 1))
@tvm.testing.uses_gpu
def test_repeat():
def verify_repeat(dshape, repeats, axis):
x = relay.Var("x", relay.TensorType(dshape, "float32"))
func = relay.Function([x], relay.repeat(x, repeats, axis))
data = np.random.uniform(size=dshape).astype("float32")
ref_res = np.repeat(data, repeats, axis)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_repeat((3,), 2, 0)
verify_repeat((3, 10), 2, -1)
verify_repeat((3, 2, 4), 3, 1)
@tvm.testing.uses_gpu
def test_stack():
def verify_stack(dshapes, axis):
y = []
for shape in dshapes:
y.append(relay.var("input", relay.TensorType(shape, "float32")))
x = relay.Tuple(y)
z = relay.stack(x, axis=axis)
func = relay.Function(y, z)
x_data = [np.random.normal(size=shape).astype("float32") for shape in dshapes]
ref_res = np.stack(x_data, axis=axis)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(*x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_stack([(2,), (2,), (2,)], -1)
verify_stack([(2,), (2,), (2,)], 0)
verify_stack([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1)
verify_stack([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1)
verify_stack([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], 4)
@tvm.testing.uses_gpu
def test_reverse():
def verify_reverse(dshape, axis):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.reverse(x, axis=axis)
zz = run_infer_type(z)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.flip(x_data, axis)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_reverse((2, 3, 4), 1)
verify_reverse((4, 7), 0)
verify_reverse((2, 3, 4), -1)
@tvm.testing.uses_gpu
def test_reverse_sequence():
def verify_reverse_sequence(x_data, seq_lengths, batch_axis, seq_axis, ref_res):
seq_lengths_data = np.array(seq_lengths).astype("int32")
x = relay.var("x", relay.TensorType(x_data.shape, str(x_data.dtype)))
z = relay.reverse_sequence(x, relay.const(seq_lengths_data), seq_axis, batch_axis)
zz = run_infer_type(z)
assert zz.checked_type == x.type_annotation
func = relay.Function([x], z)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 5, 10, 15],
[4, 1, 6, 11],
[8, 9, 2, 7],
[12, 13, 14, 3]]
verify_reverse_sequence(indata, [1, 2, 3, 4], 1, 0, np.array(result))
verify_reverse_sequence(indata, [1, 2, 3, 4], -1, 0, np.array(result))
verify_reverse_sequence(indata.astype("float32"), [1, 2, 3, 4], 1, 0, np.array(result).astype("float32"))
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 1, 2, 3],
[5, 4, 6, 7],
[10, 9, 8, 11],
[15, 14, 13, 12]]
verify_reverse_sequence(indata, [1, 2, 3, 4], 0, 1, np.array(result))
verify_reverse_sequence(indata, [1, 2, 3, 4], 0, -1, np.array(result))
verify_reverse_sequence(indata.astype("float32"), [1, 2, 3, 4], 0, 1, np.array(result).astype("float32"))
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[15, 14, 13, 12]]
verify_reverse_sequence(indata, [-1, 0, 1, 5], 0, 1, np.array(result))
indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
result = [[[[18, 19, 20], [21, 22, 23], [24, 25, 26]],
[[9, 10, 11], [12, 13, 14], [15, 16, 17]],
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]],
[[[45, 46, 47], [48, 49, 50], [51, 52, 53]],
[[36, 37, 38], [39, 40, 41], [42, 43, 44]],
[[27, 28, 29], [30, 31, 32], [33, 34, 35]]]]
verify_reverse_sequence(indata, [3, 3], 0, 1, np.array(result))
indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
result = [[[[9, 10, 11], [21, 22, 23], [15, 16, 17]],
[[0, 1, 2], [12, 13, 14], [6, 7, 8]],
[[18, 19, 20], [3, 4, 5], [24, 25, 26]]],
[[[36, 37, 38], [48, 49, 50], [42, 43, 44]],
[[27, 28, 29], [39, 40, 41], [33, 34, 35]],
[[45, 46, 47], [30, 31, 32], [51, 52, 53]]]]
verify_reverse_sequence(indata, [2, 3, 2], 2, 1, np.array(result))
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = []
with pytest.raises(Exception) as execinfo:
verify_reverse_sequence(indata, [2, 3, 2, 4, 5], 1, 0, np.array(result))
assert "For reverse_sequnece seq_lengths size should match with dimension of batch axis," \
" but got dimension of batch_axis = 4, and seq_length size = 5" in execinfo.value.args[0]
def test_scatter():
def ref_scatter(data, indices, updates, axis=0):
idx = np.indices(indices.shape).reshape(indices.ndim, -1)
updated_idx = np.copy(idx)
indices = indices.reshape(-1)
for i in range(len(indices)):
updated_idx[axis, i] = indices[i]
scattered = np.copy(data)
scattered[tuple(updated_idx)] = updates[tuple(idx)]
return scattered
def verify_scatter(dshape, ishape, axis=0):
d = relay.var("d", relay.TensorType(dshape, "float32"))
i = relay.var("i", relay.TensorType(ishape, "int64"))
u = relay.var("u", relay.TensorType(ishape, "float32"))
z = relay.op.scatter(d, i, u, axis)
func = relay.Function([d, i, u], z)
data_np = np.random.uniform(size=dshape).astype("float32")
updates_np = np.random.uniform(size=ishape).astype("float32")
indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype("int64")
ref_res = ref_scatter(data_np, indices_np, updates_np, axis)
# TODO(mbrookhart): expand testing when adding more backend schedules
for target, ctx in [("llvm", tvm.cpu())]:
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data_np, indices_np, updates_np)
tvm.testing.assert_allclose(
op_res.asnumpy(), ref_res, rtol=1e-5)
verify_scatter((10, ), (10, ), 0)
verify_scatter((10, 5), (10, 5), -2)
verify_scatter((10, 5), (10, 5), -1)
verify_scatter((10, 5), (3, 5), 0)
verify_scatter((12, 4), (7, 2), 1)
verify_scatter((2, 3, 4), (1, 3, 4), 0)
verify_scatter((2, 3, 4), (2, 1, 4), 1)
verify_scatter((2, 3, 4), (2, 3, 1), 2)
verify_scatter((2, 3, 4, 5), (1, 3, 4, 5), 0)
verify_scatter((6, 3, 4, 5), (2, 3, 4, 5), 1)
verify_scatter((2, 3, 8, 5), (2, 3, 1, 1), 2)
verify_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3)
def test_scatter_add():
def ref_scatter_add(data, indices, updates, axis=0):
output = np.copy(data)
for index in np.ndindex(*indices.shape):
new_index = list(index)
new_index[axis] = indices[index]
output[tuple(new_index)] += updates[index]
return output
def verify_scatter_add(dshape, ishape, axis=0):
d = relay.var("d", relay.TensorType(dshape, "float32"))
i = relay.var("i", relay.TensorType(ishape, "int64"))
u = relay.var("u", relay.TensorType(ishape, "float32"))
z = relay.op.scatter_add(d, i, u, axis)
func = relay.Function([d, i, u], z)
data_np = np.random.uniform(size=dshape).astype("float32")
updates_np = np.random.uniform(size=ishape).astype("float32")
indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype("int64")
ref_res = ref_scatter_add(data_np, indices_np, updates_np, axis)
# TODO(mbrookhart): expand testing when adding more backend schedules
for target, ctx in [("llvm", tvm.cpu())]:
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data_np, indices_np, updates_np)
tvm.testing.assert_allclose(
op_res.asnumpy(), ref_res, rtol=1e-5)
verify_scatter_add((10, ), (10, ), 0)
verify_scatter_add((10, 5), (10, 5), -2)
verify_scatter_add((10, 5), (10, 5), -1)
verify_scatter_add((10, 5), (3, 5), 0)
verify_scatter_add((12, 4), (7, 2), 1)
verify_scatter_add((2, 3, 4), (1, 3, 4), 0)
verify_scatter_add((2, 3, 4), (2, 1, 4), 1)
verify_scatter_add((2, 3, 4), (2, 3, 1), 2)
verify_scatter_add((2, 3, 4, 5), (1, 3, 4, 5), 0)
verify_scatter_add((6, 3, 4, 5), (2, 3, 4, 5), 1)
verify_scatter_add((2, 3, 8, 5), (2, 3, 1, 1), 2)
verify_scatter_add((16, 16, 4, 5), (16, 16, 4, 5), 3)
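# Worked example (added): scatter_add over data=[0, 0, 0], indices=[0, 0, 2],
# updates=[1, 2, 3] with axis=0 yields [3, 0, 3] -- duplicate indices
# accumulate, unlike plain scatter where the last write wins.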
@tvm.testing.uses_gpu
def test_gather():
def verify_gather(data, axis, indices, ref_res):
data = np.asarray(data, dtype='float32')
indices = np.asarray(indices, dtype='int32')
ref_res = np.asarray(ref_res)
d = relay.var("x", relay.TensorType(data.shape, "float32"))
i = relay.var("y", relay.TensorType(indices.shape, "int32"))
z = relay.gather(d, axis, i)
func = relay.Function([d, i], z)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data, indices)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res,
rtol=1e-5)
verify_gather([[1, 2], [3, 4]],
1,
[[0, 0], [1, 0]],
[[1, 1], [4, 3]])
verify_gather([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]],
0,
[[[1, 0, 1], [1, 1, 0]]],
[[[6, 1, 8], [9, 10, 5]]])
verify_gather([[[-0.2321, -0.2024, -1.7624], [-0.3829, -0.4246, 0.2448],
[0.1822, 0.2360, -0.8965], [0.4497, -0.2224, 0.6103]],
[[0.0408, -0.7667, -0.4303], [-0.3216, 0.7489, -0.1502],
[0.0144, -0.4699, -0.0064], [-0.0768, -1.6064, 1.3390]]],
1,
[[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],
[[[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],
[[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]]])
verify_gather([[[0.3050, 1.6986, 1.1034], [0.7020, -0.6960, -2.1818],
[0.3116, -0.5773, -0.9912], [0.0835, -1.3915, -1.0720]],
[[0.1694, -0.6091, -0.6539], [-0.5234, -0.1218, 0.5084],
[0.2374, -1.9537, -2.0078], [-0.5700, -1.0302, 0.1558]]],
2,
[[[1, 1, 0, 1], [0, 0, 2, 2], [1, 2, 1, 2], [2, 2, 1, 0]],
[[0, 0, 1, 2], [2, 2, 1, 0], [1, 2, 0, 0], [0, 2, 0, 2]]],
[[[1.6986, 1.6986, 0.3050, 1.6986],
[0.7020, 0.7020, -2.1818, -2.1818],
[-0.5773, -0.9912, -0.5773, -0.9912],
[-1.0720, -1.0720, -1.3915, 0.0835]],
[[0.1694, 0.1694, -0.6091, -0.6539],
[0.5084, 0.5084, -0.1218, -0.5234],
[-1.9537, -2.0078, 0.2374, 0.2374],
[-0.5700, 0.1558, -0.5700, 0.1558]]])
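# Added note: relay.gather follows the torch.gather convention, so for the
# 2-D, axis=1 case above, out[i][j] = data[i][indices[i][j]], which is what
# the expected tensors encode.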
@tvm.testing.uses_gpu
def test_gather_nd():
def verify_gather_nd(xshape, yshape, y_data):
x = relay.var("x", relay.TensorType(xshape, "float32"))
y = relay.var("y", relay.TensorType(yshape, "int32"))
z = relay.gather_nd(x, y)
func = relay.Function([x, y], z)
x_data = np.random.uniform(size=xshape).astype("float32")
ref_res = x_data[tuple(y_data)]
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_gather_nd((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]])
verify_gather_nd((2, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify_gather_nd((3, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify_gather_nd((3, 2), (2, 2, 3), [[[0, 1, 2], [2, 0, 1]], [[0, 0, 0], [1, 1, 1]]])
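# Worked example (added): in the first case above, x has shape (2, 2) and
# y = [[1, 1, 0], [0, 1, 0]]; y's leading axis indexes x's dimensions, so the
# output is [x[1, 0], x[1, 1], x[0, 0]].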
def _verify_infiniteness_ops(relay_op, ref_op):
    for dtype in ['float32', 'float16', 'int32', 'int16']:
shape = (2, 8, 8)
x = relay.var("x", relay.TensorType(shape, dtype))
y = relay_op(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, "bool")
data = np.random.uniform(size=shape).astype(dtype)
if dtype.startswith('float'):
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.infty
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan
intrp = create_executor()
op_res = intrp.evaluate(y, {x: data})
ref_res = ref_op(data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
def test_isfinite():
_verify_infiniteness_ops(relay.isfinite, np.isfinite)
def test_isinf():
_verify_infiniteness_ops(relay.isinf, np.isinf)
@tvm.testing.uses_gpu
def test_unravel_index():
def verify_unravel_index(indices, shape, dtype):
x_data = np.array(indices).astype(dtype)
y_data = np.array(shape).astype(dtype)
x = relay.var("x", relay.TensorType(x_data.shape, dtype))
y = relay.var("y", relay.TensorType(y_data.shape, dtype))
z = relay.unravel_index(x, y)
zz = run_infer_type(z)
if len(x_data.shape) == 1:
out_shape = [y_data.shape[0], x_data.shape[0]]
else:
out_shape = [y_data.shape[0]]
assert zz.checked_type == relay.ty.TensorType(out_shape, dtype)
func = relay.Function([x, y], z)
ref_res = np.unravel_index(x_data, y_data)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
for dtype in ["int64", "int32"]:
verify_unravel_index([0, 1, 2, 3], [2, 2], dtype)
verify_unravel_index([144], [5, 5, 5, 2], dtype)
verify_unravel_index(144, [5, 5, 5, 2], dtype)
verify_unravel_index([100, 13, 5], [5, 5, 5, 2], dtype)
        # In the example below, index 5 is out of bounds for an array of size 4.
        # The NumPy implementation raises an error for it, while the TVM
        # implementation does not; instead it produces output in line with
        # TensorFlow's behavior.
# verify_unravel_index([0, 1, 2, 5], [2, 2], dtype)
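        # Worked example (added): np.unravel_index([0, 1, 2, 3], [2, 2]) returns
        # (array([0, 0, 1, 1]), array([0, 1, 0, 1])) -- row indices stacked over
        # column indices, i.e. the [len(shape), len(indices)] layout asserted
        # via out_shape above.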
@tvm.testing.uses_gpu
def test_sparse_to_dense():
def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, xpected):
sparse_indices_data = np.array(sparse_indices)
sparse_values_data = np.array(sparse_values)
default_value_data = np.array(default_value)
a = relay.var("a", relay.TensorType(sparse_indices_data.shape, str(sparse_indices_data.dtype)))
b = relay.var("b", relay.TensorType(sparse_values_data.shape, str(sparse_values_data.dtype)))
if default_value is None:
args = [a, b]
d = relay.sparse_to_dense(a, output_shape, b)
else:
c = relay.var("c", relay.TensorType(default_value_data.shape, str(default_value_data.dtype)))
args = [a, b, c]
d = relay.sparse_to_dense(a, output_shape, b, c)
zz = run_infer_type(d)
assert zz.checked_type == relay.ty.TensorType(output_shape, str(sparse_values_data.dtype))
func = relay.Function(args, d)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
if default_value is None:
op_res = intrp.evaluate(func)(sparse_indices_data, sparse_values_data)
else:
op_res = intrp.evaluate(func)(
sparse_indices_data, sparse_values_data, default_value_data
)
tvm.testing.assert_allclose(op_res.asnumpy(), xpected, rtol=1e-5)
verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0]) # scalar
verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3]) # vector
verify_sparse_to_dense([[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]) # nXd
verify_sparse_to_dense(
[[0, 0, 0], [1, 2, 3]],
[1, 2],
4,
[2, 3, 4],
[[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]]
) # nXd
verify_sparse_to_dense([0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]) # floats
verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0]) # default value not specified
    # Negative test cases:
    # sparse_indices should be ints
    # verify_sparse_to_dense([[0.1, 1.1, 4.1], [0,2,4]], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
    # sparse_values should be 0-d or 1-d only
    # verify_sparse_to_dense([[0, 1, 4], [0, 2, 4]], [[[3.1, 3.1, 3.1]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
    # sparse_indices should not be a tensor of more than 2 dimensions
    # verify_sparse_to_dense([[[[0, 1, 4], [0, 2, 4]]]], [[[[3.1, 3.1, 3.1]]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
if __name__ == "__main__":
test_cast()
test_zeros_ones()
test_unary_identity()
test_clip()
test_transpose_infer_type()
test_transpose()
test_reshape_infer_type()
test_reshape()
test_reshape_fail()
test_reshape_like_infer_type()
test_reshape_like()
test_take_infer_type()
test_take()
test_full_infer_type()
test_full()
test_full_like_infer_type()
test_full_like()
test_infer_type_leaky_relu()
test_infer_type_prelu()
test_squeeze()
test_squeeze_infer_type()
test_squeeze_bad_axes_infer_type()
test_split_infer_type()
test_arange()
test_meshgrid()
test_reverse()
test_stack()
test_tile()
test_repeat()
test_gather_nd()
test_isfinite()
test_isinf()
test_unravel_index()
test_sparse_to_dense()
    test_fixed_point_multiply()
    test_reinterpret()
    test_approximate_transcendental()
    test_gather()
    test_scatter()
    test_scatter_add()
    test_reverse_sequence()
| 42.204425
| 125
| 0.576461
|
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.error import TVMError
from tvm.relay import create_executor, transform
from tvm.relay.testing import check_grad, run_infer_type
import tvm.testing
def test_zeros_ones():
for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
y = op(shape=(124, 50), dtype="float64")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((124, 50), "float64")
intrp = create_executor()
intrp_res = intrp.evaluate(y).asnumpy()
np.testing.assert_allclose(intrp_res, ref((124, 50), 'float64'))
def test_unary_identity():
for op, ref in [(relay.zeros_like, np.zeros_like),
(relay.ones_like, np.ones_like),
(relay.ceil, np.ceil),
(relay.floor, np.floor),
(relay.trunc, np.trunc),
(relay.round, np.round),
(relay.abs, np.abs),
(relay.copy, None),
(relay.negative, np.negative),
(relay.sign, np.sign)]:
shape = (8, 9, 4)
x = relay.var("x", relay.TensorType(shape, "float32"))
y = op(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, "float32")
if ref is not None:
data = np.random.rand(*shape).astype('float32')
intrp = create_executor()
op_res = intrp.evaluate(y, { x: relay.const(data) })
ref_res = ref(data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
def test_cast():
x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
y = x.astype("int32")
yy = run_infer_type(y)
assert "dtype=" in yy.astext()
assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")
x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
y = relay.cast(x, "int32")
yy = run_infer_type(y)
assert "dtype=" in yy.astext()
assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")
def test_clip():
a = relay.var("a", relay.TensorType((10, 4), "float32"))
y = relay.clip(a, 1., 4.)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((10, 4), "float32")
data = np.random.rand(10, 4).astype('float32')
intrp = create_executor()
op_res = intrp.evaluate(y, { a: relay.const(data) })
ref_res = np.clip(data, 1., 4.)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
def test_fixed_point_multiply():
a = relay.var("a", relay.TensorType((10, 4), "int32"))
y = relay.fixed_point_multiply(a, 1073741824, -3)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((10, 4), "int32")
data = 23*np.ones((10, 4)).astype('int32')
intrp = create_executor()
op_res = intrp.evaluate(y, { a: relay.const(data) })
ref_res = np.ones((10, 4)).astype('int32')
np.testing.assert_allclose(op_res.asnumpy(), ref_res, atol=1)
def test_reinterpret():
a = relay.var("a", relay.TensorType((1000, 4), "float32"))
y = relay.reinterpret(a, "int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000, 4), "int32")
data = np.random.randn(1000, 4).astype('float32') * 1000
intrp = create_executor()
op_res = intrp.evaluate(y, {a: relay.const(data)})
ref_res = data.view("int32")
np.testing.assert_equal(op_res.asnumpy(), ref_res)
def test_approximate_transcendental():
def C(x):
return relay.expr.const(x, "float32")
def approx_exp(x):
        x = relay.minimum(relay.maximum(x, C(-88.0)), C(88.0))
x = C(127.0) + x * C(1.44269504)
xf = relay.floor(x)
i = relay.cast(xf, "int32")
x = x - xf
Y = C(0.99992522) + x * (C(0.69583354) + x * (C(0.22606716) + x * C(0.078024523)))
exponent = relay.left_shift(i, relay.expr.const(23, "int32"))
exponent = relay.reinterpret(exponent, "float32")
return exponent * Y
def approximate_sigmoid(x):
y = approx_exp(x)
return y / (y + C(1.0))
def approximate_tanh(x):
x = x * C(2.0)
y = approx_exp(x)
return (y - C(1.0)) / (y + C(1.0))
a = relay.var("a", relay.TensorType((1000,), "float32"))
y = approximate_sigmoid(a)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000,), "float32")
data = np.linspace(-5, 5, 1000).astype("float32")
intrp = create_executor()
op_res = intrp.evaluate(y, {a: relay.const(data)})
def reference_sigmoid(x):
return np.exp(-np.logaddexp(0, -x))
np.testing.assert_allclose(op_res.asnumpy(), reference_sigmoid(data), atol=2e-5, rtol=1e-9)
y = approximate_tanh(a)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000,), "float32")
data = np.linspace(-5, 5, 1000).astype("float32")
intrp = create_executor()
op_res = intrp.evaluate(y, {a: relay.const(data)})
def reference_tanh(x):
return np.tanh(x)
np.testing.assert_allclose(op_res.asnumpy(), reference_tanh(data), atol=4e-5, rtol=1e-9)
def test_squeeze():
def verify_squeeze(shape, dtype, axis):
x = relay.var("x", relay.TensorType(shape, dtype))
squeeze = relay.squeeze(x, axis=axis)
np_axis = tuple(axis) if axis is not None else None
data = np.random.random_sample(shape).astype(dtype)
intrp = create_executor()
op_res = intrp.evaluate(squeeze, { x : relay.const(data) })
ref_res = np.squeeze(data, axis=np_axis)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
verify_squeeze((1, 3, 2, 5), "float32", None)
verify_squeeze((1, 3, 1), "float32", [0])
verify_squeeze((1, 2, 1, 2, 1), "float32", [0, 2])
def test_transpose_infer_type():
n, t, d = te.size_var("n"), te.size_var("t"), 100
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.transpose(x, axes=(1, 0, 2))
assert "axes=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(t, n, 100), "float32")
y = relay.transpose(x)
assert "axes=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(100, t, n), "float32")
@tvm.testing.uses_gpu
def test_transpose():
def verify_transpose(dshape, axes):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.transpose(x, axes=axes)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.transpose(x_data, axes=axes)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_transpose((2, 3, 4), (0, 2, 1))
def test_squeeze_infer_type():
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x, axis=(2,))
assert "axis=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(1, 4), "float32")
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x)
assert "axis=" not in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(4,), "float32")
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_squeeze_bad_axes_infer_type():
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x, axis=(1,))
yy = run_infer_type(y)
def test_reshape_infer_type():
n, t, d1, d2 = 10, 20, 100, 20
x = relay.var("x", relay.TensorType((n, t, d1, d2), "float32"))
y = relay.reshape(x, newshape=(n, t, 2000))
assert "newshape=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, t, 2000), "float32")
@tvm.testing.uses_gpu
def test_reshape():
def verify_reshape(shape, newshape, oshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
z = relay.reshape(x, newshape=newshape)
zz = run_infer_type(z)
assert "newshape=" in z.astext()
assert zz.checked_type == relay.ty.TensorType(oshape, "float32")
func = relay.Function([x], z)
check_grad(func)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
ref_res = np.reshape(x_data, oshape)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
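    # newshape uses MXNet-style special codes: 0 copies the input dim, -1
    # infers one dim from the remainder, -2 copies all remaining dims, -3
    # merges two consecutive dims, and -4 splits one dim into the next two
    # entries.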
verify_reshape((2, 3, 4), (8, 3), (8, 3))
verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))
verify_reshape((2, 3, 4), (4, 0, 2), (4, 3, 2))
verify_reshape((2, 3, 4), (2, 0, 0), (2, 3, 4))
verify_reshape((2, 3, 4), (0, -1), (2, 12))
verify_reshape((2, 3, 4), (-1, 0), (8, 3))
verify_reshape((2, 3, 4), (2, -2), (2, 3, 4))
verify_reshape((2, 3, 4), (-2, 1, 1), (2, 3, 4, 1, 1))
verify_reshape((2, 3, 4), (-3, 4), (6, 4))
verify_reshape((2, 3, 4, 5), (-3, -3), (6, 20))
verify_reshape((2, 3, 4), (0, -3), (2, 12))
verify_reshape((2, 3, 4), (-3, -2), (6, 4))
verify_reshape((2, 3, 4), (-4, 1, 2, -2), (1, 2, 3, 4))
verify_reshape((2, 3, 4), (2, -4, -1, 3, -2), (2, 1, 3, 4))
def test_reshape_fail():
with pytest.raises(TVMError) as reshape_err:
x = relay.var("x", relay.TensorType([2,3], "float32"))
z = relay.reshape(x, [7])
zz = run_infer_type(z)
def test_reshape_like_infer_type():
x = relay.var("x", relay.TensorType((1, 2, 3), "float32"))
y = relay.var("y", relay.TensorType((1,6), "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((1, 6), "float32")
n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.var("y", relay.TensorType((1, 8, 8), "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((1, 8, 8), "float32")
@tvm.testing.uses_gpu
def test_reshape_like():
def verify_reshape_like(shape, oshape):
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
y_data = np.random.uniform(low=-1, high=1, size=oshape).astype("float32")
ref_res = np.reshape(x_data, y_data.shape)
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("x", relay.TensorType(oshape, "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(ref_res.shape, "float32")
func = relay.Function([x, y], z)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_reshape_like((2, 3, 4), (1, 8, 3))
verify_reshape_like((4, 7), (2, 7, 2))
def test_take_infer_type():
def verify_take(dshape, indices_shape, oshape, axis=None):
x = relay.var("x", relay.TensorType(dshape, "float32"))
indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
y = relay.take(x, indices, axis=axis)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(oshape, "float32")
d1, d2, d3 = te.var("d1"), te.var("d2"), te.var("d3")
d4, d5, d6 = te.var("d4"), te.var("d5"), te.var("d6")
verify_take((d1,), (1,), (1,), 0)
verify_take((4,), (d1, d2), (d1, d2))
verify_take((3, 3, 3), (1, d2), (1, d2))
verify_take((d1, d2), (d3, d4, d5), (d3, d4, d5, d2), 0)
verify_take((d1, d2), (d3, d4, d5), (d1, d3, d4, d5), 1)
verify_take((d1, d2, d3, d4), (d5, d6), (d1, d2, d5, d6, d4), -2)
@tvm.testing.uses_gpu
def test_take():
def verify_take(src_shape, indices_src, axis=None, mode="clip"):
src_dtype = "float32"
indices_dtype = "int32"
indices_src = np.array(indices_src, dtype=indices_dtype)
x = relay.var("x", relay.TensorType(src_shape, src_dtype))
indices = relay.var("indices", relay.TensorType(indices_src.shape, indices_dtype))
z = relay.take(x, indices, axis=axis, mode=mode)
func = relay.Function([x, indices], z)
x_data = np.random.uniform(low=-1, high=1, size=src_shape).astype(src_dtype)
np_mode = "raise" if mode == "fast" else mode
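        # NumPy has no "fast" mode; "fast" promises in-bounds indices, so
        # np_mode "raise" is the closest reference ("clip"/"wrap" map 1:1).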
ref_res = np.take(x_data, indices=indices_src, axis=axis, mode=np_mode)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, indices_src)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_take((4,), [1])
verify_take((4,), [[0,1,2,3]])
verify_take((3,3,3), [[11,25]])
verify_take((4,), [[0,1],[2,3]])
verify_take((4,), [1], 0)
verify_take((2,2), [[[1,0],[0,1]]], 0)
verify_take((2,2), [[[1,0],[0,1]]], 1)
verify_take((4,3,5,6), [[2,1,0,0]], -2)
verify_take((3,4), [-5, 20])
verify_take((3,4), [-5, 20], mode="wrap")
verify_take((3,4), [-1, 2], axis=0)
verify_take((3,4), [-1, 2], axis=0, mode="wrap")
verify_take((3,4), [-1, 2], axis=1)
verify_take((3,4), [-1, 2], axis=1, mode="wrap")
verify_take((3,3,3), [[11,25]], mode="fast")
verify_take((3,4), [0, 2], axis=0, mode="fast")
verify_take((3,4), [0, 2], axis=1, mode="fast")
def test_split_infer_type():
def verify_split(dshape, indices_or_sections, ret_type, axis=None):
x = relay.var("x", relay.ty.TensorType(dshape, "float32"))
y = relay.split(x, indices_or_sections, axis=axis)
yy = run_infer_type(y.astuple())
assert yy.checked_type == ret_type
idxd = tvm.tir.indexdiv
d1, d2, d3, d4 = te.var("d1"), te.var("d2"), te.var("d3"), te.var("d4")
axis = te.var("axis")
verify_split((5, 5, 2, 2), 5,
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32")])),
axis=1)
verify_split((5, 5, 2, 2), 5,
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32")])),
axis=0)
verify_split((d1, d2, d3, d4), 4,
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32")])),
axis=2)
verify_split((d1, d2, d3, d4), 2,
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32"),
relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32")])),
axis=0)
verify_split((d1, d2, d3, d4), (2, 4, 7),
relay.ty.TupleType(tvm.runtime.convert([
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 3, d3, d4), "float32"),
relay.ty.TensorType((d1, (d2-7), d3, d4), "float32")])),
axis=1)
def test_full_infer_type():
x = relay.var("x", relay.TensorType((), "int8"))
y = relay.full(x, ())
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((), "int8")
x = relay.var("x", relay.TensorType((), "float32"))
y = relay.full(x, (1, 2), "int8")
"shape=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 2), "int8")
@tvm.testing.uses_gpu
def test_full():
def verify_full(fill_value, src_shape, dtype):
x = relay.var("x", relay.scalar_type(dtype))
z = relay.full(x, src_shape, dtype)
func = relay.Function([x], z)
ref_res = np.full(src_shape, fill_value)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(np.array(fill_value, dtype))
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_full(4, (1, 3, 4, 4), "int32")
base = relay.var("base", relay.TensorType((1, 2, 3), "float32"))
fill = relay.var("fill", relay.TensorType((), "float32"))
y = relay.full_like(base, fill)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 2, 3), "float32")
n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
base = relay.var("base", relay.TensorType((n, c, h, w), "float32"))
fill = relay.var("fill", relay.TensorType((), "float32"))
y = relay.full_like(base, fill)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")
@tvm.testing.uses_gpu
def test_full_like():
def verify_full_like(base, fill_value, dtype):
x_data = np.random.uniform(low=-1, high=1, size=base).astype(dtype)
x = relay.var("x", relay.TensorType(base, dtype))
y = relay.var("y", relay.scalar_type(dtype))
z = relay.full_like(x, y)
func = relay.Function([x, y], z)
ref_res = np.full_like(x_data, fill_value)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, np.array(fill_value, dtype))
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_full_like((1, 3, 4, 4), 4, "int32")
verify_full_like((1, 1), 44.0, "float32")
@tvm.testing.uses_gpu
def test_infer_type_leaky_relu():
n, c , h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.leaky_relu(x, alpha=0.1)
"alpha=0.1" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")
shape = (1, 5, 10, 10)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
z = relay.nn.leaky_relu(x, alpha=0.1)
assert "alpha=0.1" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = np.where(x_data > 0, x_data, x_data * 0.1)
for target, ctx in tvm.testing.enabled_targets():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"):
x = relay.var("data", relay.TensorType(data, dtype))
if alpha:
y = relay.var("alpha", relay.TensorType(alpha, dtype))
else:
y = relay.var("alpha", relay.IncompleteType())
z = relay.nn.prelu(x, y, axis=axis)
zz = run_infer_type(z)
if axis != 1:
assert "axis" in z.astext()
assert zz.checked_type == relay.ty.TensorType(output, dtype)
if not alpha:
axis = axis if axis else 1
alpha_shape = (data[axis],)
assert zz.args[1].checked_type == relay.TensorType(alpha_shape, "float32")
    if all(isinstance(v, tvm.tir.Var) for v in data) or not alpha:
return
func = relay.Function([x, y], z)
x_data = np.random.uniform(low=-1, high=1, size=data).astype(dtype)
a_data = np.random.uniform(low=-1, high=1, size=alpha).astype(dtype)
if axis == 1:
ref_res = (x_data < 0) * (x_data * a_data.reshape(3, 1, 1)) + (x_data>=0) * x_data
else:
ref_res = (x_data < 0) * (x_data * a_data.reshape(1, 1, 3)) + (x_data>=0) * x_data
for target, ctx in tvm.testing.enabled_targets():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data, a_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data, a_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_infer_type_prelu():
n, c , h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
verify_infer_type_prelu((n, c, h, w), (c,), 1, (n, c, h, w))
verify_infer_type_prelu((n, h, w, c), (c,), 3, (n, h, w, c))
verify_infer_type_prelu((n, c, h, w), None, 1, (n, c, h, w))
verify_infer_type_prelu((n, h, w, c), None, 3, (n, h, w, c))
verify_infer_type_prelu((1, 3, 2, 2), (3,), 1, (1, 3, 2, 2))
verify_infer_type_prelu((1, 2, 2, 3), (3,), 3, (1, 2, 2, 3))
verify_infer_type_prelu((1, 3, 2, 2), None, 1, (1, 3, 2, 2))
verify_infer_type_prelu((1, 2, 2, 3), None, 3, (1, 2, 2, 3))
@tvm.testing.uses_gpu
def test_arange():
def verify_arange(start, stop, step):
dtype = "float32"
if start is None and step is None:
x = relay.arange(relay.const(stop, dtype=dtype))
ref_res = np.arange(stop).astype(dtype)
elif start is None:
x = relay.arange(relay.const(stop, dtype=dtype), step=relay.const(step, dtype=dtype))
ref_res = np.arange(stop, step=step).astype(dtype)
elif step is None:
x = relay.arange(relay.const(start, dtype=dtype), relay.const(stop, dtype=dtype))
ref_res = np.arange(start, stop).astype(dtype)
else:
x = relay.arange(
relay.const(start, dtype=dtype),
relay.const(stop, dtype=dtype),
relay.const(step, dtype=dtype))
ref_res = np.arange(start, stop, step).astype(dtype)
func = relay.Function([], x)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)()
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_arange(None, 20, None)
verify_arange(None, 20, 2)
verify_arange(1, 20, None)
verify_arange(1, 20, 2)
# verify_arange(1, 20, 1.5)
verify_arange(1, 20.5, None)
verify_arange(1, 20, 3)
verify_arange(20, 1, -1)
    # arange doesn't support floating-point steps right now; see the type relation
@tvm.testing.uses_gpu
def test_meshgrid():
def verify_meshgrid(lengths, indexing="ij"):
input_vars = []
input_data = []
for i, length in enumerate(lengths):
input_name = "x_{}".format(i)
if length == 0:
input_vars.append(relay.var(input_name, relay.scalar_type("float32")))
input_data.append(np.array(1, "float32"))
else:
input_vars.append(relay.var(input_name, relay.TensorType((length,), "float32")))
input_data.append(np.arange(length).astype("float32"))
z = relay.meshgrid(input_vars, indexing=indexing).astuple()
func = relay.Function(input_vars, z)
ref_res = np.meshgrid(*input_data, indexing=indexing)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(*input_data)
assert len(op_res) == len(ref_res)
for i in range(len(op_res)):
tvm.testing.assert_allclose(op_res[i].asnumpy(), ref_res[i], rtol=1e-5)
verify_meshgrid([3, 5])
verify_meshgrid([4, 2], indexing="xy")
verify_meshgrid([3, 5, 2])
verify_meshgrid([3, 1, 5], indexing="xy")
verify_meshgrid([3, 5, 0])
@tvm.testing.uses_gpu
def test_tile():
def verify_tile(dshape, reps):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.tile(x, reps=reps)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.tile(x_data, reps=reps)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_tile((2, 3, 4), (3, 2, 1))
verify_tile((2, 3, 4), (1, 2))
verify_tile((2, 3), (3, 2, 1))
@tvm.testing.uses_gpu
def test_repeat():
def verify_repeat(dshape, repeats, axis):
x = relay.Var("x", relay.TensorType(dshape, "float32"))
func = relay.Function([x], relay.repeat(x, repeats, axis))
data = np.random.uniform(size=dshape).astype("float32")
ref_res = np.repeat(data, repeats, axis)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_repeat((3,), 2, 0)
verify_repeat((3, 10), 2, -1)
verify_repeat((3, 2, 4), 3, 1)
@tvm.testing.uses_gpu
def test_stack():
def verify_stack(dshapes, axis):
y = []
for shape in dshapes:
y.append(relay.var("input", relay.TensorType(shape, "float32")))
x = relay.Tuple(y)
z = relay.stack(x, axis=axis)
func = relay.Function(y, z)
x_data = [np.random.normal(size=shape).astype("float32") for shape in dshapes]
ref_res = np.stack(x_data, axis=axis)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(*x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_stack([(2,), (2,), (2,)], -1)
verify_stack([(2,), (2,), (2,)], 0)
verify_stack([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1)
verify_stack([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1)
verify_stack([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], 4)
@tvm.testing.uses_gpu
def test_reverse():
def verify_reverse(dshape, axis):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.reverse(x, axis=axis)
zz = run_infer_type(z)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.flip(x_data, axis)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_reverse((2, 3, 4), 1)
verify_reverse((4, 7), 0)
verify_reverse((2, 3, 4), -1)
@tvm.testing.uses_gpu
def test_reverse_sequence():
def verify_reverse_sequence(x_data, seq_lengths, batch_axis, seq_axis, ref_res):
seq_lengths_data = np.array(seq_lengths).astype("int32")
x = relay.var("x", relay.TensorType(x_data.shape, str(x_data.dtype)))
z = relay.reverse_sequence(x, relay.const(seq_lengths_data), seq_axis, batch_axis)
zz = run_infer_type(z)
assert zz.checked_type == x.type_annotation
func = relay.Function([x], z)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 5, 10, 15],
[4, 1, 6, 11],
[8, 9, 2, 7],
[12, 13, 14, 3]]
verify_reverse_sequence(indata, [1, 2, 3, 4], 1, 0, np.array(result))
verify_reverse_sequence(indata, [1, 2, 3, 4], -1, 0, np.array(result))
verify_reverse_sequence(indata.astype("float32"), [1, 2, 3, 4], 1, 0, np.array(result).astype("float32"))
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 1, 2, 3],
[5, 4, 6, 7],
[10, 9, 8, 11],
[15, 14, 13, 12]]
verify_reverse_sequence(indata, [1, 2, 3, 4], 0, 1, np.array(result))
verify_reverse_sequence(indata, [1, 2, 3, 4], 0, -1, np.array(result))
verify_reverse_sequence(indata.astype("float32"), [1, 2, 3, 4], 0, 1, np.array(result).astype("float32"))
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[15, 14, 13, 12]]
verify_reverse_sequence(indata, [-1, 0, 1, 5], 0, 1, np.array(result))
indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
result = [[[[18, 19, 20], [21, 22, 23], [24, 25, 26]],
[[9, 10, 11], [12, 13, 14], [15, 16, 17]],
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]],
[[[45, 46, 47], [48, 49, 50], [51, 52, 53]],
[[36, 37, 38], [39, 40, 41], [42, 43, 44]],
[[27, 28, 29], [30, 31, 32], [33, 34, 35]]]]
verify_reverse_sequence(indata, [3, 3], 0, 1, np.array(result))
indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
result = [[[[9, 10, 11], [21, 22, 23], [15, 16, 17]],
[[0, 1, 2], [12, 13, 14], [6, 7, 8]],
[[18, 19, 20], [3, 4, 5], [24, 25, 26]]],
[[[36, 37, 38], [48, 49, 50], [42, 43, 44]],
[[27, 28, 29], [39, 40, 41], [33, 34, 35]],
[[45, 46, 47], [30, 31, 32], [51, 52, 53]]]]
verify_reverse_sequence(indata, [2, 3, 2], 2, 1, np.array(result))
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = []
with pytest.raises(Exception) as execinfo:
verify_reverse_sequence(indata, [2, 3, 2, 4, 5], 1, 0, np.array(result))
assert "For reverse_sequnece seq_lengths size should match with dimension of batch axis," \
" but got dimension of batch_axis = 4, and seq_length size = 5" in execinfo.value.args[0]
def test_scatter():
def ref_scatter(data, indices, updates, axis=0):
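        # NumPy reference: enumerate every position in `indices` via
        # np.indices, overwrite the `axis` coordinate with the stored index
        # value, then write the matching update into a copy of `data`.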
idx = np.indices(indices.shape).reshape(indices.ndim, -1)
updated_idx = np.copy(idx)
indices = indices.reshape(-1)
for i in range(len(indices)):
updated_idx[axis, i] = indices[i]
scattered = np.copy(data)
scattered[tuple(updated_idx)] = updates[tuple(idx)]
return scattered
def verify_scatter(dshape, ishape, axis=0):
d = relay.var("d", relay.TensorType(dshape, "float32"))
i = relay.var("i", relay.TensorType(ishape, "int64"))
u = relay.var("u", relay.TensorType(ishape, "float32"))
z = relay.op.scatter(d, i, u, axis)
func = relay.Function([d, i, u], z)
data_np = np.random.uniform(size=dshape).astype("float32")
updates_np = np.random.uniform(size=ishape).astype("float32")
indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype("int64")
ref_res = ref_scatter(data_np, indices_np, updates_np, axis)
for target, ctx in [("llvm", tvm.cpu())]:
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data_np, indices_np, updates_np)
tvm.testing.assert_allclose(
op_res.asnumpy(), ref_res, rtol=1e-5)
verify_scatter((10, ), (10, ), 0)
verify_scatter((10, 5), (10, 5), -2)
verify_scatter((10, 5), (10, 5), -1)
verify_scatter((10, 5), (3, 5), 0)
verify_scatter((12, 4), (7, 2), 1)
verify_scatter((2, 3, 4), (1, 3, 4), 0)
verify_scatter((2, 3, 4), (2, 1, 4), 1)
verify_scatter((2, 3, 4), (2, 3, 1), 2)
verify_scatter((2, 3, 4, 5), (1, 3, 4, 5), 0)
verify_scatter((6, 3, 4, 5), (2, 3, 4, 5), 1)
verify_scatter((2, 3, 8, 5), (2, 3, 1, 1), 2)
verify_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3)
def test_scatter_add():
def ref_scatter_add(data, indices, updates, axis=0):
output = np.copy(data)
for index in np.ndindex(*indices.shape):
new_index = list(index)
new_index[axis] = indices[index]
output[tuple(new_index)] += updates[index]
return output
def verify_scatter_add(dshape, ishape, axis=0):
d = relay.var("d", relay.TensorType(dshape, "float32"))
i = relay.var("i", relay.TensorType(ishape, "int64"))
u = relay.var("u", relay.TensorType(ishape, "float32"))
z = relay.op.scatter_add(d, i, u, axis)
func = relay.Function([d, i, u], z)
data_np = np.random.uniform(size=dshape).astype("float32")
updates_np = np.random.uniform(size=ishape).astype("float32")
indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype("int64")
ref_res = ref_scatter_add(data_np, indices_np, updates_np, axis)
for target, ctx in [("llvm", tvm.cpu())]:
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data_np, indices_np, updates_np)
tvm.testing.assert_allclose(
op_res.asnumpy(), ref_res, rtol=1e-5)
verify_scatter_add((10, ), (10, ), 0)
verify_scatter_add((10, 5), (10, 5), -2)
verify_scatter_add((10, 5), (10, 5), -1)
verify_scatter_add((10, 5), (3, 5), 0)
verify_scatter_add((12, 4), (7, 2), 1)
verify_scatter_add((2, 3, 4), (1, 3, 4), 0)
verify_scatter_add((2, 3, 4), (2, 1, 4), 1)
verify_scatter_add((2, 3, 4), (2, 3, 1), 2)
verify_scatter_add((2, 3, 4, 5), (1, 3, 4, 5), 0)
verify_scatter_add((6, 3, 4, 5), (2, 3, 4, 5), 1)
verify_scatter_add((2, 3, 8, 5), (2, 3, 1, 1), 2)
verify_scatter_add((16, 16, 4, 5), (16, 16, 4, 5), 3)
@tvm.testing.uses_gpu
def test_gather():
def verify_gather(data, axis, indices, ref_res):
data = np.asarray(data, dtype='float32')
indices = np.asarray(indices, dtype='int32')
ref_res = np.asarray(ref_res)
d = relay.var("x", relay.TensorType(data.shape, "float32"))
i = relay.var("y", relay.TensorType(indices.shape, "int32"))
z = relay.gather(d, axis, i)
func = relay.Function([d, i], z)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data, indices)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res,
rtol=1e-5)
verify_gather([[1, 2], [3, 4]],
1,
[[0, 0], [1, 0]],
[[1, 1], [4, 3]])
verify_gather([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]],
0,
[[[1, 0, 1], [1, 1, 0]]],
[[[6, 1, 8], [9, 10, 5]]])
verify_gather([[[-0.2321, -0.2024, -1.7624], [-0.3829, -0.4246, 0.2448],
[0.1822, 0.2360, -0.8965], [0.4497, -0.2224, 0.6103]],
[[0.0408, -0.7667, -0.4303], [-0.3216, 0.7489, -0.1502],
[0.0144, -0.4699, -0.0064], [-0.0768, -1.6064, 1.3390]]],
1,
[[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],
[[[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],
[[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]]])
verify_gather([[[0.3050, 1.6986, 1.1034], [0.7020, -0.6960, -2.1818],
[0.3116, -0.5773, -0.9912], [0.0835, -1.3915, -1.0720]],
[[0.1694, -0.6091, -0.6539], [-0.5234, -0.1218, 0.5084],
[0.2374, -1.9537, -2.0078], [-0.5700, -1.0302, 0.1558]]],
2,
[[[1, 1, 0, 1], [0, 0, 2, 2], [1, 2, 1, 2], [2, 2, 1, 0]],
[[0, 0, 1, 2], [2, 2, 1, 0], [1, 2, 0, 0], [0, 2, 0, 2]]],
[[[1.6986, 1.6986, 0.3050, 1.6986],
[0.7020, 0.7020, -2.1818, -2.1818],
[-0.5773, -0.9912, -0.5773, -0.9912],
[-1.0720, -1.0720, -1.3915, 0.0835]],
[[0.1694, 0.1694, -0.6091, -0.6539],
[0.5084, 0.5084, -0.1218, -0.5234],
[-1.9537, -2.0078, 0.2374, 0.2374],
[-0.5700, 0.1558, -0.5700, 0.1558]]])
@tvm.testing.uses_gpu
def test_gather_nd():
def verify_gather_nd(xshape, yshape, y_data):
x = relay.var("x", relay.TensorType(xshape, "float32"))
y = relay.var("y", relay.TensorType(yshape, "int32"))
z = relay.gather_nd(x, y)
func = relay.Function([x, y], z)
x_data = np.random.uniform(size=xshape).astype("float32")
ref_res = x_data[tuple(y_data)]
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_gather_nd((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]])
verify_gather_nd((2, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify_gather_nd((3, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify_gather_nd((3, 2), (2, 2, 3), [[[0, 1, 2], [2, 0, 1]], [[0, 0, 0], [1, 1, 1]]])
def _verify_infiniteness_ops(relay_op, ref_op):
    for dtype in ['float32', 'float16', 'int32', 'int16']:
shape = (2, 8, 8)
x = relay.var("x", relay.TensorType(shape, dtype))
y = relay_op(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, "bool")
data = np.random.uniform(size=shape).astype(dtype)
if dtype.startswith('float'):
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.infty
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan
intrp = create_executor()
op_res = intrp.evaluate(y, {x: data})
ref_res = ref_op(data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
def test_isfinite():
_verify_infiniteness_ops(relay.isfinite, np.isfinite)
def test_isinf():
_verify_infiniteness_ops(relay.isinf, np.isinf)
@tvm.testing.uses_gpu
def test_unravel_index():
def verify_unravel_index(indices, shape, dtype):
x_data = np.array(indices).astype(dtype)
y_data = np.array(shape).astype(dtype)
x = relay.var("x", relay.TensorType(x_data.shape, dtype))
y = relay.var("y", relay.TensorType(y_data.shape, dtype))
z = relay.unravel_index(x, y)
zz = run_infer_type(z)
if len(x_data.shape) == 1:
out_shape = [y_data.shape[0], x_data.shape[0]]
else:
out_shape = [y_data.shape[0]]
assert zz.checked_type == relay.ty.TensorType(out_shape, dtype)
func = relay.Function([x, y], z)
ref_res = np.unravel_index(x_data, y_data)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
for dtype in ["int64", "int32"]:
verify_unravel_index([0, 1, 2, 3], [2, 2], dtype)
verify_unravel_index([144], [5, 5, 5, 2], dtype)
verify_unravel_index(144, [5, 5, 5, 2], dtype)
verify_unravel_index([100, 13, 5], [5, 5, 5, 2], dtype)
@tvm.testing.uses_gpu
def test_sparse_to_dense():
    def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, expected):
sparse_indices_data = np.array(sparse_indices)
sparse_values_data = np.array(sparse_values)
default_value_data = np.array(default_value)
a = relay.var("a", relay.TensorType(sparse_indices_data.shape, str(sparse_indices_data.dtype)))
b = relay.var("b", relay.TensorType(sparse_values_data.shape, str(sparse_values_data.dtype)))
if default_value is None:
args = [a, b]
d = relay.sparse_to_dense(a, output_shape, b)
else:
c = relay.var("c", relay.TensorType(default_value_data.shape, str(default_value_data.dtype)))
args = [a, b, c]
d = relay.sparse_to_dense(a, output_shape, b, c)
zz = run_infer_type(d)
assert zz.checked_type == relay.ty.TensorType(output_shape, str(sparse_values_data.dtype))
func = relay.Function(args, d)
for target, ctx in tvm.testing.enabled_targets():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
if default_value is None:
op_res = intrp.evaluate(func)(sparse_indices_data, sparse_values_data)
else:
op_res = intrp.evaluate(func)(
sparse_indices_data, sparse_values_data, default_value_data
)
                tvm.testing.assert_allclose(op_res.asnumpy(), expected, rtol=1e-5)
verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0])
verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3])
verify_sparse_to_dense([[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]])
verify_sparse_to_dense(
[[0, 0, 0], [1, 2, 3]],
[1, 2],
4,
[2, 3, 4],
[[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]]
)
verify_sparse_to_dense([0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0])
if __name__ == "__main__":
test_cast()
test_zeros_ones()
test_unary_identity()
test_clip()
test_transpose_infer_type()
test_transpose()
test_reshape_infer_type()
test_reshape()
test_reshape_fail()
test_reshape_like_infer_type()
test_reshape_like()
test_take_infer_type()
test_take()
test_full_infer_type()
test_full()
test_full_like_infer_type()
test_full_like()
test_infer_type_leaky_relu()
test_infer_type_prelu()
test_squeeze()
test_squeeze_infer_type()
test_squeeze_bad_axes_infer_type()
test_split_infer_type()
test_arange()
test_meshgrid()
test_reverse()
test_stack()
test_tile()
test_repeat()
test_gather_nd()
test_isfinite()
test_isinf()
test_unravel_index()
test_sparse_to_dense()
test_fixed_point_multiply()
| true
| true
|
f709aaa7ef848d9eea57b70c4b2699c9584e9ab3
| 2,111
|
py
|
Python
|
application/pages/training_analysis/services/fit_file_services.py
|
Jhsmit/awesome-panel
|
53f7754f7c505a2666f6724df26c851ae942ec40
|
[
"Apache-2.0"
] | 1
|
2020-05-08T21:44:37.000Z
|
2020-05-08T21:44:37.000Z
|
application/pages/training_analysis/services/fit_file_services.py
|
Jhsmit/awesome-panel
|
53f7754f7c505a2666f6724df26c851ae942ec40
|
[
"Apache-2.0"
] | null | null | null |
application/pages/training_analysis/services/fit_file_services.py
|
Jhsmit/awesome-panel
|
53f7754f7c505a2666f6724df26c851ae942ec40
|
[
"Apache-2.0"
] | null | null | null |
"""In this module we provide services for working with fit files.
Resources
- fitparse package: [GitHub](https://github.com/dtcooper/python-fitparse) and \
[Docs](http://dtcooper.github.io/python-fitparse/)
- fitdecode package: [GitHub](https://github.com/polyvertex/fitdecode) and \
[Read the Docs](https://fitdecode.readthedocs.io/en/latest/)
- [FIT on Wikipedia](https://wiki.openstreetmap.org/wiki/FIT)
- [Download FIT SDK](https://www.thisisant.com/resources/fit).
"""
from typing import Union
import fitparse
import pandas as pd
UNIT_CONVERSION = {
"speed": {"from": "10*6m/s", "to": "km/h", "factor": 0.0036,},
"enhanced_speed": {"from": "10*6m/s", "to": "km/h", "factor": 3.6,},
"altitude": {"from": "unknown", "to": "m", "factor": 0.03855343881175331,},
"position_long": {"from": "semicircles", "to": "degrees", "factor": (180.0 / 2 ** 31),},
"position_lat": {"from": "semicircles", "to": "degrees", "factor": (180.0 / 2 ** 31),},
}
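# Garmin stores longitude/latitude as 32-bit "semicircles"; multiplying by
# 180 / 2**31 maps the full signed int32 range onto [-180, 180) degrees,
# e.g. 2**30 semicircles -> 90.0 degrees.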
def parse_fit_file(file: Union[fitparse.base.FitFile, bytes, str,]) -> pd.DataFrame:
"""Converts a fit_file to a dataframe
Args:
file (Union[fitparse.base.FitFile, bytes, str]): The fit file to parse
Raises:
ValueError: If the file is not in a supported format
Returns:
pd.DataFrame: A DataFrame with the data
"""
if isinstance(file, (bytes, str,),):
fit_file = fitparse.FitFile(file)
elif isinstance(file, fitparse.base.FitFile,):
fit_file = file
else:
raise ValueError(f"{type(file)} is not supported!")
return _parse_records(fit_file.get_messages("record"))
def _parse_records(records,):
data = [record.get_values() for record in records]
training_data = pd.DataFrame(data)
_convert_units(training_data)
return training_data
def _convert_units(training_data_row: pd.DataFrame,):
columns = set(UNIT_CONVERSION.keys()).intersection(set(training_data_row.columns))
for column in columns:
training_data_row[column] *= UNIT_CONVERSION[column]["factor"]
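# Minimal usage sketch, assuming a local FIT file named "activity.fit"
# (hypothetical path; any bytes/str/FitFile input works):
if __name__ == "__main__":
    training_df = parse_fit_file("activity.fit")
    print(training_df.head())  # columns depend on what the device recorded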
| 35.183333
| 93
| 0.650403
|
from typing import Union
import fitparse
import pandas as pd
UNIT_CONVERSION = {
"speed": {"from": "10*6m/s", "to": "km/h", "factor": 0.0036,},
"enhanced_speed": {"from": "10*6m/s", "to": "km/h", "factor": 3.6,},
"altitude": {"from": "unknown", "to": "m", "factor": 0.03855343881175331,},
"position_long": {"from": "semicircles", "to": "degrees", "factor": (180.0 / 2 ** 31),},
"position_lat": {"from": "semicircles", "to": "degrees", "factor": (180.0 / 2 ** 31),},
}
def parse_fit_file(file: Union[fitparse.base.FitFile, bytes, str,]) -> pd.DataFrame:
if isinstance(file, (bytes, str,),):
fit_file = fitparse.FitFile(file)
elif isinstance(file, fitparse.base.FitFile,):
fit_file = file
else:
raise ValueError(f"{type(file)} is not supported!")
return _parse_records(fit_file.get_messages("record"))
def _parse_records(records,):
data = [record.get_values() for record in records]
training_data = pd.DataFrame(data)
_convert_units(training_data)
return training_data
def _convert_units(training_data_row: pd.DataFrame,):
columns = set(UNIT_CONVERSION.keys()).intersection(set(training_data_row.columns))
for column in columns:
training_data_row[column] *= UNIT_CONVERSION[column]["factor"]
| true
| true
|
f709abae3ce3540aa543e1f247ef2b414093609a
| 679
|
py
|
Python
|
hackerrank/Python/Exceptions/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | 4
|
2020-07-24T01:59:50.000Z
|
2021-07-24T15:14:08.000Z
|
hackerrank/Python/Exceptions/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
hackerrank/Python/Exceptions/test.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
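# Technique: @patch feeds the side_effect list to successive input() calls,
# while redirect_stdout captures everything `solution` prints when it is
# imported inside the context manager.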
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'3',
'1 0',
'2 $',
'3 1',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(),
"Error Code: integer division or modulo by zero\n"
+ "Error Code: invalid literal for int() with base 10: '$'\n"
+ "3\n")
if __name__ == '__main__':
unittest.main()
| 26.115385
| 86
| 0.561119
|
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'3',
'1 0',
'2 $',
'3 1',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(),
"Error Code: integer division or modulo by zero\n"
+ "Error Code: invalid literal for int() with base 10: '$'\n"
+ "3\n")
if __name__ == '__main__':
unittest.main()
| true
| true
|
f709ac008c4373082aa0f1b4e12d73b060e3e98e
| 14,590
|
py
|
Python
|
RecoEgamma/EgammaTools/python/regressionModifier_cfi.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 6
|
2017-09-08T14:12:56.000Z
|
2022-03-09T23:57:01.000Z
|
RecoEgamma/EgammaTools/python/regressionModifier_cfi.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 545
|
2017-09-19T17:10:19.000Z
|
2022-03-07T16:55:27.000Z
|
RecoEgamma/EgammaTools/python/regressionModifier_cfi.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 14
|
2017-10-04T09:47:21.000Z
|
2019-10-23T18:04:45.000Z
|
import FWCore.ParameterSet.Config as cms
regressionModifier106XUL = cms.PSet(
modifierName = cms.string('EGRegressionModifierV3'),
rhoTag = cms.InputTag('fixedGridRhoFastjetAllTmp'),
useClosestToCentreSeedCrysDef = cms.bool(False),
maxRawEnergyForLowPtEBSigma = cms.double(-1),
maxRawEnergyForLowPtEESigma = cms.double(1200.),
eleRegs = cms.PSet(
ecalOnlyMean = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(-1.),
rangeMaxHighEt = cms.double(3.0),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("electron_eb_ecalOnly_1To300_0p2To2_mean"),
ebHighEtForestName = cms.string("electron_eb_ECALonly"),
eeLowEtForestName = cms.string("electron_ee_ecalOnly_1To300_0p2To2_mean"),
eeHighEtForestName = cms.string("electron_ee_ECALonly"),
),
ecalOnlySigma = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("electron_eb_ecalOnly_1To300_0p0002To0p5_sigma"),
ebHighEtForestName = cms.string("electron_eb_ECALonly_var"),
eeLowEtForestName = cms.string("electron_ee_ecalOnly_1To300_0p0002To0p5_sigma"),
eeHighEtForestName = cms.string("electron_ee_ECALonly_var"),
),
epComb = cms.PSet(
ecalTrkRegressionConfig = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(0.2),
rangeMaxHighEt = cms.double(2.0),
lowEtHighEtBoundary = cms.double(999999.),
forceHighEnergyTrainingIfSaturated = cms.bool(False),
ebLowEtForestName = cms.string('electron_eb_ecalTrk_1To300_0p2To2_mean'),
ebHighEtForestName = cms.string('electron_eb_ecalTrk_1To300_0p2To2_mean'),
eeLowEtForestName = cms.string('electron_ee_ecalTrk_1To300_0p2To2_mean'),
eeHighEtForestName = cms.string('electron_ee_ecalTrk_1To300_0p2To2_mean'),
),
ecalTrkRegressionUncertConfig = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
lowEtHighEtBoundary = cms.double(999999.),
forceHighEnergyTrainingIfSaturated = cms.bool(False),
ebLowEtForestName = cms.string('electron_eb_ecalTrk_1To300_0p0002To0p5_sigma'),
ebHighEtForestName = cms.string('electron_eb_ecalTrk_1To300_0p0002To0p5_sigma'),
eeLowEtForestName = cms.string('electron_ee_ecalTrk_1To300_0p0002To0p5_sigma'),
eeHighEtForestName = cms.string('electron_ee_ecalTrk_1To300_0p0002To0p5_sigma'),
),
maxEcalEnergyForComb=cms.double(200.),
minEOverPForComb=cms.double(0.025),
maxEPDiffInSigmaForComb=cms.double(15.),
maxRelTrkMomErrForComb=cms.double(10.),
)
),
phoRegs = cms.PSet(
ecalOnlyMean = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(-1.),
rangeMaxHighEt = cms.double(3.0),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("photon_eb_ecalOnly_5To300_0p2To2_mean"),
ebHighEtForestName = cms.string("photon_eb_ECALonly"),
eeLowEtForestName = cms.string("photon_ee_ecalOnly_5To300_0p2To2_mean"),
eeHighEtForestName = cms.string("photon_ee_ECALonly"),
),
ecalOnlySigma = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("photon_eb_ecalOnly_5To300_0p0002To0p5_sigma"),
ebHighEtForestName = cms.string("photon_eb_ECALonly_var"),
eeLowEtForestName = cms.string("photon_ee_ecalOnly_5To300_0p0002To0p5_sigma"),
eeHighEtForestName = cms.string("photon_ee_ECALonly_var"),
),
)
)
regressionModifier103XLowPtPho = cms.PSet(
modifierName = cms.string('EGRegressionModifierV3'),
rhoTag = cms.InputTag('fixedGridRhoFastjetAllTmp'),
useClosestToCentreSeedCrysDef = cms.bool(False),
maxRawEnergyForLowPtEBSigma = cms.double(-1),
maxRawEnergyForLowPtEESigma = cms.double(1200.),
eleRegs = cms.PSet(
ecalOnlyMean = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(-1.),
rangeMaxHighEt = cms.double(3.0),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("electron_eb_ecalOnly_1To20_0p2To2_mean"),
ebHighEtForestName = cms.string("electron_eb_ECALonly"),
eeLowEtForestName = cms.string("electron_ee_ecalOnly_1To20_0p2To2_mean"),
eeHighEtForestName = cms.string("electron_ee_ECALonly"),
),
ecalOnlySigma = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("electron_eb_ecalOnly_1To20_0p0002To0p5_sigma"),
ebHighEtForestName = cms.string("electron_eb_ECALonly_var"),
eeLowEtForestName = cms.string("electron_ee_ecalOnly_1To20_0p0002To0p5_sigma"),
eeHighEtForestName = cms.string("electron_ee_ECALonly_var"),
),
epComb = cms.PSet(
ecalTrkRegressionConfig = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(0.2),
rangeMaxHighEt = cms.double(2.0),
lowEtHighEtBoundary = cms.double(999999.),
forceHighEnergyTrainingIfSaturated = cms.bool(False),
ebLowEtForestName = cms.string('electron_eb_ecalTrk_1To20_0p2To2_mean'),
ebHighEtForestName = cms.string('electron_eb_ecalTrk_1To20_0p2To2_mean'),
eeLowEtForestName = cms.string('electron_ee_ecalTrk_1To20_0p2To2_mean'),
eeHighEtForestName = cms.string('electron_ee_ecalTrk_1To20_0p2To2_mean'),
),
ecalTrkRegressionUncertConfig = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
lowEtHighEtBoundary = cms.double(999999.),
forceHighEnergyTrainingIfSaturated = cms.bool(False),
ebLowEtForestName = cms.string('electron_eb_ecalTrk_1To20_0p0002To0p5_sigma'),
ebHighEtForestName = cms.string('electron_eb_ecalTrk_1To20_0p0002To0p5_sigma'),
eeLowEtForestName = cms.string('electron_ee_ecalTrk_1To20_0p0002To0p5_sigma'),
eeHighEtForestName = cms.string('electron_ee_ecalTrk_1To20_0p0002To0p5_sigma'),
),
maxEcalEnergyForComb=cms.double(200.),
minEOverPForComb=cms.double(0.025),
maxEPDiffInSigmaForComb=cms.double(15.),
maxRelTrkMomErrForComb=cms.double(10.),
)
),
phoRegs = cms.PSet(
ecalOnlyMean = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(-1.),
rangeMaxHighEt = cms.double(3.0),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("photon_eb_ecalOnly_1To20_0p2To2_mean"),
ebHighEtForestName = cms.string("photon_eb_ECALonly"),
eeLowEtForestName = cms.string("photon_ee_ecalOnly_1To20_0p2To2_mean"),
eeHighEtForestName = cms.string("photon_ee_ECALonly"),
),
ecalOnlySigma = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("photon_eb_ecalOnly_1To20_0p0002To0p5_sigma"),
ebHighEtForestName = cms.string("photon_eb_ECALonly_var"),
eeLowEtForestName = cms.string("photon_ee_ecalOnly_1To20_0p0002To0p5_sigma"),
eeHighEtForestName = cms.string("photon_ee_ECALonly_var"),
),
)
)
regressionModifier94X = \
cms.PSet( modifierName = cms.string('EGRegressionModifierV2'),
rhoCollection = cms.InputTag('fixedGridRhoFastjetAllTmp'),
electron_config = cms.PSet( # EB, EE
regressionKey = cms.vstring('electron_eb_ECALonly_lowpt', 'electron_eb_ECALonly', 'electron_ee_ECALonly_lowpt', 'electron_ee_ECALonly',
'electron_eb_ECALTRK_lowpt', 'electron_eb_ECALTRK', 'electron_ee_ECALTRK_lowpt', 'electron_ee_ECALTRK'),
uncertaintyKey = cms.vstring('electron_eb_ECALonly_lowpt_var', 'electron_eb_ECALonly_var', 'electron_ee_ECALonly_lowpt_var', 'electron_ee_ECALonly_var',
'electron_eb_ECALTRK_lowpt_var', 'electron_eb_ECALTRK_var', 'electron_ee_ECALTRK_lowpt_var', 'electron_ee_ECALTRK_var'),
),
photon_config = cms.PSet( # EB, EE
regressionKey = cms.vstring('photon_eb_ECALonly_lowpt', 'photon_eb_ECALonly', 'photon_ee_ECALonly_lowpt', 'photon_ee_ECALonly'),
uncertaintyKey = cms.vstring('photon_eb_ECALonly_lowpt_var', 'photon_eb_ECALonly_var', 'photon_ee_ECALonly_lowpt_var', 'photon_ee_ECALonly_var'),
),
lowEnergy_ECALonlyThr = cms.double(99999.),
lowEnergy_ECALTRKThr = cms.double(50.),
highEnergy_ECALTRKThr = cms.double(200.),
eOverP_ECALTRKThr = cms.double(0.025),
epDiffSig_ECALTRKThr = cms.double(15.),
epSig_ECALTRKThr = cms.double(10.),
forceHighEnergyEcalTrainingIfSaturated = cms.bool(True)
)
regressionModifier80X = \
cms.PSet( modifierName = cms.string('EGRegressionModifierV1'),
autoDetectBunchSpacing = cms.bool(True),
applyExtraHighEnergyProtection = cms.bool(True),
bunchSpacingTag = cms.InputTag("bunchSpacingProducer"),
manualBunchSpacing = cms.int32(50),
rhoCollection = cms.InputTag("fixedGridRhoFastjetAll"),
vertexCollection = cms.InputTag("offlinePrimaryVertices"),
electron_config = cms.PSet( # EB, EE
regressionKey_25ns = cms.vstring('gedelectron_EBCorrection_25ns', 'gedelectron_EECorrection_25ns'),
uncertaintyKey_25ns = cms.vstring('gedelectron_EBUncertainty_25ns', 'gedelectron_EEUncertainty_25ns'),
combinationKey_25ns = cms.string('gedelectron_p4combination_25ns'),
regressionKey_50ns = cms.vstring('gedelectron_EBCorrection_50ns', 'gedelectron_EECorrection_50ns'),
uncertaintyKey_50ns = cms.vstring('gedelectron_EBUncertainty_50ns', 'gedelectron_EEUncertainty_50ns'),
combinationKey_50ns = cms.string('gedelectron_p4combination_50ns'),
),
photon_config = cms.PSet( # EB, EE
regressionKey_25ns = cms.vstring('gedphoton_EBCorrection_25ns', 'gedphoton_EECorrection_25ns'),
uncertaintyKey_25ns = cms.vstring('gedphoton_EBUncertainty_25ns', 'gedphoton_EEUncertainty_25ns'),
regressionKey_50ns = cms.vstring('gedphoton_EBCorrection_50ns', 'gedphoton_EECorrection_50ns'),
uncertaintyKey_50ns = cms.vstring('gedphoton_EBUncertainty_50ns', 'gedphoton_EEUncertainty_50ns'),
)
)
# by default we use the regression inappropriate to the main purpose of this release
# life is simpler that way
regressionModifier = regressionModifier94X.clone()
from Configuration.Eras.Modifier_run2_egamma_2016_cff import run2_egamma_2016
from Configuration.Eras.Modifier_run2_egamma_2017_cff import run2_egamma_2017
from Configuration.Eras.Modifier_run2_egamma_2018_cff import run2_egamma_2018
(run2_egamma_2016 | run2_egamma_2017 | run2_egamma_2018).toReplaceWith(regressionModifier,regressionModifier106XUL)
from Configuration.ProcessModifiers.egamma_lowPt_exclusive_cff import egamma_lowPt_exclusive
egamma_lowPt_exclusive.toReplaceWith(regressionModifier,regressionModifier103XLowPtPho)
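# toReplaceWith swaps the cloned 94X PSet for the era-specific one whenever
# any of the listed era/process modifiers is active in the configuration.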
| 58.12749
| 204
| 0.62111
|
import FWCore.ParameterSet.Config as cms
regressionModifier106XUL = cms.PSet(
modifierName = cms.string('EGRegressionModifierV3'),
rhoTag = cms.InputTag('fixedGridRhoFastjetAllTmp'),
useClosestToCentreSeedCrysDef = cms.bool(False),
maxRawEnergyForLowPtEBSigma = cms.double(-1),
maxRawEnergyForLowPtEESigma = cms.double(1200.),
eleRegs = cms.PSet(
ecalOnlyMean = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(-1.),
rangeMaxHighEt = cms.double(3.0),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("electron_eb_ecalOnly_1To300_0p2To2_mean"),
ebHighEtForestName = cms.string("electron_eb_ECALonly"),
eeLowEtForestName = cms.string("electron_ee_ecalOnly_1To300_0p2To2_mean"),
eeHighEtForestName = cms.string("electron_ee_ECALonly"),
),
ecalOnlySigma = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("electron_eb_ecalOnly_1To300_0p0002To0p5_sigma"),
ebHighEtForestName = cms.string("electron_eb_ECALonly_var"),
eeLowEtForestName = cms.string("electron_ee_ecalOnly_1To300_0p0002To0p5_sigma"),
eeHighEtForestName = cms.string("electron_ee_ECALonly_var"),
),
epComb = cms.PSet(
ecalTrkRegressionConfig = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(0.2),
rangeMaxHighEt = cms.double(2.0),
lowEtHighEtBoundary = cms.double(999999.),
forceHighEnergyTrainingIfSaturated = cms.bool(False),
ebLowEtForestName = cms.string('electron_eb_ecalTrk_1To300_0p2To2_mean'),
ebHighEtForestName = cms.string('electron_eb_ecalTrk_1To300_0p2To2_mean'),
eeLowEtForestName = cms.string('electron_ee_ecalTrk_1To300_0p2To2_mean'),
eeHighEtForestName = cms.string('electron_ee_ecalTrk_1To300_0p2To2_mean'),
),
ecalTrkRegressionUncertConfig = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
lowEtHighEtBoundary = cms.double(999999.),
forceHighEnergyTrainingIfSaturated = cms.bool(False),
ebLowEtForestName = cms.string('electron_eb_ecalTrk_1To300_0p0002To0p5_sigma'),
ebHighEtForestName = cms.string('electron_eb_ecalTrk_1To300_0p0002To0p5_sigma'),
eeLowEtForestName = cms.string('electron_ee_ecalTrk_1To300_0p0002To0p5_sigma'),
eeHighEtForestName = cms.string('electron_ee_ecalTrk_1To300_0p0002To0p5_sigma'),
),
maxEcalEnergyForComb=cms.double(200.),
minEOverPForComb=cms.double(0.025),
maxEPDiffInSigmaForComb=cms.double(15.),
maxRelTrkMomErrForComb=cms.double(10.),
)
),
phoRegs = cms.PSet(
ecalOnlyMean = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(-1.),
rangeMaxHighEt = cms.double(3.0),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("photon_eb_ecalOnly_5To300_0p2To2_mean"),
ebHighEtForestName = cms.string("photon_eb_ECALonly"),
eeLowEtForestName = cms.string("photon_ee_ecalOnly_5To300_0p2To2_mean"),
eeHighEtForestName = cms.string("photon_ee_ECALonly"),
),
ecalOnlySigma = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("photon_eb_ecalOnly_5To300_0p0002To0p5_sigma"),
ebHighEtForestName = cms.string("photon_eb_ECALonly_var"),
eeLowEtForestName = cms.string("photon_ee_ecalOnly_5To300_0p0002To0p5_sigma"),
eeHighEtForestName = cms.string("photon_ee_ECALonly_var"),
),
)
)
regressionModifier103XLowPtPho = cms.PSet(
modifierName = cms.string('EGRegressionModifierV3'),
rhoTag = cms.InputTag('fixedGridRhoFastjetAllTmp'),
useClosestToCentreSeedCrysDef = cms.bool(False),
maxRawEnergyForLowPtEBSigma = cms.double(-1),
maxRawEnergyForLowPtEESigma = cms.double(1200.),
eleRegs = cms.PSet(
ecalOnlyMean = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(-1.),
rangeMaxHighEt = cms.double(3.0),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("electron_eb_ecalOnly_1To20_0p2To2_mean"),
ebHighEtForestName = cms.string("electron_eb_ECALonly"),
eeLowEtForestName = cms.string("electron_ee_ecalOnly_1To20_0p2To2_mean"),
eeHighEtForestName = cms.string("electron_ee_ECALonly"),
),
ecalOnlySigma = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("electron_eb_ecalOnly_1To20_0p0002To0p5_sigma"),
ebHighEtForestName = cms.string("electron_eb_ECALonly_var"),
eeLowEtForestName = cms.string("electron_ee_ecalOnly_1To20_0p0002To0p5_sigma"),
eeHighEtForestName = cms.string("electron_ee_ECALonly_var"),
),
epComb = cms.PSet(
ecalTrkRegressionConfig = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(0.2),
rangeMaxHighEt = cms.double(2.0),
lowEtHighEtBoundary = cms.double(999999.),
forceHighEnergyTrainingIfSaturated = cms.bool(False),
ebLowEtForestName = cms.string('electron_eb_ecalTrk_1To20_0p2To2_mean'),
ebHighEtForestName = cms.string('electron_eb_ecalTrk_1To20_0p2To2_mean'),
eeLowEtForestName = cms.string('electron_ee_ecalTrk_1To20_0p2To2_mean'),
eeHighEtForestName = cms.string('electron_ee_ecalTrk_1To20_0p2To2_mean'),
),
ecalTrkRegressionUncertConfig = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
lowEtHighEtBoundary = cms.double(999999.),
forceHighEnergyTrainingIfSaturated = cms.bool(False),
ebLowEtForestName = cms.string('electron_eb_ecalTrk_1To20_0p0002To0p5_sigma'),
ebHighEtForestName = cms.string('electron_eb_ecalTrk_1To20_0p0002To0p5_sigma'),
eeLowEtForestName = cms.string('electron_ee_ecalTrk_1To20_0p0002To0p5_sigma'),
eeHighEtForestName = cms.string('electron_ee_ecalTrk_1To20_0p0002To0p5_sigma'),
),
maxEcalEnergyForComb=cms.double(200.),
minEOverPForComb=cms.double(0.025),
maxEPDiffInSigmaForComb=cms.double(15.),
maxRelTrkMomErrForComb=cms.double(10.),
)
),
phoRegs = cms.PSet(
ecalOnlyMean = cms.PSet(
rangeMinLowEt = cms.double(0.2),
rangeMaxLowEt = cms.double(2.0),
rangeMinHighEt = cms.double(-1.),
rangeMaxHighEt = cms.double(3.0),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("photon_eb_ecalOnly_1To20_0p2To2_mean"),
ebHighEtForestName = cms.string("photon_eb_ECALonly"),
eeLowEtForestName = cms.string("photon_ee_ecalOnly_1To20_0p2To2_mean"),
eeHighEtForestName = cms.string("photon_ee_ECALonly"),
),
ecalOnlySigma = cms.PSet(
rangeMinLowEt = cms.double(0.0002),
rangeMaxLowEt = cms.double(0.5),
rangeMinHighEt = cms.double(0.0002),
rangeMaxHighEt = cms.double(0.5),
forceHighEnergyTrainingIfSaturated = cms.bool(True),
lowEtHighEtBoundary = cms.double(999999.),
ebLowEtForestName = cms.string("photon_eb_ecalOnly_1To20_0p0002To0p5_sigma"),
ebHighEtForestName = cms.string("photon_eb_ECALonly_var"),
eeLowEtForestName = cms.string("photon_ee_ecalOnly_1To20_0p0002To0p5_sigma"),
eeHighEtForestName = cms.string("photon_ee_ECALonly_var"),
),
)
)
regressionModifier94X = \
cms.PSet( modifierName = cms.string('EGRegressionModifierV2'),
rhoCollection = cms.InputTag('fixedGridRhoFastjetAllTmp'),
electron_config = cms.PSet(
regressionKey = cms.vstring('electron_eb_ECALonly_lowpt', 'electron_eb_ECALonly', 'electron_ee_ECALonly_lowpt', 'electron_ee_ECALonly',
'electron_eb_ECALTRK_lowpt', 'electron_eb_ECALTRK', 'electron_ee_ECALTRK_lowpt', 'electron_ee_ECALTRK'),
uncertaintyKey = cms.vstring('electron_eb_ECALonly_lowpt_var', 'electron_eb_ECALonly_var', 'electron_ee_ECALonly_lowpt_var', 'electron_ee_ECALonly_var',
'electron_eb_ECALTRK_lowpt_var', 'electron_eb_ECALTRK_var', 'electron_ee_ECALTRK_lowpt_var', 'electron_ee_ECALTRK_var'),
),
photon_config = cms.PSet(
regressionKey = cms.vstring('photon_eb_ECALonly_lowpt', 'photon_eb_ECALonly', 'photon_ee_ECALonly_lowpt', 'photon_ee_ECALonly'),
uncertaintyKey = cms.vstring('photon_eb_ECALonly_lowpt_var', 'photon_eb_ECALonly_var', 'photon_ee_ECALonly_lowpt_var', 'photon_ee_ECALonly_var'),
),
lowEnergy_ECALonlyThr = cms.double(99999.),
lowEnergy_ECALTRKThr = cms.double(50.),
highEnergy_ECALTRKThr = cms.double(200.),
eOverP_ECALTRKThr = cms.double(0.025),
epDiffSig_ECALTRKThr = cms.double(15.),
epSig_ECALTRKThr = cms.double(10.),
forceHighEnergyEcalTrainingIfSaturated = cms.bool(True)
)
regressionModifier80X = \
cms.PSet( modifierName = cms.string('EGRegressionModifierV1'),
autoDetectBunchSpacing = cms.bool(True),
applyExtraHighEnergyProtection = cms.bool(True),
bunchSpacingTag = cms.InputTag("bunchSpacingProducer"),
manualBunchSpacing = cms.int32(50),
rhoCollection = cms.InputTag("fixedGridRhoFastjetAll"),
vertexCollection = cms.InputTag("offlinePrimaryVertices"),
electron_config = cms.PSet(
regressionKey_25ns = cms.vstring('gedelectron_EBCorrection_25ns', 'gedelectron_EECorrection_25ns'),
uncertaintyKey_25ns = cms.vstring('gedelectron_EBUncertainty_25ns', 'gedelectron_EEUncertainty_25ns'),
combinationKey_25ns = cms.string('gedelectron_p4combination_25ns'),
regressionKey_50ns = cms.vstring('gedelectron_EBCorrection_50ns', 'gedelectron_EECorrection_50ns'),
uncertaintyKey_50ns = cms.vstring('gedelectron_EBUncertainty_50ns', 'gedelectron_EEUncertainty_50ns'),
combinationKey_50ns = cms.string('gedelectron_p4combination_50ns'),
),
photon_config = cms.PSet(
regressionKey_25ns = cms.vstring('gedphoton_EBCorrection_25ns', 'gedphoton_EECorrection_25ns'),
uncertaintyKey_25ns = cms.vstring('gedphoton_EBUncertainty_25ns', 'gedphoton_EEUncertainty_25ns'),
regressionKey_50ns = cms.vstring('gedphoton_EBCorrection_50ns', 'gedphoton_EECorrection_50ns'),
uncertaintyKey_50ns = cms.vstring('gedphoton_EBUncertainty_50ns', 'gedphoton_EEUncertainty_50ns'),
)
)
regressionModifier = regressionModifier94X.clone()
from Configuration.Eras.Modifier_run2_egamma_2016_cff import run2_egamma_2016
from Configuration.Eras.Modifier_run2_egamma_2017_cff import run2_egamma_2017
from Configuration.Eras.Modifier_run2_egamma_2018_cff import run2_egamma_2018
(run2_egamma_2016 | run2_egamma_2017 | run2_egamma_2018).toReplaceWith(regressionModifier,regressionModifier106XUL)
from Configuration.ProcessModifiers.egamma_lowPt_exclusive_cff import egamma_lowPt_exclusive
egamma_lowPt_exclusive.toReplaceWith(regressionModifier,regressionModifier103XLowPtPho)
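A minimal customization sketch under the same conventions (added for illustration; the tweaked parameter value is hypothetical, not taken from the original configuration):

# Clone one of the modifier PSets above, adjust a nested parameter, and let
# the Run 2 eras swap the customized copy in, mirroring the pattern above.
myRegressionModifier = regressionModifier106XUL.clone()
myRegressionModifier.eleRegs.ecalOnlyMean.lowEtHighEtBoundary = 20.  # illustrative
(run2_egamma_2016 | run2_egamma_2017 | run2_egamma_2018).toReplaceWith(
    regressionModifier, myRegressionModifier)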
| true
| true
|
f709acd7de0d69fa46a1ab2426b894f47946597c
| 15,795
|
py
|
Python
|
tensorflow/python/kernel_tests/sparse_xent_op_test.py
|
rainwoodman/tensorflow
|
9b7ff60faa841f0473facf618cb5b66b9cb99b5e
|
[
"Apache-2.0"
] | 3
|
2021-03-15T05:31:57.000Z
|
2021-12-14T07:29:31.000Z
|
tensorflow/python/kernel_tests/sparse_xent_op_test.py
|
rainwoodman/tensorflow
|
9b7ff60faa841f0473facf618cb5b66b9cb99b5e
|
[
"Apache-2.0"
] | 7
|
2021-11-10T20:21:23.000Z
|
2022-03-22T19:18:39.000Z
|
tensorflow/python/kernel_tests/sparse_xent_op_test.py
|
rainwoodman/tensorflow
|
9b7ff60faa841f0473facf618cb5b66b9cb99b5e
|
[
"Apache-2.0"
] | 1
|
2019-09-27T09:03:41.000Z
|
2019-09-27T09:03:41.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseSoftmaxCrossEntropyWithLogits op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import backprop as backprop_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import app
from tensorflow.python.platform import test
class SparseXentTest(test.TestCase):
def _npXent(self, features, labels):
features = np.reshape(features, [-1, features.shape[-1]])
labels = np.reshape(labels, [-1])
batch_dim = 0
class_dim = 1
batch_size = features.shape[batch_dim]
e = np.exp(features - np.reshape(
np.amax(
features, axis=class_dim), [batch_size, 1]))
probs = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
labels_mat = np.zeros_like(probs).astype(probs.dtype)
labels_mat[np.arange(batch_size), labels] = 1.0
bp = (probs - labels_mat)
l = -np.sum(labels_mat * np.log(probs + 1.0e-20), axis=1)
return l, bp
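  # Added worked example (not in the original test file): for logits
  # [1., 2., 3., 4.] with label 0, the reference math above gives
  #   e = exp(x - max(x))       -> [0.0498, 0.1353, 0.3679, 1.0]
  #   probs = e / sum(e)        -> [0.032, 0.087, 0.237, 0.644]
  #   bp = probs - one_hot(0)   -> [-0.968, 0.087, 0.237, 0.644]
  #   loss = -log(probs[0])     -> ~3.442
  # These are exactly the values asserted in testNpXent below.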
def _testXent(self, np_features, np_labels):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.cached_session():
loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def testSingleClass(self):
for label_dtype in np.int32, np.int64:
with self.cached_session():
loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(np.float32),
np.array([0, 0, 0]).astype(label_dtype))
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
self.assertAllClose([[0.0], [0.0], [0.0]], tf_backprop)
@test_util.run_gpu_only()
def testInvalidLabelGPU(self):
features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
[1., 2., 3., 4.]]
labels = [4, 3, 0, -1]
loss, backprop = self.evaluate(
gen_nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels))
self.assertAllClose([[np.nan] * 4, [0.25, 0.25, 0.25, -0.75],
[-0.968, 0.087, 0.237, 0.6439], [np.nan] * 4],
backprop,
rtol=1e-3,
atol=1e-3)
self.assertAllClose([np.nan, 1.3862, 3.4420, np.nan],
loss,
rtol=1e-3,
atol=1e-3)
@test_util.run_in_graph_and_eager_modes(use_gpu=False)
@test_util.disable_xla("XLA cannot assert inside of a kernel.")
def testInvalidLabelCPU(self):
features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
[1., 2., 3., 4.]]
labels = [4, 3, 0, -1]
with self.assertRaisesRegex(
(errors_impl.InvalidArgumentError, errors_impl.UnknownError),
"Received a label value of"):
self.evaluate(
gen_nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels))
def testNpXent(self):
# We create 2 batches of logits for testing.
# batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.
# batch 1 has a bit of difference: 1, 2, 3, 4, with target 0.
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
labels = [3, 0]
# For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25
# With a hard target 3, the backprop is [0.25, 0.25, 0.25, -0.75]
# The loss for this batch is -log(0.25) = 1.386
#
# For batch 1, we have:
# exp(0) = 1
# exp(1) = 2.718
# exp(2) = 7.389
# exp(3) = 20.085
# SUM = 31.192
# So we have as probabilities:
# exp(0) / SUM = 0.032
# exp(1) / SUM = 0.087
# exp(2) / SUM = 0.237
# exp(3) / SUM = 0.644
    # With a hard target 0, the backprop is
    # [0.032 - 1.0 = -0.968, 0.087, 0.237, 0.644]
    # The losses for the two batches are [1.0 * -log(0.25), 1.0 * -log(0.032)]
# = [1.3862, 3.4420]
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, -0.75], [-0.968, 0.087, 0.237, 0.6439]]),
np_backprop,
rtol=1.e-3,
atol=1.e-3)
self.assertAllClose(
np.array([1.3862, 3.4420]), np_loss, rtol=1.e-3, atol=1.e-3)
def testShapeMismatch(self):
with self.session():
with self.assertRaisesRegex(ValueError, ".*Rank mismatch:*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])
def testScalar(self):
with self.session():
with self.assertRaisesRegex(ValueError, ".*Logits cannot be scalars*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=constant_op.constant(0), logits=constant_op.constant(1.0))
def testLabelsPlaceholderScalar(self):
with ops_lib.Graph().as_default(), self.session():
labels = array_ops.placeholder(np.int32)
y = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=[[7.]])
with self.assertRaisesOpError("labels must be 1-D"):
y.eval(feed_dict={labels: 0})
def testVector(self):
with self.session():
loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=constant_op.constant(0), logits=constant_op.constant([1.0]))
self.assertAllClose(0.0, self.evaluate(loss))
def testFloat(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32),
np.array([3, 0]).astype(label_dtype))
def testDouble(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
np.array([0, 3]).astype(label_dtype))
def testHalf(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([3, 0]).astype(label_dtype))
def testEmpty(self):
self._testXent(np.zeros((0, 3)), np.zeros((0,), dtype=np.int32))
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testGradient(self):
with self.session() as sess:
l = constant_op.constant([3, 0, 1], name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
def xent(f):
      # gradient_checker_v2.compute_gradient doesn't take int32/int64.
# labels must be of type int32/int64, so passing them separately here.
return nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent")
theoretical, numerical = gradient_checker_v2.compute_gradient(xent, [f])
if not context.executing_eagerly():
        # Check that no extra computation is performed. When only the first
        # derivative is requested, the second derivative must not be computed,
        # so there should be no `BatchMatMul` op in the graph.
op_names = [
op.op_def.name for op in sess.graph.get_operations() if op.op_def
]
self.assertNotIn("BatchMatMul", op_names)
self.assertNotIn("BatchMatMulV2", op_names)
tol = 5e-8
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
def testSecondGradient(self):
with self.session() as sess:
l = constant_op.constant([3, 0, 1], name="l")
f = constant_op.constant(
[0.3, 0.4, 0.1, 1.2, 0.1, 1.9, 0.1, 0.7, 0.8, 0.2, 1.3, 1.3],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
def xent_grad(f):
if not context.executing_eagerly():
return gradients_impl.gradients(
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent"), [f])[0]
with backprop_lib.GradientTape() as tape:
tape.watch(f)
return tape.gradient(
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent"), [f])[0]
theoretical, numerical = gradient_checker_v2.compute_gradient(
xent_grad, [f])
if not context.executing_eagerly():
        # Check that the second derivative is calculated
        # (its presence shows up as a `BatchMatMul` op in the graph because of
        # how the xentropy gradient is implemented).
op_names = [
op.op_def.name for op in sess.graph.get_operations() if op.op_def
]
self.assertIn("BatchMatMulV2", op_names)
tol = 5e-8
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def _testHighDim(self, features, labels):
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
# manually reshape loss
np_loss = np.reshape(np_loss, np.array(labels).shape)
tf_loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=features)
if not context.executing_eagerly():
tf_backprop = tf_loss.op.inputs[0].op.outputs[1]
else:
with backprop_lib.GradientTape() as tape:
features = constant_op.constant(features)
tape.watch(features)
tf_backprop = tape.gradient(
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=features), [features])[0]
tf_backprop = array_ops.reshape(tf_backprop, np_backprop.shape)
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def testHighDim(self):
features = [[[1., 1., 1., 1.]], [[1., 2., 3., 4.]]]
labels = [[3], [0]]
self._testHighDim(features, labels)
def testHighDim2(self):
features = [[[1., 1., 1., 1.], [2., 2., 2., 2.]],
[[1., 2., 3., 4.], [5., 6., 7., 8.]]]
labels = [[3, 2], [0, 3]]
self._testHighDim(features, labels)
def testScalarHandling(self):
with ops_lib.Graph().as_default(), self.session(use_gpu=False) as sess:
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
".*labels must be 1-D.*"):
labels = array_ops.placeholder(dtypes.int32, shape=[None, 1])
logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])
ce = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=array_ops.squeeze(labels), logits=logits)
labels_v2 = np.zeros((1, 1), dtype=np.int32)
logits_v2 = np.random.randn(1, 3)
sess.run([ce], feed_dict={labels: labels_v2, logits: logits_v2})
def _sparse_vs_dense_xent_benchmark_dense(labels, logits):
labels = array_ops.identity(labels)
logits = array_ops.identity(logits)
with ops_lib.device("/cpu:0"): # Sparse-to-dense must be on CPU
batch_size = array_ops.shape(logits)[0]
num_entries = array_ops.shape(logits)[1]
length = batch_size * num_entries
labels += num_entries * math_ops.range(batch_size)
target = sparse_ops.sparse_to_dense(labels,
array_ops.stack([length]), 1.0, 0.0)
target = array_ops.reshape(target, array_ops.stack([-1, num_entries]))
crossent = nn_ops.softmax_cross_entropy_with_logits(
labels=target, logits=logits, name="SequenceLoss/CrossEntropy")
crossent_sum = math_ops.reduce_sum(crossent)
grads = gradients_impl.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
def _sparse_vs_dense_xent_benchmark_sparse(labels, logits):
# Using sparse_softmax_cross_entropy_with_logits
labels = labels.astype(np.int64)
labels = array_ops.identity(labels)
logits = array_ops.identity(logits)
crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name="SequenceLoss/CrossEntropy")
crossent_sum = math_ops.reduce_sum(crossent)
grads = gradients_impl.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
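# Added note: the dense variant above must first materialize a full
# batch_size x num_entries one-hot target via sparse_to_dense before calling
# the dense xent op, while the sparse op consumes integer labels directly;
# the benchmark below measures what that extra materialization costs.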
def sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
config.gpu_options.per_process_gpu_memory_fraction = 0.3
labels = np.random.randint(num_entries, size=batch_size).astype(np.int32)
logits = np.random.randn(batch_size, num_entries).astype(np.float32)
def _timer(sess, ops):
    # Warm up
for _ in range(20):
sess.run(ops)
# Timing run
start = time.time()
for _ in range(20):
sess.run(ops)
end = time.time()
return (end - start) / 20.0 # Average runtime per iteration
# Using sparse_to_dense and softmax_cross_entropy_with_logits
with session.Session(config=config) as sess:
if not use_gpu:
with ops_lib.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
delta_dense = _timer(sess, ops)
# Using sparse_softmax_cross_entropy_with_logits
with session.Session(config=config) as sess:
if not use_gpu:
with test_util.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
delta_sparse = _timer(sess, ops)
print("%d \t %d \t %s \t %f \t %f \t %f" % (batch_size, num_entries, use_gpu,
delta_dense, delta_sparse,
delta_sparse / delta_dense))
def main(_):
print("Sparse Xent vs. SparseToDense + Xent")
print("batch \t depth \t gpu \t dt(dense) \t dt(sparse) "
"\t dt(sparse)/dt(dense)")
for use_gpu in (False, True):
for batch_size in (32, 64, 128):
for num_entries in (100, 1000, 10000):
sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu)
sparse_vs_dense_xent_benchmark(32, 100000, use_gpu)
sparse_vs_dense_xent_benchmark(8, 1000000, use_gpu)
if __name__ == "__main__":
if "--benchmarks" in sys.argv:
sys.argv.remove("--benchmarks")
app.run()
else:
test.main()
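For running the op outside this test harness, a minimal usage sketch (assuming TensorFlow 2.x with eager execution and the public tf.nn API rather than the internal modules imported above):

import numpy as np
import tensorflow as tf

logits = np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]], dtype=np.float32)
labels = np.array([3, 0], dtype=np.int64)
# Per-example losses; these should match the hand-computed [1.386, 3.440].
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
print(loss.numpy())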
| 39.68593
| 80
| 0.650839
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import backprop as backprop_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.nn_grad
from tensorflow.python.platform import app
from tensorflow.python.platform import test
class SparseXentTest(test.TestCase):
def _npXent(self, features, labels):
features = np.reshape(features, [-1, features.shape[-1]])
labels = np.reshape(labels, [-1])
batch_dim = 0
class_dim = 1
batch_size = features.shape[batch_dim]
e = np.exp(features - np.reshape(
np.amax(
features, axis=class_dim), [batch_size, 1]))
probs = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
labels_mat = np.zeros_like(probs).astype(probs.dtype)
labels_mat[np.arange(batch_size), labels] = 1.0
bp = (probs - labels_mat)
l = -np.sum(labels_mat * np.log(probs + 1.0e-20), axis=1)
return l, bp
def _testXent(self, np_features, np_labels):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.cached_session():
loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def testSingleClass(self):
for label_dtype in np.int32, np.int64:
with self.cached_session():
loss, backprop = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(np.float32),
np.array([0, 0, 0]).astype(label_dtype))
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
self.assertAllClose([[0.0], [0.0], [0.0]], tf_backprop)
@test_util.run_gpu_only()
def testInvalidLabelGPU(self):
features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
[1., 2., 3., 4.]]
labels = [4, 3, 0, -1]
loss, backprop = self.evaluate(
gen_nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels))
self.assertAllClose([[np.nan] * 4, [0.25, 0.25, 0.25, -0.75],
[-0.968, 0.087, 0.237, 0.6439], [np.nan] * 4],
backprop,
rtol=1e-3,
atol=1e-3)
self.assertAllClose([np.nan, 1.3862, 3.4420, np.nan],
loss,
rtol=1e-3,
atol=1e-3)
@test_util.run_in_graph_and_eager_modes(use_gpu=False)
@test_util.disable_xla("XLA cannot assert inside of a kernel.")
def testInvalidLabelCPU(self):
features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
[1., 2., 3., 4.]]
labels = [4, 3, 0, -1]
with self.assertRaisesRegex(
(errors_impl.InvalidArgumentError, errors_impl.UnknownError),
"Received a label value of"):
self.evaluate(
gen_nn_ops.sparse_softmax_cross_entropy_with_logits(features, labels))
def testNpXent(self):
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
labels = [3, 0]
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, -0.75], [-0.968, 0.087, 0.237, 0.6439]]),
np_backprop,
rtol=1.e-3,
atol=1.e-3)
self.assertAllClose(
np.array([1.3862, 3.4420]), np_loss, rtol=1.e-3, atol=1.e-3)
def testShapeMismatch(self):
with self.session():
with self.assertRaisesRegex(ValueError, ".*Rank mismatch:*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])
def testScalar(self):
with self.session():
with self.assertRaisesRegex(ValueError, ".*Logits cannot be scalars*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=constant_op.constant(0), logits=constant_op.constant(1.0))
def testLabelsPlaceholderScalar(self):
with ops_lib.Graph().as_default(), self.session():
labels = array_ops.placeholder(np.int32)
y = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=[[7.]])
with self.assertRaisesOpError("labels must be 1-D"):
y.eval(feed_dict={labels: 0})
def testVector(self):
with self.session():
loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=constant_op.constant(0), logits=constant_op.constant([1.0]))
self.assertAllClose(0.0, self.evaluate(loss))
def testFloat(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32),
np.array([3, 0]).astype(label_dtype))
def testDouble(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
np.array([0, 3]).astype(label_dtype))
def testHalf(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([3, 0]).astype(label_dtype))
def testEmpty(self):
self._testXent(np.zeros((0, 3)), np.zeros((0,), dtype=np.int32))
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testGradient(self):
with self.session() as sess:
l = constant_op.constant([3, 0, 1], name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
def xent(f):
# labels must be of type int32/int64, so passing them separately here.
return nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent")
theoretical, numerical = gradient_checker_v2.compute_gradient(xent, [f])
if not context.executing_eagerly():
        # Check that no extra computation is performed. When only the first
        # derivative is requested, the second derivative must not be computed,
        # so there should be no `BatchMatMul` op in the graph.
op_names = [
op.op_def.name for op in sess.graph.get_operations() if op.op_def
]
self.assertNotIn("BatchMatMul", op_names)
self.assertNotIn("BatchMatMulV2", op_names)
tol = 5e-8
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
def testSecondGradient(self):
with self.session() as sess:
l = constant_op.constant([3, 0, 1], name="l")
f = constant_op.constant(
[0.3, 0.4, 0.1, 1.2, 0.1, 1.9, 0.1, 0.7, 0.8, 0.2, 1.3, 1.3],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
def xent_grad(f):
if not context.executing_eagerly():
return gradients_impl.gradients(
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent"), [f])[0]
with backprop_lib.GradientTape() as tape:
tape.watch(f)
return tape.gradient(
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent"), [f])[0]
theoretical, numerical = gradient_checker_v2.compute_gradient(
xent_grad, [f])
if not context.executing_eagerly():
        # Check that the second derivative is calculated
        # (its presence shows up as a `BatchMatMul` op in the graph because of
        # how the xentropy gradient is implemented).
op_names = [
op.op_def.name for op in sess.graph.get_operations() if op.op_def
]
self.assertIn("BatchMatMulV2", op_names)
tol = 5e-8
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def _testHighDim(self, features, labels):
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
# manually reshape loss
np_loss = np.reshape(np_loss, np.array(labels).shape)
tf_loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=features)
if not context.executing_eagerly():
tf_backprop = tf_loss.op.inputs[0].op.outputs[1]
else:
with backprop_lib.GradientTape() as tape:
features = constant_op.constant(features)
tape.watch(features)
tf_backprop = tape.gradient(
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=features), [features])[0]
tf_backprop = array_ops.reshape(tf_backprop, np_backprop.shape)
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def testHighDim(self):
features = [[[1., 1., 1., 1.]], [[1., 2., 3., 4.]]]
labels = [[3], [0]]
self._testHighDim(features, labels)
def testHighDim2(self):
features = [[[1., 1., 1., 1.], [2., 2., 2., 2.]],
[[1., 2., 3., 4.], [5., 6., 7., 8.]]]
labels = [[3, 2], [0, 3]]
self._testHighDim(features, labels)
def testScalarHandling(self):
with ops_lib.Graph().as_default(), self.session(use_gpu=False) as sess:
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
".*labels must be 1-D.*"):
labels = array_ops.placeholder(dtypes.int32, shape=[None, 1])
logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])
ce = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=array_ops.squeeze(labels), logits=logits)
labels_v2 = np.zeros((1, 1), dtype=np.int32)
logits_v2 = np.random.randn(1, 3)
sess.run([ce], feed_dict={labels: labels_v2, logits: logits_v2})
def _sparse_vs_dense_xent_benchmark_dense(labels, logits):
labels = array_ops.identity(labels)
logits = array_ops.identity(logits)
with ops_lib.device("/cpu:0"): # Sparse-to-dense must be on CPU
batch_size = array_ops.shape(logits)[0]
num_entries = array_ops.shape(logits)[1]
length = batch_size * num_entries
labels += num_entries * math_ops.range(batch_size)
target = sparse_ops.sparse_to_dense(labels,
array_ops.stack([length]), 1.0, 0.0)
target = array_ops.reshape(target, array_ops.stack([-1, num_entries]))
crossent = nn_ops.softmax_cross_entropy_with_logits(
labels=target, logits=logits, name="SequenceLoss/CrossEntropy")
crossent_sum = math_ops.reduce_sum(crossent)
grads = gradients_impl.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
def _sparse_vs_dense_xent_benchmark_sparse(labels, logits):
# Using sparse_softmax_cross_entropy_with_logits
labels = labels.astype(np.int64)
labels = array_ops.identity(labels)
logits = array_ops.identity(logits)
crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name="SequenceLoss/CrossEntropy")
crossent_sum = math_ops.reduce_sum(crossent)
grads = gradients_impl.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
def sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
config.gpu_options.per_process_gpu_memory_fraction = 0.3
labels = np.random.randint(num_entries, size=batch_size).astype(np.int32)
logits = np.random.randn(batch_size, num_entries).astype(np.float32)
def _timer(sess, ops):
    # Warm up
for _ in range(20):
sess.run(ops)
# Timing run
start = time.time()
for _ in range(20):
sess.run(ops)
end = time.time()
return (end - start) / 20.0 # Average runtime per iteration
# Using sparse_to_dense and softmax_cross_entropy_with_logits
with session.Session(config=config) as sess:
if not use_gpu:
with ops_lib.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
delta_dense = _timer(sess, ops)
# Using sparse_softmax_cross_entropy_with_logits
with session.Session(config=config) as sess:
if not use_gpu:
with test_util.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
delta_sparse = _timer(sess, ops)
print("%d \t %d \t %s \t %f \t %f \t %f" % (batch_size, num_entries, use_gpu,
delta_dense, delta_sparse,
delta_sparse / delta_dense))
def main(_):
print("Sparse Xent vs. SparseToDense + Xent")
print("batch \t depth \t gpu \t dt(dense) \t dt(sparse) "
"\t dt(sparse)/dt(dense)")
for use_gpu in (False, True):
for batch_size in (32, 64, 128):
for num_entries in (100, 1000, 10000):
sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu)
sparse_vs_dense_xent_benchmark(32, 100000, use_gpu)
sparse_vs_dense_xent_benchmark(8, 1000000, use_gpu)
if __name__ == "__main__":
if "--benchmarks" in sys.argv:
sys.argv.remove("--benchmarks")
app.run()
else:
test.main()
| true
| true
|
f709ad2a894c733b34890663a08ccf16a64e97a4
| 7,047
|
py
|
Python
|
chirp/library/do_delete_audio_file_from_db_test.py
|
chirpradio/chirpradio-machine
|
e854db2be43a4c879bbda134272a73225d7fa2df
|
[
"Apache-2.0"
] | 8
|
2015-03-06T17:28:36.000Z
|
2020-11-27T10:06:40.000Z
|
chirp/library/do_delete_audio_file_from_db_test.py
|
chirpradio/chirpradio-machine
|
e854db2be43a4c879bbda134272a73225d7fa2df
|
[
"Apache-2.0"
] | 9
|
2015-09-21T18:52:22.000Z
|
2018-02-12T19:23:17.000Z
|
chirp/library/do_delete_audio_file_from_db_test.py
|
chirpradio/chirpradio-machine
|
e854db2be43a4c879bbda134272a73225d7fa2df
|
[
"Apache-2.0"
] | 9
|
2016-04-08T00:21:15.000Z
|
2018-01-25T19:35:58.000Z
|
import os
import time
import unittest
from mock import patch
from chirp.library import audio_file_test
from chirp.library import do_delete_audio_file_from_db
from chirp.library import database
TEST_DB_NAME_PATTERN = "/tmp/chirp-library-db_test.%d.sqlite"
class DeleteFingerprintTest(unittest.TestCase):
def setUp(self):
self.name = TEST_DB_NAME_PATTERN % int(time.time() * 1000000)
self.db = database.Database(self.name)
def tearDown(self):
os.unlink(self.name)
def _add_test_audiofiles(self):
test_volume = 17
test_import_timestamp = 1230959520
# populate some dummy audiofiles into the database
all_au_files = [audio_file_test.get_test_audio_file(i)
for i in xrange(10)]
add_txn = self.db.begin_add(test_volume, test_import_timestamp)
for au_file in all_au_files:
au_file.volume = test_volume
au_file.import_timestamp = test_import_timestamp
for au_file in all_au_files:
add_txn.add(au_file)
add_txn.commit()
    def test_del_audiofiles__full_delete_single(self):
# SETUP
test_fingerprint = "0000000000000007"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
# make sure 10 records exist
self.assertEqual(len(list(self.db.get_all())), 10)
# quick confirmation that the audiofile that we want to test exists.
af = self.db.get_by_fingerprint(test_fingerprint)
self.assertEquals(af.fingerprint, test_fingerprint)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
afm.del_audiofiles([test_fingerprint])
# RESULTS
# verify audiofile doesn't exist
af = self.db.get_by_fingerprint(test_fingerprint)
self.assertEquals(af, None)
# make sure only 9 records exist now
self.assertEqual(len(list(self.db.get_all())), 9)
def test_del_audiofiles__full_delete_multiple(self):
# SETUP
test_fingerprint_1 = "0000000000000005"
test_fingerprint_2 = "0000000000000007"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
# make sure 10 records exist
self.assertEqual(len(list(self.db.get_all())), 10)
# quick confirmation that the audiofiles that we want to test exists.
af = self.db.get_by_fingerprint(test_fingerprint_1)
self.assertEquals(af.fingerprint, test_fingerprint_1)
af = self.db.get_by_fingerprint(test_fingerprint_2)
self.assertEquals(af.fingerprint, test_fingerprint_2)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
afm.del_audiofiles([test_fingerprint_1, test_fingerprint_2])
# RESULTS
# verify audiofiles don't exist
af = self.db.get_by_fingerprint(test_fingerprint_1)
self.assertEquals(af, None)
af = self.db.get_by_fingerprint(test_fingerprint_2)
self.assertEquals(af, None)
# make sure only 8 records exist now
self.assertEqual(len(list(self.db.get_all())), 8)
def test_del_audiofiles__full_delete_non_existing_fingerprint(self):
# SETUP
test_fingerprint_1 = "0000000000000020"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
# make sure 10 records exist
self.assertEqual(len(list(self.db.get_all())), 10)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
afm.del_audiofiles([test_fingerprint_1])
# RESULTS
# make sure nothing was deleted
self.assertEqual(len(list(self.db.get_all())), 10)
def test_del_audiofiles__raises_exception(self):
# SETUP
test_fingerprint_1 = "0000000000000007"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
# make sure 10 records exist
self.assertEqual(len(list(self.db.get_all())), 10)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
def _raise_exception(*args, **kwargs):
raise Exception('Test')
with patch.object(afm, 'conn', autospec=True) as mock_conn:
mock_conn.execute.side_effect = _raise_exception
with self.assertRaises(Exception):
afm.del_audiofiles([test_fingerprint_1])
mock_conn.rollback.assert_called_with()
def test_get_audio_files__existing_record(self):
# SETUP
test_fingerprint = "0000000000000007"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
af = afm.get_audio_files(fingerprints=[test_fingerprint])
# RESULTS
self.assertSetEqual(
set(a['fingerprint'] for a in af),
set([test_fingerprint]))
def test_get_audio_files__non_existing_records(self):
# SETUP
test_fingerprint_1 = "0000000000000020"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
af = afm.get_audio_files(
fingerprints=[test_fingerprint_1])
# RESULTS
self.assertEqual(len(list(af)), 0)
def test_get_tags__existing_record(self):
# SETUP
test_fingerprint_1 = "0000000000000005"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
af = afm.get_tags(
fingerprints=[test_fingerprint_1])
# RESULTS
self.assertListEqual(
list(a['fingerprint'] for a in af),
5 * [test_fingerprint_1])
def test_get_tags__non_existing_records(self):
# SETUP
test_fingerprint_1 = "0000000000000020"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
af = afm.get_tags(
fingerprints=[test_fingerprint_1])
# RESULTS
self.assertEqual(len(list(af)), 0)
def test_print_rows_can_handle_non_ascii(self):
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name
)
afm.print_rows([
[u'non-ascii string with a \xf8 character'],
])
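The rollback test above pins down a transactional contract; a hedged sketch of a method body that would satisfy it (hypothetical; the table and column names are illustrative, not taken from the chirp source):

def del_audiofiles(self, fingerprints):
    try:
        for fp in fingerprints:
            self.conn.execute(
                "DELETE FROM audio_files WHERE fingerprint = ?", (fp,))
        self.conn.commit()
    except Exception:
        self.conn.rollback()  # the behavior asserted via mock_conn above
        raise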
| 30.506494
| 77
| 0.65276
|
import os
import time
import unittest
from mock import patch
from chirp.library import audio_file_test
from chirp.library import do_delete_audio_file_from_db
from chirp.library import database
TEST_DB_NAME_PATTERN = "/tmp/chirp-library-db_test.%d.sqlite"
class DeleteFingerprintTest(unittest.TestCase):
def setUp(self):
self.name = TEST_DB_NAME_PATTERN % int(time.time() * 1000000)
self.db = database.Database(self.name)
def tearDown(self):
os.unlink(self.name)
def _add_test_audiofiles(self):
test_volume = 17
test_import_timestamp = 1230959520
all_au_files = [audio_file_test.get_test_audio_file(i)
for i in xrange(10)]
add_txn = self.db.begin_add(test_volume, test_import_timestamp)
for au_file in all_au_files:
au_file.volume = test_volume
au_file.import_timestamp = test_import_timestamp
for au_file in all_au_files:
add_txn.add(au_file)
add_txn.commit()
    def test_del_audiofiles__full_delete_single(self):
test_fingerprint = "0000000000000007"
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
self.assertEqual(len(list(self.db.get_all())), 10)
af = self.db.get_by_fingerprint(test_fingerprint)
self.assertEquals(af.fingerprint, test_fingerprint)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
afm.del_audiofiles([test_fingerprint])
af = self.db.get_by_fingerprint(test_fingerprint)
self.assertEquals(af, None)
# make sure only 9 records exist now
self.assertEqual(len(list(self.db.get_all())), 9)
def test_del_audiofiles__full_delete_multiple(self):
# SETUP
test_fingerprint_1 = "0000000000000005"
test_fingerprint_2 = "0000000000000007"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
# make sure 10 records exist
self.assertEqual(len(list(self.db.get_all())), 10)
# quick confirmation that the audiofiles that we want to test exists.
af = self.db.get_by_fingerprint(test_fingerprint_1)
self.assertEquals(af.fingerprint, test_fingerprint_1)
af = self.db.get_by_fingerprint(test_fingerprint_2)
self.assertEquals(af.fingerprint, test_fingerprint_2)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
afm.del_audiofiles([test_fingerprint_1, test_fingerprint_2])
# RESULTS
# verify audiofiles don't exist
af = self.db.get_by_fingerprint(test_fingerprint_1)
self.assertEquals(af, None)
af = self.db.get_by_fingerprint(test_fingerprint_2)
self.assertEquals(af, None)
self.assertEqual(len(list(self.db.get_all())), 8)
def test_del_audiofiles__full_delete_non_existing_fingerprint(self):
test_fingerprint_1 = "0000000000000020"
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
self.assertEqual(len(list(self.db.get_all())), 10)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
afm.del_audiofiles([test_fingerprint_1])
self.assertEqual(len(list(self.db.get_all())), 10)
def test_del_audiofiles__raises_exception(self):
test_fingerprint_1 = "0000000000000007"
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
self.assertEqual(len(list(self.db.get_all())), 10)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
def _raise_exception(*args, **kwargs):
raise Exception('Test')
with patch.object(afm, 'conn', autospec=True) as mock_conn:
mock_conn.execute.side_effect = _raise_exception
with self.assertRaises(Exception):
afm.del_audiofiles([test_fingerprint_1])
mock_conn.rollback.assert_called_with()
def test_get_audio_files__existing_record(self):
test_fingerprint = "0000000000000007"
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
af = afm.get_audio_files(fingerprints=[test_fingerprint])
self.assertSetEqual(
set(a['fingerprint'] for a in af),
set([test_fingerprint]))
def test_get_audio_files__non_existing_records(self):
test_fingerprint_1 = "0000000000000020"
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
af = afm.get_audio_files(
fingerprints=[test_fingerprint_1])
self.assertEqual(len(list(af)), 0)
def test_get_tags__existing_record(self):
test_fingerprint_1 = "0000000000000005"
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
af = afm.get_tags(
fingerprints=[test_fingerprint_1])
self.assertListEqual(
list(a['fingerprint'] for a in af),
5 * [test_fingerprint_1])
def test_get_tags__non_existing_records(self):
test_fingerprint_1 = "0000000000000020"
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
af = afm.get_tags(
fingerprints=[test_fingerprint_1])
self.assertEqual(len(list(af)), 0)
def test_print_rows_can_handle_non_ascii(self):
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name
)
afm.print_rows([
[u'non-ascii string with a \xf8 character'],
])
| true
| true
|
f709ad6bc723b406239f8b4084411aa1356e16f3
| 1,133
|
py
|
Python
|
setup.py
|
pjdelport/pytest-testmon
|
dbbaf2f29cc7e9a2745f27dae91e44ce973e8d10
|
[
"MIT"
] | null | null | null |
setup.py
|
pjdelport/pytest-testmon
|
dbbaf2f29cc7e9a2745f27dae91e44ce973e8d10
|
[
"MIT"
] | null | null | null |
setup.py
|
pjdelport/pytest-testmon
|
dbbaf2f29cc7e9a2745f27dae91e44ce973e8d10
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name='pytest-testmon',
description='take TDD to a new level with py.test and testmon',
long_description=''.join(open('README.rst').readlines()),
version='0.9.15',
license='MIT',
platforms=['linux', 'osx', 'win32'],
packages=['testmon'],
url='https://github.com/tarpas/pytest-testmon/',
author_email='tibor.arpas@infinit.sk',
author='Tibor Arpas, Jozef Knaperek, Martin Riesz, Daniel Hahler',
entry_points={
'pytest11': [
'testmon = testmon.pytest_testmon',
],
'tox': [
'testmon = testmon.tox_testmon',
],
},
install_requires=['pytest>=2.8.0,<5', 'coverage>=4,<5'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
'Programming Language :: Python', ],
)
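The 'pytest11' entry point is how pytest discovers this plugin at startup; a minimal sketch of the kind of hook such a module exposes (hypothetical body; the real testmon plugin does much more):

# testmon/pytest_testmon.py (sketch)
def pytest_addoption(parser):
    group = parser.getgroup("testmon")
    group.addoption("--testmon", action="store_true", dest="testmon",
                    help="run only tests affected by recent code changes")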
| 33.323529
| 70
| 0.58782
|
from setuptools import setup
setup(
name='pytest-testmon',
description='take TDD to a new level with py.test and testmon',
long_description=''.join(open('README.rst').readlines()),
version='0.9.15',
license='MIT',
platforms=['linux', 'osx', 'win32'],
packages=['testmon'],
url='https://github.com/tarpas/pytest-testmon/',
author_email='tibor.arpas@infinit.sk',
author='Tibor Arpas, Jozef Knaperek, Martin Riesz, Daniel Hahler',
entry_points={
'pytest11': [
'testmon = testmon.pytest_testmon',
],
'tox': [
'testmon = testmon.tox_testmon',
],
},
install_requires=['pytest>=2.8.0,<5', 'coverage>=4,<5'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
'Programming Language :: Python', ],
)
| true
| true
|
f709ae4ed5324f7f1cb8132611e387bb13a68a6f
| 2,557
|
py
|
Python
|
tests/test_imports.py
|
mnicolas94/pyrulo
|
1a537369407b760182bd40f188fd82be637310e2
|
[
"MIT"
] | null | null | null |
tests/test_imports.py
|
mnicolas94/pyrulo
|
1a537369407b760182bd40f188fd82be637310e2
|
[
"MIT"
] | null | null | null |
tests/test_imports.py
|
mnicolas94/pyrulo
|
1a537369407b760182bd40f188fd82be637310e2
|
[
"MIT"
] | null | null | null |
import unittest
import pyrulo.class_imports
class TestImports(unittest.TestCase):
def setUp(self) -> None:
pass
def tearDown(self) -> None:
pass
def test_whenImportClassesByDir_resultIsTheExpected(self):
# arrange
path = "test_classes"
# act
classes = pyrulo.class_imports.import_classes_in_dir(path, object, False)
names = [cls.__name__ for cls in classes]
counts = {}
for name in names:
counts.setdefault(name, 0)
counts[name] += 1
# assert
self.assertIn("A", names)
self.assertIn("B", names)
self.assertIn("C", names)
self.assertEqual(counts["A"], 1)
self.assertEqual(counts["B"], 1)
self.assertEqual(counts["C"], 1)
def test_whenImportClassesByExternalDir_resultIsTheExpected(self):
# arrange
path = "C:/_cosas/Desarrollo/Proyectos/Python/propsettings/propsettings"
# act
classes = pyrulo.class_imports.import_classes_in_dir(path, object, False)
names = [cls.__name__ for cls in classes]
# assert
self.assertIn("Setting", names)
def test_whenImportClassFromFile_resultsIsTheExpected(self):
# arrange
path = "test_classes/a.py"
# act
classes = pyrulo.class_imports.import_classes_in_file(path, object)
names = [cls.__name__ for cls in classes]
# assert
self.assertIn("A", names)
def test_whenImportClassFromFileByKey_resultsIsTheExpected(self):
# arrange
path = "test_classes/a.py"
# act
classes = pyrulo.class_imports.import_classes_in_file(path, object)
names = [cls.__name__ for cls in classes]
# assert
self.assertIn("A", names)
def test_whenImportClassesFromExternalFile_resultIsTheExpected(self):
# arrange
path = "C:/_cosas/Desarrollo/Proyectos/Python/propsettings/propsettings/setting.py"
# act
classes = pyrulo.class_imports.import_classes_in_file(path, object)
names = [cls.__name__ for cls in classes]
# assert
self.assertIn("Setting", names)
def test_whenImportClassesFromSiblingFile_resultIsTheExpected(self):
# arrange
path = "sibling_classes.py"
# act
classes = pyrulo.class_imports.import_classes_in_file(path, object)
names = [cls.__name__ for cls in classes]
# assert
self.assertIn("Sibling", names)
if __name__ == '__main__':
unittest.main()
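A hedged usage sketch of the discovery API these tests exercise (assumptions: the module path exists and every discovered class is default-constructible):

from pyrulo import class_imports

# Collect every class defined in the file that derives from `object`.
classes = class_imports.import_classes_in_file("test_classes/a.py", object)
instances = [cls() for cls in classes]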
| 27.793478
| 91
| 0.637857
|
import unittest
import pyrulo.class_imports
class TestImports(unittest.TestCase):
def setUp(self) -> None:
pass
def tearDown(self) -> None:
pass
def test_whenImportClassesByDir_resultIsTheExpected(self):
path = "test_classes"
classes = pyrulo.class_imports.import_classes_in_dir(path, object, False)
names = [cls.__name__ for cls in classes]
counts = {}
for name in names:
counts.setdefault(name, 0)
counts[name] += 1
self.assertIn("A", names)
self.assertIn("B", names)
self.assertIn("C", names)
self.assertEqual(counts["A"], 1)
self.assertEqual(counts["B"], 1)
self.assertEqual(counts["C"], 1)
def test_whenImportClassesByExternalDir_resultIsTheExpected(self):
path = "C:/_cosas/Desarrollo/Proyectos/Python/propsettings/propsettings"
classes = pyrulo.class_imports.import_classes_in_dir(path, object, False)
names = [cls.__name__ for cls in classes]
self.assertIn("Setting", names)
def test_whenImportClassFromFile_resultsIsTheExpected(self):
path = "test_classes/a.py"
classes = pyrulo.class_imports.import_classes_in_file(path, object)
names = [cls.__name__ for cls in classes]
self.assertIn("A", names)
def test_whenImportClassFromFileByKey_resultsIsTheExpected(self):
path = "test_classes/a.py"
classes = pyrulo.class_imports.import_classes_in_file(path, object)
names = [cls.__name__ for cls in classes]
self.assertIn("A", names)
def test_whenImportClassesFromExternalFile_resultIsTheExpected(self):
path = "C:/_cosas/Desarrollo/Proyectos/Python/propsettings/propsettings/setting.py"
classes = pyrulo.class_imports.import_classes_in_file(path, object)
names = [cls.__name__ for cls in classes]
self.assertIn("Setting", names)
def test_whenImportClassesFromSiblingFile_resultIsTheExpected(self):
path = "sibling_classes.py"
classes = pyrulo.class_imports.import_classes_in_file(path, object)
names = [cls.__name__ for cls in classes]
self.assertIn("Sibling", names)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f709b0dedd5c969d909177a66ee08e53365ca5a2
| 1,357
|
py
|
Python
|
test/test_settings.py
|
mm40/pudb
|
889016708fccdcb27b6cbe03b94d626f6d39be46
|
[
"MIT"
] | 3
|
2015-12-23T06:45:52.000Z
|
2017-03-14T10:04:44.000Z
|
test/test_settings.py
|
mm40/pudb
|
889016708fccdcb27b6cbe03b94d626f6d39be46
|
[
"MIT"
] | null | null | null |
test/test_settings.py
|
mm40/pudb
|
889016708fccdcb27b6cbe03b94d626f6d39be46
|
[
"MIT"
] | 3
|
2015-08-10T17:41:14.000Z
|
2020-03-03T10:13:47.000Z
|
import collections
import pytest # noqa: F401
from pudb.py3compat import builtins
from pudb.settings import load_breakpoints, save_breakpoints
def test_load_breakpoints(mocker):
fake_data = ["b /home/user/test.py:41"], ["b /home/user/test.py:50"]
mock_open = mocker.mock_open()
mock_open.return_value.readlines.side_effect = fake_data
mocker.patch.object(builtins, "open", mock_open)
mocker.patch("pudb.settings.lookup_module",
mocker.Mock(return_value="/home/user/test.py"))
mocker.patch("pudb.settings.get_breakpoint_invalid_reason",
mocker.Mock(return_value=None))
result = load_breakpoints()
expected = [("/home/user/test.py", 41, False, None, None),
("/home/user/test.py", 50, False, None, None)]
assert result == expected
def test_save_breakpoints(mocker):
MockBP = collections.namedtuple("MockBreakpoint", "file line cond")
mock_breakpoints = [MockBP("/home/user/test.py", 41, None),
MockBP("/home/user/test.py", 50, None)]
mocker.patch("pudb.settings.get_breakpoints_file_name",
mocker.Mock(return_value="saved-breakpoints"))
mock_open = mocker.mock_open()
mocker.patch.object(builtins, "open", mock_open)
save_breakpoints(mock_breakpoints)
mock_open.assert_called_with("saved-breakpoints", "w")
| 38.771429
| 72
| 0.692704
|
import collections
import pytest
from pudb.py3compat import builtins
from pudb.settings import load_breakpoints, save_breakpoints
def test_load_breakpoints(mocker):
fake_data = ["b /home/user/test.py:41"], ["b /home/user/test.py:50"]
mock_open = mocker.mock_open()
mock_open.return_value.readlines.side_effect = fake_data
mocker.patch.object(builtins, "open", mock_open)
mocker.patch("pudb.settings.lookup_module",
mocker.Mock(return_value="/home/user/test.py"))
mocker.patch("pudb.settings.get_breakpoint_invalid_reason",
mocker.Mock(return_value=None))
result = load_breakpoints()
expected = [("/home/user/test.py", 41, False, None, None),
("/home/user/test.py", 50, False, None, None)]
assert result == expected
def test_save_breakpoints(mocker):
MockBP = collections.namedtuple("MockBreakpoint", "file line cond")
mock_breakpoints = [MockBP("/home/user/test.py", 41, None),
MockBP("/home/user/test.py", 50, None)]
mocker.patch("pudb.settings.get_breakpoints_file_name",
mocker.Mock(return_value="saved-breakpoints"))
mock_open = mocker.mock_open()
mocker.patch.object(builtins, "open", mock_open)
save_breakpoints(mock_breakpoints)
mock_open.assert_called_with("saved-breakpoints", "w")
| true
| true
|
f709b235057889c4af135a5edda5c8d0cda7c681
| 10,703
|
py
|
Python
|
tests/unit/Containers.py
|
rashmi43/platform-engine
|
dd9a22742bc8dc43a530ea5edef39b3c35db57c1
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/Containers.py
|
rashmi43/platform-engine
|
dd9a22742bc8dc43a530ea5edef39b3c35db57c1
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/Containers.py
|
rashmi43/platform-engine
|
dd9a22742bc8dc43a530ea5edef39b3c35db57c1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import hashlib
from unittest.mock import MagicMock
from asyncy.AppConfig import Expose
from asyncy.Containers import Containers
from asyncy.Exceptions import ActionNotFound, ContainerSpecNotRegisteredError,\
EnvironmentVariableNotFound, K8sError
from asyncy.Kubernetes import Kubernetes
from asyncy.constants.LineConstants import LineConstants
from asyncy.constants.ServiceConstants import ServiceConstants
from asyncy.entities.Volume import Volume
from asyncy.processing import Story
import pytest
from pytest import fixture, mark
@fixture
def line():
return MagicMock()
def test_is_service_reusable(story):
story.app.services = {
'alpine': {
'configuration': {
'actions': {
'echo': {
'run': 'foo'
}
}
}
}
}
line = {
LineConstants.service: 'alpine',
LineConstants.command: 'echo'
}
assert Containers.is_service_reusable(story.app, line) is False
story.app.services['alpine']['configuration']['actions']['echo'][
'run'] = None
assert Containers.is_service_reusable(story.app, line) is True
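# Added note, grounded in the fixture above: a service counts as reusable only
# when none of its actions define a custom 'run' command; a custom run binds
# the container to the specific story line that launched it.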
@mark.parametrize('reusable', [False, True])
@mark.parametrize('name', ['alpine', 'a!lpine', 'ALPINE', '__aLpInE'])
def test_get_container_name(patch, story, line, reusable, name):
patch.object(Containers, 'is_service_reusable', return_value=reusable)
story.app.app_id = 'my_app'
story.app.version = 'v2'
ret = Containers.get_container_name(story.app, story.name, line, name)
if reusable:
assert ret == f'alpine-{Containers.hash_service_name(story.app, name)}'
else:
h = Containers.hash_service_name_and_story_line(story.app, story.name,
line, name)
assert ret == f'alpine-{h}'
@mark.asyncio
async def test_exec():
with pytest.raises(K8sError):
await Containers.exec(None, None, None, None, None)
@mark.asyncio
async def test_container_get_hostname(patch, story, line):
story.app.app_id = 'my_app'
patch.object(Containers, 'get_container_name', return_value='foo')
ret = await Containers.get_hostname(story, line, 'foo')
assert ret == 'foo.my_app.svc.cluster.local'
@mark.asyncio
async def test_clean_app(patch, async_mock):
patch.object(Kubernetes, 'clean_namespace', new=async_mock())
app = MagicMock()
await Containers.clean_app(app)
Kubernetes.clean_namespace.mock.assert_called_with(app)
@mark.asyncio
async def test_remove_volume(patch, story, line, async_mock):
patch.object(Kubernetes, 'remove_volume', new=async_mock())
await Containers.remove_volume(story.app, 'foo')
Kubernetes.remove_volume.mock.assert_called_with(story.app, 'foo')
@mark.asyncio
async def test_prepare_for_deployment(patch, async_mock):
patch.object(Kubernetes, 'clean_namespace', new=async_mock())
story = MagicMock()
await Containers.prepare_for_deployment(story)
Kubernetes.clean_namespace.mock.assert_called_with(story.app)
def test_format_command(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', '{"msg":"foo"}'] == cmd
@mark.parametrize('reusable', [True, False])
def test_hash_volume_name(patch, story, line, reusable):
line['ln'] = '1'
patch.object(Containers, 'is_service_reusable', return_value=reusable)
name = 'my_volume'
service = 'foo'
key = name + '-' + service
if not reusable:
key = f'{key}-{line["ln"]}'
    expected = 'myvolume-' + hashlib.sha1(key.encode('utf-8')).hexdigest()
assert Containers.hash_volume_name(story.app, line, service, name) == \
expected
def test_hash_ingress_name():
e = Expose(service='service',
service_expose_name='expose_name',
http_path='expose_path')
ret = Containers.hash_ingress_name(e)
assert ret == 'exposename-0cf994f170f9d213bb814f74baca87ea149f7536'
@mark.asyncio
async def test_expose_service(app, patch, async_mock):
container_name = 'container_name'
patch.object(Containers, 'get_container_name',
return_value=container_name)
patch.object(Containers, 'create_and_start', new=async_mock())
patch.object(Kubernetes, 'create_ingress', new=async_mock())
e = Expose(service='service',
service_expose_name='expose_name',
http_path='expose_path')
ingress_name = Containers.hash_ingress_name(e)
hostname = f'{app.app_dns}--{Containers.get_simple_name(e.service)}'
await Containers.expose_service(app, e)
Containers.create_and_start.mock.assert_called_with(app, None, e.service,
container_name)
Kubernetes.create_ingress.mock.assert_called_with(ingress_name, app, e,
container_name,
hostname=hostname)
def test_service_name_and_story_line(patch, story):
patch.object(hashlib, 'sha1')
story.name = 'story_name'
story.app.version = 'v29'
ret = Containers.hash_service_name_and_story_line(
story.app, story.name, {'ln': '1'}, 'alpine')
hashlib.sha1.assert_called_with(f'alpine-v29-{story.name}-1'
.encode('utf-8'))
assert ret == hashlib.sha1().hexdigest()
def test_service_name(patch, story):
story.app.version = 'v2'
patch.object(hashlib, 'sha1')
ret = Containers.hash_service_name(story.app, 'alpine')
    hashlib.sha1.assert_called_with('alpine-v2'.encode('utf-8'))
assert ret == hashlib.sha1().hexdigest()
@mark.asyncio
async def test_create_and_start_no_action(story):
story.app.services = {'alpine': {'configuration': {}}}
with pytest.raises(ActionNotFound):
await Containers.create_and_start(story.app, {'command': 'foo'},
'alpine', 'alpine')
@mark.parametrize('run_command', [None, ['/bin/bash', 'sleep', '10000']])
@mark.parametrize('with_volumes', [True, False])
@mark.parametrize('missing_required_var', [False, True])
@mark.asyncio
async def test_start(patch, story, async_mock,
missing_required_var,
run_command, with_volumes):
line = {
LineConstants.service: 'alpine',
LineConstants.command: 'echo',
'ln': '1'
}
patch.object(Kubernetes, 'create_pod', new=async_mock())
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {
}
},
'volumes': {
'db': {
'persist': True,
'target': '/db'
},
'tmp': {
'persist': False,
'target': '/tmp'
}
},
'environment': {
'param_1': {
'required': True
},
'alpine_only': {}
}
}
}
}
if not with_volumes:
del story.app.services['alpine'][ServiceConstants.config]['volumes']
if run_command is not None:
story.app.services['alpine'][ServiceConstants.config]['actions'][
'echo'] = {'run': {'command': run_command}}
story.app.environment = {
'alpine': {
'alpine_only': True,
'param_1': 'hello_world'
},
'global': 'yes'
}
if missing_required_var:
story.app.environment['alpine']['param_1'] = None
patch.object(Containers, 'get_container_name',
return_value='asyncy-alpine')
expected_volumes = []
if with_volumes:
hash_db = Containers.hash_volume_name(story.app, line, 'alpine', 'db')
hash_tmp = Containers.hash_volume_name(story.app, line, 'alpine',
'tmp')
expected_volumes = [
Volume(persist=True, name=hash_db, mount_path='/db'),
Volume(persist=False, name=hash_tmp, mount_path='/tmp'),
]
if missing_required_var:
with pytest.raises(EnvironmentVariableNotFound):
await Containers.start(story, line)
return
else:
await Containers.start(story, line)
Kubernetes.create_pod.mock.assert_called_with(
app=story.app, service='alpine',
image='alpine', container_name='asyncy-alpine',
start_command=run_command or ['tail', '-f', '/dev/null'],
shutdown_command=None,
env={'alpine_only': True, 'param_1': 'hello_world'},
volumes=expected_volumes)
@mark.asyncio
async def test_init(story, patch, async_mock):
patch.object(Kubernetes, 'create_namespace', new=async_mock())
await Containers.init(story.app)
Kubernetes.create_namespace.mock.assert_called_with(story.app)
def test_format_command_no_format(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
config = app.services['alpine'][ServiceConstants.config]
config['actions']['echo']['format'] = None
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', '{"msg":"foo"}'] == cmd
def test_format_command_no_spec(logger, app, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = {}
with pytest.raises(ContainerSpecNotRegisteredError):
Containers.format_command(story, echo_line, 'alpine', 'echo')
def test_format_command_no_args(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
echo_service['alpine'][ServiceConstants.config]['actions']['echo'][
'arguments'] = None
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo'] == cmd
def test_format_command_with_format(patch, logger, app,
echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
patch.object(story, 'argument_by_name', return_value='asyncy')
app.services = echo_service
config = app.services['alpine'][ServiceConstants.config]
config['actions']['echo']['format'] = 'echo {msg}'
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', 'asyncy'] == cmd
| 33.136223
| 79
| 0.624872
|
import hashlib
from unittest.mock import MagicMock
from asyncy.AppConfig import Expose
from asyncy.Containers import Containers
from asyncy.Exceptions import ActionNotFound, ContainerSpecNotRegisteredError,\
EnvironmentVariableNotFound, K8sError
from asyncy.Kubernetes import Kubernetes
from asyncy.constants.LineConstants import LineConstants
from asyncy.constants.ServiceConstants import ServiceConstants
from asyncy.entities.Volume import Volume
from asyncy.processing import Story
import pytest
from pytest import fixture, mark
@fixture
def line():
return MagicMock()
def test_is_service_reusable(story):
story.app.services = {
'alpine': {
'configuration': {
'actions': {
'echo': {
'run': 'foo'
}
}
}
}
}
line = {
LineConstants.service: 'alpine',
LineConstants.command: 'echo'
}
assert Containers.is_service_reusable(story.app, line) is False
story.app.services['alpine']['configuration']['actions']['echo'][
'run'] = None
assert Containers.is_service_reusable(story.app, line) is True
@mark.parametrize('reusable', [False, True])
@mark.parametrize('name', ['alpine', 'a!lpine', 'ALPINE', '__aLpInE'])
def test_get_container_name(patch, story, line, reusable, name):
patch.object(Containers, 'is_service_reusable', return_value=reusable)
story.app.app_id = 'my_app'
story.app.version = 'v2'
ret = Containers.get_container_name(story.app, story.name, line, name)
if reusable:
assert ret == f'alpine-{Containers.hash_service_name(story.app, name)}'
else:
h = Containers.hash_service_name_and_story_line(story.app, story.name,
line, name)
assert ret == f'alpine-{h}'
@mark.asyncio
async def test_exec():
with pytest.raises(K8sError):
await Containers.exec(None, None, None, None, None)
@mark.asyncio
async def test_container_get_hostname(patch, story, line):
story.app.app_id = 'my_app'
patch.object(Containers, 'get_container_name', return_value='foo')
ret = await Containers.get_hostname(story, line, 'foo')
assert ret == 'foo.my_app.svc.cluster.local'
@mark.asyncio
async def test_clean_app(patch, async_mock):
patch.object(Kubernetes, 'clean_namespace', new=async_mock())
app = MagicMock()
await Containers.clean_app(app)
Kubernetes.clean_namespace.mock.assert_called_with(app)
@mark.asyncio
async def test_remove_volume(patch, story, line, async_mock):
patch.object(Kubernetes, 'remove_volume', new=async_mock())
await Containers.remove_volume(story.app, 'foo')
Kubernetes.remove_volume.mock.assert_called_with(story.app, 'foo')
@mark.asyncio
async def test_prepare_for_deployment(patch, async_mock):
patch.object(Kubernetes, 'clean_namespace', new=async_mock())
story = MagicMock()
await Containers.prepare_for_deployment(story)
Kubernetes.clean_namespace.mock.assert_called_with(story.app)
def test_format_command(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', '{"msg":"foo"}'] == cmd
@mark.parametrize('reusable', [True, False])
def test_hash_volume_name(patch, story, line, reusable):
line['ln'] = '1'
patch.object(Containers, 'is_service_reusable', return_value=reusable)
name = 'my_volume'
service = 'foo'
key = name + '-' + service
if not reusable:
key = f'{key}-{line["ln"]}'
    expected = 'myvolume-' + hashlib.sha1(key.encode('utf-8')).hexdigest()
assert Containers.hash_volume_name(story.app, line, service, name) == \
expected
def test_hash_ingress_name():
e = Expose(service='service',
service_expose_name='expose_name',
http_path='expose_path')
ret = Containers.hash_ingress_name(e)
assert ret == 'exposename-0cf994f170f9d213bb814f74baca87ea149f7536'
@mark.asyncio
async def test_expose_service(app, patch, async_mock):
container_name = 'container_name'
patch.object(Containers, 'get_container_name',
return_value=container_name)
patch.object(Containers, 'create_and_start', new=async_mock())
patch.object(Kubernetes, 'create_ingress', new=async_mock())
e = Expose(service='service',
service_expose_name='expose_name',
http_path='expose_path')
ingress_name = Containers.hash_ingress_name(e)
hostname = f'{app.app_dns}--{Containers.get_simple_name(e.service)}'
await Containers.expose_service(app, e)
Containers.create_and_start.mock.assert_called_with(app, None, e.service,
container_name)
Kubernetes.create_ingress.mock.assert_called_with(ingress_name, app, e,
container_name,
hostname=hostname)
def test_service_name_and_story_line(patch, story):
patch.object(hashlib, 'sha1')
story.name = 'story_name'
story.app.version = 'v29'
ret = Containers.hash_service_name_and_story_line(
story.app, story.name, {'ln': '1'}, 'alpine')
hashlib.sha1.assert_called_with(f'alpine-v29-{story.name}-1'
.encode('utf-8'))
assert ret == hashlib.sha1().hexdigest()
def test_service_name(patch, story):
story.app.version = 'v2'
patch.object(hashlib, 'sha1')
ret = Containers.hash_service_name(story.app, 'alpine')
    hashlib.sha1.assert_called_with('alpine-v2'.encode('utf-8'))
assert ret == hashlib.sha1().hexdigest()
@mark.asyncio
async def test_create_and_start_no_action(story):
story.app.services = {'alpine': {'configuration': {}}}
with pytest.raises(ActionNotFound):
await Containers.create_and_start(story.app, {'command': 'foo'},
'alpine', 'alpine')
@mark.parametrize('run_command', [None, ['/bin/bash', 'sleep', '10000']])
@mark.parametrize('with_volumes', [True, False])
@mark.parametrize('missing_required_var', [False, True])
@mark.asyncio
async def test_start(patch, story, async_mock,
missing_required_var,
run_command, with_volumes):
line = {
LineConstants.service: 'alpine',
LineConstants.command: 'echo',
'ln': '1'
}
patch.object(Kubernetes, 'create_pod', new=async_mock())
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {
}
},
'volumes': {
'db': {
'persist': True,
'target': '/db'
},
'tmp': {
'persist': False,
'target': '/tmp'
}
},
'environment': {
'param_1': {
'required': True
},
'alpine_only': {}
}
}
}
}
if not with_volumes:
del story.app.services['alpine'][ServiceConstants.config]['volumes']
if run_command is not None:
story.app.services['alpine'][ServiceConstants.config]['actions'][
'echo'] = {'run': {'command': run_command}}
story.app.environment = {
'alpine': {
'alpine_only': True,
'param_1': 'hello_world'
},
'global': 'yes'
}
if missing_required_var:
story.app.environment['alpine']['param_1'] = None
patch.object(Containers, 'get_container_name',
return_value='asyncy-alpine')
expected_volumes = []
if with_volumes:
hash_db = Containers.hash_volume_name(story.app, line, 'alpine', 'db')
hash_tmp = Containers.hash_volume_name(story.app, line, 'alpine',
'tmp')
expected_volumes = [
Volume(persist=True, name=hash_db, mount_path='/db'),
Volume(persist=False, name=hash_tmp, mount_path='/tmp'),
]
if missing_required_var:
with pytest.raises(EnvironmentVariableNotFound):
await Containers.start(story, line)
return
else:
await Containers.start(story, line)
Kubernetes.create_pod.mock.assert_called_with(
app=story.app, service='alpine',
image='alpine', container_name='asyncy-alpine',
start_command=run_command or ['tail', '-f', '/dev/null'],
shutdown_command=None,
env={'alpine_only': True, 'param_1': 'hello_world'},
volumes=expected_volumes)
@mark.asyncio
async def test_init(story, patch, async_mock):
patch.object(Kubernetes, 'create_namespace', new=async_mock())
await Containers.init(story.app)
Kubernetes.create_namespace.mock.assert_called_with(story.app)
def test_format_command_no_format(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
config = app.services['alpine'][ServiceConstants.config]
config['actions']['echo']['format'] = None
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', '{"msg":"foo"}'] == cmd
def test_format_command_no_spec(logger, app, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = {}
with pytest.raises(ContainerSpecNotRegisteredError):
Containers.format_command(story, echo_line, 'alpine', 'echo')
def test_format_command_no_args(logger, app, echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
app.services = echo_service
echo_service['alpine'][ServiceConstants.config]['actions']['echo'][
'arguments'] = None
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo'] == cmd
def test_format_command_with_format(patch, logger, app,
echo_service, echo_line):
story = Story.story(app, logger, 'echo.story')
patch.object(story, 'argument_by_name', return_value='asyncy')
app.services = echo_service
config = app.services['alpine'][ServiceConstants.config]
config['actions']['echo']['format'] = 'echo {msg}'
cmd = Containers.format_command(story, echo_line, 'alpine', 'echo')
assert ['echo', 'asyncy'] == cmd
| true
| true
|
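The asyncy test record above leans on an `async_mock` fixture that is never shown in the file. A minimal sketch of what such a helper could look like, assuming it only needs to be awaitable and expose the inner mock through a `.mock` attribute, as the `mock.assert_called_with(...)` assertions in the tests require:

# Hypothetical helper; the real fixture would live in the project's conftest.py.
from unittest.mock import MagicMock

def async_mock(return_value=None):
    inner = MagicMock(return_value=return_value)

    async def wrapper(*args, **kwargs):
        # Delegate to the synchronous mock so calls can be asserted afterwards.
        return inner(*args, **kwargs)

    wrapper.mock = inner  # enables `...mock.assert_called_with(...)`
    return wrapper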
f709b2e65336b023bfdac6a056b5c4f86ebed150
| 1,327
|
py
|
Python
|
yepes/fields/postal_code.py
|
samuelmaudo/yepes
|
1ef9a42d4eaa70d9b3e6e7fa519396c1e1174fcb
|
[
"BSD-3-Clause"
] | null | null | null |
yepes/fields/postal_code.py
|
samuelmaudo/yepes
|
1ef9a42d4eaa70d9b3e6e7fa519396c1e1174fcb
|
[
"BSD-3-Clause"
] | null | null | null |
yepes/fields/postal_code.py
|
samuelmaudo/yepes
|
1ef9a42d4eaa70d9b3e6e7fa519396c1e1174fcb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from yepes import forms
from yepes.fields.char import CharField
from yepes.validators import PostalCodeValidator
from yepes.utils.deconstruct import clean_keywords
class PostalCodeField(CharField):
default_validators = [PostalCodeValidator()]
description = _('Generic postal code')
def __init__(self, *args, **kwargs):
kwargs['force_lower'] = False
kwargs['force_upper'] = True
kwargs.setdefault('max_length', 15)
kwargs['normalize_spaces'] = True
kwargs['trim_spaces'] = False
super(PostalCodeField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(PostalCodeField, self).deconstruct()
path = path.replace('yepes.fields.postal_code', 'yepes.fields')
clean_keywords(self, kwargs, variables={
'max_length': 15,
}, constants=[
'force_lower',
'force_upper',
'normalize_spaces',
'trim_spaces',
])
return name, path, args, kwargs
def formfield(self, **kwargs):
kwargs.setdefault('form_class', forms.PostalCodeField)
return super(PostalCodeField, self).formfield(**kwargs)
| 30.860465
| 77
| 0.660889
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from yepes import forms
from yepes.fields.char import CharField
from yepes.validators import PostalCodeValidator
from yepes.utils.deconstruct import clean_keywords
class PostalCodeField(CharField):
default_validators = [PostalCodeValidator()]
description = _('Generic postal code')
def __init__(self, *args, **kwargs):
kwargs['force_lower'] = False
kwargs['force_upper'] = True
kwargs.setdefault('max_length', 15)
kwargs['normalize_spaces'] = True
kwargs['trim_spaces'] = False
super(PostalCodeField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(PostalCodeField, self).deconstruct()
path = path.replace('yepes.fields.postal_code', 'yepes.fields')
clean_keywords(self, kwargs, variables={
'max_length': 15,
}, constants=[
'force_lower',
'force_upper',
'normalize_spaces',
'trim_spaces',
])
return name, path, args, kwargs
def formfield(self, **kwargs):
kwargs.setdefault('form_class', forms.PostalCodeField)
return super(PostalCodeField, self).formfield(**kwargs)
| true
| true
|
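A hedged usage sketch for the `PostalCodeField` defined above; the model is illustrative, and the import path is inferred from the `deconstruct` override, which rewrites the module path to `yepes.fields`:

from django.db import models
from yepes.fields import PostalCodeField

class Address(models.Model):
    # Values are upper-cased with normalized spaces; max_length defaults to 15.
    postal_code = PostalCodeField(blank=True)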
f709b3b126143357e20c2bcb075dff3ce91691c4
| 4,078
|
py
|
Python
|
pymodel/TestSuite.py
|
Python3pkg/PyModel
|
e0d404e122202c25c85dcebedcbd567837068b65
|
[
"BSD-3-Clause"
] | 3
|
2017-06-09T22:45:16.000Z
|
2021-02-13T23:18:44.000Z
|
pymodel/TestSuite.py
|
Python3pkg/PyModel
|
e0d404e122202c25c85dcebedcbd567837068b65
|
[
"BSD-3-Clause"
] | null | null | null |
pymodel/TestSuite.py
|
Python3pkg/PyModel
|
e0d404e122202c25c85dcebedcbd567837068b65
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Interface to a test suite module (one or more runs) used by ProductModelProgram
"""
from operator import concat
from .model import Model
from functools import reduce
class TestSuite(Model):
def __init__(self, module, exclude, include):
Model.__init__(self, module, exclude, include)
def post_init(self):
"""
Now that all modules have been imported and executed their __init__
do a postprocessing pass
to process metadata that might be affected by configuration modules
"""
# Do all of this work here rather than in __init__
# so it can include the effects of any pymodel config modules
# recognize PEP-8 style names (all lowercase) if present
if hasattr(self.module, 'testsuite'):
self.module.testSuite = self.module.testsuite
if hasattr(self.module, 'test_suite'):
self.module.testSuite = self.module.test_suite
if hasattr(self.module, 'actions'):
self.actions = list(self.module.actions) # copy, actions from cmd line
else:
self.actions = list(self.actions_in_suite()) # default, copy
Model.post_init(self) # uses self.actions
# Revise the test suite to account for excluded, included actions
self.test_suite = list()
for run in self.module.testSuite:
            new_run = list() # list, not tuple: must be mutable
for action in run:
if action[0] in self.actions:
new_run.append(action)
else:
break # truncate the run before the excluded action
self.test_suite.append(new_run)
# prepare for first run
self.irun = 0 # index of current test run in test suite
self.pc = 0 # program counter
def actions_in_suite(self):
# there might be two or three items in action_tuple
return tuple(set(reduce(concat,[[action_tuple[0] for action_tuple in run]
for run in self.module.testSuite])))
def Accepting(self):
# In a test suite, the only accepting states are at ends of runs
# NB Here Accepting() is called *after* DoAction() that advances self.pc
length = len(self.test_suite[self.irun]) # number of tuples in run
return (self.pc == length)
def make_properties(self, accepting):
return { 'accepting': accepting, 'statefilter': True,
'stateinvariant': True }
def Properties(self):
return self.make_properties(self.Accepting())
def Reset(self): # needed by stepper
self.pc = 0
if self.irun < len(self.test_suite) - 1:
self.irun += 1
else:
raise StopIteration # no more runs in test suite
def ActionEnabled(self, a, args):
"""
action a with args is enabled in the current state
"""
step = self.test_suite[self.irun][self.pc]
action, arguments = step[0:2] # works whether or not step has result
return (a == action and args == arguments)
def EnabledTransitions(self, cleanup=False):
"""
        Return a list of tuples for the enabled actions. Here, there is just one:
        (action, args, result, next state, properties of the next state).
        Next state is a pair: the run number and the step within the run.
        In a test suite, there is always just *one* next action, or *none*.
        Ignore cleanup; a test suite should always end in an accepting state.
"""
run = self.test_suite[self.irun]
length = len(run)
if self.pc < length:
step = run[self.pc]
action, args = step[0:2]
result = step[2] if len(step) > 2 else None # result is optional
next = self.pc + 1
accepting = (next == length)
return([(action, args, result, (self.irun,next),
self.make_properties(accepting))])
else:
            return list() # test run finished, nothing enabled
def DoAction(self, a, args):
step = self.test_suite[self.irun][self.pc]
result = step[2] if len(step) > 2 else None # result is optional
self.pc += 1
return result
def Current(self):
return (self.irun, self.pc)
def Restore(self, state):
"""
Restore state
"""
self.irun, self.pc = state
# GetNext not needed
| 34.268908
| 79
| 0.660128
|
from operator import concat
from .model import Model
from functools import reduce
class TestSuite(Model):
def __init__(self, module, exclude, include):
Model.__init__(self, module, exclude, include)
def post_init(self):
if hasattr(self.module, 'testsuite'):
self.module.testSuite = self.module.testsuite
if hasattr(self.module, 'test_suite'):
self.module.testSuite = self.module.test_suite
if hasattr(self.module, 'actions'):
self.actions = list(self.module.actions)
else:
self.actions = list(self.actions_in_suite())
Model.post_init(self)
self.test_suite = list()
for run in self.module.testSuite:
new_run = list()
for action in run:
if action[0] in self.actions:
new_run.append(action)
else:
break
self.test_suite.append(new_run)
self.irun = 0
self.pc = 0
def actions_in_suite(self):
return tuple(set(reduce(concat,[[action_tuple[0] for action_tuple in run]
for run in self.module.testSuite])))
def Accepting(self):
length = len(self.test_suite[self.irun])
return (self.pc == length)
def make_properties(self, accepting):
return { 'accepting': accepting, 'statefilter': True,
'stateinvariant': True }
def Properties(self):
return self.make_properties(self.Accepting())
def Reset(self):
self.pc = 0
if self.irun < len(self.test_suite) - 1:
self.irun += 1
else:
raise StopIteration
def ActionEnabled(self, a, args):
step = self.test_suite[self.irun][self.pc]
action, arguments = step[0:2]
return (a == action and args == arguments)
def EnabledTransitions(self, cleanup=False):
run = self.test_suite[self.irun]
length = len(run)
if self.pc < length:
step = run[self.pc]
action, args = step[0:2]
result = step[2] if len(step) > 2 else None
next = self.pc + 1
accepting = (next == length)
return([(action, args, result, (self.irun,next),
self.make_properties(accepting))])
else:
return list()
def DoAction(self, a, args):
step = self.test_suite[self.irun][self.pc]
result = step[2] if len(step) > 2 else None
self.pc += 1
return result
def Current(self):
return (self.irun, self.pc)
def Restore(self, state):
self.irun, self.pc = state
| true
| true
|
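For context, a sketch of the test suite module shape this class consumes: `module.testSuite` is a list of runs, and each run is a list of (action, args[, result]) tuples, which is exactly what `actions_in_suite` and `EnabledTransitions` unpack. The action names and values below are made up:

# Hypothetical pymodel test suite module.
def press(button): pass
def release(): pass

testSuite = [
    [(press, ('start',), True), (release, ())],  # run 0: two steps
    [(press, ('stop',), False)],                 # run 1: one step
]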
f709b422a4a86fca2bfa9d6d75f29ff165ea07aa
| 2,764
|
py
|
Python
|
eth/vm/forks/byzantium/__init__.py
|
dylanjw/py-evm
|
c78020fe0cf6b4d98b93264872dfd10c59757e06
|
[
"MIT"
] | 5
|
2018-09-28T20:01:42.000Z
|
2022-02-22T19:54:46.000Z
|
env/lib/python3.7/site-packages/eth/vm/forks/byzantium/__init__.py
|
kpeluso/vyper-dynamic-array
|
fb18070650c6fafeca9d3ab99d667147a4b3acc4
|
[
"MIT"
] | null | null | null |
env/lib/python3.7/site-packages/eth/vm/forks/byzantium/__init__.py
|
kpeluso/vyper-dynamic-array
|
fb18070650c6fafeca9d3ab99d667147a4b3acc4
|
[
"MIT"
] | 1
|
2019-02-27T21:29:16.000Z
|
2019-02-27T21:29:16.000Z
|
from typing import ( # noqa: F401
Type,
)
from cytoolz import (
curry,
)
from eth_utils import (
encode_hex,
ValidationError,
)
from eth.constants import (
MAX_UNCLE_DEPTH,
)
from eth.rlp.blocks import BaseBlock # noqa: F401
from eth.rlp.receipts import Receipt
from eth.validation import (
validate_lte,
)
from eth.vm.forks.spurious_dragon import SpuriousDragonVM
from eth.vm.forks.frontier import make_frontier_receipt
from eth.vm.state import BaseState # noqa: F401
from .blocks import ByzantiumBlock
from .constants import (
EIP649_BLOCK_REWARD,
EIP658_TRANSACTION_STATUS_CODE_FAILURE,
EIP658_TRANSACTION_STATUS_CODE_SUCCESS,
)
from .headers import (
create_byzantium_header_from_parent,
configure_byzantium_header,
compute_byzantium_difficulty,
)
from .state import ByzantiumState
def make_byzantium_receipt(base_header, transaction, computation, state):
frontier_receipt = make_frontier_receipt(base_header, transaction, computation, state)
if computation.is_error:
status_code = EIP658_TRANSACTION_STATUS_CODE_FAILURE
else:
status_code = EIP658_TRANSACTION_STATUS_CODE_SUCCESS
return frontier_receipt.copy(state_root=status_code)
@curry
def get_uncle_reward(block_reward, block_number, uncle):
block_number_delta = block_number - uncle.block_number
validate_lte(block_number_delta, MAX_UNCLE_DEPTH)
return (8 - block_number_delta) * block_reward // 8
EIP658_STATUS_CODES = {
EIP658_TRANSACTION_STATUS_CODE_SUCCESS,
EIP658_TRANSACTION_STATUS_CODE_FAILURE,
}
class ByzantiumVM(SpuriousDragonVM):
# fork name
fork = 'byzantium'
# classes
block_class = ByzantiumBlock # type: Type[BaseBlock]
_state_class = ByzantiumState # type: Type[BaseState]
# Methods
create_header_from_parent = staticmethod(create_byzantium_header_from_parent)
compute_difficulty = staticmethod(compute_byzantium_difficulty)
configure_header = configure_byzantium_header
make_receipt = staticmethod(make_byzantium_receipt)
get_uncle_reward = staticmethod(get_uncle_reward(EIP649_BLOCK_REWARD))
@classmethod
def validate_receipt(cls, receipt: Receipt) -> None:
super().validate_receipt(receipt)
if receipt.state_root not in EIP658_STATUS_CODES:
raise ValidationError(
"The receipt's `state_root` must be one of [{0}, {1}]. Got: "
"{2}".format(
encode_hex(EIP658_TRANSACTION_STATUS_CODE_SUCCESS),
encode_hex(EIP658_TRANSACTION_STATUS_CODE_FAILURE),
encode_hex(receipt.state_root),
)
)
@staticmethod
def get_block_reward():
return EIP649_BLOCK_REWARD
| 29.094737
| 90
| 0.736614
|
from typing import (
Type,
)
from cytoolz import (
curry,
)
from eth_utils import (
encode_hex,
ValidationError,
)
from eth.constants import (
MAX_UNCLE_DEPTH,
)
from eth.rlp.blocks import BaseBlock
from eth.rlp.receipts import Receipt
from eth.validation import (
validate_lte,
)
from eth.vm.forks.spurious_dragon import SpuriousDragonVM
from eth.vm.forks.frontier import make_frontier_receipt
from eth.vm.state import BaseState
from .blocks import ByzantiumBlock
from .constants import (
EIP649_BLOCK_REWARD,
EIP658_TRANSACTION_STATUS_CODE_FAILURE,
EIP658_TRANSACTION_STATUS_CODE_SUCCESS,
)
from .headers import (
create_byzantium_header_from_parent,
configure_byzantium_header,
compute_byzantium_difficulty,
)
from .state import ByzantiumState
def make_byzantium_receipt(base_header, transaction, computation, state):
frontier_receipt = make_frontier_receipt(base_header, transaction, computation, state)
if computation.is_error:
status_code = EIP658_TRANSACTION_STATUS_CODE_FAILURE
else:
status_code = EIP658_TRANSACTION_STATUS_CODE_SUCCESS
return frontier_receipt.copy(state_root=status_code)
@curry
def get_uncle_reward(block_reward, block_number, uncle):
block_number_delta = block_number - uncle.block_number
validate_lte(block_number_delta, MAX_UNCLE_DEPTH)
return (8 - block_number_delta) * block_reward // 8
EIP658_STATUS_CODES = {
EIP658_TRANSACTION_STATUS_CODE_SUCCESS,
EIP658_TRANSACTION_STATUS_CODE_FAILURE,
}
class ByzantiumVM(SpuriousDragonVM):
fork = 'byzantium'
block_class = ByzantiumBlock
_state_class = ByzantiumState
create_header_from_parent = staticmethod(create_byzantium_header_from_parent)
compute_difficulty = staticmethod(compute_byzantium_difficulty)
configure_header = configure_byzantium_header
make_receipt = staticmethod(make_byzantium_receipt)
get_uncle_reward = staticmethod(get_uncle_reward(EIP649_BLOCK_REWARD))
@classmethod
def validate_receipt(cls, receipt: Receipt) -> None:
super().validate_receipt(receipt)
if receipt.state_root not in EIP658_STATUS_CODES:
raise ValidationError(
"The receipt's `state_root` must be one of [{0}, {1}]. Got: "
"{2}".format(
encode_hex(EIP658_TRANSACTION_STATUS_CODE_SUCCESS),
encode_hex(EIP658_TRANSACTION_STATUS_CODE_FAILURE),
encode_hex(receipt.state_root),
)
)
@staticmethod
def get_block_reward():
return EIP649_BLOCK_REWARD
| true
| true
|
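A standalone illustration of the reward curve encoded by the curried `get_uncle_reward` above. EIP-649 set the base block reward to 3 ETH, and Ethereum caps uncle depth at 6 (the MAX_UNCLE_DEPTH that `validate_lte` enforces):

WEI_PER_ETHER = 10 ** 18
BLOCK_REWARD = 3 * WEI_PER_ETHER  # EIP-649 reward, in wei

for delta in range(1, 7):  # delta = block_number - uncle.block_number
    reward = (8 - delta) * BLOCK_REWARD // 8
    print(f"depth {delta}: {reward / WEI_PER_ETHER} ETH")
# depth 1 pays 2.625 ETH; depth 6 pays 0.75 ETH.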
f709b5200ed53cfbc4b378054a4d8839207369cc
| 740
|
py
|
Python
|
setup.py
|
29next/next-theme-kit
|
8abe7234c0fcf8af6004385ee28d9fb29bcaef9c
|
[
"MIT"
] | 7
|
2021-05-26T11:57:20.000Z
|
2021-06-13T09:57:46.000Z
|
setup.py
|
29next/next-theme-kit
|
8abe7234c0fcf8af6004385ee28d9fb29bcaef9c
|
[
"MIT"
] | 1
|
2021-05-25T00:11:16.000Z
|
2021-05-25T02:18:56.000Z
|
setup.py
|
29next/theme-kit
|
8abe7234c0fcf8af6004385ee28d9fb29bcaef9c
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
__version__ = '1.0.1'
tests_require = [
"flake8==3.9.2",
"nose==1.3.7"
]
with open('README.md', 'r') as fh:
long_description = fh.read()
setup(
name='next-theme-kit',
author="29next",
author_email="dev@29next.com",
url='https://github.com/29next/theme-kit',
long_description=long_description,
long_description_content_type='text/markdown',
version=__version__,
install_requires=[
"PyYAML>=5.4",
"requests>=2.25",
"watchgod>=0.7",
"libsass>=0.21.0"
],
entry_points={
'console_scripts': [
'ntk = ntk.ntk:main',
],
},
packages=find_packages(),
python_requires='>=3.6'
)
| 20.555556
| 50
| 0.590541
|
from setuptools import find_packages, setup
__version__ = '1.0.1'
tests_require = [
"flake8==3.9.2",
"nose==1.3.7"
]
with open('README.md', 'r') as fh:
long_description = fh.read()
setup(
name='next-theme-kit',
author="29next",
author_email="dev@29next.com",
url='https://github.com/29next/theme-kit',
long_description=long_description,
long_description_content_type='text/markdown',
version=__version__,
install_requires=[
"PyYAML>=5.4",
"requests>=2.25",
"watchgod>=0.7",
"libsass>=0.21.0"
],
entry_points={
'console_scripts': [
'ntk = ntk.ntk:main',
],
},
packages=find_packages(),
python_requires='>=3.6'
)
| true
| true
|
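The `console_scripts` entry point in the record above means setuptools generates an `ntk` launcher roughly equivalent to the following; the module layout is inferred from the entry-point string `ntk = ntk.ntk:main`:

import sys
from ntk.ntk import main

if __name__ == '__main__':
    sys.exit(main())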
f709b5d4d2e798dbd11856e6f3b5f1768f57e8b8
| 6,050
|
py
|
Python
|
tests/test_parser.py
|
dbrattli/Expression
|
1cf04ccd5d5e277baea7113c3b420a85f22712b5
|
[
"MIT"
] | 22
|
2020-11-03T03:17:12.000Z
|
2020-11-28T07:02:38.000Z
|
tests/test_parser.py
|
dbrattli/Expression
|
1cf04ccd5d5e277baea7113c3b420a85f22712b5
|
[
"MIT"
] | null | null | null |
tests/test_parser.py
|
dbrattli/Expression
|
1cf04ccd5d5e277baea7113c3b420a85f22712b5
|
[
"MIT"
] | 1
|
2020-11-08T13:24:32.000Z
|
2020-11-08T13:24:32.000Z
|
from __future__ import annotations
import string
from dataclasses import dataclass
from typing import Any, Tuple
from expression import Error, Nothing, Ok, Option, Some, TaggedUnion, match, pipe, tag
from expression.collections import Block
from expression.extra.parser import (
Parser,
and_then,
any_of,
choice,
many,
opt,
pchar,
pfloat,
pint,
pstring,
)
def test_parse_pchar():
input = "ABC"
parseA: Parser[str] = pchar("A")
result = parseA(input)
assert result.is_ok()
with match(result) as case:
for a in case(Ok[str, str]):
assert a == "A"
if case._:
assert False
def test_parse_pchar_fluent():
input = "ABC"
parseA: Parser[str] = Parser.pchar("A")
result = parseA(input)
assert result.is_ok()
with match(result) as case:
for a in case(Ok[str, str]):
assert a == "A"
if case._:
assert False
def test_parse_a_then_b():
input = "ABC"
parse_a: Parser[str] = pchar("A")
parse_b: Parser[str] = pchar("B")
parseAB = pipe(
parse_a,
and_then(parse_b),
)
result = parseAB(input)
assert result.is_ok()
with match(result) as case:
for (a, b) in case(Ok[Tuple[str, str], str]):
assert (a, b) == ("A", "B")
if case._:
assert False
def test_parse_a_then_b_fluent():
input = "ABC"
parseAB = pchar("A").and_then(pchar("B"))
result = parseAB(input)
assert result.is_ok()
with match(result) as case:
for (a, b) in case(Ok[Tuple[str, str], str]):
assert (a, b) == ("A", "B")
if case._:
assert False
def test_pstring():
parse_abc = pstring("ABC")
ret = parse_abc("ABCDE") # Success ("ABC", "DE")
assert ret.is_ok()
with match(ret) as case:
for success in case(Ok[str, str]):
assert success == "ABC"
if case._:
assert False
ret = parse_abc("A|CDE") # Failure "Expecting 'B'. Got '|'"
assert ret.is_error()
with match(ret) as case:
for error in case(Error[str, str]):
assert error == "Expecting 'B'. Got '|'"
if case._:
assert False
ret = parse_abc("AB|DE") # Failure "Expecting 'C'. Got '|'"
assert ret.is_error()
with match(ret) as case:
for error in case(Error[str, str]):
assert error == "Expecting 'C'. Got '|'"
if case._:
assert False
def test_int():
ret = pint("123C")
with match(ret) as case:
for success in case(Ok[int, str]):
assert success == 123
if case._:
assert False
def test_int_negative():
ret = pint("-123C")
with match(ret) as case:
for success in case(Ok[int, str]):
assert success == -123
if case._:
assert False
def test_float():
ret = pfloat("123C")
with match(ret) as case:
for success in case(Ok[float, str]):
assert success == 123
if case._:
assert False
def test_float_with_decimal():
ret = pfloat("123.45C")
with match(ret) as case:
for success in case(Ok[float, str]):
assert success == 123.45
if case._:
assert False
def test_negative_float_with_decimal():
ret = pfloat("-123.45C")
with match(ret) as case:
for success in case(Ok[float, str]):
assert success == -123.45
if case._:
assert False
class ComparisonOperator(TaggedUnion):
EQ = tag()
NOT_EQ = tag()
LT = tag()
LT_E = tag()
GT = tag()
GT_E = tag()
IS = tag()
IS_NOT = tag()
IN = tag()
NOT_IN = tag()
@staticmethod
def eq() -> ComparisonOperator:
return ComparisonOperator(ComparisonOperator.EQ)
@staticmethod
def not_eq() -> ComparisonOperator:
return ComparisonOperator(ComparisonOperator.NOT_EQ)
@dataclass
class Compare:
left: Expression
comparators: Block[Expression]
ops: Block[ComparisonOperator]
class BoolOp(TaggedUnion):
AND = tag()
OR = tag()
@staticmethod
def and_() -> BoolOp:
return BoolOp(BoolOp.AND)
@staticmethod
def or_() -> BoolOp:
return BoolOp(BoolOp.OR)
class Expression(TaggedUnion):
CONSTANT = tag(Any)
NAME = tag(str)
BOOL_OP = tag(BoolOp)
COMPARE = tag(Compare)
@staticmethod
def name(name: str) -> Expression:
return Expression(Expression.NAME, name)
@staticmethod
def compare(compare: Compare) -> Expression:
return Expression(Expression.COMPARE, compare)
@staticmethod
def constant(value: Any) -> Expression:
return Expression(Expression.CONSTANT, value)
def pname() -> Parser[Expression]:
first = any_of(string.ascii_letters + "_")
rest = pipe(
any_of(string.ascii_letters + string.digits + "_"),
many,
opt,
)
def mapper(first: str, rest: Option[Block[str]]) -> str:
with match(rest) as case:
if case(Nothing):
return first
for letters in case(Some[Block[str]]):
return first + "".join(letters)
return case.default(first)
return first.and_then(rest).starmap(mapper).map(Expression.name)
def pexpr() -> Parser[Expression]:
parsers = [
pname(),
]
return pipe(
parsers,
Block[Parser[Expression]].of_seq,
choice,
)
def test_parse_name_expr():
name = pipe(
"test",
pexpr(),
)
assert name.is_ok()
with match(name) as case:
if case(Nothing):
assert False
for expr in case(Ok[Expression, str]):
with match(expr) as case:
for name in case(Expression.NAME):
assert name == "test"
break
else:
assert False
break
else:
assert False
| 22.242647
| 86
| 0.561322
|
from __future__ import annotations
import string
from dataclasses import dataclass
from typing import Any, Tuple
from expression import Error, Nothing, Ok, Option, Some, TaggedUnion, match, pipe, tag
from expression.collections import Block
from expression.extra.parser import (
Parser,
and_then,
any_of,
choice,
many,
opt,
pchar,
pfloat,
pint,
pstring,
)
def test_parse_pchar():
input = "ABC"
parseA: Parser[str] = pchar("A")
result = parseA(input)
assert result.is_ok()
with match(result) as case:
for a in case(Ok[str, str]):
assert a == "A"
if case._:
assert False
def test_parse_pchar_fluent():
input = "ABC"
parseA: Parser[str] = Parser.pchar("A")
result = parseA(input)
assert result.is_ok()
with match(result) as case:
for a in case(Ok[str, str]):
assert a == "A"
if case._:
assert False
def test_parse_a_then_b():
input = "ABC"
parse_a: Parser[str] = pchar("A")
parse_b: Parser[str] = pchar("B")
parseAB = pipe(
parse_a,
and_then(parse_b),
)
result = parseAB(input)
assert result.is_ok()
with match(result) as case:
for (a, b) in case(Ok[Tuple[str, str], str]):
assert (a, b) == ("A", "B")
if case._:
assert False
def test_parse_a_then_b_fluent():
input = "ABC"
parseAB = pchar("A").and_then(pchar("B"))
result = parseAB(input)
assert result.is_ok()
with match(result) as case:
for (a, b) in case(Ok[Tuple[str, str], str]):
assert (a, b) == ("A", "B")
if case._:
assert False
def test_pstring():
parse_abc = pstring("ABC")
ret = parse_abc("ABCDE")
assert ret.is_ok()
with match(ret) as case:
for success in case(Ok[str, str]):
assert success == "ABC"
if case._:
assert False
ret = parse_abc("A|CDE")
assert ret.is_error()
with match(ret) as case:
for error in case(Error[str, str]):
assert error == "Expecting 'B'. Got '|'"
if case._:
assert False
ret = parse_abc("AB|DE")
assert ret.is_error()
with match(ret) as case:
for error in case(Error[str, str]):
assert error == "Expecting 'C'. Got '|'"
if case._:
assert False
def test_int():
ret = pint("123C")
with match(ret) as case:
for success in case(Ok[int, str]):
assert success == 123
if case._:
assert False
def test_int_negative():
ret = pint("-123C")
with match(ret) as case:
for success in case(Ok[int, str]):
assert success == -123
if case._:
assert False
def test_float():
ret = pfloat("123C")
with match(ret) as case:
for success in case(Ok[float, str]):
assert success == 123
if case._:
assert False
def test_float_with_decimal():
ret = pfloat("123.45C")
with match(ret) as case:
for success in case(Ok[float, str]):
assert success == 123.45
if case._:
assert False
def test_negative_float_with_decimal():
ret = pfloat("-123.45C")
with match(ret) as case:
for success in case(Ok[float, str]):
assert success == -123.45
if case._:
assert False
class ComparisonOperator(TaggedUnion):
EQ = tag()
NOT_EQ = tag()
LT = tag()
LT_E = tag()
GT = tag()
GT_E = tag()
IS = tag()
IS_NOT = tag()
IN = tag()
NOT_IN = tag()
@staticmethod
def eq() -> ComparisonOperator:
return ComparisonOperator(ComparisonOperator.EQ)
@staticmethod
def not_eq() -> ComparisonOperator:
return ComparisonOperator(ComparisonOperator.NOT_EQ)
@dataclass
class Compare:
left: Expression
comparators: Block[Expression]
ops: Block[ComparisonOperator]
class BoolOp(TaggedUnion):
AND = tag()
OR = tag()
@staticmethod
def and_() -> BoolOp:
return BoolOp(BoolOp.AND)
@staticmethod
def or_() -> BoolOp:
return BoolOp(BoolOp.OR)
class Expression(TaggedUnion):
CONSTANT = tag(Any)
NAME = tag(str)
BOOL_OP = tag(BoolOp)
COMPARE = tag(Compare)
@staticmethod
def name(name: str) -> Expression:
return Expression(Expression.NAME, name)
@staticmethod
def compare(compare: Compare) -> Expression:
return Expression(Expression.COMPARE, compare)
@staticmethod
def constant(value: Any) -> Expression:
return Expression(Expression.CONSTANT, value)
def pname() -> Parser[Expression]:
first = any_of(string.ascii_letters + "_")
rest = pipe(
any_of(string.ascii_letters + string.digits + "_"),
many,
opt,
)
def mapper(first: str, rest: Option[Block[str]]) -> str:
with match(rest) as case:
if case(Nothing):
return first
for letters in case(Some[Block[str]]):
return first + "".join(letters)
return case.default(first)
return first.and_then(rest).starmap(mapper).map(Expression.name)
def pexpr() -> Parser[Expression]:
parsers = [
pname(),
]
return pipe(
parsers,
Block[Parser[Expression]].of_seq,
choice,
)
def test_parse_name_expr():
name = pipe(
"test",
pexpr(),
)
assert name.is_ok()
with match(name) as case:
if case(Nothing):
assert False
for expr in case(Ok[Expression, str]):
with match(expr) as case:
for name in case(Expression.NAME):
assert name == "test"
break
else:
assert False
break
else:
assert False
| true
| true
|
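A small sketch of how the combinators exercised above compose, assuming the same `expression.extra.parser` API; the payload shape follows the `and_then` tests:

import string
from expression import pipe
from expression.extra.parser import and_then, any_of

digit = any_of(string.digits)              # parses one character from "0123456789"
two_digits = pipe(digit, and_then(digit))  # same composition as test_parse_a_then_b

result = two_digits("42!")
assert result.is_ok()                      # succeeds with the pair ("4", "2")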
f709b65418dc777da8e49db95809cebc85771242
| 400
|
py
|
Python
|
2017-08-23/exemplo_servidor_django/meuprojeto/petshop/migrations/0003_animal_dono.py
|
dunossauro/bora_falar_de_python
|
7fe92d6257a2cad1c570255bc9be069f6c8e38d3
|
[
"Apache-2.0"
] | 6
|
2017-09-07T20:24:48.000Z
|
2018-09-12T16:16:32.000Z
|
2017-08-23/exemplo_servidor_django/meuprojeto/petshop/migrations/0003_animal_dono.py
|
dunossauro/bora_falar_de_python
|
7fe92d6257a2cad1c570255bc9be069f6c8e38d3
|
[
"Apache-2.0"
] | 1
|
2017-12-22T01:47:12.000Z
|
2017-12-24T13:59:13.000Z
|
2017-08-23/exemplo_servidor_django/meuprojeto/petshop/migrations/0003_animal_dono.py
|
dunossauro/bora_falar_de_python
|
7fe92d6257a2cad1c570255bc9be069f6c8e38d3
|
[
"Apache-2.0"
] | 6
|
2017-10-20T01:25:01.000Z
|
2018-09-11T22:54:01.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('petshop', '0002_dono'),
]
operations = [
migrations.AddField(
model_name='animal',
name='dono',
field=models.ForeignKey(to='petshop.Dono', default=1),
),
]
| 20
| 66
| 0.5875
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('petshop', '0002_dono'),
]
operations = [
migrations.AddField(
model_name='animal',
name='dono',
field=models.ForeignKey(to='petshop.Dono', default=1),
),
]
| true
| true
|
f709b6ad81d25a0c074deaa1308cf04158654f02
| 1,373
|
py
|
Python
|
tests/book/ch05/classify_name.py
|
TITC/pyhanlp
|
ad062f358805da5bf97f78d9f37f441c06ae4d19
|
[
"Apache-2.0"
] | null | null | null |
tests/book/ch05/classify_name.py
|
TITC/pyhanlp
|
ad062f358805da5bf97f78d9f37f441c06ae4d19
|
[
"Apache-2.0"
] | null | null | null |
tests/book/ch05/classify_name.py
|
TITC/pyhanlp
|
ad062f358805da5bf97f78d9f37f441c06ae4d19
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2018-06-21 19:46
# "Introduction to Natural Language Processing" 5.3: perceptron-based name gender classification
# Companion book: http://nlp.hankcs.com/book.php
# Discussion and Q&A: https://bbs.hankcs.com/
import sys, os  # adjust sys.path priority to prefer the local environment
sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from pyhanlp import *
from tests.test_utility import ensure_data
PerceptronNameGenderClassifier = JClass('com.hankcs.hanlp.model.perceptron.PerceptronNameGenderClassifier')
cnname = ensure_data('cnname', 'http://file.hankcs.com/corpus/cnname.zip')
TRAINING_SET = os.path.join(cnname, 'train.csv')
TESTING_SET = os.path.join(cnname, 'test.csv')
MODEL = cnname + ".bin"
def run_classifier(averaged_perceptron):
    print('=====%s=====' % ('averaged perceptron' if averaged_perceptron else 'naive perceptron'))
    classifier = PerceptronNameGenderClassifier()
    print('Training set accuracy:', classifier.train(TRAINING_SET, 10, averaged_perceptron))
    model = classifier.getModel()
    print('Number of features:', len(model.parameter))
    # model.save(MODEL, model.featureMap.entrySet(), 0, True)
    # classifier = PerceptronNameGenderClassifier(MODEL)
    for name in "赵建军", "沈雁冰", "陆雪琪", "李冰冰":
        print('%s=%s' % (name, classifier.predict(name)))
    print('Test set accuracy:', classifier.evaluate(TESTING_SET))
if __name__ == '__main__':
run_classifier(False)
run_classifier(True)
| 38.138889
| 112
| 0.718864
|
import sys, os
sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
from pyhanlp import *
from tests.test_utility import ensure_data
PerceptronNameGenderClassifier = JClass('com.hankcs.hanlp.model.perceptron.PerceptronNameGenderClassifier')
cnname = ensure_data('cnname', 'http://file.hankcs.com/corpus/cnname.zip')
TRAINING_SET = os.path.join(cnname, 'train.csv')
TESTING_SET = os.path.join(cnname, 'test.csv')
MODEL = cnname + ".bin"
def run_classifier(averaged_perceptron):
    print('=====%s=====' % ('averaged perceptron' if averaged_perceptron else 'naive perceptron'))
    classifier = PerceptronNameGenderClassifier()
    print('Training set accuracy:', classifier.train(TRAINING_SET, 10, averaged_perceptron))
    model = classifier.getModel()
    print('Number of features:', len(model.parameter))
    for name in "赵建军", "沈雁冰", "陆雪琪", "李冰冰":
        print('%s=%s' % (name, classifier.predict(name)))
    print('Test set accuracy:', classifier.evaluate(TESTING_SET))
if __name__ == '__main__':
run_classifier(False)
run_classifier(True)
| true
| true
|
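A toy illustration, independent of HanLP's implementation, of what the `averaged_perceptron` flag above changes: the averaged variant returns the mean of the weight vector over all updates instead of its final value, which usually generalizes better.

# 1-D toy data with labels in {-1, +1}; not HanLP's implementation.
data = [(1.0, 1), (-0.5, -1), (2.0, 1), (-1.5, -1)]
w, history = 0.0, []
for epoch in range(10):
    for x, y in data:
        if y * (w * x) <= 0:   # misclassified (or on the boundary)
            w += y * x         # naive perceptron update
        history.append(w)
naive_w = w                               # what the naive variant returns
averaged_w = sum(history) / len(history)  # what the averaged variant returns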
f709b7b4e48264871c1d14816252623fa84ae826
| 579
|
py
|
Python
|
pepdb/core/migrations/0143_auto_20180403_1255.py
|
dchaplinsky/pep.org.ua
|
8633a65fb657d7f04dbdb12eb8ae705fa6be67e3
|
[
"MIT"
] | 7
|
2015-12-21T03:52:46.000Z
|
2020-07-24T19:17:23.000Z
|
pepdb/core/migrations/0143_auto_20180403_1255.py
|
dchaplinsky/pep.org.ua
|
8633a65fb657d7f04dbdb12eb8ae705fa6be67e3
|
[
"MIT"
] | 12
|
2016-03-05T18:11:05.000Z
|
2021-06-17T20:20:03.000Z
|
pepdb/core/migrations/0143_auto_20180403_1255.py
|
dchaplinsky/pep.org.ua
|
8633a65fb657d7f04dbdb12eb8ae705fa6be67e3
|
[
"MIT"
] | 4
|
2016-07-17T20:19:38.000Z
|
2021-03-23T12:47:20.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-04-03 09:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0142_auto_20180301_2143'),
]
operations = [
migrations.AlterField(
model_name='declaration',
name='person',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='declarations', to='core.Person'),
),
]
| 26.318182
| 142
| 0.661485
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0142_auto_20180301_2143'),
]
operations = [
migrations.AlterField(
model_name='declaration',
name='person',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='declarations', to='core.Person'),
),
]
| true
| true
|
f709b7e141af88e2c6bddfcf36ef2b4d97d0b978
| 4,567
|
py
|
Python
|
app/hrcm/classes/candidate.py
|
bastienbot/hr-challenges-manager
|
1fe8e4fa34f6866a724c2461bc17cc442fe50a4c
|
[
"MIT"
] | null | null | null |
app/hrcm/classes/candidate.py
|
bastienbot/hr-challenges-manager
|
1fe8e4fa34f6866a724c2461bc17cc442fe50a4c
|
[
"MIT"
] | null | null | null |
app/hrcm/classes/candidate.py
|
bastienbot/hr-challenges-manager
|
1fe8e4fa34f6866a724c2461bc17cc442fe50a4c
|
[
"MIT"
] | null | null | null |
import os
import json
from datetime import datetime
from .challenge import Challenge
from hrcm.services.db import DBConnector
from hrcm.errors.bad_request import BadRequest
from hrcm.helpers import format_username, format_message
class Candidate:
"""
@desc We prepare all the instance parameters along side the db instance
@params informations: a list of cli parameters
@returns
"""
def __init__(self, informations):
self._id = informations.get("_id", None)
self.firstname = informations.get("firstname")
self.lastname = informations.get("lastname")
self.email = informations.get("email")
self.job = informations.get("job")
self.phone = informations.get("phone", str())
self.username = format_username(
informations.get("firstname"),
informations.get("lastname")
)
self.messages = [format_message(message) for message in informations.get("messages", list())]
self.archived = informations.get("archived", False)
self.challenge = None
self.db = DBConnector()
def __repr__(self):
return json.dumps(self.get_profile())
def get_messages(self):
self.messages = self.db.get_messages(self)
return self
"""
@desc This methods create a new candidate and adds its id to self when
the DB requires an id, we first want to check if the user
doesn't already exist, only if the id is not yet set
@params self: instance of Candidate
@returns instance of Candidate
"""
def create(self):
profile = self.db.get_profile_by_email(self.email)
if self._id is not None or profile is not None:
raise BadRequest("This email already exists.")
self.db.create_candidate(self)
print("User created successfuly")
return self
def update(self):
self.db.update_candidate(self)
def delete(self):
self.db.delete_candidate(self)
print("User deleted successfuly")
return self
def archive(self):
self.archived = True
self.db.save_profile(self)
print("Candidate archived")
return self
def create_send_challenge(self):
self.challenge = Challenge()
self.challenge.send_challenge(self)
self.messages = self.challenge.get_sent_messages()
return self
def preview_challenge(self):
self.challenge = Challenge()
return self.challenge.preview_challenge(self)
def evaluate_candidate(self, evaluated_criterias):
self.challenge = Challenge()
return self.challenge.evaluate_challenge(self, evaluated_criterias)
def get_challenge_criterias(self):
self.challenge = Challenge()
return self.challenge.get_evalution_criterias(self)
def get_profile(self, show_id=True):
profile = {
"firstname": self.firstname,
"lastname": self.lastname,
"email": self.email,
"job": self.job,
"phone": self.phone,
"messages": self.messages,
"username": self.username,
"archived": self.archived
}
if show_id is True:
profile["_id"] = str(self._id)
return profile
"""
@desc Get the candidate profile, create a new instance of Candidate it the candidate
exists, else create a new one with the profile informations
@params profile: dict
@returns instance of Candidate
"""
@classmethod
def load_or_new(cls, profile):
loaded_candidate = cls.load_candidate(profile.get("email"))
if loaded_candidate is not None:
return loaded_candidate
else:
return cls(profile)
"""
@desc Get the candidate profile and returns an instance of Candidate
@params email: str
@returns instance of Candidate, or None
"""
@classmethod
def load_candidate(cls, email):
db = DBConnector()
try:
profile = db.get_profile_by_email(email)
return cls(profile)
        except Exception:
return None
"""
@desc Get all the candidates and return an list of Candidate instances
The archive option tells if the method returns the (non-)archived candidates
@params: archive: bool
@returns [instance of Candidate]
"""
@classmethod
def load_candidates(cls, archive=False):
db = DBConnector()
        return [cls(candidate) for candidate in db.get_profiles(archived=archive)]
| 31.496552
| 101
| 0.640245
|
import os
import json
from datetime import datetime
from .challenge import Challenge
from hrcm.services.db import DBConnector
from hrcm.errors.bad_request import BadRequest
from hrcm.helpers import format_username, format_message
class Candidate:
def __init__(self, informations):
self._id = informations.get("_id", None)
self.firstname = informations.get("firstname")
self.lastname = informations.get("lastname")
self.email = informations.get("email")
self.job = informations.get("job")
self.phone = informations.get("phone", str())
self.username = format_username(
informations.get("firstname"),
informations.get("lastname")
)
self.messages = [format_message(message) for message in informations.get("messages", list())]
self.archived = informations.get("archived", False)
self.challenge = None
self.db = DBConnector()
def __repr__(self):
return json.dumps(self.get_profile())
def get_messages(self):
self.messages = self.db.get_messages(self)
return self
def create(self):
profile = self.db.get_profile_by_email(self.email)
if self._id is not None or profile is not None:
raise BadRequest("This email already exists.")
self.db.create_candidate(self)
print("User created successfuly")
return self
def update(self):
self.db.update_candidate(self)
def delete(self):
self.db.delete_candidate(self)
print("User deleted successfuly")
return self
def archive(self):
self.archived = True
self.db.save_profile(self)
print("Candidate archived")
return self
def create_send_challenge(self):
self.challenge = Challenge()
self.challenge.send_challenge(self)
self.messages = self.challenge.get_sent_messages()
return self
def preview_challenge(self):
self.challenge = Challenge()
return self.challenge.preview_challenge(self)
def evaluate_candidate(self, evaluated_criterias):
self.challenge = Challenge()
return self.challenge.evaluate_challenge(self, evaluated_criterias)
def get_challenge_criterias(self):
self.challenge = Challenge()
return self.challenge.get_evalution_criterias(self)
def get_profile(self, show_id=True):
profile = {
"firstname": self.firstname,
"lastname": self.lastname,
"email": self.email,
"job": self.job,
"phone": self.phone,
"messages": self.messages,
"username": self.username,
"archived": self.archived
}
if show_id is True:
profile["_id"] = str(self._id)
return profile
@classmethod
def load_or_new(cls, profile):
loaded_candidate = cls.load_candidate(profile.get("email"))
if loaded_candidate is not None:
return loaded_candidate
else:
return cls(profile)
@classmethod
def load_candidate(cls, email):
db = DBConnector()
try:
profile = db.get_profile_by_email(email)
return cls(profile)
        except Exception:
return None
@classmethod
def load_candidates(cls, archive=False):
db = DBConnector()
        return [cls(candidate) for candidate in db.get_profiles(archived=archive)]
| true
| true
|
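A hypothetical driver for the `Candidate` class above, assuming a configured `DBConnector` behind it; all profile values are made up:

profile = {
    'firstname': 'Ada',
    'lastname': 'Lovelace',
    'email': 'ada@example.com',
    'job': 'Engineer',
}
candidate = Candidate.load_or_new(profile)
if candidate._id is None:           # not persisted yet
    candidate.create()
candidate.create_send_challenge()   # builds a Challenge and records the messages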
f709b80e5f1c7707c4509530ecca4a245f8ec708
| 3,847
|
py
|
Python
|
tests/PySys/environments/environment_c8y.py
|
PradeepKiruvale/localworkflow
|
b5f3c97c835cb36ae87f14b8697bedcca5d22619
|
[
"Apache-2.0"
] | 6
|
2021-09-14T10:14:15.000Z
|
2021-11-20T13:42:26.000Z
|
tests/PySys/environments/environment_c8y.py
|
PradeepKiruvale/localworkflow
|
b5f3c97c835cb36ae87f14b8697bedcca5d22619
|
[
"Apache-2.0"
] | null | null | null |
tests/PySys/environments/environment_c8y.py
|
PradeepKiruvale/localworkflow
|
b5f3c97c835cb36ae87f14b8697bedcca5d22619
|
[
"Apache-2.0"
] | null | null | null |
import base64
import json
import re
import requests
import psutil
from pysys.basetest import BaseTest
from pysys.constants import FAILED
from cumulocity import Cumulocity
from environment_tedge import TedgeEnvironment
"""
Environment to manage automated connects and disconnects to c8y
"""
class EnvironmentC8y(TedgeEnvironment):
"""
Pysys Environment to manage automated connect and disconnect to c8y
Tests that derive from class EnvironmentC8y use automated connect and
disconnect to Cumulocity. Additional checks are made for the status of
service mosquitto and service tedge-mapper.
"""
cumulocity: Cumulocity
def setup(self):
self.log.debug("EnvironmentC8y Setup")
super().setup()
if self.project.c8yurl == "":
self.abort(
FAILED,
"Cumulocity tenant URL is not set. Set with the env variable C8YURL",
)
if self.project.tenant == "":
self.abort(
FAILED,
"Cumulocity tenant ID is not set. Set with the env variable C8YTENANT",
)
if self.project.c8yusername == "":
self.abort(
FAILED,
"Cumulocity tenant username is not set. Set with the env variable C8YUSERNAME",
)
if self.project.c8ypass == "":
self.abort(
FAILED,
"Cumulocity tenant password is not set. Set with the env variable C8YPASS",
)
if self.project.deviceid == "":
self.abort(
FAILED, "Device ID is not set. Set with the env variable C8YDEVICEID"
)
self.log.info("EnvironmentC8y Setup")
self.addCleanupFunction(self.myenvcleanup)
# Check if tedge-mapper is in disabled state
serv_mapper = self.startProcess(
command=self.systemctl,
arguments=["status", self.tedge_mapper_c8y],
stdouterr="serv_mapper1",
expectedExitStatus="==3", # 3: disabled
)
# Connect the bridge
self.tedge_connect_c8y()
# Test the bridge connection
self.tedge_connect_c8y_test()
# Check if mosquitto is running well
serv_mosq = self.startProcess(
command=self.systemctl,
arguments=["status", "mosquitto"],
stdouterr="serv_mosq2",
)
# Check if tedge-mapper is active again
serv_mapper = self.startProcess(
command=self.systemctl,
arguments=["status", self.tedge_mapper_c8y],
stdouterr="serv_mapper3",
)
self.cumulocity = Cumulocity(
self.project.c8yurl,
self.project.tenant,
self.project.c8yusername,
self.project.c8ypass,
self.log,
)
def execute(self):
self.log.debug("EnvironmentC8y Execute")
def validate(self):
self.log.debug("EnvironmentC8y Validate")
# Check if mosquitto is running well
serv_mosq = self.startProcess(
command=self.systemctl,
arguments=["status", "mosquitto"],
stdouterr="serv_mosq",
)
# Check if tedge-mapper is active
serv_mapper = self.startProcess(
command=self.systemctl,
arguments=["status", self.tedge_mapper_c8y],
stdouterr="serv_mapper4",
)
def myenvcleanup(self):
self.log.debug("EnvironmentC8y Cleanup")
# Disconnect Bridge
self.tedge_disconnect_c8y()
# Check if tedge-mapper is disabled
serv_mosq = self.startProcess(
command=self.systemctl,
arguments=["status", self.tedge_mapper_c8y],
stdouterr="serv_mapper5",
expectedExitStatus="==3",
)
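# --- Illustrative usage (sketch, not part of the original file) -------------
# A PySys test that wants the automated c8y connect/disconnect simply derives
# from EnvironmentC8y; setup() and the registered cleanup handle the bridge.
# The test class name follows the usual PySys run.py convention and is
# otherwise hypothetical:
#
#   class PySysTest(EnvironmentC8y):
#       def execute(self):
#           super().execute()
#           # device interaction via self.cumulocity goes here
#       def validate(self):
#           super().validate()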
| 29.592308
| 95
| 0.593709
|
import base64
import json
import re
import requests
import psutil
from pysys.basetest import BaseTest
from pysys.constants import FAILED
from cumulocity import Cumulocity
from environment_tedge import TedgeEnvironment
class EnvironmentC8y(TedgeEnvironment):
cumulocity: Cumulocity
def setup(self):
self.log.debug("EnvironmentC8y Setup")
super().setup()
if self.project.c8yurl == "":
self.abort(
FAILED,
"Cumulocity tenant URL is not set. Set with the env variable C8YURL",
)
if self.project.tenant == "":
self.abort(
FAILED,
"Cumulocity tenant ID is not set. Set with the env variable C8YTENANT",
)
if self.project.c8yusername == "":
self.abort(
FAILED,
"Cumulocity tenant username is not set. Set with the env variable C8YUSERNAME",
)
if self.project.c8ypass == "":
self.abort(
FAILED,
"Cumulocity tenant password is not set. Set with the env variable C8YPASS",
)
if self.project.deviceid == "":
self.abort(
FAILED, "Device ID is not set. Set with the env variable C8YDEVICEID"
)
self.log.info("EnvironmentC8y Setup")
self.addCleanupFunction(self.myenvcleanup)
serv_mapper = self.startProcess(
command=self.systemctl,
arguments=["status", self.tedge_mapper_c8y],
stdouterr="serv_mapper1",
expectedExitStatus="==3",
)
self.tedge_connect_c8y()
self.tedge_connect_c8y_test()
serv_mosq = self.startProcess(
command=self.systemctl,
arguments=["status", "mosquitto"],
stdouterr="serv_mosq2",
)
serv_mapper = self.startProcess(
command=self.systemctl,
arguments=["status", self.tedge_mapper_c8y],
stdouterr="serv_mapper3",
)
self.cumulocity = Cumulocity(
self.project.c8yurl,
self.project.tenant,
self.project.c8yusername,
self.project.c8ypass,
self.log,
)
def execute(self):
self.log.debug("EnvironmentC8y Execute")
def validate(self):
self.log.debug("EnvironmentC8y Validate")
serv_mosq = self.startProcess(
command=self.systemctl,
arguments=["status", "mosquitto"],
stdouterr="serv_mosq",
)
serv_mapper = self.startProcess(
command=self.systemctl,
arguments=["status", self.tedge_mapper_c8y],
stdouterr="serv_mapper4",
)
def myenvcleanup(self):
self.log.debug("EnvironmentC8y Cleanup")
self.tedge_disconnect_c8y()
serv_mosq = self.startProcess(
command=self.systemctl,
arguments=["status", self.tedge_mapper_c8y],
stdouterr="serv_mapper5",
expectedExitStatus="==3",
)
| true
| true
|
f709b828175fdc3722692be33dce392102dcbe34
| 416
|
py
|
Python
|
examples/compile-only.p4app/main.py
|
serhatarslan-hub/p4app
|
6ac7e769ee2e73382c32c96dfff8729e46b51431
|
[
"Apache-2.0"
] | null | null | null |
examples/compile-only.p4app/main.py
|
serhatarslan-hub/p4app
|
6ac7e769ee2e73382c32c96dfff8729e46b51431
|
[
"Apache-2.0"
] | null | null | null |
examples/compile-only.p4app/main.py
|
serhatarslan-hub/p4app
|
6ac7e769ee2e73382c32c96dfff8729e46b51431
|
[
"Apache-2.0"
] | 2
|
2021-05-19T16:36:42.000Z
|
2021-11-01T21:35:51.000Z
|
from p4app import P4Program
import json
# Compile a P4_16 program:
prog16 = P4Program('wire.p4')
prog16.compile()
# Inspect the compiled JSON file
with open(prog16.json(), 'r') as f:
bmv2_json = json.load(f)
#print bmv2_json['actions']
# Compile a P4_14 program:
prog14 = P4Program('wire14.p4', version=14)
prog14.compile()
with open(prog14.json(), 'r') as f:
bmv2_json = json.load(f)
print("OK")
| 18.086957
| 43
| 0.689904
|
from p4app import P4Program
import json
prog16 = P4Program('wire.p4')
prog16.compile()
with open(prog16.json(), 'r') as f:
bmv2_json = json.load(f)
prog14 = P4Program('wire14.p4', version=14)
prog14.compile()
with open(prog14.json(), 'r') as f:
bmv2_json = json.load(f)
print("OK")
| true
| true
|
f709b82b33a33885a4357d82d763d405a1fc0a14
| 4,267
|
py
|
Python
|
pyretrace/reader.py
|
probablyodd/pyretrace
|
a65f456597514e8a845ff4ad50deeca1acc13245
|
[
"BSD-2-Clause-FreeBSD"
] | 20
|
2015-10-27T08:17:32.000Z
|
2022-03-13T09:43:30.000Z
|
pyretrace/reader.py
|
probablyodd/pyretrace
|
a65f456597514e8a845ff4ad50deeca1acc13245
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
pyretrace/reader.py
|
probablyodd/pyretrace
|
a65f456597514e8a845ff4ad50deeca1acc13245
|
[
"BSD-2-Clause-FreeBSD"
] | 8
|
2015-12-28T18:52:09.000Z
|
2021-01-06T10:13:35.000Z
|
from __future__ import print_function
import sys
class MappingReader():
def __init__(self, mapping_file):
self.mapping_file = mapping_file
def pump(self, mapping_processor):
reader = open(self.mapping_file, 'r')
try:
class_name = None
# Read the subsequent class mappings and class member mappings.
while True:
line = reader.readline()
if not line:
break
line = line.strip()
# The distinction between a class mapping and a class
# member mapping is the initial whitespace.
if line.endswith(':'):
# Process the class mapping and remember the class's
# old name.
class_name = self.process_class_mapping(line, mapping_processor)
elif class_name is not None:
# Process the class member mapping, in the context of the
# current old class name.
self.process_class_member_mapping(class_name, line, mapping_processor)
except Exception as ex:
print('Can\'t process mapping file (%s)' % ex)
sys.exit(1)
finally:
reader.close()
@staticmethod
def process_class_mapping(line, mapping_processor):
# See if we can parse "___ -> ___:", containing the original
# class name and the new class name.
arrow_index = line.find('->')
if arrow_index < 0:
return None
colon_index = line.find(':', arrow_index + 2)
if colon_index < 0:
return None
# Extract the elements.
class_name = line[0: arrow_index].strip()
new_class_name = line[arrow_index + 2: colon_index].strip()
# Process this class name mapping.
interested = mapping_processor.process_class_mapping(class_name, new_class_name)
if interested:
return class_name
else:
return None
@staticmethod
def process_class_member_mapping(class_name, line, mapping_processor):
# See if we can parse "___:___:___ ___(___) -> ___",
# containing the optional line numbers, the return type, the original
# field/method name, optional arguments, and the new field/method name.
colon_index1 = line.find(':')
colon_index2 = -1 if colon_index1 < 0 else line.find(':', colon_index1 + 1)
space_index = line.find(' ', colon_index2 + 2)
argument_index1 = line.find('(', space_index + 1)
argument_index2 = -1 if argument_index1 < 0 else line.find(')', argument_index1 + 1)
arrow_index = line.find('->', max(space_index, argument_index2) + 1)
if space_index < 0 or arrow_index < 0:
return
# Extract the elements.
type = line[colon_index2 + 1: space_index].strip()
name = line[space_index + 1: argument_index1 if argument_index1 >= 0 else arrow_index].strip()
new_name = line[arrow_index + 2: len(line)].strip()
# Process this class member mapping.
if len(type) > 0 and \
len(name) > 0 and \
len(new_name) > 0:
# Is it a field or a method?
if argument_index2 < 0:
mapping_processor.process_field_mapping(class_name, type, name, new_name)
else:
first_line_number = 0
last_line_number = 0
if colon_index2 > 0:
first_line_number = int(line[0: colon_index1].strip())
last_line_number = int(line[colon_index1 + 1: colon_index2].strip())
arguments = line[argument_index1 + 1: argument_index2].strip()
mapping_processor.process_method_mapping(class_name,
first_line_number,
last_line_number,
type,
name,
arguments,
new_name)
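# --- Illustrative usage (sketch, not part of the original file) -------------
# pump() expects a processor exposing the three callbacks invoked above; the
# input is a ProGuard-style mapping file, e.g.:
#   com.example.Foo -> a.b.c:
#       int count -> a
#       13:37:void doWork(int) -> b
class PrintingProcessor(object):
    def process_class_mapping(self, class_name, new_class_name):
        print(class_name, '->', new_class_name)
        return True  # truthy: keep receiving this class's member mappings

    def process_field_mapping(self, class_name, type, name, new_name):
        print('  field', type, name, '->', new_name)

    def process_method_mapping(self, class_name, first_line_number,
                               last_line_number, type, name, arguments,
                               new_name):
        print('  method', type, name + '(' + arguments + ') ->', new_name)

# MappingReader('mapping.txt').pump(PrintingProcessor())  # path hypothetical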
| 37.429825
| 102
| 0.542536
|
from __future__ import print_function
import sys
class MappingReader():
def __init__(self, mapping_file):
self.mapping_file = mapping_file
def pump(self, mapping_processor):
reader = open(self.mapping_file, 'r')
try:
class_name = None
while True:
line = reader.readline()
if not line:
break
line = line.strip()
if line.endswith(':'):
# old name.
class_name = self.process_class_mapping(line, mapping_processor)
elif class_name is not None:
# Process the class member mapping, in the context of the
# current old class name.
self.process_class_member_mapping(class_name, line, mapping_processor)
except Exception as ex:
print('Can\'t process mapping file (%s)' % ex)
sys.exit(1)
finally:
reader.close()
@staticmethod
def process_class_mapping(line, mapping_processor):
arrow_index = line.find('->')
if arrow_index < 0:
return None
colon_index = line.find(':', arrow_index + 2)
if colon_index < 0:
return None
class_name = line[0: arrow_index].strip()
new_class_name = line[arrow_index + 2: colon_index].strip()
interested = mapping_processor.process_class_mapping(class_name, new_class_name)
if interested:
return class_name
else:
return None
@staticmethod
def process_class_member_mapping(class_name, line, mapping_processor):
colon_index1 = line.find(':')
colon_index2 = -1 if colon_index1 < 0 else line.find(':', colon_index1 + 1)
space_index = line.find(' ', colon_index2 + 2)
argument_index1 = line.find('(', space_index + 1)
argument_index2 = -1 if argument_index1 < 0 else line.find(')', argument_index1 + 1)
arrow_index = line.find('->', max(space_index, argument_index2) + 1)
if space_index < 0 or arrow_index < 0:
return
type = line[colon_index2 + 1: space_index].strip()
name = line[space_index + 1: argument_index1 if argument_index1 >= 0 else arrow_index].strip()
new_name = line[arrow_index + 2: len(line)].strip()
if len(type) > 0 and \
len(name) > 0 and \
len(new_name) > 0:
if argument_index2 < 0:
mapping_processor.process_field_mapping(class_name, type, name, new_name)
else:
first_line_number = 0
last_line_number = 0
if colon_index2 > 0:
first_line_number = int(line[0: colon_index1].strip())
last_line_number = int(line[colon_index1 + 1: colon_index2].strip())
arguments = line[argument_index1 + 1: argument_index2].strip()
mapping_processor.process_method_mapping(class_name,
first_line_number,
last_line_number,
type,
name,
arguments,
new_name)
| true
| true
|
f709b83b7dd0d4dc8f2ed9be76428c1683165b7d
| 1,895
|
py
|
Python
|
luafun/game/config.py
|
Delaunay/LuaFun
|
bd0efd8fc2b064d6bf58993e59a6ad4ac6713b39
|
[
"BSD-3-Clause"
] | 1
|
2021-02-06T06:42:29.000Z
|
2021-02-06T06:42:29.000Z
|
luafun/game/config.py
|
Delaunay/LuaFun
|
bd0efd8fc2b064d6bf58993e59a6ad4ac6713b39
|
[
"BSD-3-Clause"
] | 6
|
2021-04-08T21:46:06.000Z
|
2021-05-09T01:40:04.000Z
|
luafun/game/config.py
|
Delaunay/LuaFun
|
bd0efd8fc2b064d6bf58993e59a6ad4ac6713b39
|
[
"BSD-3-Clause"
] | null | null | null |
import os
EXECUTABLE_PATH_WINDOWS = '/game/bin/win64/dota2.exe'
EXECUTABLE_PATH_LINUX = '/game/dota.sh'
# the direct binary path below shadows the dota.sh launcher path above
EXECUTABLE_PATH_LINUX = '/game/bin/linuxsteamrt64/dota2'
BOT_PATH = '/game/dota/scripts/vscripts/bots/'
CONSOLE_LOG = '/game/dota/scripts/vscripts/bots/console.log'
SEND_MSG = '/game/dota/scripts/vscripts/bots/IPC_recv.lua'
CONFIG_MSG = '/game/dota/scripts/vscripts/bots/IPC_config.lua'
LINUX_APP_PATH = "~/Steam/steamapps/common/dota 2 beta"
OSX_APP_PATH = "~/Library/Application Support/Steam/SteamApps/common/dota 2 beta"
WINDOWS_APP_PATH = "C:/Program Files (x86)/Steam/steamapps/common/dota 2 beta"
# <steam path>/ubuntu12_32/steam-runtime/run.sh
class DotaPaths:
"""Class to hold system specific configuration"""
def __init__(self, path=None):
if path is None:
path = self.guess()
self.path = path
def guess(self):
from sys import platform
if platform == "linux" or platform == "linux2":
return os.path.expanduser(LINUX_APP_PATH)
elif platform == "darwin":
return os.path.expanduser(OSX_APP_PATH)
return WINDOWS_APP_PATH
@property
def executable_path(self):
from sys import platform
if platform == "linux" or platform == "linux2":
return self.path + '/' + EXECUTABLE_PATH_LINUX
return self.path + '/' + EXECUTABLE_PATH_WINDOWS
@property
def ipc_recv_handle(self):
return self.path + '/' + CONSOLE_LOG
@property
def console_log(self):
return self.ipc_recv_handle
@property
def ipc_send_handle(self):
return self.path + '/' + SEND_MSG
@property
def ipc_config_handle(self):
return self.path + '/' + CONFIG_MSG
def bot_file(self, filename):
"""Return a file path that is located in the bot folder"""
return self.path + '/' + BOT_PATH + filename
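# --- Illustrative usage (sketch, not part of the original file) -------------
# The guessed install path is only meaningful on a machine with Dota 2
# installed; the bot filename below is hypothetical.
if __name__ == '__main__':
    paths = DotaPaths()            # or DotaPaths('/custom/install/path')
    print(paths.executable_path)
    print(paths.ipc_send_handle)
    print(paths.bot_file('bot_generic.lua'))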
| 28.712121
| 81
| 0.667018
|
import os
EXECUTABLE_PATH_WINDOWS = '/game/bin/win64/dota2.exe'
EXECUTABLE_PATH_LINUX = '/game/dota.sh'
EXECUTABLE_PATH_LINUX = '/game/bin/linuxsteamrt64/dota2'
BOT_PATH = '/game/dota/scripts/vscripts/bots/'
CONSOLE_LOG = '/game/dota/scripts/vscripts/bots/console.log'
SEND_MSG = '/game/dota/scripts/vscripts/bots/IPC_recv.lua'
CONFIG_MSG = '/game/dota/scripts/vscripts/bots/IPC_config.lua'
LINUX_APP_PATH = "~/Steam/steamapps/common/dota 2 beta"
OSX_APP_PATH = "~/Library/Application Support/Steam/SteamApps/common/dota 2 beta"
WINDOWS_APP_PATH = "C:/Program Files (x86)/Steam/steamapps/common/dota 2 beta"
class DotaPaths:
def __init__(self, path=None):
if path is None:
path = self.guess()
self.path = path
def guess(self):
from sys import platform
if platform == "linux" or platform == "linux2":
return os.path.expanduser(LINUX_APP_PATH)
elif platform == "darwin":
return os.path.expanduser(OSX_APP_PATH)
return WINDOWS_APP_PATH
@property
def executable_path(self):
from sys import platform
if platform == "linux" or platform == "linux2":
return self.path + '/' + EXECUTABLE_PATH_LINUX
return self.path + '/' + EXECUTABLE_PATH_WINDOWS
@property
def ipc_recv_handle(self):
return self.path + '/' + CONSOLE_LOG
@property
def console_log(self):
return self.ipc_recv_handle
@property
def ipc_send_handle(self):
return self.path + '/' + SEND_MSG
@property
def ipc_config_handle(self):
return self.path + '/' + CONFIG_MSG
def bot_file(self, filename):
return self.path + '/' + BOT_PATH + filename
| true
| true
|
f709b94b5dc3cc9000a3be6044866aa2ad321e0c
| 1,671
|
py
|
Python
|
emet/hypothesis/generate_dictionary.py
|
stephanefschwarz/EMET
|
92ab8b0a53bbdfe5618353f0055eba98ae93f53f
|
[
"MIT"
] | 3
|
2020-05-19T19:45:06.000Z
|
2021-03-21T03:59:19.000Z
|
emet/hypothesis/generate_dictionary.py
|
stephanefschwarz/EMET
|
92ab8b0a53bbdfe5618353f0055eba98ae93f53f
|
[
"MIT"
] | null | null | null |
emet/hypothesis/generate_dictionary.py
|
stephanefschwarz/EMET
|
92ab8b0a53bbdfe5618353f0055eba98ae93f53f
|
[
"MIT"
] | 2
|
2021-03-21T04:36:58.000Z
|
2022-01-31T07:29:49.000Z
|
import sys
import pandas as pd
import requests
import nltk
nltk.download('stopwords')
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
# --- open dataset --- #
data = pd.read_csv('./dataset/translated_twitter_posts.csv')
documents = data['translated_posts']
# --- create an instance of tokenizer --- #
premises = []
tokenizer = RegexpTokenizer(r'\w+')
progress = 0
total_posts = documents.shape[0]
for document in documents:
sentence = ''
tokens = tokenizer.tokenize(document)
for token in tokens:
        if token not in stopwords.words('english'):
try:
request = requests.get("http://www.urbandictionary.com/define.php?term={}".format(token))
                extract_meaning = BeautifulSoup(request.content, 'html.parser')
                meaning = extract_meaning.find("div", attrs={"class": "meaning"})
                if meaning is not None:
meaning = meaning.text
sentence = sentence + meaning + ' '
else:
sentence = sentence + token + ' '
except Exception as e:
print('Exception at token ', token, '\n', e)
else:
sentence = sentence + token + ' '
premises.append(sentence)
progress = progress + 1
percentage = round((progress / total_posts) * 100, 2)
output_print = "{}% | {}/{}".format(percentage, progress, total_posts)
# Poor way to show a progress bar :|
sys.stdout.write("\r {:<70}".format(output_print))
sys.stdout.flush()
data['premises'] = premises
data.to_csv('./dataset/premises_twitter_posts.csv')
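# Note (sketch, not part of the original script): stopwords.words('english')
# is re-evaluated for every token in the loop above; hoisting it once into a
# set is a cheap speedup:
#   english_stopwords = set(stopwords.words('english'))
#   ...
#   if token not in english_stopwords: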
| 28.810345
| 105
| 0.617594
|
import sys
import pandas as pd
import requests
import nltk
nltk.download('stopwords')
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
data = pd.read_csv('./dataset/translated_twitter_posts.csv')
documents = data['translated_posts']
premises = []
tokenizer = RegexpTokenizer(r'\w+')
progress = 0
total_posts = documents.shape[0]
for document in documents:
sentence = ''
tokens = tokenizer.tokenize(document)
for token in tokens:
        if token not in stopwords.words('english'):
try:
request = requests.get("http://www.urbandictionary.com/define.php?term={}".format(token))
                extract_meaning = BeautifulSoup(request.content, 'html.parser')
                meaning = extract_meaning.find("div", attrs={"class": "meaning"})
                if meaning is not None:
meaning = meaning.text
sentence = sentence + meaning + ' '
else:
sentence = sentence + token + ' '
except Exception as e:
print('Exception at token ', token, '\n', e)
else:
sentence = sentence + token + ' '
premises.append(sentence)
progress = progress + 1
percentage = round((progress / total_posts) * 100, 2)
output_print = "{}% | {}/{}".format(percentage, progress, total_posts)
sys.stdout.write("\r {:<70}".format(output_print))
sys.stdout.flush()
data['premises'] = premises
data.to_csv('./dataset/premises_twitter_posts.csv')
| true
| true
|
f709b9bf5ba6566862a18830554448f31ea2f564
| 20,398
|
py
|
Python
|
hs_labels/models.py
|
hydroshare/hydroshare
|
bf9888bbe61507aff070b1dfcec2fdec1921468d
|
[
"BSD-3-Clause"
] | 178
|
2015-01-08T23:03:36.000Z
|
2022-03-03T13:56:45.000Z
|
hs_labels/models.py
|
hydroshare/hydroshare
|
bf9888bbe61507aff070b1dfcec2fdec1921468d
|
[
"BSD-3-Clause"
] | 4,125
|
2015-01-01T14:26:15.000Z
|
2022-03-31T16:38:55.000Z
|
hs_labels/models.py
|
hydroshare/hydroshare
|
bf9888bbe61507aff070b1dfcec2fdec1921468d
|
[
"BSD-3-Clause"
] | 53
|
2015-03-15T17:56:51.000Z
|
2022-03-17T00:32:16.000Z
|
"""
This model supports user labeling of resources in various ways.
For a User u, this instantiates a subobject u.ulabels (like u.uaccess)
that contains all the labeling functions.
Functions include:
* u.ulabels.label_resource(r, label)
instantiates a label for a resource. Resources can have multiple labels.
* u.ulabels.unlabel_resource(r, label)
removes a label; there can be many labels.
* u.ulabels.clear_resource_labels(r)
removes all labels for a resource
* u.ulabels.favorite_resource(r)
favorites a resource
* u.ulabels.unfavorite_resource(r)
removes a favorite
and the reporting functions
* u.ulabels.labeled_resources
A queryset of resources that are labeled.
* u.ulabels.favorited_resources
A queryset of resources that have been favorited
* u.ulabels.get_resources_with_label(label)
Get a queryset of resources possessing a specific label.
For a BaseResource r, this also adds a subobject rlabels that reports on labels for resources
* r.rlabels.get_labels(u)
* r.rlabels.is_favorite(u)
* r.rlabels.is_mine(u)
"""
# TODO: combine label filtering with access control
import re
from django.contrib.auth.models import User
from django.db import models
from django.db import transaction
from django.db.models import Q
from hs_core.models import BaseResource
class FlagCodes(object):
"""
Flag codes describe the meanings of per-user flags for a resource.
* 1 or FlagCodes.FAVORITE:
marked as a favorite on "My Resources" page
* 2 or FlagCodes.MINE:
marked as being part of "My Resources" on "Discover" page.
"""
FAVORITE = 1
MINE = 2
OPEN_WITH_APP = 3
FLAG_CHOICES = (
(FAVORITE, 'Favorite'), # marked as favorite in my resources page.
(MINE, 'Mine'), # marked as mine in discovery page.
(OPEN_WITH_APP, 'Open With App'), # marked as a open_with app
)
class UserResourceLabels(models.Model):
"""
Labels of a user for a resource
    This model stores labels of an individual user, like an access control list.
"""
start = models.DateTimeField(editable=False, auto_now=True)
user = models.ForeignKey(User, null=False, editable=False,
related_name='u2url', # unused but must be defined and unique
help_text='user assigning a label',
on_delete=models.CASCADE)
resource = models.ForeignKey(BaseResource, null=False, editable=False,
related_name='r2url', # unused but must be defined and unique
help_text='resource to which a label applies',
on_delete=models.CASCADE)
label = models.TextField(null=False)
class Meta:
unique_together = ('user', 'resource', 'label')
class UserResourceFlags(models.Model):
"""
Per-user flagging of resources.
    This model stores flags of an individual user, like an access
    control list. There are several kinds of flags, documented in FlagCodes.
    These are similar in implementation but differ in semantics.
"""
kind = models.IntegerField(choices=FlagCodes.FLAG_CHOICES,
editable=False,
default=FlagCodes.FAVORITE)
start = models.DateTimeField(editable=False, auto_now=True)
user = models.ForeignKey(User, null=False, editable=False,
related_name='u2urf', # unused but must be defined and unique
help_text='user assigning a flag',
on_delete=models.CASCADE)
resource = models.ForeignKey(BaseResource, null=False, editable=False,
related_name="r2urf", # unused but must be defined and unique
help_text='resource to which a flag applies',
on_delete=models.CASCADE)
class Meta:
unique_together = ('user', 'resource', 'kind')
class UserStoredLabels(models.Model):
"""
Storage class for persistent labels that are reusable across different kinds of objects
"""
user = models.ForeignKey(User, null=False,
help_text='user who stored the label',
related_name='ul2usl',
on_delete=models.CASCADE)
label = models.TextField(help_text='label to be stored by user')
class Meta:
unique_together = ('user', 'label')
class UserLabels(models.Model):
"""
Projection class puts methods and content inside basic User object
so that one can access things easily from that context.
    This model is injected into the User class as the related name "ulabels".
    Thus for a User u, u.ulabels is this model.
"""
user = models.OneToOneField(User,
editable=False,
null=True,
related_name='ulabels', # induced field in User class.
related_query_name='ulabels',
on_delete=models.CASCADE)
##########################################
# PUBLIC FUNCTIONS: resources
##########################################
@property
def labeled_resources(self):
"""
Get a QuerySet of resources labeled by a user.
This eliminates duplicates.
"""
return BaseResource.objects.filter(r2url__user=self.user).distinct()
def get_flagged_resources(self, this_flagcode):
"""
Get resources with a specific flag.
"""
if __debug__: # during testing only, check argument types and preconditions
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
return BaseResource.objects.filter(r2urf__user=self.user,
r2urf__kind=this_flagcode)
@property
def favorited_resources(self):
"""
Get a QuerySet of resources favorited by a user.
This eliminates duplicates.
"""
return self.get_flagged_resources(FlagCodes.FAVORITE)
@property
def my_resources(self):
"""
Get a QuerySet of resources marked as mine (add to my resources) by a user.
This eliminates duplicates.
"""
return self.get_flagged_resources(FlagCodes.MINE)
@property
def resources_of_interest(self):
"""
Get a QuerySet of resources the user has tagged in any way.
"""
return BaseResource.objects.filter(Q(r2url__user=self.user) | Q(r2urf__user=self.user)).distinct()
def get_resources_with_label(self, this_label):
"""
Get a QuerySet of resources with a specific label.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_label, str)
label_string = UserLabels.clean_label(this_label) # remove leading and trailing spaces
return BaseResource.objects.filter(r2url__user=self.user,
r2url__label__exact=label_string)\
.distinct()\
.order_by('r2url__label')
@property
def user_labels(self):
"""
Get a QuerySet of labels in use now.
"""
return UserResourceLabels.objects.values_list('label', flat=True)\
.filter(user=self.user)\
.distinct().order_by('label')
######################################
# Label a resource
######################################
@staticmethod
def clean_label(name):
label_string = re.sub('/', r'', name) # no /'s
label_string = label_string.strip() # no leading or trailing whitespace
label_string = re.sub(r'\s+', r' ', label_string) # collapse multiple whitespace, including tabs
return label_string
def label_resource(self, this_resource, this_label):
"""
Assign a label to a resource
Users are allowed to label any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert isinstance(this_label, str)
# remove leading and trailing spaces
label_string = UserLabels.clean_label(this_label)
with transaction.atomic(): # empirically, get_or_create is not atomic.
UserResourceLabels.objects.get_or_create(resource=this_resource,
label=label_string,
user=self.user)
def unlabel_resource(self, this_resource, this_label):
"""
Remove one label from a resource
Users are allowed to label any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert isinstance(this_label, str)
# remove leading and trailing spaces
label_string = UserLabels.clean_label(this_label)
UserResourceLabels.objects.filter(resource=this_resource,
label__exact=label_string,
user=self.user).delete()
def clear_resource_labels(self, this_resource):
"""
Clear all labels for a resource
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
UserResourceLabels.objects.filter(resource=this_resource,
user=self.user).delete()
def remove_resource_label(self, this_label):
"""
clear a label from the labeling system.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_label, str)
UserResourceLabels.objects.filter(label=this_label, user=self.user)\
.delete()
##########################################
# general flagging of resources
##########################################
def flag_resource(self, this_resource, this_flagcode):
"""
flag a resource with a specific flag code from FlagCodes
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because flagging information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
with transaction.atomic(): # empirically, get_or_create is not atomic.
UserResourceFlags.objects.get_or_create(resource=this_resource,
kind=this_flagcode,
user=self.user)
def unflag_resource(self, this_resource, this_flagcode):
"""
unflag a resource with a specific flag.
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because flagging information is private.
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
UserResourceFlags.objects.filter(user=self.user,
resource=this_resource,
kind=this_flagcode).delete()
def clear_all_flags(self, this_flagcode):
"""
remove all flags of a specific kind for a user
"""
UserResourceFlags.objects.filter(user=self.user,
kind=this_flagcode)\
.delete()
##########################################
# favorite resources
##########################################
def favorite_resource(self, this_resource):
"""
Mark a resource as favorite.
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.flag_resource(this_resource, FlagCodes.FAVORITE)
def unfavorite_resource(self, this_resource):
"""
Clear favorite label for a resource
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.unflag_resource(this_resource, FlagCodes.FAVORITE)
##########################################
# my resources
##########################################
def claim_resource(self, this_resource):
"""
Label a resource as 'MINE' (adds to my resources).
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.flag_resource(this_resource, FlagCodes.MINE)
def unclaim_resource(self, this_resource):
"""
Clear 'MINE' label for a resource (removes from my resources)
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
self.unflag_resource(this_resource, FlagCodes.MINE)
##########################################
# open with app
##########################################
def add_open_with_app(self, this_resource):
"""
Mark a webapp resource as open-with-app
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
The calling function should make sure resource is a webapp resource
"""
self.flag_resource(this_resource, FlagCodes.OPEN_WITH_APP)
def remove_open_with_app(self, this_resource):
"""
Unmark a webapp resource as open-with-app
Users are allowed to flag any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
The calling function should make sure resource is a webapp resource
"""
self.unflag_resource(this_resource, FlagCodes.OPEN_WITH_APP)
##########################################
# routines that apply to all kinds of annotations
##########################################
def clear_resource_all(self, this_resource):
"""
Clear all annotations for a resource
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
UserResourceLabels.objects\
.filter(resource=this_resource,
user=self.user)\
.delete()
UserResourceFlags.objects\
.filter(resource=this_resource,
user=self.user)\
.delete()
##########################################
# save unused labels
##########################################
def save_label(self, this_label):
"""
Save a label for use later.
Users are allowed to label any resource, including resources to which they do not have access.
This is not an access control problem because labeling information is private.
"""
label_string = UserLabels.clean_label(this_label) # remove leading and trailing spaces
with transaction.atomic(): # empirically, get_or_create is not atomic.
UserStoredLabels.objects.get_or_create(label=label_string, user=self.user)
def unsave_label(self, this_label):
"""
Remove the specified saved label.
"""
# remove leading and trailing spaces
label_string = UserLabels.clean_label(this_label)
UserStoredLabels.objects.filter(label__exact=label_string, user=self.user).delete()
# remove all uses of that label from resources.
self.remove_resource_label(label_string)
def clear_saved_labels(self):
"""
Clear all saved labels for a user
"""
UserStoredLabels.objects.filter(user=self.user).delete()
@property
def saved_labels(self):
"""
Return a QuerySet of saved labels.
"""
return UserStoredLabels.objects.filter(user=self.user).values_list('label', flat=True).distinct()
class ResourceLabels(models.Model):
"""
For a BaseResource r, r.rlabels is this model. It contains functions relevant to resources.
"""
resource = models.OneToOneField(BaseResource,
editable=False,
null=True,
related_name='rlabels',
related_query_name='rlabels',
on_delete=models.CASCADE)
def get_users(self):
"""
Return a QuerySet of all users who have labeled this resource.
"""
return User.objects.filter(Q(u2url__resource=self.resource) | Q(u2urf__resource=self.resource))
def get_labels(self, this_user):
"""
Return a QuerySet of all user assigned labels for a resource
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_user, User)
labels = UserResourceLabels.objects\
.values_list('label', flat=True)\
.filter(user=this_user,
resource=self.resource)\
.order_by("label").all()
return labels
def is_flagged(self, this_user, this_flagcode):
"""
Return True if this resource has been flagged by a given user
"""
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_user, User)
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
return UserResourceFlags.objects.filter(user=this_user,
resource=self.resource,
kind=this_flagcode).exists()
def is_favorite(self, this_user):
"""
Return True if this resource has been favorited by a given user
"""
return self.is_flagged(this_user, FlagCodes.FAVORITE)
def is_mine(self, this_user):
"""
Return True if this resource has been labeled as mine by a given user
"""
return self.is_flagged(this_user, FlagCodes.MINE)
def is_open_with_app(self, this_user):
"""
Return True if this resource has been set as open-with-app by a given user
"""
return self.is_flagged(this_user, FlagCodes.OPEN_WITH_APP)
| 38.779468
| 110
| 0.595402
|
import re
from django.contrib.auth.models import User
from django.db import models
from django.db import transaction
from django.db.models import Q
from hs_core.models import BaseResource
class FlagCodes(object):
FAVORITE = 1
MINE = 2
OPEN_WITH_APP = 3
FLAG_CHOICES = (
(FAVORITE, 'Favorite'),
(MINE, 'Mine'),
(OPEN_WITH_APP, 'Open With App'),
)
class UserResourceLabels(models.Model):
start = models.DateTimeField(editable=False, auto_now=True)
user = models.ForeignKey(User, null=False, editable=False,
related_name='u2url',
help_text='user assigning a label',
on_delete=models.CASCADE)
resource = models.ForeignKey(BaseResource, null=False, editable=False,
related_name='r2url',
help_text='resource to which a label applies',
on_delete=models.CASCADE)
label = models.TextField(null=False)
class Meta:
unique_together = ('user', 'resource', 'label')
class UserResourceFlags(models.Model):
kind = models.IntegerField(choices=FlagCodes.FLAG_CHOICES,
editable=False,
default=FlagCodes.FAVORITE)
start = models.DateTimeField(editable=False, auto_now=True)
user = models.ForeignKey(User, null=False, editable=False,
related_name='u2urf',
help_text='user assigning a flag',
on_delete=models.CASCADE)
resource = models.ForeignKey(BaseResource, null=False, editable=False,
related_name="r2urf",
help_text='resource to which a flag applies',
on_delete=models.CASCADE)
class Meta:
unique_together = ('user', 'resource', 'kind')
class UserStoredLabels(models.Model):
user = models.ForeignKey(User, null=False,
help_text='user who stored the label',
related_name='ul2usl',
on_delete=models.CASCADE)
label = models.TextField(help_text='label to be stored by user')
class Meta:
unique_together = ('user', 'label')
class UserLabels(models.Model):
user = models.OneToOneField(User,
editable=False,
null=True,
related_name='ulabels',
related_query_name='ulabels',
on_delete=models.CASCADE)
def clear_resource_labels(self, this_resource):
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
UserResourceLabels.objects.filter(resource=this_resource,
user=self.user).delete()
def remove_resource_label(self, this_label):
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_label, str)
UserResourceLabels.objects.filter(label=this_label, user=self.user)\
.delete()
##########################################
# general flagging of resources
##########################################
def flag_resource(self, this_resource, this_flagcode):
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
with transaction.atomic(): # empirically, get_or_create is not atomic.
UserResourceFlags.objects.get_or_create(resource=this_resource,
kind=this_flagcode,
user=self.user)
def unflag_resource(self, this_resource, this_flagcode):
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
UserResourceFlags.objects.filter(user=self.user,
resource=this_resource,
kind=this_flagcode).delete()
def clear_all_flags(self, this_flagcode):
UserResourceFlags.objects.filter(user=self.user,
kind=this_flagcode)\
.delete()
##########################################
# favorite resources
##########################################
def favorite_resource(self, this_resource):
self.flag_resource(this_resource, FlagCodes.FAVORITE)
def unfavorite_resource(self, this_resource):
self.unflag_resource(this_resource, FlagCodes.FAVORITE)
##########################################
# my resources
##########################################
def claim_resource(self, this_resource):
self.flag_resource(this_resource, FlagCodes.MINE)
def unclaim_resource(self, this_resource):
self.unflag_resource(this_resource, FlagCodes.MINE)
##########################################
# open with app
##########################################
def add_open_with_app(self, this_resource):
self.flag_resource(this_resource, FlagCodes.OPEN_WITH_APP)
def remove_open_with_app(self, this_resource):
self.unflag_resource(this_resource, FlagCodes.OPEN_WITH_APP)
##########################################
# routines that apply to all kinds of annotations
##########################################
def clear_resource_all(self, this_resource):
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_resource, BaseResource)
UserResourceLabels.objects\
.filter(resource=this_resource,
user=self.user)\
.delete()
UserResourceFlags.objects\
.filter(resource=this_resource,
user=self.user)\
.delete()
##########################################
# save unused labels
##########################################
def save_label(self, this_label):
label_string = UserLabels.clean_label(this_label) # remove leading and trailing spaces
with transaction.atomic(): # empirically, get_or_create is not atomic.
UserStoredLabels.objects.get_or_create(label=label_string, user=self.user)
def unsave_label(self, this_label):
# remove leading and trailing spaces
label_string = UserLabels.clean_label(this_label)
UserStoredLabels.objects.filter(label__exact=label_string, user=self.user).delete()
# remove all uses of that label from resources.
self.remove_resource_label(label_string)
def clear_saved_labels(self):
UserStoredLabels.objects.filter(user=self.user).delete()
@property
def saved_labels(self):
return UserStoredLabels.objects.filter(user=self.user).values_list('label', flat=True).distinct()
class ResourceLabels(models.Model):
resource = models.OneToOneField(BaseResource,
editable=False,
null=True,
related_name='rlabels',
related_query_name='rlabels',
on_delete=models.CASCADE)
def get_users(self):
return User.objects.filter(Q(u2url__resource=self.resource) | Q(u2urf__resource=self.resource))
def get_labels(self, this_user):
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_user, User)
labels = UserResourceLabels.objects\
.values_list('label', flat=True)\
.filter(user=this_user,
resource=self.resource)\
.order_by("label").all()
return labels
def is_flagged(self, this_user, this_flagcode):
if __debug__: # during testing only, check argument types and preconditions
assert isinstance(this_user, User)
assert this_flagcode == FlagCodes.FAVORITE or this_flagcode == FlagCodes.MINE or \
this_flagcode == FlagCodes.OPEN_WITH_APP
return UserResourceFlags.objects.filter(user=this_user,
resource=self.resource,
kind=this_flagcode).exists()
def is_favorite(self, this_user):
return self.is_flagged(this_user, FlagCodes.FAVORITE)
def is_mine(self, this_user):
return self.is_flagged(this_user, FlagCodes.MINE)
def is_open_with_app(self, this_user):
return self.is_flagged(this_user, FlagCodes.OPEN_WITH_APP)
| true
| true
|
f709ba30f2ddb928e1e17aeebb2ecdb73dfac7e8
| 5,549
|
py
|
Python
|
official/nlp/bert/run_squad.py
|
gujralsanyam22/models
|
d96f8f043dbe2b5ca8ea1785f57df8faf68d8875
|
[
"Apache-2.0"
] | 2
|
2020-12-11T04:07:55.000Z
|
2020-12-11T04:08:11.000Z
|
official/nlp/bert/run_squad.py
|
gujralsanyam22/models
|
d96f8f043dbe2b5ca8ea1785f57df8faf68d8875
|
[
"Apache-2.0"
] | null | null | null |
official/nlp/bert/run_squad.py
|
gujralsanyam22/models
|
d96f8f043dbe2b5ca8ea1785f57df8faf68d8875
|
[
"Apache-2.0"
] | 3
|
2021-02-22T13:24:07.000Z
|
2021-02-26T02:06:24.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run BERT on SQuAD 1.1 and SQuAD 2.0 in TF 2.x."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import time
# Import libraries
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
from official.common import distribute_utils
from official.nlp.bert import configs as bert_configs
from official.nlp.bert import run_squad_helper
from official.nlp.bert import tokenization
from official.nlp.data import squad_lib as squad_lib_wp
from official.utils.misc import keras_utils
flags.DEFINE_string('vocab_file', None,
'The vocabulary file that the BERT model was trained on.')
# More flags can be found in run_squad_helper.
run_squad_helper.define_common_squad_flags()
FLAGS = flags.FLAGS
def train_squad(strategy,
input_meta_data,
custom_callbacks=None,
run_eagerly=False,
init_checkpoint=None,
sub_model_export_name=None):
"""Run bert squad training."""
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
init_checkpoint = init_checkpoint or FLAGS.init_checkpoint
run_squad_helper.train_squad(strategy, input_meta_data, bert_config,
custom_callbacks, run_eagerly, init_checkpoint,
sub_model_export_name=sub_model_export_name)
def predict_squad(strategy, input_meta_data):
"""Makes predictions for the squad dataset."""
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
run_squad_helper.predict_squad(
strategy, input_meta_data, tokenizer, bert_config, squad_lib_wp)
def eval_squad(strategy, input_meta_data):
"""Evaluate on the squad dataset."""
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
eval_metrics = run_squad_helper.eval_squad(
strategy, input_meta_data, tokenizer, bert_config, squad_lib_wp)
return eval_metrics
def export_squad(model_export_path, input_meta_data):
"""Exports a trained model as a `SavedModel` for inference.
Args:
model_export_path: a string specifying the path to the SavedModel directory.
input_meta_data: dictionary containing meta data about input and model.
  Raises:
    An error if the export path is not specified (empty string or None).
"""
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
run_squad_helper.export_squad(model_export_path, input_meta_data, bert_config)
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)
with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
input_meta_data = json.loads(reader.read().decode('utf-8'))
if FLAGS.mode == 'export_only':
export_squad(FLAGS.model_export_path, input_meta_data)
return
# Configures cluster spec for multi-worker distribution strategy.
if FLAGS.num_gpus > 0:
_ = distribute_utils.configure_cluster(FLAGS.worker_hosts, FLAGS.task_index)
strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus,
all_reduce_alg=FLAGS.all_reduce_alg,
tpu_address=FLAGS.tpu)
if 'train' in FLAGS.mode:
if FLAGS.log_steps:
custom_callbacks = [keras_utils.TimeHistory(
batch_size=FLAGS.train_batch_size,
log_steps=FLAGS.log_steps,
logdir=FLAGS.model_dir,
)]
else:
custom_callbacks = None
train_squad(
strategy,
input_meta_data,
custom_callbacks=custom_callbacks,
run_eagerly=FLAGS.run_eagerly,
sub_model_export_name=FLAGS.sub_model_export_name,
)
if 'predict' in FLAGS.mode:
predict_squad(strategy, input_meta_data)
if 'eval' in FLAGS.mode:
eval_metrics = eval_squad(strategy, input_meta_data)
f1_score = eval_metrics['final_f1']
logging.info('SQuAD eval F1-score: %f', f1_score)
summary_dir = os.path.join(FLAGS.model_dir, 'summaries', 'eval')
summary_writer = tf.summary.create_file_writer(summary_dir)
with summary_writer.as_default():
# TODO(lehou): write to the correct step number.
tf.summary.scalar('F1-score', f1_score, step=0)
summary_writer.flush()
# Also write eval_metrics to json file.
squad_lib_wp.write_to_json_files(
eval_metrics, os.path.join(summary_dir, 'eval_metrics.json'))
time.sleep(60)
if __name__ == '__main__':
flags.mark_flag_as_required('bert_config_file')
flags.mark_flag_as_required('model_dir')
app.run(main)
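# Illustrative invocation (sketch; paths are hypothetical, the mode value is
# inferred from the substring checks in main(), and further flags come from
# run_squad_helper.define_common_squad_flags):
#
#   python run_squad.py \
#     --mode=train_and_eval \
#     --bert_config_file=/tmp/bert/bert_config.json \
#     --vocab_file=/tmp/bert/vocab.txt \
#     --input_meta_data_path=/tmp/squad/input_meta_data \
#     --model_dir=/tmp/squad_model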
| 36.267974
| 80
| 0.73707
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import time
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
from official.common import distribute_utils
from official.nlp.bert import configs as bert_configs
from official.nlp.bert import run_squad_helper
from official.nlp.bert import tokenization
from official.nlp.data import squad_lib as squad_lib_wp
from official.utils.misc import keras_utils
flags.DEFINE_string('vocab_file', None,
'The vocabulary file that the BERT model was trained on.')
run_squad_helper.define_common_squad_flags()
FLAGS = flags.FLAGS
def train_squad(strategy,
input_meta_data,
custom_callbacks=None,
run_eagerly=False,
init_checkpoint=None,
sub_model_export_name=None):
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
init_checkpoint = init_checkpoint or FLAGS.init_checkpoint
run_squad_helper.train_squad(strategy, input_meta_data, bert_config,
custom_callbacks, run_eagerly, init_checkpoint,
sub_model_export_name=sub_model_export_name)
def predict_squad(strategy, input_meta_data):
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
run_squad_helper.predict_squad(
strategy, input_meta_data, tokenizer, bert_config, squad_lib_wp)
def eval_squad(strategy, input_meta_data):
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
eval_metrics = run_squad_helper.eval_squad(
strategy, input_meta_data, tokenizer, bert_config, squad_lib_wp)
return eval_metrics
def export_squad(model_export_path, input_meta_data):
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
run_squad_helper.export_squad(model_export_path, input_meta_data, bert_config)
def main(_):
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)
with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
input_meta_data = json.loads(reader.read().decode('utf-8'))
if FLAGS.mode == 'export_only':
export_squad(FLAGS.model_export_path, input_meta_data)
return
if FLAGS.num_gpus > 0:
_ = distribute_utils.configure_cluster(FLAGS.worker_hosts, FLAGS.task_index)
strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus,
all_reduce_alg=FLAGS.all_reduce_alg,
tpu_address=FLAGS.tpu)
if 'train' in FLAGS.mode:
if FLAGS.log_steps:
custom_callbacks = [keras_utils.TimeHistory(
batch_size=FLAGS.train_batch_size,
log_steps=FLAGS.log_steps,
logdir=FLAGS.model_dir,
)]
else:
custom_callbacks = None
train_squad(
strategy,
input_meta_data,
custom_callbacks=custom_callbacks,
run_eagerly=FLAGS.run_eagerly,
sub_model_export_name=FLAGS.sub_model_export_name,
)
if 'predict' in FLAGS.mode:
predict_squad(strategy, input_meta_data)
if 'eval' in FLAGS.mode:
eval_metrics = eval_squad(strategy, input_meta_data)
f1_score = eval_metrics['final_f1']
logging.info('SQuAD eval F1-score: %f', f1_score)
summary_dir = os.path.join(FLAGS.model_dir, 'summaries', 'eval')
summary_writer = tf.summary.create_file_writer(summary_dir)
with summary_writer.as_default():
tf.summary.scalar('F1-score', f1_score, step=0)
summary_writer.flush()
squad_lib_wp.write_to_json_files(
eval_metrics, os.path.join(summary_dir, 'eval_metrics.json'))
time.sleep(60)
if __name__ == '__main__':
flags.mark_flag_as_required('bert_config_file')
flags.mark_flag_as_required('model_dir')
app.run(main)
| true
| true
|
f709ba80071c7ed33b34ab298cb31c2a7898803c
| 2,292
|
py
|
Python
|
mininet_topology/Topo10hosts&router/Topo10.py
|
medic0803/Ginkgo-RnD-Project-Floodlight
|
4cac7a7152ec49be93a6e42dcb3c3bf614546e9a
|
[
"Apache-2.0"
] | 3
|
2021-01-11T11:08:09.000Z
|
2021-03-28T01:02:50.000Z
|
mininet_topology/Topo10hosts&router/Topo10.py
|
medic0803/Ginkgo-RnD-Project-Floodlight
|
4cac7a7152ec49be93a6e42dcb3c3bf614546e9a
|
[
"Apache-2.0"
] | 10
|
2020-12-25T10:30:25.000Z
|
2021-05-17T13:09:32.000Z
|
mininet_topology/Topo10hosts&router/Topo10.py
|
medic0803/Ginkgo-RnD-Project-Floodlight
|
4cac7a7152ec49be93a6e42dcb3c3bf614546e9a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
from mininet.net import Mininet
from mininet.node import Controller, RemoteController, OVSController
from mininet.node import CPULimitedHost, Host, Node
from mininet.node import OVSKernelSwitch, UserSwitch
from mininet.node import IVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import TCLink, Intf
from subprocess import call
from mininet.node import OVSKernelSwitch, UserSwitch
def myNetwork():
net = Mininet( topo=None,
build=False,
ipBase='10.0.0.0/8',controller=RemoteController,host=CPULimitedHost,link=TCLink,switch=UserSwitch)
info( '*** Adding controller\n' )
net.addController('c0',controller=RemoteController,ip='192.168.56.1',port=6653)
info( '*** Add routers\n')
r1 = net.addHost('r1', cls=Node, ip='0.0.0.0')
info( '*** Add switches\n')
s1 =net.addSwitch('s1')
s2 =net.addSwitch('s2')
## switch = net.switches[ 0 ]
info( '*** Add hosts\n')
h1 = net.addHost('h1', cls=Host, ip='192.168.11.1/24', defaultRoute=None)
h2 = net.addHost('h2', cls=Host, ip='192.168.12.1/24', defaultRoute=None)
info( '*** Add links\n')
    # one router interface per subnet (r1-eth0 <-> s1, r1-eth1 <-> s2),
    # matching the ifconfig addresses assigned below
    net.addLink(r1, s1, cls=TCLink )
    net.addLink(r1, s2, cls=TCLink )
    net.addLink(h1, s1, cls=TCLink )
    net.addLink(h2, s2, cls=TCLink )
## net.addLink(r1, h1, cls=TCLink )
## net.addLink(h2, r1, cls=TCLink )
info( '*** Starting network\n')
net.build()
info( '*** Starting controllers\n')
for controller in net.controllers:
controller.start()
info( '*** Starting switches\n')
info( '*** Post configure switches and hosts\n')
r1.cmd('ifconfig r1-eth0 192.168.11.2 netmask 255.255.255.0')
r1.cmd('ifconfig r1-eth1 192.168.12.2 netmask 255.255.255.0')
## r1.cmd('ifconfig r1-eth3 10.0.2.225 netmask 255.255.255.0')
h1.cmd('route add default gw 192.168.11.2')
h2.cmd('route add default gw 192.168.12.2')
## r1.cmd('route add default gw 192.168.56.1')
r1.cmd('sysctl net.ipv4.ip_forward=1')
CLI(net)
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
myNetwork()
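# Note: Mininet requires root privileges, and the remote controller address
# (192.168.56.1:6653 above) must be reachable, e.g.:
#   sudo python Topo10.py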
| 27.614458
| 117
| 0.637871
|
from mininet.net import Mininet
from mininet.node import Controller, RemoteController, OVSController
from mininet.node import CPULimitedHost, Host, Node
from mininet.node import OVSKernelSwitch, UserSwitch
from mininet.node import IVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import TCLink, Intf
from subprocess import call
from mininet.node import OVSKernelSwitch, UserSwitch
def myNetwork():
net = Mininet( topo=None,
build=False,
ipBase='10.0.0.0/8',controller=RemoteController,host=CPULimitedHost,link=TCLink,switch=UserSwitch)
info( '*** Adding controller\n' )
net.addController('c0',controller=RemoteController,ip='192.168.56.1',port=6653)
info( '*** Add routers\n')
r1 = net.addHost('r1', cls=Node, ip='0.0.0.0')
info( '*** Add switches\n')
s1 =net.addSwitch('s1')
s2 =net.addSwitch('s2')
    info( '*** Add hosts\n')
    h1 = net.addHost('h1', cls=Host, ip='192.168.11.1/24', defaultRoute=None)
h2 = net.addHost('h2', cls=Host, ip='192.168.12.1/24', defaultRoute=None)
info( '*** Add links\n')
    net.addLink(r1, s1, cls=TCLink )
    net.addLink(r1, s2, cls=TCLink )
    net.addLink(h1, s1, cls=TCLink )
    net.addLink(h2, s2, cls=TCLink )
    info( '*** Starting network\n')
    net.build()
    info( '*** Starting controllers\n')
for controller in net.controllers:
controller.start()
info( '*** Starting switches\n')
info( '*** Post configure switches and hosts\n')
r1.cmd('ifconfig r1-eth0 192.168.11.2 netmask 255.255.255.0')
r1.cmd('ifconfig r1-eth1 192.168.12.2 netmask 255.255.255.0')
    h1.cmd('route add default gw 192.168.11.2')
    h2.cmd('route add default gw 192.168.12.2')
    r1.cmd('sysctl net.ipv4.ip_forward=1')
CLI(net)
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
myNetwork()
| true
| true
|
f709bae64888c279d680e86fbedd96c227f6f4d8
| 9,085
|
py
|
Python
|
plaidrl/torch/smac/launcher.py
|
charliec443/plaid-rl
|
2e8fbf389af9efecd41361df80e40e0bf932056d
|
[
"MIT"
] | null | null | null |
plaidrl/torch/smac/launcher.py
|
charliec443/plaid-rl
|
2e8fbf389af9efecd41361df80e40e0bf932056d
|
[
"MIT"
] | null | null | null |
plaidrl/torch/smac/launcher.py
|
charliec443/plaid-rl
|
2e8fbf389af9efecd41361df80e40e0bf932056d
|
[
"MIT"
] | null | null | null |
import pickle
import plaidrl.torch.pytorch_util as ptu
from plaidrl.core import logger
from plaidrl.core.meta_rl_algorithm import MetaRLAlgorithm
from plaidrl.core.simple_offline_rl_algorithm import OfflineMetaRLAlgorithm
from plaidrl.data_management.env_replay_buffer import EnvReplayBuffer
from plaidrl.demos.source.mdp_path_loader import MDPPathLoader
from plaidrl.envs.pearl_envs import ENVS, register_pearl_envs
from plaidrl.envs.wrappers import NormalizedBoxEnv
from plaidrl.torch.networks import ConcatMlp
from plaidrl.torch.smac.agent import SmacAgent
from plaidrl.torch.smac.diagnostics import get_env_info_sizes
from plaidrl.torch.smac.launcher_util import (
EvalPearl,
load_buffer_onto_algo,
load_macaw_buffer_onto_algo,
policy_class_from_str,
relabel_offline_data,
)
from plaidrl.torch.smac.networks import DummyMlpEncoder, MlpDecoder, MlpEncoder
from plaidrl.torch.smac.smac import SmacTrainer
from plaidrl.util.io import load_local_or_remote_file
def smac_experiment(
trainer_kwargs=None,
algo_kwargs=None,
qf_kwargs=None,
policy_kwargs=None,
context_encoder_kwargs=None,
context_decoder_kwargs=None,
env_name=None,
env_params=None,
path_loader_kwargs=None,
latent_dim=None,
policy_class="TanhGaussianPolicy",
# video/debug
debug=False,
use_dummy_encoder=False,
networks_ignore_context=False,
use_ground_truth_context=False,
save_video=False,
save_video_period=False,
# Pre-train params
pretrain_rl=False,
pretrain_offline_algo_kwargs=None,
pretrain_buffer_kwargs=None,
load_buffer_kwargs=None,
saved_tasks_path=None,
macaw_format_base_path=None, # overrides saved_tasks_path and load_buffer_kwargs
load_macaw_buffer_kwargs=None,
train_task_idxs=None,
eval_task_idxs=None,
relabel_offline_dataset=False,
skip_initial_data_collection_if_pretrained=False,
relabel_kwargs=None,
# PEARL
n_train_tasks=0,
n_eval_tasks=0,
use_next_obs_in_context=False,
tags=None,
online_trainer_kwargs=None,
):
if not skip_initial_data_collection_if_pretrained:
raise NotImplementedError("deprecated! make sure to skip it!")
if relabel_kwargs is None:
relabel_kwargs = {}
del tags
pretrain_buffer_kwargs = pretrain_buffer_kwargs or {}
context_decoder_kwargs = context_decoder_kwargs or {}
pretrain_offline_algo_kwargs = pretrain_offline_algo_kwargs or {}
online_trainer_kwargs = online_trainer_kwargs or {}
register_pearl_envs()
env_params = env_params or {}
context_encoder_kwargs = context_encoder_kwargs or {}
trainer_kwargs = trainer_kwargs or {}
path_loader_kwargs = path_loader_kwargs or {}
load_macaw_buffer_kwargs = load_macaw_buffer_kwargs or {}
base_env = ENVS[env_name](**env_params)
if saved_tasks_path:
task_data = load_local_or_remote_file(saved_tasks_path, file_type="joblib")
tasks = task_data["tasks"]
train_task_idxs = task_data["train_task_indices"]
eval_task_idxs = task_data["eval_task_indices"]
base_env.tasks = tasks
elif macaw_format_base_path is not None:
tasks = pickle.load(open("{}/tasks.pkl".format(macaw_format_base_path), "rb"))
base_env.tasks = tasks
else:
tasks = base_env.tasks
task_indices = base_env.get_all_task_idx()
train_task_idxs = list(task_indices[:n_train_tasks])
eval_task_idxs = list(task_indices[-n_eval_tasks:])
if hasattr(base_env, "task_to_vec"):
train_tasks = [base_env.task_to_vec(tasks[i]) for i in train_task_idxs]
eval_tasks = [base_env.task_to_vec(tasks[i]) for i in eval_task_idxs]
else:
train_tasks = [tasks[i] for i in train_task_idxs]
eval_tasks = [tasks[i] for i in eval_task_idxs]
if use_ground_truth_context:
latent_dim = len(train_tasks[0])
expl_env = NormalizedBoxEnv(base_env)
reward_dim = 1
if debug:
algo_kwargs["max_path_length"] = 50
algo_kwargs["batch_size"] = 5
algo_kwargs["num_epochs"] = 5
algo_kwargs["num_eval_steps_per_epoch"] = 100
algo_kwargs["num_expl_steps_per_train_loop"] = 100
algo_kwargs["num_trains_per_train_loop"] = 10
algo_kwargs["min_num_steps_before_training"] = 100
obs_dim = expl_env.observation_space.low.size
action_dim = expl_env.action_space.low.size
if use_next_obs_in_context:
context_encoder_input_dim = 2 * obs_dim + action_dim + reward_dim
else:
context_encoder_input_dim = obs_dim + action_dim + reward_dim
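    # The factor of 2 below: the encoder emits a mean and a variance per latent
    # dimension of the PEARL-style Gaussian task posterior.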
context_encoder_output_dim = latent_dim * 2
def create_qf():
return ConcatMlp(
input_size=obs_dim + action_dim + latent_dim, output_size=1, **qf_kwargs
)
qf1 = create_qf()
qf2 = create_qf()
target_qf1 = create_qf()
target_qf2 = create_qf()
if isinstance(policy_class, str):
policy_class = policy_class_from_str(policy_class)
policy = policy_class(
obs_dim=obs_dim + latent_dim,
action_dim=action_dim,
**policy_kwargs,
)
encoder_class = DummyMlpEncoder if use_dummy_encoder else MlpEncoder
context_encoder = encoder_class(
input_size=context_encoder_input_dim,
output_size=context_encoder_output_dim,
hidden_sizes=[200, 200, 200],
use_ground_truth_context=use_ground_truth_context,
**context_encoder_kwargs,
)
context_decoder = MlpDecoder(
input_size=obs_dim + action_dim + latent_dim,
output_size=1,
**context_decoder_kwargs,
)
reward_predictor = context_decoder
agent = SmacAgent(
latent_dim,
context_encoder,
policy,
reward_predictor,
use_next_obs_in_context=use_next_obs_in_context,
_debug_ignore_context=networks_ignore_context,
_debug_use_ground_truth_context=use_ground_truth_context,
)
trainer = SmacTrainer(
agent=agent,
env=expl_env,
latent_dim=latent_dim,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
reward_predictor=reward_predictor,
context_encoder=context_encoder,
context_decoder=context_decoder,
_debug_ignore_context=networks_ignore_context,
_debug_use_ground_truth_context=use_ground_truth_context,
**trainer_kwargs,
)
algorithm = MetaRLAlgorithm(
agent=agent,
env=expl_env,
trainer=trainer,
train_task_indices=train_task_idxs,
eval_task_indices=eval_task_idxs,
train_tasks=train_tasks,
eval_tasks=eval_tasks,
use_next_obs_in_context=use_next_obs_in_context,
use_ground_truth_context=use_ground_truth_context,
env_info_sizes=get_env_info_sizes(expl_env),
**algo_kwargs,
)
if macaw_format_base_path:
load_macaw_buffer_onto_algo(
algo=algorithm,
base_directory=macaw_format_base_path,
train_task_idxs=train_task_idxs,
**load_macaw_buffer_kwargs,
)
elif load_buffer_kwargs:
load_buffer_onto_algo(algorithm, **load_buffer_kwargs)
if relabel_offline_dataset:
relabel_offline_data(
algorithm, tasks=tasks, env=expl_env.wrapped_env, **relabel_kwargs
)
if path_loader_kwargs:
replay_buffer = algorithm.replay_buffer.task_buffers[0]
enc_replay_buffer = algorithm.enc_replay_buffer.task_buffers[0]
demo_test_buffer = EnvReplayBuffer(env=expl_env, **pretrain_buffer_kwargs)
path_loader = MDPPathLoader(
trainer,
replay_buffer=replay_buffer,
demo_train_buffer=enc_replay_buffer,
demo_test_buffer=demo_test_buffer,
**path_loader_kwargs,
)
path_loader.load_demos()
if pretrain_rl:
eval_pearl_fn = EvalPearl(algorithm, train_task_idxs, eval_task_idxs)
pretrain_algo = OfflineMetaRLAlgorithm(
meta_replay_buffer=algorithm.meta_replay_buffer,
replay_buffer=algorithm.replay_buffer,
task_embedding_replay_buffer=algorithm.enc_replay_buffer,
trainer=trainer,
train_tasks=train_task_idxs,
extra_eval_fns=[eval_pearl_fn],
use_meta_learning_buffer=algorithm.use_meta_learning_buffer,
**pretrain_offline_algo_kwargs,
)
pretrain_algo.to(ptu.device)
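        # Route pretraining metrics to a separate pretrain.csv, then restore
        # progress.csv before the online phase below.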
logger.remove_tabular_output("progress.csv", relative_to_snapshot_dir=True)
logger.add_tabular_output("pretrain.csv", relative_to_snapshot_dir=True)
pretrain_algo.train()
logger.remove_tabular_output("pretrain.csv", relative_to_snapshot_dir=True)
logger.add_tabular_output(
"progress.csv",
relative_to_snapshot_dir=True,
)
if skip_initial_data_collection_if_pretrained:
algorithm.num_initial_steps = 0
algorithm.trainer.configure(**online_trainer_kwargs)
algorithm.to(ptu.device)
algorithm.train()
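For orientation, a minimal invocation of smac_experiment might look like the sketch below. Every value is illustrative rather than a documented default, and the env key is a guess at what register_pearl_envs() adds to ENVS; note that the guard at the top of the function requires skip_initial_data_collection_if_pretrained=True.

# Illustrative call only; all values are assumptions, not defaults from the source.
smac_experiment(
    env_name="cheetah-vel",  # hypothetical PEARL task-distribution key in ENVS
    latent_dim=5,
    n_train_tasks=100,
    n_eval_tasks=30,
    qf_kwargs=dict(hidden_sizes=[300, 300]),
    policy_kwargs=dict(hidden_sizes=[300, 300]),
    algo_kwargs=dict(max_path_length=200, batch_size=256, num_epochs=50),
    skip_initial_data_collection_if_pretrained=True,  # required by the guard
)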
| 36.633065
| 86
| 0.716346
|
import pickle
import plaidrl.torch.pytorch_util as ptu
from plaidrl.core import logger
from plaidrl.core.meta_rl_algorithm import MetaRLAlgorithm
from plaidrl.core.simple_offline_rl_algorithm import OfflineMetaRLAlgorithm
from plaidrl.data_management.env_replay_buffer import EnvReplayBuffer
from plaidrl.demos.source.mdp_path_loader import MDPPathLoader
from plaidrl.envs.pearl_envs import ENVS, register_pearl_envs
from plaidrl.envs.wrappers import NormalizedBoxEnv
from plaidrl.torch.networks import ConcatMlp
from plaidrl.torch.smac.agent import SmacAgent
from plaidrl.torch.smac.diagnostics import get_env_info_sizes
from plaidrl.torch.smac.launcher_util import (
EvalPearl,
load_buffer_onto_algo,
load_macaw_buffer_onto_algo,
policy_class_from_str,
relabel_offline_data,
)
from plaidrl.torch.smac.networks import DummyMlpEncoder, MlpDecoder, MlpEncoder
from plaidrl.torch.smac.smac import SmacTrainer
from plaidrl.util.io import load_local_or_remote_file
def smac_experiment(
trainer_kwargs=None,
algo_kwargs=None,
qf_kwargs=None,
policy_kwargs=None,
context_encoder_kwargs=None,
context_decoder_kwargs=None,
env_name=None,
env_params=None,
path_loader_kwargs=None,
latent_dim=None,
policy_class="TanhGaussianPolicy",
debug=False,
use_dummy_encoder=False,
networks_ignore_context=False,
use_ground_truth_context=False,
save_video=False,
save_video_period=False,
pretrain_rl=False,
pretrain_offline_algo_kwargs=None,
pretrain_buffer_kwargs=None,
load_buffer_kwargs=None,
saved_tasks_path=None,
macaw_format_base_path=None,
load_macaw_buffer_kwargs=None,
train_task_idxs=None,
eval_task_idxs=None,
relabel_offline_dataset=False,
skip_initial_data_collection_if_pretrained=False,
relabel_kwargs=None,
n_train_tasks=0,
n_eval_tasks=0,
use_next_obs_in_context=False,
tags=None,
online_trainer_kwargs=None,
):
if not skip_initial_data_collection_if_pretrained:
raise NotImplementedError("deprecated! make sure to skip it!")
if relabel_kwargs is None:
relabel_kwargs = {}
del tags
pretrain_buffer_kwargs = pretrain_buffer_kwargs or {}
context_decoder_kwargs = context_decoder_kwargs or {}
pretrain_offline_algo_kwargs = pretrain_offline_algo_kwargs or {}
online_trainer_kwargs = online_trainer_kwargs or {}
register_pearl_envs()
env_params = env_params or {}
context_encoder_kwargs = context_encoder_kwargs or {}
trainer_kwargs = trainer_kwargs or {}
path_loader_kwargs = path_loader_kwargs or {}
load_macaw_buffer_kwargs = load_macaw_buffer_kwargs or {}
base_env = ENVS[env_name](**env_params)
if saved_tasks_path:
task_data = load_local_or_remote_file(saved_tasks_path, file_type="joblib")
tasks = task_data["tasks"]
train_task_idxs = task_data["train_task_indices"]
eval_task_idxs = task_data["eval_task_indices"]
base_env.tasks = tasks
elif macaw_format_base_path is not None:
tasks = pickle.load(open("{}/tasks.pkl".format(macaw_format_base_path), "rb"))
base_env.tasks = tasks
else:
tasks = base_env.tasks
task_indices = base_env.get_all_task_idx()
train_task_idxs = list(task_indices[:n_train_tasks])
eval_task_idxs = list(task_indices[-n_eval_tasks:])
if hasattr(base_env, "task_to_vec"):
train_tasks = [base_env.task_to_vec(tasks[i]) for i in train_task_idxs]
eval_tasks = [base_env.task_to_vec(tasks[i]) for i in eval_task_idxs]
else:
train_tasks = [tasks[i] for i in train_task_idxs]
eval_tasks = [tasks[i] for i in eval_task_idxs]
if use_ground_truth_context:
latent_dim = len(train_tasks[0])
expl_env = NormalizedBoxEnv(base_env)
reward_dim = 1
if debug:
algo_kwargs["max_path_length"] = 50
algo_kwargs["batch_size"] = 5
algo_kwargs["num_epochs"] = 5
algo_kwargs["num_eval_steps_per_epoch"] = 100
algo_kwargs["num_expl_steps_per_train_loop"] = 100
algo_kwargs["num_trains_per_train_loop"] = 10
algo_kwargs["min_num_steps_before_training"] = 100
obs_dim = expl_env.observation_space.low.size
action_dim = expl_env.action_space.low.size
if use_next_obs_in_context:
context_encoder_input_dim = 2 * obs_dim + action_dim + reward_dim
else:
context_encoder_input_dim = obs_dim + action_dim + reward_dim
context_encoder_output_dim = latent_dim * 2
def create_qf():
return ConcatMlp(
input_size=obs_dim + action_dim + latent_dim, output_size=1, **qf_kwargs
)
qf1 = create_qf()
qf2 = create_qf()
target_qf1 = create_qf()
target_qf2 = create_qf()
if isinstance(policy_class, str):
policy_class = policy_class_from_str(policy_class)
policy = policy_class(
obs_dim=obs_dim + latent_dim,
action_dim=action_dim,
**policy_kwargs,
)
encoder_class = DummyMlpEncoder if use_dummy_encoder else MlpEncoder
context_encoder = encoder_class(
input_size=context_encoder_input_dim,
output_size=context_encoder_output_dim,
hidden_sizes=[200, 200, 200],
use_ground_truth_context=use_ground_truth_context,
**context_encoder_kwargs,
)
context_decoder = MlpDecoder(
input_size=obs_dim + action_dim + latent_dim,
output_size=1,
**context_decoder_kwargs,
)
reward_predictor = context_decoder
agent = SmacAgent(
latent_dim,
context_encoder,
policy,
reward_predictor,
use_next_obs_in_context=use_next_obs_in_context,
_debug_ignore_context=networks_ignore_context,
_debug_use_ground_truth_context=use_ground_truth_context,
)
trainer = SmacTrainer(
agent=agent,
env=expl_env,
latent_dim=latent_dim,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
reward_predictor=reward_predictor,
context_encoder=context_encoder,
context_decoder=context_decoder,
_debug_ignore_context=networks_ignore_context,
_debug_use_ground_truth_context=use_ground_truth_context,
**trainer_kwargs,
)
algorithm = MetaRLAlgorithm(
agent=agent,
env=expl_env,
trainer=trainer,
train_task_indices=train_task_idxs,
eval_task_indices=eval_task_idxs,
train_tasks=train_tasks,
eval_tasks=eval_tasks,
use_next_obs_in_context=use_next_obs_in_context,
use_ground_truth_context=use_ground_truth_context,
env_info_sizes=get_env_info_sizes(expl_env),
**algo_kwargs,
)
if macaw_format_base_path:
load_macaw_buffer_onto_algo(
algo=algorithm,
base_directory=macaw_format_base_path,
train_task_idxs=train_task_idxs,
**load_macaw_buffer_kwargs,
)
elif load_buffer_kwargs:
load_buffer_onto_algo(algorithm, **load_buffer_kwargs)
if relabel_offline_dataset:
relabel_offline_data(
algorithm, tasks=tasks, env=expl_env.wrapped_env, **relabel_kwargs
)
if path_loader_kwargs:
replay_buffer = algorithm.replay_buffer.task_buffers[0]
enc_replay_buffer = algorithm.enc_replay_buffer.task_buffers[0]
demo_test_buffer = EnvReplayBuffer(env=expl_env, **pretrain_buffer_kwargs)
path_loader = MDPPathLoader(
trainer,
replay_buffer=replay_buffer,
demo_train_buffer=enc_replay_buffer,
demo_test_buffer=demo_test_buffer,
**path_loader_kwargs,
)
path_loader.load_demos()
if pretrain_rl:
eval_pearl_fn = EvalPearl(algorithm, train_task_idxs, eval_task_idxs)
pretrain_algo = OfflineMetaRLAlgorithm(
meta_replay_buffer=algorithm.meta_replay_buffer,
replay_buffer=algorithm.replay_buffer,
task_embedding_replay_buffer=algorithm.enc_replay_buffer,
trainer=trainer,
train_tasks=train_task_idxs,
extra_eval_fns=[eval_pearl_fn],
use_meta_learning_buffer=algorithm.use_meta_learning_buffer,
**pretrain_offline_algo_kwargs,
)
pretrain_algo.to(ptu.device)
logger.remove_tabular_output("progress.csv", relative_to_snapshot_dir=True)
logger.add_tabular_output("pretrain.csv", relative_to_snapshot_dir=True)
pretrain_algo.train()
logger.remove_tabular_output("pretrain.csv", relative_to_snapshot_dir=True)
logger.add_tabular_output(
"progress.csv",
relative_to_snapshot_dir=True,
)
if skip_initial_data_collection_if_pretrained:
algorithm.num_initial_steps = 0
algorithm.trainer.configure(**online_trainer_kwargs)
algorithm.to(ptu.device)
algorithm.train()
| true
| true
|
f709bb26c60915265cade2b384fdde8847a123c3
| 1,070
|
py
|
Python
|
messenger/migrations/0001_initial.py
|
lucida-no/hdo-quiz-service
|
32e03165e8d495f1290edd2b96cc1cba415f9799
|
[
"BSD-3-Clause"
] | null | null | null |
messenger/migrations/0001_initial.py
|
lucida-no/hdo-quiz-service
|
32e03165e8d495f1290edd2b96cc1cba415f9799
|
[
"BSD-3-Clause"
] | 13
|
2017-01-01T23:23:29.000Z
|
2017-05-27T11:15:38.000Z
|
messenger/migrations/0001_initial.py
|
lucida-no/hdo-messenger-backend
|
32e03165e8d495f1290edd2b96cc1cba415f9799
|
[
"BSD-3-Clause"
] | 1
|
2017-01-01T16:32:30.000Z
|
2017-01-01T16:32:30.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-03-05 13:47
from __future__ import unicode_literals
from django.db import migrations, models
import jsonfield.fields
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ChatSession',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('state', models.CharField(choices=[('in_progress', 'In progress'), ('complete', 'Complete')], default='in_progress', max_length=100)),
('uuid', models.UUIDField(default=uuid.uuid4, unique=True)),
('user_id', models.CharField(db_index=True, max_length=100)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, db_index=True)),
('meta', jsonfield.fields.JSONField(blank=True, default=dict)),
],
),
]
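For orientation, the model that would generate this initial migration looks roughly like the sketch below. The field options mirror the operations above exactly; the choice constants and import style are assumptions.

# Hypothetical source model reconstructed from the migration above; the
# migration itself is authoritative.
import uuid
from django.db import models
from jsonfield import JSONField  # referenced above as jsonfield.fields.JSONField

class ChatSession(models.Model):
    STATE_CHOICES = [('in_progress', 'In progress'), ('complete', 'Complete')]

    state = models.CharField(choices=STATE_CHOICES, default='in_progress', max_length=100)
    uuid = models.UUIDField(default=uuid.uuid4, unique=True)
    user_id = models.CharField(db_index=True, max_length=100)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True, db_index=True)
    meta = JSONField(blank=True, default=dict)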
| 34.516129
| 151
| 0.614019
|
from __future__ import unicode_literals
from django.db import migrations, models
import jsonfield.fields
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ChatSession',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('state', models.CharField(choices=[('in_progress', 'In progress'), ('complete', 'Complete')], default='in_progress', max_length=100)),
('uuid', models.UUIDField(default=uuid.uuid4, unique=True)),
('user_id', models.CharField(db_index=True, max_length=100)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, db_index=True)),
('meta', jsonfield.fields.JSONField(blank=True, default=dict)),
],
),
]
| true
| true
|
f709bc9106901d8180f4e8a8ab96e5dcd594d6f5
| 7,740
|
py
|
Python
|
community/migrations/0001_squashed_0004_auto_20170831_0541.py
|
ewjoachim/pythondotorg
|
382741cc6208fc56aa827cdd1da41983fb7e6ba8
|
[
"Apache-2.0"
] | 911
|
2015-01-03T22:16:06.000Z
|
2022-03-31T23:56:22.000Z
|
community/migrations/0001_squashed_0004_auto_20170831_0541.py
|
ewjoachim/pythondotorg
|
382741cc6208fc56aa827cdd1da41983fb7e6ba8
|
[
"Apache-2.0"
] | 1,342
|
2015-01-02T16:14:45.000Z
|
2022-03-28T08:01:20.000Z
|
community/migrations/0001_squashed_0004_auto_20170831_0541.py
|
ewjoachim/pythondotorg
|
382741cc6208fc56aa827cdd1da41983fb7e6ba8
|
[
"Apache-2.0"
] | 551
|
2015-01-04T02:17:31.000Z
|
2022-03-23T11:59:25.000Z
|
# Generated by Django 1.9.13 on 2017-08-31 05:44
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import markupfield.fields
class Migration(migrations.Migration):
replaces = [('community', '0001_initial'), ('community', '0002_auto_20150416_1853'), ('community', '0003_auto_20170831_0358'), ('community', '0004_auto_20170831_0541')]
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Link',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, db_index=True, default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
('url', models.URLField(blank=True, max_length=1000, verbose_name='URL')),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_link_creator', to=settings.AUTH_USER_MODEL)),
('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_link_modified', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Links',
'ordering': ['-created'],
'verbose_name': 'Link',
'get_latest_by': 'created',
},
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, db_index=True, default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
('image', models.ImageField(blank=True, upload_to='community/photos/')),
('image_url', models.URLField(blank=True, max_length=1000, verbose_name='Image URL')),
('caption', models.TextField(blank=True)),
('click_through_url', models.URLField(blank=True)),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_photo_creator', to=settings.AUTH_USER_MODEL)),
('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_photo_modified', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'photos',
'ordering': ['-created'],
'verbose_name': 'photo',
'get_latest_by': 'created',
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, db_index=True, default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
('title', models.CharField(blank=True, max_length=200, null=True)),
('content', markupfield.fields.MarkupField(rendered_field=True)),
('abstract', models.TextField(blank=True, null=True)),
('content_markup_type', models.CharField(choices=[('', '--'), ('html', 'html'), ('plain', 'plain'), ('markdown', 'markdown'), ('restructuredtext', 'restructuredtext')], default='html', max_length=30)),
('_content_rendered', models.TextField(editable=False)),
('media_type', models.IntegerField(choices=[(1, 'text'), (2, 'photo'), (3, 'video'), (4, 'link')], default=1)),
('source_url', models.URLField(blank=True, max_length=1000)),
('meta', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={})),
('status', models.IntegerField(choices=[(1, 'private'), (2, 'public')], db_index=True, default=1)),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_post_creator', to=settings.AUTH_USER_MODEL)),
('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_post_modified', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'posts',
'ordering': ['-created'],
'verbose_name': 'post',
'get_latest_by': 'created',
},
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, db_index=True, default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
('video_embed', models.TextField(blank=True)),
('video_data', models.FileField(blank=True, upload_to='community/videos/')),
('caption', models.TextField(blank=True)),
('click_through_url', models.URLField(blank=True, verbose_name='Click Through URL')),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_video_creator', to=settings.AUTH_USER_MODEL)),
('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_video_modified', to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='related_video', to='community.Post')),
],
options={
'verbose_name_plural': 'videos',
'ordering': ['-created'],
'verbose_name': 'video',
'get_latest_by': 'created',
},
),
migrations.AddField(
model_name='photo',
name='post',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='related_photo', to='community.Post'),
),
migrations.AddField(
model_name='link',
name='post',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='related_link', to='community.Post'),
),
migrations.AlterField(
model_name='post',
name='content_markup_type',
field=models.CharField(choices=[('', '--'), ('html', 'HTML'), ('plain', 'Plain'), ('markdown', 'Markdown'), ('restructuredtext', 'Restructured Text')], default='html', max_length=30),
),
migrations.AlterField(
model_name='post',
name='meta',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}),
),
migrations.AlterField(
model_name='post',
name='meta',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict),
),
]
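One detail worth noting in this squashed history: Post.meta is first declared with default={} and only corrected to default=dict by the final AlterField. A literal {} is created once at import time and shared by every row, the classic mutable-default pitfall, while a callable is invoked per row. With JSONField standing for django.contrib.postgres.fields.jsonb.JSONField as imported above:

# Risky: one dict instance, created at import time, shared across rows.
meta = JSONField(blank=True, default={})
# Safe, and what the final AlterField migrates to: a fresh dict per row.
meta = JSONField(blank=True, default=dict)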
| 59.083969
| 217
| 0.613824
|
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import markupfield.fields
class Migration(migrations.Migration):
replaces = [('community', '0001_initial'), ('community', '0002_auto_20150416_1853'), ('community', '0003_auto_20170831_0358'), ('community', '0004_auto_20170831_0541')]
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Link',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, db_index=True, default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
('url', models.URLField(blank=True, max_length=1000, verbose_name='URL')),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_link_creator', to=settings.AUTH_USER_MODEL)),
('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_link_modified', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Links',
'ordering': ['-created'],
'verbose_name': 'Link',
'get_latest_by': 'created',
},
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, db_index=True, default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
('image', models.ImageField(blank=True, upload_to='community/photos/')),
('image_url', models.URLField(blank=True, max_length=1000, verbose_name='Image URL')),
('caption', models.TextField(blank=True)),
('click_through_url', models.URLField(blank=True)),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_photo_creator', to=settings.AUTH_USER_MODEL)),
('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_photo_modified', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'photos',
'ordering': ['-created'],
'verbose_name': 'photo',
'get_latest_by': 'created',
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, db_index=True, default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
('title', models.CharField(blank=True, max_length=200, null=True)),
('content', markupfield.fields.MarkupField(rendered_field=True)),
('abstract', models.TextField(blank=True, null=True)),
('content_markup_type', models.CharField(choices=[('', '--'), ('html', 'html'), ('plain', 'plain'), ('markdown', 'markdown'), ('restructuredtext', 'restructuredtext')], default='html', max_length=30)),
('_content_rendered', models.TextField(editable=False)),
('media_type', models.IntegerField(choices=[(1, 'text'), (2, 'photo'), (3, 'video'), (4, 'link')], default=1)),
('source_url', models.URLField(blank=True, max_length=1000)),
('meta', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={})),
('status', models.IntegerField(choices=[(1, 'private'), (2, 'public')], db_index=True, default=1)),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_post_creator', to=settings.AUTH_USER_MODEL)),
('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_post_modified', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'posts',
'ordering': ['-created'],
'verbose_name': 'post',
'get_latest_by': 'created',
},
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, db_index=True, default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
('video_embed', models.TextField(blank=True)),
('video_data', models.FileField(blank=True, upload_to='community/videos/')),
('caption', models.TextField(blank=True)),
('click_through_url', models.URLField(blank=True, verbose_name='Click Through URL')),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_video_creator', to=settings.AUTH_USER_MODEL)),
('last_modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_video_modified', to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='related_video', to='community.Post')),
],
options={
'verbose_name_plural': 'videos',
'ordering': ['-created'],
'verbose_name': 'video',
'get_latest_by': 'created',
},
),
migrations.AddField(
model_name='photo',
name='post',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='related_photo', to='community.Post'),
),
migrations.AddField(
model_name='link',
name='post',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='related_link', to='community.Post'),
),
migrations.AlterField(
model_name='post',
name='content_markup_type',
field=models.CharField(choices=[('', '--'), ('html', 'HTML'), ('plain', 'Plain'), ('markdown', 'Markdown'), ('restructuredtext', 'Restructured Text')], default='html', max_length=30),
),
migrations.AlterField(
model_name='post',
name='meta',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}),
),
migrations.AlterField(
model_name='post',
name='meta',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict),
),
]
| true
| true
|
f709bea1e2a5cc19ce8c1b6caa6b04ad1d2ec215
| 3,976
|
py
|
Python
|
test/unit/test_crypto_encryption_decryptor.py
|
farleyb-amazon/aws-encryption-sdk-python
|
7950abd73ee333407d2dadd02ef2d57c3df464cf
|
[
"Apache-2.0"
] | 95
|
2018-08-20T23:10:00.000Z
|
2022-02-17T02:54:32.000Z
|
test/unit/test_crypto_encryption_decryptor.py
|
farleyb-amazon/aws-encryption-sdk-python
|
7950abd73ee333407d2dadd02ef2d57c3df464cf
|
[
"Apache-2.0"
] | 220
|
2018-08-01T20:56:29.000Z
|
2022-03-28T18:12:35.000Z
|
test/unit/test_crypto_encryption_decryptor.py
|
farleyb-amazon/aws-encryption-sdk-python
|
7950abd73ee333407d2dadd02ef2d57c3df464cf
|
[
"Apache-2.0"
] | 63
|
2018-08-01T19:37:33.000Z
|
2022-03-20T17:14:15.000Z
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Unit test suite for ``aws_encryption_sdk.internal.crypto.encryption.Decryptor``."""
import pytest
from mock import MagicMock, sentinel
from pytest_mock import mocker # noqa pylint: disable=unused-import
import aws_encryption_sdk.internal.crypto.encryption
from aws_encryption_sdk.internal.crypto.encryption import Decryptor, decrypt
pytestmark = [pytest.mark.unit, pytest.mark.local]
@pytest.fixture
def patch_default_backend(mocker):
mocker.patch.object(aws_encryption_sdk.internal.crypto.encryption, "default_backend")
yield aws_encryption_sdk.internal.crypto.encryption.default_backend
@pytest.fixture
def patch_cipher(mocker):
mocker.patch.object(aws_encryption_sdk.internal.crypto.encryption, "Cipher")
yield aws_encryption_sdk.internal.crypto.encryption.Cipher
@pytest.fixture
def patch_decryptor(mocker):
mocker.patch.object(aws_encryption_sdk.internal.crypto.encryption, "Decryptor")
yield aws_encryption_sdk.internal.crypto.encryption.Decryptor
def test_decryptor_init(patch_default_backend, patch_cipher):
mock_algorithm = MagicMock()
tester = Decryptor(
algorithm=mock_algorithm, key=sentinel.key, associated_data=sentinel.aad, iv=sentinel.iv, tag=sentinel.tag
)
assert tester.source_key is sentinel.key
mock_algorithm.encryption_algorithm.assert_called_once_with(sentinel.key)
mock_algorithm.encryption_mode.assert_called_once_with(sentinel.iv, sentinel.tag)
patch_default_backend.assert_called_once_with()
patch_cipher.assert_called_once_with(
mock_algorithm.encryption_algorithm.return_value,
mock_algorithm.encryption_mode.return_value,
backend=patch_default_backend.return_value,
)
patch_cipher.return_value.decryptor.assert_called_once_with()
assert tester._decryptor is patch_cipher.return_value.decryptor.return_value
tester._decryptor.authenticate_additional_data.assert_called_once_with(sentinel.aad)
def test_decryptor_update(patch_default_backend, patch_cipher):
tester = Decryptor(
algorithm=MagicMock(), key=sentinel.key, associated_data=sentinel.aad, iv=sentinel.iv, tag=sentinel.tag
)
test = tester.update(sentinel.ciphertext)
tester._decryptor.update.assert_called_once_with(sentinel.ciphertext)
assert test is tester._decryptor.update.return_value
def test_decryptor_finalize(patch_default_backend, patch_cipher):
tester = Decryptor(
algorithm=MagicMock(), key=sentinel.key, associated_data=sentinel.aad, iv=sentinel.iv, tag=sentinel.tag
)
test = tester.finalize()
tester._decryptor.finalize.assert_called_once_with()
assert test is tester._decryptor.finalize.return_value
def test_decrypt(patch_decryptor):
patch_decryptor.return_value.update.return_value = b"some data-"
patch_decryptor.return_value.finalize.return_value = b"some more data"
test = decrypt(
algorithm=sentinel.algorithm,
key=sentinel.key,
encrypted_data=MagicMock(iv=sentinel.iv, tag=sentinel.tag, ciphertext=sentinel.ciphertext),
associated_data=sentinel.aad,
)
patch_decryptor.assert_called_once_with(sentinel.algorithm, sentinel.key, sentinel.aad, sentinel.iv, sentinel.tag)
patch_decryptor.return_value.update.assert_called_once_with(sentinel.ciphertext)
patch_decryptor.return_value.finalize.assert_called_once_with()
assert test == b"some data-some more data"
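Taken together, the assertions above pin down the contract of decrypt(): build one Decryptor from the algorithm, key, AAD, IV, and tag, stream the ciphertext through update(), and append finalize(). A sketch of that contract, reconstructed from the tests rather than copied from the SDK:

# Inferred from the test assertions; the real implementation lives in
# aws_encryption_sdk.internal.crypto.encryption and may differ in detail.
def decrypt(algorithm, key, encrypted_data, associated_data):
    decryptor = Decryptor(algorithm, key, associated_data,
                          encrypted_data.iv, encrypted_data.tag)
    return decryptor.update(encrypted_data.ciphertext) + decryptor.finalize()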
| 40.161616
| 118
| 0.788229
|
import pytest
from mock import MagicMock, sentinel
from pytest_mock import mocker
import aws_encryption_sdk.internal.crypto.encryption
from aws_encryption_sdk.internal.crypto.encryption import Decryptor, decrypt
pytestmark = [pytest.mark.unit, pytest.mark.local]
@pytest.fixture
def patch_default_backend(mocker):
mocker.patch.object(aws_encryption_sdk.internal.crypto.encryption, "default_backend")
yield aws_encryption_sdk.internal.crypto.encryption.default_backend
@pytest.fixture
def patch_cipher(mocker):
mocker.patch.object(aws_encryption_sdk.internal.crypto.encryption, "Cipher")
yield aws_encryption_sdk.internal.crypto.encryption.Cipher
@pytest.fixture
def patch_decryptor(mocker):
mocker.patch.object(aws_encryption_sdk.internal.crypto.encryption, "Decryptor")
yield aws_encryption_sdk.internal.crypto.encryption.Decryptor
def test_decryptor_init(patch_default_backend, patch_cipher):
mock_algorithm = MagicMock()
tester = Decryptor(
algorithm=mock_algorithm, key=sentinel.key, associated_data=sentinel.aad, iv=sentinel.iv, tag=sentinel.tag
)
assert tester.source_key is sentinel.key
mock_algorithm.encryption_algorithm.assert_called_once_with(sentinel.key)
mock_algorithm.encryption_mode.assert_called_once_with(sentinel.iv, sentinel.tag)
patch_default_backend.assert_called_once_with()
patch_cipher.assert_called_once_with(
mock_algorithm.encryption_algorithm.return_value,
mock_algorithm.encryption_mode.return_value,
backend=patch_default_backend.return_value,
)
patch_cipher.return_value.decryptor.assert_called_once_with()
assert tester._decryptor is patch_cipher.return_value.decryptor.return_value
tester._decryptor.authenticate_additional_data.assert_called_once_with(sentinel.aad)
def test_decryptor_update(patch_default_backend, patch_cipher):
tester = Decryptor(
algorithm=MagicMock(), key=sentinel.key, associated_data=sentinel.aad, iv=sentinel.iv, tag=sentinel.tag
)
test = tester.update(sentinel.ciphertext)
tester._decryptor.update.assert_called_once_with(sentinel.ciphertext)
assert test is tester._decryptor.update.return_value
def test_decryptor_finalize(patch_default_backend, patch_cipher):
tester = Decryptor(
algorithm=MagicMock(), key=sentinel.key, associated_data=sentinel.aad, iv=sentinel.iv, tag=sentinel.tag
)
test = tester.finalize()
tester._decryptor.finalize.assert_called_once_with()
assert test is tester._decryptor.finalize.return_value
def test_decrypt(patch_decryptor):
patch_decryptor.return_value.update.return_value = b"some data-"
patch_decryptor.return_value.finalize.return_value = b"some more data"
test = decrypt(
algorithm=sentinel.algorithm,
key=sentinel.key,
encrypted_data=MagicMock(iv=sentinel.iv, tag=sentinel.tag, ciphertext=sentinel.ciphertext),
associated_data=sentinel.aad,
)
patch_decryptor.assert_called_once_with(sentinel.algorithm, sentinel.key, sentinel.aad, sentinel.iv, sentinel.tag)
patch_decryptor.return_value.update.assert_called_once_with(sentinel.ciphertext)
patch_decryptor.return_value.finalize.assert_called_once_with()
assert test == b"some data-some more data"
| true
| true
|