id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
11392727 | from ....models.models import Role
from ...generics.create import CreateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from .deduplicate_permissions_mixin import DeduplicatePermissionsMixin
@register_action("role.create")
class RoleCreate(DeduplicatePermissionsMixin, CreateAction):
    """
    Action to create roles.

    Registered under the action name "role.create".  Payload requires
    ``organisation_id`` and ``name``; ``permissions`` is optional and —
    per the mixin's name — is presumably de-duplicated before creation
    (NOTE(review): confirm against DeduplicatePermissionsMixin).
    """
    # Model this create action operates on.
    model = Role()
    # JSON schema validating the create payload.
    schema = DefaultSchema(Role()).get_create_schema(
        required_properties=["organisation_id", "name"],
        optional_properties=["permissions"],
    )
| StarcoderdataPython |
351955 | import configparser
import copy
import re
import sys
import types
from dikort.print import print_error
# Config-file options that must be parsed as integers.
_FILE_CONFIG_INT_OPTIONS = ("min_length", "max_length")
# Config-file options that must be parsed as booleans.
_FILE_CONFIG_BOOL_OPTIONS = (
    "enable_length",
    "enable_capitalized_summary",
    "enable_trailing_period",
    "enable_singleline_summary",
    "enable_signoff",
    "enable_gpg",
    "enable_regex",
    "enable_author_name_regex",
    "enable_author_email_regex",
    "capitalized_summary",
    "trailing_period",
    "singleline_summary",
    "signoff",
    "gpg",
    "enabled",
)
# Exit code for fatal configuration/environment errors.
ERROR_EXIT_CODE = 128
# Exit code when checks ran but at least one failed.
# NOTE(review): not referenced in this module -- presumably used by the
# check runner; confirm against callers.
FAILED_EXIT_CODE = 1
# Immutable default configuration (lowest-precedence layer).
# MappingProxyType guards against accidental mutation; merge() deep-copies
# it before overlaying the config file and the command line.
DEFAULTS = types.MappingProxyType(
    {
        "main": {
            "config": "./.dikort.cfg",
            "repository": "./",
            "range": "HEAD~1..HEAD",
        },
        # Which checks are enabled for regular (non-merge) commits.
        "rules": {
            "enable_length": False,
            "enable_capitalized_summary": False,
            "enable_trailing_period": False,
            "enable_singleline_summary": False,
            "enable_signoff": False,
            "enable_gpg": False,
            "enable_regex": False,
            "enable_author_name_regex": False,
            "enable_author_email_regex": False,
        },
        # Parameters of the checks above.
        "rules.settings": {
            "min_length": 10,
            "max_length": 50,
            "capitalized_summary": True,
            "trailing_period": False,
            "singleline_summary": True,
            "signoff": True,
            "gpg": True,
            "regex": ".*",
            "author_name_regex": ".*",
            "author_email_regex": ".*",
        },
        # Same switches, applied to merge commits.
        "merge_rules": {
            "enable_length": False,
            "enable_capitalized_summary": False,
            "enable_trailing_period": False,
            "enable_singleline_summary": False,
            "enable_signoff": False,
            "enable_gpg": False,
            "enable_regex": False,
            "enable_author_name_regex": False,
            "enable_author_email_regex": False,
        },
        "merge_rules.settings": {
            "min_length": 10,
            "max_length": 50,
            "capitalized_summary": True,
            "trailing_period": False,
            "singleline_summary": True,
            "signoff": True,
            "gpg": True,
            "regex": ".*",
            "author_name_regex": ".*",
            "author_email_regex": ".*",
        },
        "logging": {
            "enabled": False,
            "format": "%(levelname)s - %(asctime)s - %(filename)s:%(lineno)d - %(message)s",
            "datefmt": "%Y-%m-%d %H:%M:%S",
            "level": "INFO",
        },
    }
)
def _from_cmd_args_to_config(cmd_args): # noqa: WPS210
args_dict = vars(cmd_args)
filtered_dict = {param_name: args_dict[param_name] for param_name in args_dict if args_dict[param_name] is not None}
result_config = {}
for option in filtered_dict:
section, param_name = option.split(":")
result_config.setdefault(section, {})[param_name] = filtered_dict[option]
return result_config
def merge(cmd_args):
    """Build the effective configuration from defaults, file and command line.

    Precedence, lowest to highest: ``DEFAULTS``, the config file (path taken
    from ``--config`` or the default location), then command line arguments.
    The result is validated and post-processed (regexes compiled) before
    being returned.
    """
    config = copy.deepcopy(DEFAULTS.copy())
    file_location = vars(cmd_args)["main:config"] or config["main"]["config"]
    _merge_fileconfig(config, file_location)
    for section, options in _from_cmd_args_to_config(cmd_args).items():
        if section in config:
            config[section].update(options)
    _validate(config)
    _post_processing(config)
    return config
def _merge_fileconfig(config, file_config_path):  # noqa: WPS231
    """Overlay values from the INI file at *file_config_path* onto *config*.

    Mutates *config* in place.  Only sections and options that already exist
    in *config* are taken over; unknown file content is silently ignored.
    """
    file_config = configparser.ConfigParser(interpolation=None)
    _read_file_config(file_config, file_config_path)
    file_sections = file_config.sections()
    for section, options in config.items():
        if section not in file_sections:
            continue
        known_options = file_config.options(section)
        for option in options:
            if option in known_options:
                config[section][option] = _parse_value_from_file(file_config, option, section)
def _parse_value_from_file(file_config, option, section):
    """Return one option from *file_config*, coerced to its expected type.

    Integer and boolean options (per the module-level tuples) are converted
    with configparser's getint/getboolean; anything else stays a string.
    Exits with ERROR_EXIT_CODE when the value cannot be parsed.
    """
    section_proxy = file_config[section]
    try:
        if option in _FILE_CONFIG_INT_OPTIONS:
            return section_proxy.getint(option)
        if option in _FILE_CONFIG_BOOL_OPTIONS:
            return section_proxy.getboolean(option)
    except ValueError:
        print_error(f"Cannot parse option {section}:{option}")
        sys.exit(ERROR_EXIT_CODE)
    return section_proxy[option]
def _read_file_config(file_config, file_config_path):
    """Populate *file_config* from the file at *file_config_path*.

    A missing file is only fatal when the user explicitly pointed at it
    (path differs from the built-in default); any other I/O failure always
    exits with ERROR_EXIT_CODE.
    """
    try:
        with open(file_config_path) as config_fp:
            file_config.read_file(config_fp)
    except FileNotFoundError:
        # A missing file at the default location is fine: config is optional.
        if file_config_path == DEFAULTS["main"]["config"]:
            return
        print_error(f"Cannot open file {file_config_path}")
        sys.exit(ERROR_EXIT_CODE)
    except OSError:
        print_error(f"Cannot open file {file_config_path}")
        sys.exit(ERROR_EXIT_CODE)
def _validate(config):
    """Abort with ERROR_EXIT_CODE when min_length exceeds max_length."""
    rules = config["rules.settings"]
    if rules["min_length"] > rules["max_length"]:
        print_error("rules.settings.min_length is greater than rules.settings.max_length")
        sys.exit(ERROR_EXIT_CODE)
    merge_rules = config["merge_rules.settings"]
    if merge_rules["min_length"] > merge_rules["max_length"]:
        print_error("merge_rules.settings.min_length is greater than merge_rules.settings.max_length")
        sys.exit(ERROR_EXIT_CODE)
def _post_processing(config):
config["rules.settings"]["regex"] = re.compile(config["rules.settings"]["regex"])
config["rules.settings"]["author_name_regex"] = re.compile(config["rules.settings"]["author_name_regex"])
config["rules.settings"]["author_email_regex"] = re.compile(config["rules.settings"]["author_email_regex"])
config["merge_rules.settings"]["regex"] = re.compile(config["merge_rules.settings"]["regex"])
config["merge_rules.settings"]["author_name_regex"] = re.compile(
config["merge_rules.settings"]["author_name_regex"]
)
config["merge_rules.settings"]["author_email_regex"] = re.compile(
config["merge_rules.settings"]["author_email_regex"]
)
def configure_argparser(cmd_args_parser):  # noqa: WPS213
    """Register all dikort command line options on *cmd_args_parser*.

    Every option's ``dest`` has the form ``"<section>:<option>"`` so the
    parsed namespace folds back into the nested configuration (see
    ``_from_cmd_args_to_config``).  Boolean flags use ``default=None`` (not
    ``False``) so "not given" is distinguishable from an explicit
    ``--no-...`` and does not override file/default values.
    """
    cmd_args_parser.add_argument(
        "-c",
        "--config",
        dest="main:config",
        metavar="PATH",
        # BUG FIX: help string was missing its closing parenthesis.
        help=f"Config file location (default: {DEFAULTS['main']['config']})",
    )
    cmd_args_parser.add_argument(
        "-r",
        "--repository",
        dest="main:repository",
        metavar="PATH",
        help=f"Repository location (default: {DEFAULTS['main']['repository']})",
    )
    cmd_args_parser.add_argument(
        "--enable-logging",
        dest="logging:enabled",
        help=f"Enable logs output to stderr (default: {DEFAULTS['logging']['enabled']})",
        default=None,
        action="store_true",
    )
    cmd_args_parser.add_argument(
        "--logging-format",
        dest="logging:format",
        metavar="STR",
        help="Format string for logging (Python style)",
    )
    cmd_args_parser.add_argument(
        "--logging-datefmt",
        dest="logging:datefmt",
        metavar="STR",
        help="Format string for logging datetime (Python style)",
    )
    cmd_args_parser.add_argument(
        "--logging-level",
        dest="logging:level",
        metavar="STR",
        help="Logging level (Python style)",
    )
    cmd_args_parser.add_argument(
        "--min-length",
        metavar="INT",
        dest="rules.settings:min_length",
        type=int,
        help=f"Minimum commit length (default: {DEFAULTS['rules.settings']['min_length']})",
    )
    cmd_args_parser.add_argument(
        "--max-length",
        dest="rules.settings:max_length",
        # BUG FIX: metavar was "STR" although the option is type=int
        # (--min-length and both merge counterparts use "INT").
        metavar="INT",
        type=int,
        help=f"Maximum commit length (default: {DEFAULTS['rules.settings']['max_length']})",
    )
    cmd_args_parser.add_argument(
        "--regex",
        metavar="STR",
        help="Regex to check commit message summary",
        dest="rules.settings:regex",
    )
    cmd_args_parser.add_argument(
        "--author-name-regex",
        metavar="STR",
        help="Regex to check author name",
        dest="rules.settings:author_name_regex",
    )
    cmd_args_parser.add_argument(
        "--author-email-regex",
        metavar="STR",
        help="Regex to check author email",
        dest="rules.settings:author_email_regex",
    )
    cmd_args_parser.add_argument(
        "--capitalized-summary",
        default=None,
        dest="rules.settings:capitalized_summary",
        action="store_true",
        help=f"Capitalized summary (default: {DEFAULTS['rules.settings']['capitalized_summary']})",
    )
    cmd_args_parser.add_argument(
        "--no-capitalized-summary",
        dest="rules.settings:capitalized_summary",
        action="store_false",
        help="Not capitalized summary",
    )
    cmd_args_parser.add_argument(
        "--trailing-period",
        default=None,
        dest="rules.settings:trailing_period",
        action="store_true",
        help="Presence of trailing period",
    )
    cmd_args_parser.add_argument(
        "--no-trailing-period",
        dest="rules.settings:trailing_period",
        action="store_false",
        help=f"No trailing period (default: {DEFAULTS['rules.settings']['trailing_period']})",
    )
    cmd_args_parser.add_argument(
        "--singleline-summary",
        default=None,
        dest="rules.settings:singleline_summary",
        action="store_true",
        help=f"Singleline summary (default: {DEFAULTS['rules.settings']['singleline_summary']})",
    )
    cmd_args_parser.add_argument(
        "--no-singleline-summary",
        dest="rules.settings:singleline_summary",
        action="store_false",
        help="Multiline summary",
    )
    cmd_args_parser.add_argument(
        "--signoff",
        default=None,
        dest="rules.settings:signoff",
        action="store_true",
        help=f"Presence of signoff (default: {DEFAULTS['rules.settings']['signoff']})",
    )
    cmd_args_parser.add_argument(
        "--no-signoff",
        dest="rules.settings:signoff",
        action="store_false",
        help="No signoff",
    )
    cmd_args_parser.add_argument(
        "--gpg",
        default=None,
        dest="rules.settings:gpg",
        action="store_true",
        help=f"Presence of GPG sign (default: {DEFAULTS['rules.settings']['gpg']})",
    )
    cmd_args_parser.add_argument(
        "--no-gpg",
        dest="rules.settings:gpg",
        action="store_false",
        help="No GPG sign",
    )
    cmd_args_parser.add_argument(
        "--enable-length-check",
        action="store_true",
        dest="rules:enable_length",
        default=None,
        help=f"Enable length check (default: {DEFAULTS['rules']['enable_length']})",
    )
    cmd_args_parser.add_argument(
        "--enable-capitalized-summary-check",
        action="store_true",
        dest="rules:enable_capitalized_summary",
        default=None,
        help=f"Enable capitalized summary check (default: {DEFAULTS['rules']['enable_capitalized_summary']})",
    )
    cmd_args_parser.add_argument(
        "--enable-trailing-period-check",
        action="store_true",
        dest="rules:enable_trailing_period",
        default=None,
        help=f"Enable trailing period check (default: {DEFAULTS['rules']['enable_trailing_period']})",
    )
    cmd_args_parser.add_argument(
        "--enable-singleline-summary-check",
        action="store_true",
        dest="rules:enable_singleline_summary",
        default=None,
        help=f"Enable single line summary check (default: {DEFAULTS['rules']['enable_singleline_summary']})",
    )
    cmd_args_parser.add_argument(
        "--enable-signoff-check",
        action="store_true",
        dest="rules:enable_signoff",
        default=None,
        help=f"Enable checking for signoff (default: {DEFAULTS['rules']['enable_signoff']})",
    )
    cmd_args_parser.add_argument(
        "--enable-gpg-check",
        action="store_true",
        dest="rules:enable_gpg",
        default=None,
        help=f"Enable checking for GPG sign (default: {DEFAULTS['rules']['enable_gpg']})",
    )
    cmd_args_parser.add_argument(
        "--enable-regex-check",
        action="store_true",
        dest="rules:enable_regex",
        default=None,
        help=f"Enable check by regex (default: {DEFAULTS['rules']['enable_regex']})",
    )
    cmd_args_parser.add_argument(
        "--enable-author-name-regex-check",
        action="store_true",
        dest="rules:enable_author_name_regex",
        default=None,
        help=f"Enable author name check by regex (default: {DEFAULTS['rules']['enable_author_name_regex']})",
    )
    cmd_args_parser.add_argument(
        "--enable-author-email-regex-check",
        action="store_true",
        dest="rules:enable_author_email_regex",
        default=None,
        help=f"Enable author email check by regex (default: {DEFAULTS['rules']['enable_author_email_regex']})",
    )
    cmd_args_parser.add_argument(
        "--merge-min-length",
        type=int,
        metavar="INT",
        dest="merge_rules.settings:min_length",
        help=f"Minimum commit length (default: {DEFAULTS['merge_rules.settings']['min_length']})",
    )
    cmd_args_parser.add_argument(
        "--merge-max-length",
        type=int,
        metavar="INT",
        dest="merge_rules.settings:max_length",
        help=f"Maximum commit length (default: {DEFAULTS['merge_rules.settings']['max_length']})",
    )
    cmd_args_parser.add_argument(
        "--merge-regex",
        metavar="STR",
        help="Regex to check commit message summary",
        dest="merge_rules.settings:regex",
    )
    cmd_args_parser.add_argument(
        "--merge-author-name-regex",
        metavar="STR",
        help="Regex to check author name",
        dest="merge_rules.settings:author_name_regex",
    )
    cmd_args_parser.add_argument(
        "--merge-author-email-regex",
        metavar="STR",
        help="Regex to check author email",
        dest="merge_rules.settings:author_email_regex",
    )
    cmd_args_parser.add_argument(
        "--merge-capitalized-summary",
        default=None,
        action="store_true",
        dest="merge_rules.settings:capitalized_summary",
        help=f"Capitalized summary (default: {DEFAULTS['merge_rules.settings']['capitalized_summary']})",
    )
    cmd_args_parser.add_argument(
        "--no-merge-capitalized-summary",
        dest="merge_rules.settings:capitalized_summary",
        action="store_false",
        help="Not capitalized summary",
    )
    cmd_args_parser.add_argument(
        "--merge-trailing-period",
        default=None,
        dest="merge_rules.settings:trailing_period",
        action="store_true",
        help="Presence of trailing period",
    )
    cmd_args_parser.add_argument(
        "--no-merge-trailing-period",
        dest="merge_rules.settings:trailing_period",
        action="store_false",
        help=f"No trailing period (default: {DEFAULTS['merge_rules.settings']['trailing_period']})",
    )
    cmd_args_parser.add_argument(
        "--merge-singleline-summary",
        default=None,
        action="store_true",
        dest="merge_rules.settings:singleline_summary",
        help=f"Singleline summary (default: {DEFAULTS['merge_rules.settings']['singleline_summary']})",
    )
    cmd_args_parser.add_argument(
        "--no-merge-singleline-summary",
        dest="merge_rules.settings:singleline_summary",
        action="store_false",
        help="Multiline summary",
    )
    cmd_args_parser.add_argument(
        "--merge-signoff",
        default=None,
        dest="merge_rules.settings:signoff",
        action="store_true",
        help=f"Presence of signoff (default: {DEFAULTS['merge_rules.settings']['signoff']})",
    )
    cmd_args_parser.add_argument(
        "--no-merge-signoff",
        dest="merge_rules.settings:signoff",
        action="store_false",
        help="No signoff",
    )
    cmd_args_parser.add_argument(
        "--merge-gpg",
        default=None,
        action="store_true",
        dest="merge_rules.settings:gpg",
        help=f"Presence of GPG sign (default: {DEFAULTS['merge_rules.settings']['gpg']})",
    )
    cmd_args_parser.add_argument(
        "--no-merge-gpg",
        dest="merge_rules.settings:gpg",
        action="store_false",
        help="No GPG sign",
    )
    cmd_args_parser.add_argument(
        "--enable-merge-length-check",
        action="store_true",
        default=None,
        dest="merge_rules:enable_length",
        help=f"Enable length check (default: {DEFAULTS['merge_rules']['enable_length']})",
    )
    cmd_args_parser.add_argument(
        "--enable-merge-capitalized-summary-check",
        action="store_true",
        default=None,
        dest="merge_rules:enable_capitalized_summary",
        help=f"Enable capitalized summary check (default: {DEFAULTS['merge_rules']['enable_capitalized_summary']})",
    )
    cmd_args_parser.add_argument(
        "--enable-merge-trailing-period-check",
        action="store_true",
        default=None,
        dest="merge_rules:enable_trailing_period",
        help=f"Enable trailing period check (default: {DEFAULTS['merge_rules']['enable_trailing_period']})",
    )
    cmd_args_parser.add_argument(
        "--enable-merge-singleline-summary-check",
        action="store_true",
        default=None,
        dest="merge_rules:enable_singleline_summary",
        help=f"Enable single line summary check (default: {DEFAULTS['merge_rules']['enable_singleline_summary']})",
    )
    cmd_args_parser.add_argument(
        "--enable-merge-signoff-check",
        action="store_true",
        default=None,
        dest="merge_rules:enable_signoff",
        help=f"Enable checking for signoff (default: {DEFAULTS['merge_rules']['enable_signoff']})",
    )
    cmd_args_parser.add_argument(
        "--enable-merge-gpg-check",
        action="store_true",
        default=None,
        dest="merge_rules:enable_gpg",
        help=f"Enable checking for GPG sign (default: {DEFAULTS['merge_rules']['enable_gpg']})",
    )
    cmd_args_parser.add_argument(
        "--enable-merge-regex-check",
        action="store_true",
        default=None,
        dest="merge_rules:enable_regex",
        help=f"Enable check by regex (default: {DEFAULTS['merge_rules']['enable_regex']})",
    )
    cmd_args_parser.add_argument(
        "--enable-merge-author-name-regex-check",
        action="store_true",
        default=None,
        dest="merge_rules:enable_author_name_regex",
        help=f"Enable author name check by regex (default: {DEFAULTS['merge_rules']['enable_author_name_regex']})",
    )
    cmd_args_parser.add_argument(
        "--enable-merge-author-email-regex-check",
        action="store_true",
        default=None,
        dest="merge_rules:enable_author_email_regex",
        help=f"Enable author email check by regex (default: {DEFAULTS['merge_rules']['enable_author_email_regex']})",
    )
    # Positional, optional: the commit range to check.
    cmd_args_parser.add_argument(
        "main:range",
        nargs="?",
        metavar="range",
        help=f"Commit range (default: {DEFAULTS['main']['range']})",
    )
| StarcoderdataPython |
5126375 | <filename>fondInformatica/python/Esercitazione2_max3n (lupia).py
# Read three integers from the user and print the largest one.
x = int(input("Inserisci il primo numero:> "))
y = int(input("Inserisci il secondo numero:> "))
z = int(input("Inserisci il terzo numero:> "))
# Builtin max() replaces the manual if/elif comparison chain; same result
# for all inputs, including ties.
massimo = max(x, y, z)
print('Il massimo è', massimo)
| StarcoderdataPython |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.secretmanager_v1beta1.proto import (
resources_pb2 as google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_resources__pb2,
)
from google.cloud.secretmanager_v1beta1.proto import (
service_pb2 as google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2,
)
from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2
from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
# NOTE(review): gRPC-generated client stub (see "DO NOT EDIT" header).
# Regenerate from the .proto files instead of hand-editing.
class SecretManagerServiceStub(object):
    """`projects/*/secrets/*/versions/latest` is an alias to the `latest`
    [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].

    Secret Manager Service

    Manages secrets and operations using those secrets. Implements a REST
    model with the following objects:

    * [Secret][google.cloud.secrets.v1beta1.Secret]
    * [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion]
    """

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        # One unary-unary callable per RPC; request serializers and response
        # deserializers come from the generated proto modules.
        self.ListSecrets = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/ListSecrets",
            request_serializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.ListSecretsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.ListSecretsResponse.FromString,
        )
        self.CreateSecret = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/CreateSecret",
            request_serializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.CreateSecretRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_resources__pb2.Secret.FromString,
        )
        self.AddSecretVersion = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/AddSecretVersion",
            request_serializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.AddSecretVersionRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_resources__pb2.SecretVersion.FromString,
        )
        self.GetSecret = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/GetSecret",
            request_serializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.GetSecretRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_resources__pb2.Secret.FromString,
        )
        self.UpdateSecret = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/UpdateSecret",
            request_serializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.UpdateSecretRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_resources__pb2.Secret.FromString,
        )
        self.DeleteSecret = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/DeleteSecret",
            request_serializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.DeleteSecretRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.ListSecretVersions = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/ListSecretVersions",
            request_serializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.ListSecretVersionsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.ListSecretVersionsResponse.FromString,
        )
        self.GetSecretVersion = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/GetSecretVersion",
            request_serializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.GetSecretVersionRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_resources__pb2.SecretVersion.FromString,
        )
        self.AccessSecretVersion = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/AccessSecretVersion",
            request_serializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.AccessSecretVersionRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.AccessSecretVersionResponse.FromString,
        )
        self.DisableSecretVersion = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/DisableSecretVersion",
            request_serializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.DisableSecretVersionRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_resources__pb2.SecretVersion.FromString,
        )
        self.EnableSecretVersion = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/EnableSecretVersion",
            request_serializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.EnableSecretVersionRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_resources__pb2.SecretVersion.FromString,
        )
        self.DestroySecretVersion = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/DestroySecretVersion",
            request_serializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2.DestroySecretVersionRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_resources__pb2.SecretVersion.FromString,
        )
        # IAM RPCs use the shared google.iam.v1 request/response types.
        self.SetIamPolicy = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/SetIamPolicy",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
        )
        self.GetIamPolicy = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/GetIamPolicy",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
        )
        self.TestIamPermissions = channel.unary_unary(
            "/google.cloud.secrets.v1beta1.SecretManagerService/TestIamPermissions",
            request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
            response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
        )
# NOTE(review): gRPC-generated service base class (see "DO NOT EDIT"
# header).  Every handler defaults to UNIMPLEMENTED; a real service
# subclasses this and overrides the methods it supports.
class SecretManagerServiceServicer(object):
    """`projects/*/secrets/*/versions/latest` is an alias to the `latest`
    [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].

    Secret Manager Service

    Manages secrets and operations using those secrets. Implements a REST
    model with the following objects:

    * [Secret][google.cloud.secrets.v1beta1.Secret]
    * [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion]
    """

    def ListSecrets(self, request, context):
        """Lists [Secrets][google.cloud.secrets.v1beta1.Secret].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def CreateSecret(self, request, context):
        """Creates a new [Secret][google.cloud.secrets.v1beta1.Secret] containing no [SecretVersions][google.cloud.secrets.v1beta1.SecretVersion].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def AddSecretVersion(self, request, context):
        """Creates a new [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion] containing secret data and attaches
        it to an existing [Secret][google.cloud.secrets.v1beta1.Secret].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetSecret(self, request, context):
        """Gets metadata for a given [Secret][google.cloud.secrets.v1beta1.Secret].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def UpdateSecret(self, request, context):
        """Updates metadata of an existing [Secret][google.cloud.secrets.v1beta1.Secret].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def DeleteSecret(self, request, context):
        """Deletes a [Secret][google.cloud.secrets.v1beta1.Secret].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ListSecretVersions(self, request, context):
        """Lists [SecretVersions][google.cloud.secrets.v1beta1.SecretVersion]. This call does not return secret
        data.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetSecretVersion(self, request, context):
        """Gets metadata for a [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].

        `projects/*/secrets/*/versions/latest` is an alias to the `latest`
        [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def AccessSecretVersion(self, request, context):
        """Accesses a [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion]. This call returns the secret data.

        `projects/*/secrets/*/versions/latest` is an alias to the `latest`
        [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def DisableSecretVersion(self, request, context):
        """Disables a [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].

        Sets the [state][google.cloud.secrets.v1beta1.SecretVersion.state] of the [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion] to
        [DISABLED][google.cloud.secrets.v1beta1.SecretVersion.State.DISABLED].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def EnableSecretVersion(self, request, context):
        """Enables a [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].

        Sets the [state][google.cloud.secrets.v1beta1.SecretVersion.state] of the [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion] to
        [ENABLED][google.cloud.secrets.v1beta1.SecretVersion.State.ENABLED].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def DestroySecretVersion(self, request, context):
        """Destroys a [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].

        Sets the [state][google.cloud.secrets.v1beta1.SecretVersion.state] of the [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion] to
        [DESTROYED][google.cloud.secrets.v1beta1.SecretVersion.State.DESTROYED] and irrevocably destroys the
        secret data.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def SetIamPolicy(self, request, context):
        """Sets the access control policy on the specified secret. Replaces any
        existing policy.

        Permissions on [SecretVersions][google.cloud.secrets.v1beta1.SecretVersion] are enforced according
        to the policy set on the associated [Secret][google.cloud.secrets.v1beta1.Secret].
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetIamPolicy(self, request, context):
        """Gets the access control policy for a secret.
        Returns empty policy if the secret exists and does not have a policy set.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def TestIamPermissions(self, request, context):
        """Returns permissions that a caller has for the specified secret.
        If the secret does not exist, this call returns an empty set of
        permissions, not a NOT_FOUND error.

        Note: This operation is designed to be used for building permission-aware
        UIs and command-line tools, not for authorization checking. This operation
        may "fail open" without warning.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
def add_SecretManagerServiceServicer_to_server(servicer, server):
    """Register *servicer*'s SecretManagerService handlers on *server*.

    Builds one unary-unary handler per RPC method and installs them under
    the fully-qualified service name.
    """
    # Short aliases for the generated protobuf modules.
    _svc = google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_service__pb2
    _res = google_dot_cloud_dot_secrets__v1beta1_dot_proto_dot_resources__pb2
    _iam = google_dot_iam_dot_v1_dot_iam__policy__pb2
    _pol = google_dot_iam_dot_v1_dot_policy__pb2
    # (method name, request message, response message) for every RPC.
    _method_specs = [
        ("ListSecrets", _svc.ListSecretsRequest, _svc.ListSecretsResponse),
        ("CreateSecret", _svc.CreateSecretRequest, _res.Secret),
        ("AddSecretVersion", _svc.AddSecretVersionRequest, _res.SecretVersion),
        ("GetSecret", _svc.GetSecretRequest, _res.Secret),
        ("UpdateSecret", _svc.UpdateSecretRequest, _res.Secret),
        ("DeleteSecret", _svc.DeleteSecretRequest, google_dot_protobuf_dot_empty__pb2.Empty),
        ("ListSecretVersions", _svc.ListSecretVersionsRequest, _svc.ListSecretVersionsResponse),
        ("GetSecretVersion", _svc.GetSecretVersionRequest, _res.SecretVersion),
        ("AccessSecretVersion", _svc.AccessSecretVersionRequest, _svc.AccessSecretVersionResponse),
        ("DisableSecretVersion", _svc.DisableSecretVersionRequest, _res.SecretVersion),
        ("EnableSecretVersion", _svc.EnableSecretVersionRequest, _res.SecretVersion),
        ("DestroySecretVersion", _svc.DestroySecretVersionRequest, _res.SecretVersion),
        ("SetIamPolicy", _iam.SetIamPolicyRequest, _pol.Policy),
        ("GetIamPolicy", _iam.GetIamPolicyRequest, _pol.Policy),
        ("TestIamPermissions", _iam.TestIamPermissionsRequest, _iam.TestIamPermissionsResponse),
    ]
    rpc_method_handlers = {
        name: grpc.unary_unary_rpc_method_handler(
            getattr(servicer, name),
            request_deserializer=request_cls.FromString,
            response_serializer=response_cls.SerializeToString,
        )
        for name, request_cls, response_cls in _method_specs
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.cloud.secrets.v1beta1.SecretManagerService", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
| StarcoderdataPython |
6605552 | <gh_stars>0
from .excerpt_search import audio_features
from .utils import farthest_points
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from scipy.spatial.distance import pdist, squareform
from scipy.spatial import distance
import essentia.standard as esst
import numpy as np
import os
import shutil
import subprocess
# Sample rate (Hz) used when loading every audio file.
SR = 44100
# Number of mutually-farthest VST renderings selected in main().
VST = 2
# Distance metric name, resolved via getattr(scipy.spatial.distance, DIST).
DIST = 'euclidean'
# File extensions recognised as audio when scanning directories.
AUDIO_EXTS = ('.wav', '.mp3', '.flac', '.ogg', '.aif')
def load_audio_files_in_dir(path):
    """Recursively load audio files below *path*.

    Returns ``(vsts, original, paths)`` where:
      * ``vsts``     -- one list of audio arrays per directory that contained
                        at least one non-target audio file;
      * ``original`` -- flat list of audio arrays whose file name contains
                        'target' (reference renderings);
      * ``paths``    -- directory path corresponding to each entry of ``vsts``.
    """
    vsts = []
    original = []
    paths = []
    for root, dirs, files in os.walk(path):
        FOUND_VST = False
        vst = []
        for file in files:
            # Skip non-audio files and anything with 'scales' in its name.
            if file.endswith(AUDIO_EXTS) and 'scales' not in file:
                if 'target' in file:
                    audios = original  # reference rendering
                else:
                    FOUND_VST = True
                    audios = vst  # VST rendering belonging to this directory
                audios.append(
                    esst.EasyLoader(filename=os.path.join(root, file),
                                    sampleRate=SR)())
        if FOUND_VST:
            vsts.append(vst)
            paths.append(root)
    return vsts, original, paths
def audio_features_from_set(set):
    """
    Computes features averaged over each item in the set
    """
    # NOTE(review): the parameter name shadows the builtin ``set``; kept as-is
    # to preserve the public (keyword-call) interface.
    audio = []
    for i in set:
        audio.append(audio_features(i))
    # Element-wise mean across all items -> a single feature vector.
    return np.mean(audio, axis=0)
def move_files(path, q='', n='', type='', dir='./excerpts', filter=''):
    """Copy audio files from *path* into *dir*, renamed to ``<q>_<n>_<type>``.

    File names are assumed to look like ``<q>_<n>_<type>.<ext>``; any of the
    q/n/type arguments left empty is filled in from the file's own name.
    A non-empty *filter* keeps only files whose type field matches it.
    Files containing 'scales' are always skipped.
    """
    if not os.path.exists(dir):
        os.makedirs(dir)
    params = (q, n, type)
    for file in os.listdir(path):
        file = os.path.join(path, file)
        if os.path.isfile(file) and 'scales' not in file:
            root, base = os.path.split(file)
            name, ext = os.path.splitext(base)
            # assumes exactly three '_'-separated fields -- TODO confirm
            q_, n_, type_ = name.split('_')
            if type_ != filter and filter:
                continue
            if not q:
                q = q_
            if not n:
                n = n_
            if not type:
                type = type_
            shutil.copy(file, os.path.join(dir, f"{q}_{n}_{type}{ext}"))
        # Restore the caller-supplied values so the next file re-derives any
        # empty field from its own name. NOTE(review): placement reconstructed;
        # original indentation was not preserved in this copy -- confirm.
        q, n, type = params
def main(path):
    """Select reference/VST excerpt sets under *path* and copy them out.

    q0 = the VST farthest from the target renderings, q2 = the medoid VST,
    q1 = the two mutually farthest VSTs after PCA; the matching audio files
    are then copied into the excerpts directory via move_files().
    """
    # load all files
    vsts, original, paths = load_audio_files_in_dir(path)
    # substituting audio arrays with their features
    for i in range(len(vsts)):
        vsts[i] = audio_features_from_set(vsts[i])
    original = audio_features_from_set(original)
    # looking for the vst farthest from original
    dist = getattr(distance, DIST)
    max_d = -1
    for i, vst in enumerate(vsts):
        d = dist(vst, original)
        if d > max_d:
            max_d = d
            chosen = i
    # taking path of the correct VST
    q0 = paths[chosen]
    # removing this vst from the set
    del paths[chosen]
    del vsts[chosen]
    # looking for the medoid (minimum summed distance to all others)
    distmat = squareform(pdist(vsts))
    medoid = np.argmin(np.sum(distmat, axis=1))
    q2 = paths[medoid]
    # removing this vst from the set
    del paths[medoid]
    del vsts[medoid]
    # looking for farthest VSTs (standardise, then reduce to 10 PCs)
    vsts = np.array(vsts)
    vsts = StandardScaler().fit_transform(vsts)
    pca = PCA(n_components=10)
    vsts = pca.fit_transform(vsts)
    print("Explained variance: ", pca.explained_variance_ratio_,
          pca.explained_variance_ratio_.sum())
    points = farthest_points(vsts, VST, 1)
    # taking paths of the correct VSTs
    q1 = [paths[i] for i in points[:, 0]]
    # moving audio files to the excerpts dir
    # move all the files in q0, q1[0] and q2 to name q0_[], q1_[] etc
    print(f"path q0: {q0}")
    print(f"path q1: {q1}")
    print(f"path q2: {q2}")
    move_files(q0, q='q0')
    move_files(q1[0], q='q1')
    move_files(q2, q='q2')
    # the target is the one which ends with 'orig'
    # for q0 we use had produced excerpts with the correct name
    move_files(path, q='q0', filter='target')
    move_files(q1[1], q='q1', type='target', filter='orig')
    move_files(q2, q='q2', type='target', filter='orig')
def post_process(in_dir, out_dir, options=None):
    """Recursively apply ``sox`` with *options* to audio files under *in_dir*.

    For each directory containing matching files, processed copies are written
    to a directory under *out_dir* named ``<relative-path>-<options joined
    with '_'>``. Directories with 'reverb' in their path (outputs of a
    previous run) and files containing 'target' or 'scales' are skipped.

    Raises:
        RuntimeError: when the ``sox`` executable is not installed.
    """
    # Avoid the mutable-default-argument pitfall; behaviour is unchanged.
    if options is None:
        options = []
    # check that sox is installed:
    if shutil.which('sox') is None:
        raise RuntimeError(
            "Sox is needed, install it or run with '--no-postprocess' option")
    for root, dirs, files in os.walk(in_dir):
        if 'reverb' in root:
            # skipping directories created by this script
            continue
        # Directory name in which new files are stored.
        # BUG FIX: the original sliced with ``len(dir)`` -- ``dir`` is the
        # builtin (or a global leaking from __main__); the walk is rooted at
        # ``in_dir``, so that is the prefix to strip.
        r = root[len(in_dir) + 1:]
        if len(r) > 0:
            r += '-'
        new_dir = r + '_'.join(options)
        new_out_dir = os.path.join(out_dir, new_dir)
        for file in files:
            if file.endswith(
                    AUDIO_EXTS
            ) and 'target' not in file and 'scales' not in file:
                if not os.path.exists(new_out_dir):
                    os.makedirs(new_out_dir)
                # Run sox: input file -> mirrored output path, plus options.
                proc = subprocess.Popen(
                    ['sox', os.path.join(root, file)] +
                    [os.path.join(new_out_dir, file)] + options)
                proc.wait()
if __name__ == "__main__":
import sys
if len(sys.argv) > 1 and sys.argv[1] != '--no-postprocess':
dir = sys.argv[1]
else:
dir = './audio'
if '--no-postprocess' not in sys.argv:
post_process(dir, dir, ['norm', '-20', 'reverb', '50', 'norm'])
post_process(dir, dir, ['norm', '-20', 'reverb', '100', 'norm'])
main(dir)
| StarcoderdataPython |
6439620 | <filename>template/simple/setup.py
"""Packaging script for [project_name]."""
# FIX: distutils is deprecated (PEP 632) and removed in Python 3.12, and the
# ``long_description_content_type`` keyword below is only honoured by
# setuptools -- distutils silently ignored it. Fall back for old interpreters.
try:
    from setuptools import setup
except ImportError:  # very old environments without setuptools
    from distutils.core import setup

setup(
    name="[project_name]",
    version=(open("VERSION").read()).rstrip(),
    author="author",
    author_email="author_email",
    license="MIT",
    url="https://github.com/[ProjectSpace]/[project_name]",
    description="Project description",
    long_description_content_type="text/markdown",
    long_description=open("README.md").read(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| StarcoderdataPython |
1847332 | <filename>computeFV.py
import os, sys, collections
import numpy as np
from yael import ynumpy
import IDT_feature
from tempfile import TemporaryFile
"""
Encodes a fisher vector.
"""
def create_fisher_vector(gmm_list, video_desc, fisher_path):
"""
expects a single video_descriptors object. videos_desciptors objects are defined in IDT_feature.py
fisher path is the full path to the fisher vector that is created.
this single video_desc contains the (trajs, hogs, hofs, mbhxs, mbhys) np.ndarrays
"""
vid_desc_list = []
vid_desc_list.append(video_desc.traj)
vid_desc_list.append(video_desc.hog)
vid_desc_list.append(video_desc.hof)
vid_desc_list.append(video_desc.mbhx)
vid_desc_list.append(video_desc.mbhy)
#For each video create and normalize a fisher vector for each of the descriptors. Then, concatenate the
#fisher vectors together to get an extra long fisher vector.
# Return a list of all of these long fisher vectors. The list should be the same length as the number
# of input videos.
fvs = []
for descriptor,gmm_mean_pca in zip(vid_desc_list,gmm_list):
gmm, mean, pca_transform = gmm_mean_pca
# apply the PCA to the vid_trajectory descriptor
#each image_desc is of size (X,TRAJ_DIM). Pca_tranform is of size (TRAJ_DIM,TRAJ_DIM/2)
descrip = descriptor.astype('float32') - mean
print type(gmm),type(mean),type(pca_transform)
print len(gmm), len(mean), len(pca_transform)
if pca_transform.all != None:
descrip = np.dot(descriptor.astype('float32') - mean, pca_transform)
# compute the Fisher vector, using the derivative w.r.t mu and sigma
fv = ynumpy.fisher(gmm, descrip, include = ['mu', 'sigma'])
# normalizations are done on each descriptors individually
# power-normalization
fv = np.sign(fv) * (np.abs(fv) ** 0.5)
# L2 normalize
#sum along the rows.
norms = np.sqrt(np.sum(fv ** 2))
# -1 allows reshape to infer the length. So it just solidifies the dimensions to (274,1)
fv /= norms
# handle images with 0 local descriptor (100 = far away from "normal" images)
fv[np.isnan(fv)] = 100
print "Performing fvs"
fvs.append(fv.T)
output_fv = np.hstack(fvs)
#L2 normalize the entire fv.
norm = np.sqrt(np.sum(output_fv ** 2))
output_fv /= norm
#example name:
# 'v_Archery_g01_c01.fisher.npz'
#subdirectory name
np.savez(fisher_path, fish=output_fv)
print fisher_path
return output_fv | StarcoderdataPython |
241216 |
'''
'''
import json
# Json format Running Info list string
Running_Info = '''
{
"Setting_Lists" :
{
"Vpv-Start":
{
"Unit": "V",
"UnitVal" : 0.1,
"Description" : "PV1 start-up voltage",
"Length" : 2,
"Value" : 0
},
"T_Start":
{
"Unit": "Sec",
"UnitVal" : 1,
"Description" : "Time to connect grid",
"Length" : 2,
"Value" : 0
},
"Vac_Min":
{
"Unit": "V",
"UnitVal" : 0.1,
"Description" : "Minimum operational grid voltage",
"Length" : 2,
"Value" : 0
},
"Vac_Max":
{
"Unit": "V",
"UnitVal" : 0.1,
"Description" : "Maximum operational grid voltage",
"Length" : 2,
"Value" : 0
},
"Fac_Min":
{
"Unit": "V",
"UnitVal" : 0.01,
"Description" : "Minimum operational grid Frequency",
"Length" : 2,
"Value" : 0
},
"Fac-Max":
{
"Unit": "V",
"UnitVal" : 0.01,
"Description" : "Maximum operational grid Frequency",
"Length" : 2,
"Value" : 0
}
},
"Storage_Info" :
[
{
"Data_Index": 0,
"Measuring_chn": "Vpv1",
"Unit": "V",
"UnitVal" : 0.1,
"Description" : "PV1 voltage",
"Length" : 2,
"Value" : 0
},
{
"Data_Index": 1,
"Measuring_chn": "Ipv1",
"Unit": "A",
"UnitVal" : 0.1,
"Description" : "PV1 current",
"Length" : 2,
"Value" : 0
}
],
"Reading_Lists" :
[
{
"Data_Index": 0,
"Measuring_chn": "Vpv1",
"Unit": "V",
"UnitVal" : 0.1,
"Description" : "PV1 voltage",
"Length" : 2,
"Value" : 0
},
{
"Data_Index": 1,
"Measuring_chn": "Vpv2",
"Unit": "V",
"UnitVal" : 0.1,
"Description" : "PV2 voltage",
"Length" : 2,
"Value" : 0
},
{
"Data_Index": 2,
"Measuring_chn": "Ipv1",
"Unit": "A",
"UnitVal" : 0.1,
"Description" : "PV1 current",
"Length" : 2,
"Value" : 0
},
{
"Data_Index": 3,
"Measuring_chn": "Ipv2",
"Unit": "A",
"UnitVal" : 0.1,
"Description" : "PV2 current",
"Length" : 2,
"Value" : 0
},
{
"Data_Index": 4,
"Measuring_chn": "Vac1",
"Unit": "V",
"UnitVal" : 0.1,
"Description" : "Phase L1 Voltage",
"Length" : 2,
"Value" : 0
},
{
"Data_Index": 5,
"Measuring_chn": "Vac2",
"Unit": "V",
"UnitVal" : 0.1,
"Description" : "Phase L2 Voltage",
"Length" : 2,
"Value" : 0
},
{
"Data_Index": 6,
"Measuring_chn": "Vac3",
"Unit": "V",
"UnitVal" : 0.1,
"Description" : "Phase L3 Voltage",
"Length" : 2,
"Value" : 0
},
{
"Data_Index": 7,
"Measuring_chn": "Iac1",
"Unit": "A",
"UnitVal" : 0.1,
"Description" : "Phase L1 Current",
"Length" : 2,
"Value" : 0
}
]
}
'''
'''
********** Invert Class*****
It consists various spec info variables and assignments by communication flow.
ex: Addr: Alloaction communication address
It could be added as our requirements.
'''
class INVERTER():
    """Inverter spec/state container.

    Holds device identification, protection settings and RTC values that are
    populated during the communication flow (e.g. ``Addr`` is the allocated
    communication address). Field-size comments are kept from the original;
    NOTE(review): some disagree with the fill sizes used in ``__init__``
    (e.g. FirmwareVer says 5 bytes but is filled with 10 entries) -- confirm
    against the protocol spec. ``SafetyConutryCode`` keeps its original
    (misspelled) name for backward compatibility.
    """
    # Class-level defaults (per-instance values are built in __init__).
    DeviceType = 0x00  #
    FirmwareVer = []  # 5 Byte
    ModelName = []  # 10 Byte
    Manufacture = []  # 16 Byte
    SerialNumber = []  # 16 Byte
    Nom_Vpv = []  # 4 Byte
    InternalVersion = []  # 12 Byte
    SafetyConutryCode = 0  # 1 Byte
    ### Protection / grid-connection settings
    Vpv_Start = 0  # PV Start-up Voltage
    T_Start = 0  # Time to connect grid
    Vac_Min = 0  # Minimum operational grid voltage
    Vac_Max = 0  # Maximum operational grid voltage
    Fac_Min = 0  # Minimum operational grid Frequency
    Fac_Max = 0  # Maximum operational grid Frequency
    ### RTC Time value
    Year = 0
    Month = 0
    Date = 0
    Hour = 0
    Minute = 0
    Second = 0

    def __init__(self):
        self.ErrorCnt = 0
        self.Addr = 0x7F
        # Json to dict: the running-info template defined at module level.
        self.RunningInfoList = json.loads(Running_Info)
        # BUG FIX: the original appended to the *class-level* lists through
        # ``self``, so every new instance grew the lists shared by all
        # instances. Build fresh per-instance lists instead (same initial
        # contents as the original's first instance).
        self.FirmwareVer = ['0'] * 10
        self.ModelName = ['A'] * 20
        self.Manufacture = ['A'] * 20
        self.SerialNumber = ['A'] * 20
        self.InternalVersion = ['0'] * 20
        self.Nom_Vpv = ['0'] * 10
| StarcoderdataPython |
1866699 | <gh_stars>0
#!/usr/bin/env python
"""
This code holds the solution for part 2 of day 16 of the Advent of Code for 2015.
"""
known_facts = """children: 3
cats: 7
samoyeds: 2
pomeranians: 3
akitas: 0
vizslas: 0
goldfish: 5
trees: 3
cars: 2
perfumes: 1"""
aunts = {}
known_aunt = {}
def extract_field(aunt_info, pieces, pos):
    """Store the "name: value," token pair found at *pos* into *aunt_info*.

    ``pieces[pos]`` is the property name (trailing ':' stripped) and
    ``pieces[pos + 1]`` its integer value (trailing ',' stripped).
    """
    key = pieces[pos].rstrip(':')
    value = int(pieces[pos + 1].rstrip(','))
    aunt_info[key] = value
# Parse the ticker-tape readings into known_aunt.
for line in known_facts.splitlines():
    pieces = line.split(':')
    extract_field(known_aunt, pieces, 0)

# Parse every "Sue N: prop: v, prop: v, ..." line of input.txt into aunts[N].
with open("input.txt", "r") as f:
    for line in f:
        pieces = line.split(' ')
        number = int(pieces[1].rstrip(':'))
        aunt_info = {}
        field = 2
        while field < len(pieces):
            extract_field(aunt_info, pieces, field)
            field += 2
        aunts[number] = aunt_info

# Eliminate aunts that contradict the readings (part-2 ranged rules):
# iterate over a copy since we delete from ``aunts`` while scanning.
tmp = aunts.copy()
for name in tmp:
    # 'cats'/'trees' readings are under-counts: the real value is greater.
    # NOTE(review): this keeps equal values too (uses '>' not '>=') -- confirm
    # that matches the intended strict "greater than" rule.
    for prop in ["cats", "trees"]:
        if prop in tmp[name]:
            if known_aunt[prop] > tmp[name][prop]:
                if name in aunts:
                    del aunts[name]
                break
    # 'pomeranians'/'goldfish' readings are over-counts: real value is fewer.
    # NOTE(review): same equality caveat as above.
    for prop in ["pomeranians", "goldfish"]:
        if prop in tmp[name]:
            if known_aunt[prop] < tmp[name][prop]:
                if name in aunts:
                    del aunts[name]
                break
    # All remaining properties must match exactly.
    for prop in ["samoyeds", "akitas", "vizslas", "cars", "perfumes"]:
        if prop in tmp[name]:
            if known_aunt[prop] != tmp[name][prop]:
                if name in aunts:
                    del aunts[name]
                break
# Python 2 print statement: whatever survives is the answer.
print aunts
| StarcoderdataPython |
356921 | import nisyscfg
import nisyscfg.xnet
import sys
class DeviceNotFoundError(Exception):
    """Raised when no NI-XNET device matches the requested serial number."""
    pass
class PortNotFoundError(Exception):
    """Raised when the device exists but has no interface with the port number."""
    pass
def nixnet_assign_port_name(serial_number, port_number, port_name):
    """Rename the NI-XNET interface identified by device serial + port number.

    Raises:
        DeviceNotFoundError: no device with *serial_number* was found.
        PortNotFoundError: the device has no interface at *port_number*.
    """
    with nisyscfg.Session() as session:
        # Search for the NI-XNET device with the specified serial number.
        device_filter = session.create_filter()
        device_filter.is_device = True
        device_filter.serial_number = serial_number
        try:
            # Assume only one device will be found
            device = next(session.find_hardware(filter=device_filter, expert_names="xnet"))
        except StopIteration:
            raise DeviceNotFoundError(
                'Could not find a device with serial number "{}"'.format(serial_number)
            )
        # Search for the interface connected to the NI-XNET device with the
        # specified port number.
        interface_filter = session.create_filter()
        interface_filter.is_device = False
        interface_filter.connects_to_link_name = device.provides_link_name
        interface_filter.xnet.port_number = port_number
        try:
            # Assume only one interface will be found
            interface = next(session.find_hardware(filter=interface_filter, expert_names="xnet"))
        except StopIteration:
            raise PortNotFoundError(
                'Device with serial number "{}" does not have port number {}'.format(
                    serial_number, port_number
                )
            )
        interface.rename(port_name)
if "__main__" == __name__:
if len(sys.argv) != 4:
print("Usage: {} <serial_number> <port_number> <port_name>".format(sys.argv[0]))
sys.exit(1)
serial_number = sys.argv[1].upper()
port_number = int(sys.argv[2])
port_name = sys.argv[3]
nixnet_assign_port_name(serial_number, port_number, port_name)
| StarcoderdataPython |
5014306 | <reponame>jay-tyler/data-structures<gh_stars>1-10
def insort(unlist):
    """Return a sorted copy of *unlist* using insertion sort.

    Follows the classic algorithm illustrated at
    https://en.wikipedia.org/wiki/Insertion_sort; the input list itself is
    left unmodified.
    """
    slist = unlist[:]
    for pos in range(1, len(slist)):
        cur = pos
        prev = pos - 1
        # Bubble the element at ``cur`` leftwards until its left neighbour is
        # no longer greater; strict '<' keeps the sort stable.
        while prev >= 0 and slist[cur] < slist[prev]:
            slist[cur], slist[prev] = slist[prev], slist[cur]
            cur -= 1
            prev -= 1
    return slist
if __name__ == "__main__":
"""Test time performance for best and worst cases"""
import time
# time is better than timeit for unix times
# https://docs.python.org/2/library/timeit.html#timeit.default_timer
# O(n) best case
start = time.time()
for i in range(100):
insort(range(100))
stop = time.time()
best_time = (stop - start) / 100
# O(n**2) worst case
start = time.time()
for i in range(100):
insort(range(100)[::-1])
stop = time.time()
worst_time = (stop - start) / 100
print "Best case is {:.1f} times better than worst for n=100\n\n".format(
worst_time/best_time) +\
"best case: {best:.2E} s\n\nworst case: {worst:.2E} s".format(
best=best_time, worst=worst_time)
| StarcoderdataPython |
273889 | <reponame>agupta-io/testplan
"""Unit tests for MultiTest base functionality."""
import os
from testplan.common.utils import path
from testplan.testing import multitest
from testplan.testing.multitest import base
from testplan.testing import filtering
from testplan.testing import ordering
from testplan import defaults
from testplan import report
from testplan.common import entity
# TODO: shouldn't need to specify these...
# TODO: shouldn't need to specify these...
# Minimal required keyword arguments shared by every MultiTest built in these
# unit tests: no filtering, no reordering, default stdout styling.
MTEST_DEFAULT_PARAMS = {
    "test_filter": filtering.Filter(),
    "test_sorter": ordering.NoopSorter(),
    "stdout_style": defaults.STDOUT_STYLE,
}
def test_multitest_runpath():
    """Test setting of runpath.

    Four cases: no runpath anywhere (falls back to the default), runpath in
    the local config, runpath only in the parent (global) config, and a local
    runpath overriding the global one. In each case the runpath is resolved
    only after run().
    """
    # No runpath specified -> default runpath derived from the test.
    mtest = multitest.MultiTest(
        name="Mtest", suites=[], **MTEST_DEFAULT_PARAMS
    )
    assert mtest.runpath is None
    assert mtest._runpath is None
    mtest.run()
    assert mtest.runpath == path.default_runpath(mtest)
    assert mtest._runpath == path.default_runpath(mtest)
    # runpath in local cfg
    custom = os.path.join("", "var", "tmp", "custom")
    mtest = multitest.MultiTest(
        name="Mtest", suites=[], runpath=custom, **MTEST_DEFAULT_PARAMS
    )
    assert mtest.runpath is None
    assert mtest._runpath is None
    mtest.run()
    assert mtest.runpath == custom
    assert mtest._runpath == custom
    # runpath in global cfg (inherited through the parent config)
    global_runpath = os.path.join("", "var", "tmp", "global_level")
    par = base.MultiTestConfig(name="Mtest", suites=[], runpath=global_runpath)
    mtest = multitest.MultiTest(
        name="Mtest", suites=[], **MTEST_DEFAULT_PARAMS
    )
    mtest.cfg.parent = par
    assert mtest.runpath is None
    assert mtest._runpath is None
    mtest.run()
    assert mtest.runpath == global_runpath
    assert mtest._runpath == global_runpath
    # runpath in global cfg and local -> local wins
    global_runpath = os.path.join("", "var", "tmp", "global_level")
    local_runpath = os.path.join("", "var", "tmp", "local_runpath")
    par = base.MultiTestConfig(name="Mtest", suites=[], runpath=global_runpath)
    mtest = multitest.MultiTest(
        name="Mtest", suites=[], runpath=local_runpath, **MTEST_DEFAULT_PARAMS
    )
    mtest.cfg.parent = par
    assert mtest.runpath is None
    assert mtest._runpath is None
    mtest.run()
    assert mtest.runpath == local_runpath
    assert mtest._runpath == local_runpath
@multitest.testsuite
class Suite(object):
    """Basic testsuite."""
    # NOTE: the testcase docstrings below double as runtime testcase
    # descriptions and are asserted against in EXPECTED_REPORT_SKELETON --
    # do not edit them casually.

    @multitest.testcase
    def case(self, env, result):
        """Basic testcase."""
        result.true(True)

    @multitest.testcase(parameters=[1, 2, 3])
    def parametrized(self, env, result, val):
        """Parametrized testcase."""
        result.gt(val, 0)
@multitest.testsuite
class ParallelSuite(object):
    """Suite with parallelisable testcases."""
    # Cases sharing execution_group "A" may run concurrently with each other;
    # group "B" runs as a separate batch.

    @multitest.testcase(execution_group="A")
    def case1(self, env, result):
        """Testcase 1"""
        result.eq(0, 0)

    @multitest.testcase(execution_group="A")
    def case2(self, env, result):
        """Testcase 2"""
        result.eq(1, 1)

    @multitest.testcase(execution_group="A")
    def case3(self, env, result):
        """Testcase 3"""
        result.eq(2, 2)

    @multitest.testcase(execution_group="B", parameters=[1, 2, 3])
    def parametrized(self, env, result, val):
        """Parametrized testcase"""
        result.gt(val, 0)
# Expected result of MultiTest.dry_run() on Suite: the complete report tree
# (multitest -> testsuite -> testcases / parametrization group) with every
# entry present but no assertions executed. Compared in serialized form in
# test_dry_run below.
EXPECTED_REPORT_SKELETON = report.TestGroupReport(
    name="MTest",
    category=report.ReportCategories.MULTITEST,
    uid="MTest",
    env_status=entity.ResourceStatus.STOPPED,
    entries=[
        report.TestGroupReport(
            name="Suite",
            description="Basic testsuite.",
            category=report.ReportCategories.TESTSUITE,
            uid="Suite",
            parent_uids=["MTest"],
            entries=[
                report.TestCaseReport(
                    name="case",
                    description="Basic testcase.",
                    uid="case",
                    parent_uids=["MTest", "Suite"],
                ),
                report.TestGroupReport(
                    name="parametrized",
                    category=report.ReportCategories.PARAMETRIZATION,
                    uid="parametrized",
                    parent_uids=["MTest", "Suite"],
                    entries=[
                        report.TestCaseReport(
                            name="parametrized__val_1",
                            description="Parametrized testcase.",
                            uid="parametrized__val_1",
                            parent_uids=["MTest", "Suite", "parametrized"],
                        ),
                        report.TestCaseReport(
                            name="parametrized__val_2",
                            description="Parametrized testcase.",
                            uid="parametrized__val_2",
                            parent_uids=["MTest", "Suite", "parametrized"],
                        ),
                        report.TestCaseReport(
                            name="parametrized__val_3",
                            description="Parametrized testcase.",
                            uid="parametrized__val_3",
                            parent_uids=["MTest", "Suite", "parametrized"],
                        ),
                    ],
                ),
            ],
        )
    ],
)
def test_dry_run():
    """Test the "dry_run" method which generates an empty report skeleton."""
    mtest = multitest.MultiTest(
        name="MTest", suites=[Suite()], **MTEST_DEFAULT_PARAMS
    )
    result = mtest.dry_run()
    report_skeleton = result.report
    # Comparing the serialized reports makes it much easier to spot any
    # inconsistencies.
    assert report_skeleton.serialize() == EXPECTED_REPORT_SKELETON.serialize()
def test_run_all_tests():
    """Test running all tests.

    Checks the full report tree produced by run_tests(): one passing suite
    containing one plain testcase and a three-way parametrization group.
    """
    mtest = multitest.MultiTest(
        name="MTest", suites=[Suite()], **MTEST_DEFAULT_PARAMS
    )
    mtest_report = mtest.run_tests()
    assert mtest_report.passed
    assert mtest_report.name == "MTest"
    assert mtest_report.category == report.ReportCategories.MULTITEST
    assert len(mtest_report.entries) == 1  # One suite.
    suite_report = mtest_report.entries[0]
    assert suite_report.passed
    assert suite_report.name == "Suite"
    assert suite_report.category == report.ReportCategories.TESTSUITE
    assert len(suite_report.entries) == 2  # Two testcases.
    testcase_report = suite_report.entries[0]
    _check_testcase_report(testcase_report)
    param_report = suite_report.entries[1]
    assert param_report.passed
    assert param_report.name == "parametrized"
    assert param_report.category == report.ReportCategories.PARAMETRIZATION
    assert len(param_report.entries) == 3  # Three parametrized testcases
    for i, testcase_report in enumerate(param_report.entries):
        _check_param_testcase_report(testcase_report, i)
def test_run_tests_parallel():
    """Test running tests in parallel via an execution group."""
    # Since we have at most three testcases in any one execution group,
    # use three threads in the thread pool to save on resources.
    mtest = multitest.MultiTest(
        name="MTest",
        suites=[ParallelSuite()],
        thread_pool_size=3,
        **MTEST_DEFAULT_PARAMS
    )
    mtest_report = mtest.run_tests()
    assert mtest_report.passed
    assert mtest_report.name == "MTest"
    assert mtest_report.category == report.ReportCategories.MULTITEST
    assert len(mtest_report.entries) == 1  # One suite.
    suite_report = mtest_report.entries[0]
    assert suite_report.passed
    assert suite_report.name == "ParallelSuite"
    assert suite_report.category == report.ReportCategories.TESTSUITE
    assert len(suite_report.entries) == 4  # Four testcases.
    # Three plain cases from group "A", then the group-"B" parametrization.
    for i in range(3):
        case_name = "case{}".format(i + 1)
        _check_parallel_testcase(suite_report[case_name], i)
    _check_parallel_param(suite_report["parametrized"])
def test_run_testcases_iter():
    """Test running tests iteratively.

    run_testcases_iter() yields (testcase_report, parent_uids) pairs, one per
    executed testcase, in declaration order.
    """
    mtest = multitest.MultiTest(
        name="MTest",
        suites=[Suite()],
        thread_pool_size=3,
        **MTEST_DEFAULT_PARAMS
    )
    results = list(mtest.run_testcases_iter())
    assert len(results) == 4
    testcase_report, parent_uids = results[0]
    assert parent_uids == ["MTest", "Suite"]
    _check_testcase_report(testcase_report)
    for i, (testcase_report, parent_uids) in enumerate(results[1:]):
        assert parent_uids == ["MTest", "Suite", "parametrized"]
        _check_param_testcase_report(testcase_report, i)
def _check_parallel_testcase(testcase_report, i):
    """
    Check that ith testcase report in the ParallelSuite is as expected after
    a full run.
    """
    assert testcase_report.name == "case{}".format(i + 1)
    assert testcase_report.category == report.ReportCategories.TESTCASE
    assert len(testcase_report.entries) == 1  # One assertion
    # The single entry is the serialized Equal assertion result.eq(i, i).
    equals_assertion = testcase_report.entries[0]
    assert equals_assertion["passed"]
    assert equals_assertion["type"] == "Equal"
    assert equals_assertion["first"] == i
    assert equals_assertion["second"] == i
def _check_parallel_param(param_report):
    """
    Check the parametrized testcase group from the ParallelSuite is as
    expected after a full run.
    """
    assert param_report.name == "parametrized"
    assert param_report.category == report.ReportCategories.PARAMETRIZATION
    assert len(param_report.entries) == 3  # Three parametrized testcases.
    for i, testcase_report in enumerate(param_report.entries):
        assert testcase_report.name == "parametrized__val_{}".format(i + 1)
        assert testcase_report.category == report.ReportCategories.TESTCASE
        assert len(testcase_report.entries) == 1  # One assertion
        # The single entry is the serialized Greater assertion gt(i + 1, 0).
        greater_assertion = testcase_report.entries[0]
        assert greater_assertion["passed"]
        assert greater_assertion["type"] == "Greater"
        assert greater_assertion["first"] == i + 1
        assert greater_assertion["second"] == 0
def _check_testcase_report(testcase_report):
    """
    Check the testcase report generated for the "case" testcase from the
    "Suite" testsuite.
    """
    assert testcase_report.passed
    assert testcase_report.name == "case"
    assert testcase_report.category == report.ReportCategories.TESTCASE
    assert len(testcase_report.entries) == 1  # One assertion.
    # The single entry is the serialized IsTrue assertion result.true(True).
    truth_assertion = testcase_report.entries[0]
    assert truth_assertion["passed"]
    assert truth_assertion["type"] == "IsTrue"
    assert truth_assertion["expr"] is True
def _check_param_testcase_report(testcase_report, i):
    """Verify the report of the ith parametrization of Suite.parametrized."""
    expected_name = "parametrized__val_{}".format(i + 1)
    assert testcase_report.passed
    assert testcase_report.name == expected_name
    assert testcase_report.category == report.ReportCategories.TESTCASE
    # Exactly one serialized "Greater" assertion: gt(i + 1, 0).
    assert len(testcase_report.entries) == 1
    (entry,) = testcase_report.entries
    assert entry["passed"]
    assert entry["type"] == "Greater"
    assert entry["first"] == i + 1
    assert entry["second"] == 0
| StarcoderdataPython |
224298 | <filename>main.py<gh_stars>0
from PyQt5.QtWidgets import QApplication, QWidget, QFileDialog, QPushButton, QLabel, QGridLayout, QHBoxLayout
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QThreadPool, QRunnable, QTimer, QSize, QObject
from PyQt5.QtGui import QPixmap
import sys
import os
import json
import subprocess
import psutil
from PIL import Image
import numpy as np
import time
class UpdateBlenderSignals(QObject):
    """Qt signals emitted by the UpdateBlenderImage background worker."""
    # Fired once the preview labels have been refreshed with the new render.
    finished = pyqtSignal()
class UpdateBlenderImage(QRunnable):
    """Background worker that waits for Blender to render frame *qt_frame*
    and then loads the three rendered images into the preview labels.

    Synchronisation is file based: Blender writes its current frame number
    into the JSON file at *blender_to_qt_path*, and this worker polls that
    file until the number matches the frame it was created for.
    """
    def __init__(self, image_path0, image_path1, image_path2, label0, label1, label2, qt_frame, blender_to_qt_path):
        super(UpdateBlenderImage, self).__init__()
        self.image_path0 = image_path0
        self.image_path1 = image_path1
        self.image_path2 = image_path2
        self.label0 = label0
        self.label1 = label1
        self.label2 = label2
        self.qt_frame = qt_frame
        # Last frame number read from Blender's handshake file.
        self.blender_frame = 0
        self.blender_to_qt_path = blender_to_qt_path
        self.signals = UpdateBlenderSignals()

    @pyqtSlot()
    def run(self):
        # Wait for Blender to create the handshake file at all.
        while not os.path.isfile(self.blender_to_qt_path):
            time.sleep(0.25)
        self.get_blender_frame()
        # Poll until Blender reports the frame we asked for.
        while self.blender_frame != self.qt_frame:
            time.sleep(0.1)
            self.get_blender_frame()
        # Frame is ready: refresh all three preview labels.
        self.pixmap0 = QPixmap(self.image_path0)
        self.pixmap1 = QPixmap(self.image_path1)
        self.pixmap2 = QPixmap(self.image_path2)
        self.label0.setPixmap(self.pixmap0)
        self.label1.setPixmap(self.pixmap1)
        self.label2.setPixmap(self.pixmap2)
        self.signals.finished.emit()

    def get_blender_frame(self):
        """Read Blender's current frame number from the handshake JSON file."""
        with open(self.blender_to_qt_path, 'r') as f:
            blender_to_qt_dict = json.load(f)
        self.blender_frame = blender_to_qt_dict['blender_frame']
class MainWindow(QWidget):
def __init__(self):
    """Set up handshake files, UI state and launch Blender.

    Two JSON files implement the Qt<->Blender handshake: qt_to_blender.json
    carries the frame Qt wants rendered; blender_to_qt.json carries the frame
    Blender has finished.
    """
    super().__init__()
    # Qt -> Blender handshake file (frame we are requesting).
    self.qt_to_blender_path = os.path.abspath("qt_to_blender.json")
    self.qt_frame = 0
    self.qt_to_blender_dict = {'qt_frame': self.qt_frame}
    with open(self.qt_to_blender_path, 'w+') as f:
        json.dump(self.qt_to_blender_dict, f)
    # Blender -> Qt handshake file (frame Blender last completed).
    self.blender_to_qt_path = os.path.abspath("blender_to_qt.json")
    self.blender_frame = 0
    self.blender_to_qt_dict = {'blender_frame': self.blender_frame}
    with open(self.blender_to_qt_path, 'w+') as f:
        json.dump(self.blender_to_qt_dict, f)
    # Base path of the rendered images (suffixed _0.png/_1.png/_2.png).
    self.blender_image_path = os.path.abspath("blender_out")
    self.all_text = []
    self.blender_process = None
    self.window_width = 1200
    self.window_height = 600
    # Widgets are created in init_ui(); initialised here for clarity.
    self.blender_button = None
    self.blender_path_label = None
    self.console_text = None
    self.blender_pixmap = None
    self.grid = None
    self.thread_pool = QThreadPool()
    self.blender_image_0 = QLabel()
    self.blender_image_1 = QLabel()
    self.blender_image_2 = QLabel()
    self.blender_grid = QGridLayout()
    self.blender_button = QPushButton(f'Update')
    self.update_blender_image = None
    # Load (or create) the persisted configuration with the Blender path.
    self.config = {}
    if os.path.isfile('config.json'):
        with open('config.json', 'r') as f:
            self.config = json.load(f)
        if self.config['blender_path'] == '':
            self.config.update({'blender_path': 'not set'})
    else:
        self.config = {'blender_path': 'not set'}
        with open('config.json', 'w+') as f:
            json.dump(self.config, f)
    self.init_ui()
    self.launch_blender()
def text_to_console(self, new_text):
self.all_text.append(new_text)
new_output = ""
for text in self.all_text[-4:]:
new_output += text + '\n'
self.console_text.setText(new_output)
def new_update_blender(self):
self.qt_frame += 1
self.qt_to_blender_dict.update({"qt_frame": self.qt_frame})
with open(self.qt_to_blender_path, 'w+') as f:
json.dump(self.qt_to_blender_dict, f)
self.update_blender_image = UpdateBlenderImage(self.blender_image_path+'_0.png',
self.blender_image_path+'_1.png',
self.blender_image_path+'_2.png',
self.blender_image_0,
self.blender_image_1,
self.blender_image_2,
self.qt_frame,
self.blender_to_qt_path)
self.update_blender_image.signals.finished.connect(self.new_update_blender)
self.thread_pool.start(self.update_blender_image)
def init_ui(self):
self.setGeometry(0, 0, self.window_width, self.window_height)
self.setWindowTitle("Blender Presenter")
self.grid = QGridLayout(self)
self.setLayout(self.grid)
self.grid.addLayout(self.blender_grid, 0, 0)
self.blender_button.setMaximumSize(self.blender_button.sizeHint())
self.blender_button.setToolTip('Set Blender Executable Path')
self.blender_button.clicked.connect(self.on_configure_blender)
self.blender_grid.addWidget(self.blender_button, 0, 0)
blender_path = self.config['blender_path']
self.blender_path_label = QLabel(f"Blender Path: {blender_path}")
self.blender_grid.addWidget(self.blender_path_label, 0, 1)
if not os.path.isfile(self.blender_image_path):
test_image = np.ones((400, 400, 4), dtype=np.uint8)
test_image[:, :, 1:] = test_image[:, :, 1:]*255
im = Image.fromarray(test_image)
im.save(self.blender_image_path+'_0.png')
im.save(self.blender_image_path+'_1.png')
im.save(self.blender_image_path+'_2.png')
self.grid.addWidget(self.blender_image_0, 1, 0)
self.grid.addWidget(self.blender_image_1, 1, 1)
self.grid.addWidget(self.blender_image_2, 1, 2)
self.console_text = QLabel("")
self.grid.addWidget(self.console_text, 2, 0)
self.show()
def launch_blender(self):
close_all_blenders()
blender_path = self.config['blender_path']
blender_script_path = os.path.abspath("blender_run.py")
try:
self.blender_process = subprocess.Popen([f'{blender_path}',
'--background',
'--python',
f'{blender_script_path}',
f'{self.blender_to_qt_path}',
f'{self.qt_to_blender_path}',
f'{self.blender_image_path}'])
self.text_to_console("Blender Found")
except FileNotFoundError:
self.text_to_console("Blender not found")
except PermissionError:
self.text_to_console("Blender not found")
self.new_update_blender()
@pyqtSlot()
def on_configure_blender(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
blender_path, _ = QFileDialog.getOpenFileName(self, "QFileDialog.getOpenFileName()", "", "All Files (*)", options=options)
self.config.update({'blender_path': blender_path})
self.blender_path_label.setText(f'Blender Path: {blender_path}')
self.blender_path_label.resize(self.blender_button.sizeHint())
with open('config.json', 'w+') as f:
json.dump(self.config, f)
self.launch_blender()
def close_all_blenders():
    """Send signal 1 (SIGHUP on POSIX) to every running process whose name
    contains "blender", so a fresh instance can be launched cleanly."""
    matching = (proc for proc in psutil.process_iter() if "blender" in proc.name())
    for proc in matching:
        os.kill(proc.pid, 1)
if __name__ == '__main__':
    # Start the Qt application; MainWindow launches Blender from __init__.
    app = QApplication(sys.argv)
    mw = MainWindow()
    sys.exit(app.exec_())
| StarcoderdataPython |
112949 | from super_gradients.common.factories.base_factory import BaseFactory
from super_gradients.training.losses import LOSSES
class LossesFactory(BaseFactory):
    """Factory resolving loss names to classes via the LOSSES registry."""
    def __init__(self):
        super().__init__(LOSSES)
| StarcoderdataPython |
11311012 | <filename>backend/api/permissions.py
# -*- coding: utf-8 -*-
from rest_framework.permissions import BasePermission, SAFE_METHODS
from api.models import Blog
class IsOwner(BasePermission):
    """Custom permission class to allow only Blog owners to edit them."""
    def has_object_permission(self, request, view, obj):
        """Grant permission only when the requesting user owns ``obj``.

        Blog instances and any other owned object are checked identically,
        through their ``owner`` attribute, so a single comparison suffices.
        """
        return obj.owner == request.user
class IsOwnerOrReadOnly(BasePermission):
    """
    Read-only access for anonymous visitors; write access only for the owner.
    """
    def has_object_permission(self, request, view, obj):
        # Read-only methods (GET, HEAD, OPTIONS) are allowed for everyone.
        if request.method in SAFE_METHODS:
            return True
        # Write permission is granted only to the object's owner.
        # FIX: removed a dataset artifact fused onto this line (syntax error).
        return obj.owner == request.user
# Advent of Code 2018 day 7 part 2: schedule dependent steps on 5 workers,
# where step X takes 60 + (position of X in the alphabet) seconds.
with open("/Users/apoorv/aoc/aoc_2018/data/aoc_7_data.txt") as inp:
    data = inp.read().splitlines()
# Each line encodes "Step A must be finished before step B can begin";
# characters at fixed positions 5 and 36 are the two step letters.
data = [(x[5], x[36]) for x in data]
m = dict()
for x in data:
    m[x[0]] = set()
    m[x[1]] = set()
# m maps each step to the set of its unfinished prerequisites.
for a, b in data:
    m[b].add(a)
l = []
out = []
# Seed the ready list with steps that have no prerequisites.
for key in m:
    if len(m[key]) == 0:
        l.append(key)
w = 5
# Each worker slot is a (step, seconds_remaining) pair.
t = [(None, 0) for i in range(w)]
ans = 0
# Loop while there is work queued or any worker is still busy.
while len(l) > 0 or sum(map(lambda x: x[1], t)) > 0:
    for i in range(len(t)):
        l.sort()
        # print(l, t[i])
        if t[i][1] == 0 and len(l) > 0:
            # Assign the alphabetically first ready step to this idle worker.
            e = l[0]
            out += [e]
            l = l[1:]
            t[i] = (e, 60 + ord(e) - ord('A') + 1)
    print(t, l)
    # Advance simulated time by one second for every worker.
    t = [(x, max(0, y-1)) for x, y in t]
    ans += 1
    for i in range(len(t)):
        if t[i][1] == 0:
            # Worker finished: clear its step from all prerequisite sets and
            # enqueue any steps that became ready.
            for x in m:
                if t[i][0] in m[x]:
                    m[x].remove(t[i][0])
                    if len(m[x]) == 0:
                        l.append(x)
print(ans)
| StarcoderdataPython |
11351807 |
import groads
| StarcoderdataPython |
1612627 | # -*- coding: utf-8 -*-
"""Tests.
"""
import pytest
from mss.utils.utils_collections import arrange_by_alphabet, group_to_size
@pytest.fixture
def input_data_arrange():
    """Sample word list (including a duplicate) fed to arrange_by_alphabet."""
    return [
        '#',
        'acquire',
        'acquire',
        'constrain',
        'enthusiastic',
        'think',
        'edge',
    ]
@pytest.fixture
def ref_data_arrange_duplicates():
    """Expected arrangement when duplicates are kept (unique=False)."""
    return {
        '#': ['#'],
        'A': ['acquire', 'acquire'],
        'C': ['constrain'],
        'E': ['edge', 'enthusiastic'],
        'T': ['think'],
    }
@pytest.fixture
def ref_data_arrange_unique():
    """Expected arrangement when duplicates are dropped (unique=True)."""
    return {
        '#': ['#'],
        'A': ['acquire'],
        'C': ['constrain'],
        'E': ['edge', 'enthusiastic'],
        'T': ['think'],
    }
def test_arrange_by_alphabet(input_data_arrange, ref_data_arrange_duplicates):
    """Words must be grouped by first letter without dropping duplicates."""
    result = arrange_by_alphabet(input_data_arrange, unique=False)
    assert result == ref_data_arrange_duplicates
def test_arrange_by_alphabet_unique(input_data_arrange,
                                    ref_data_arrange_unique):
    """Words must be grouped by first letter with duplicates removed."""
    result = arrange_by_alphabet(input_data_arrange, unique=True)
    assert result == ref_data_arrange_unique
def test_group_to_size():
    """Sequences must be chunked to the requested size, padded with the filler."""
    assert list(group_to_size([1, 2, 3, 4, 5, 6, 7], 2, '?')) == [
        (1, 2), (3, 4), (5, 6), (7, '?')]
    assert list(group_to_size([1, 2, 3, 4, 5, 6, 7], 3, '?')) == [
        (1, 2, 3), (4, 5, 6), (7, '?', '?')]
| StarcoderdataPython |
12809549 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by <NAME>
# Rev.2020Jan28
#
"""This package generates the Input of microkinetic models for Maple.
Possible expansions:
* Put is and fs as lists, to support H2 dissociative adsorption for instance.
* Non-isothermal reactors, T dependent on time.
* Non-isobaric reactors, P dependent on time.
* Cycle the model making the energies depend on two or more parameters (PCA).
* Consider coverage effects.
* Unidimensional diffusion, taking stationary state conditions in Fick's law.
Security checks to implement:
* Check if float(conf['Catalyst']['areaactivesite']) gives no error.
* Idem float(conf['Catalyst']['secondlayerthickness']) if gas-phase species.
"""
# Load libraries
import amklib
# Initialize variables
ltp = {}  # List-to-print: dictionary of lists consumed by amklib.printtxt

# Read configuration file
conf = amklib.readconf("./parameters.txt")

# Read the input files itm & rxn as dictionaries of dictionaries.
itm = amklib.read('./itm.csv')
rxn = amklib.read('./rxn.csv')

# Electrochemical part: get the electric potential and, when working under
# electrochemical conditions, shift intermediate/reaction energies by it.
elecpot = amklib.get_elecpot(conf)  # Electric potential vs SHE (vs RHE in config file).
if elecpot != 0:
    # BUG FIX: was a bare `get_nelect_for_rxn(...)` which raises NameError;
    # the helper lives in amklib like every other function used here.
    amklib.get_nelect_for_rxn(conf, itm, rxn)
    amklib.adjust_energy_with_potential(conf, itm, elecpot)
    amklib.adjust_energy_with_potential(conf, rxn, elecpot)

# Prepare site balance equation, solver for SODE, and initial conditions.
# Also initialize the list of differential equations.
itm, sbalance, sodesolv, initialc, rhsparse = amklib.process_intermediates(conf, itm, ltp)

# Prepare kinetic constants and rates of all chemical steps.
# Also expand list of differential equations in "itm" to include chemical steps.
itm, rxn = amklib.process_rxn(conf, itm, rxn, ltp)

# Print Maple input.
amklib.printtxt(conf, itm, rxn, sbalance, initialc, sodesolv, rhsparse, ltp)
| StarcoderdataPython |
6529899 | <reponame>goelyash/Spider
from pybloom import ScalableBloomFilter
import os
from threading import Lock
MAX_BUF_WRITES = 10  # flush the bloom filter to disk after this many buffered adds
#BloomSet is used to maintain visited status of the urls
class BloomSet:
    """Persistent scalable Bloom filter tracking which URLs were visited.

    State is stored in a file named ``bloom`` inside the directory ``name``;
    per-depth filters live in files ``MultiBloom/bloom<i>``.
    NOTE(review): __init__ calls os.chdir(name), changing the process-wide
    working directory as a side effect -- confirm callers expect this.
    """
    #initialize member variables
    def __init__(self, name):
        self.name = name
        self.multiDir = "MultiBloom"
        self.multiName = "bloom"
        self.lock = Lock()
        self.writes = 0  # buffered adds since the last flush to disk
        self.multiFiles = 0
        #self.file = open(os.path.join(self.name,"bloom"), "a+")
        os.chdir(self.name)
        self.file = open("bloom", "a+")
        if not os.path.exists("MultiBloom"):
            print("Directory created MultiBloom")
            os.makedirs("MultiBloom")
        else:
            print("Directory already present")
        self.filter = self.boot()
    #create a file for every depth of a link
    #Remove self.file during initialization and add a file initialization for every depth
    #thats crawled by taking the depth as argument.
    def __contains__(self, val):
        # Enables the `url in bloom_set` membership idiom.
        return val in self.filter
    def add(self,arg):
        """Thread-safely add *arg*; flush to disk every MAX_BUF_WRITES adds."""
        self.lock.acquire()
        self.filter.add(arg)
        self.writes+=1
        if self.writes > MAX_BUF_WRITES:
            self.writes -=MAX_BUF_WRITES
            self.write()
        self.lock.release()
    def get(self, arg):
        """Return True if *arg* was previously added (false positives possible)."""
        return arg in self.filter
    def boot(self):
        """Load the persisted filter from self.file, or create a fresh one."""
        try:
            self.file.seek(0)
            a = ScalableBloomFilter.fromfile(self.file)
            return a
        except:
            # Empty or unreadable file: start with a new scalable filter.
            return ScalableBloomFilter(ScalableBloomFilter.LARGE_SET_GROWTH)
    def multiAdd(self,urls):
        """Record urls[i] into the per-depth filter file ``bloom<i>``.

        NOTE(review): boot1() reads the file before it is truncated and
        rewritten -- confirm the read/rewrite ordering is intended.
        """
        self.lock.acquire()
        for i in range(0, len(urls)):
            if urls[i] == "":
                pass
            else:
                filename = self.multiName + str(i)
                self.multiFile = open(os.path.join(self.multiDir,filename), "a")
                self.filter1 = self.boot1()
                self.multiFile.seek(0)
                self.multiFile.truncate()
                self.filter1.add(urls[i])
                self.filter1.tofile(self.multiFile)
                self.multiFile.close()
        self.lock.release()
    def boot1(self):
        """Load the current per-depth filter from self.multiFile, or create one."""
        try:
            self.multiFile.seek(0)
            a = ScalableBloomFilter.fromfile(self.multiFile)
            return a
        except:
            return ScalableBloomFilter(ScalableBloomFilter.LARGE_SET_GROWTH)
    def write(self):
        """Overwrite the backing file with the current filter state."""
        self.file.seek(0)
        self.file.truncate()
        self.filter.tofile(self.file)
    def close(self):
        """Persist the filter and close the backing file."""
        self.filter.tofile(self.file)
        self.file.close()
| StarcoderdataPython |
9604183 | <gh_stars>0
# Advent of Code "seven segment search" puzzle, heavily golfed.
import sys
# Each input line is "signal patterns | output patterns".
L = [ m.split('|') for m in open(sys.argv[1]).readlines() ]
# Lookup by (pattern length - 5): nonzero entries are digits with a unique
# segment count (8, 1, 7, 4); zeros mark ambiguous 5/6-segment patterns.
m = [0,0,8,1,7,4]
# Decode a pattern: unique lengths via m, otherwise disambiguate 5/6-segment
# digits from whether the wires e and c (see below) appear in the pattern.
M = lambda l,x: m[l] or 15-x[0]*9-x[1]*6 if l else 5-x[0]-x[1]*2
# Part 1: count output digits whose segment count is unique (1, 4, 7, 8).
a = sum(sum(bool(m[len(x)-5]) for x in l[1].split()) for l in L)
b=0
for n,o in L:
    # S[i] = set of wires appearing exactly i times across the input patterns
    # (only the counts listed in m are collected).
    S = [set(c for c in n if n.count(c)==i) for i in m]
    # e: the wire occurring 4 times; c: the 8-times wire also in digit "1"
    # (shortest pattern).
    e,c = *S[5], *S[2].intersection(min(n.split(),key=len))
    # Decode the four output digits and accumulate the resulting number.
    l = [str(M(len(O)-5,[e in O,c in O])) for O in o.split()]
    b+=int(''.join(l))
print("Part 1: {:d}".format(a))
print("Part 2: {:d}".format(b))
| StarcoderdataPython |
4864190 | <filename>FNNMCMultiDim.py
#!/usr/bin/env python
# PyTorch 1.8.1-CPU virtual env.
# Python 3.9.4 Windows 10
# -*- coding: utf-8 -*-
"""The script implement the classical longstaff-schwartz algorithm for pricing american options.
This script focus on the multidimensional case for rainbow option
"""
import numpy as np
import torch
import os
#############
# Data preparation
#############
class regressionDataset(torch.utils.data.Dataset):
    """Dataset pairing covariate rows with scalar responses, stored as
    float32 tensors on the module-level ``device``."""
    def __init__(self, covariates, response):
        matrixCovariates = covariates
        matrixResponse = response.reshape(-1,1)  # column vector of targets
        self.covariates = torch.tensor(matrixCovariates, dtype=torch.float32).to(device)
        self.response = torch.tensor(matrixResponse, dtype=torch.float32).to(device)
    def __len__(self):
        return len(self.covariates)
    def __getitem__(self, idx):
        preds = self.covariates[idx,:] # or just [idx]
        price = self.response[idx,:]
        return (preds, price) # tuple of matrices
##########
# Model for regression
#########
device = 'cuda' if torch.cuda.is_available() else 'cpu'  # run on GPU when available
#Design model
class Net(torch.nn.Module):
    """Feed-forward regression network with 4 hidden ReLU layers of
    configurable width, mapping spot prices to a scalar continuation value.

    Weights are Xavier-initialised and biases zeroed; the commented-out lines
    keep optional dropout / deeper-network variants.
    """
    def __init__(self, hyperparameters):
        super(Net, self).__init__()
        self.hiddenlayer1 = torch.nn.Linear(hyperparameters.inputSize, hyperparameters.hiddenlayer1)
        #self.drop1 = torch.nn.Dropout(0.25)
        self.hiddenlayer2 = torch.nn.Linear(hyperparameters.hiddenlayer1, hyperparameters.hiddenlayer2)
        #self.drop2 = torch.nn.Dropout(0.25)
        self.hiddenlayer3 = torch.nn.Linear(hyperparameters.hiddenlayer2, hyperparameters.hiddenlayer3)
        self.hiddenlayer4 = torch.nn.Linear(hyperparameters.hiddenlayer3, hyperparameters.hiddenlayer4)
        #self.hiddenlayer5 = torch.nn.Linear(hyperparameters.hiddenlayer4, hyperparameters.hiddenlayer5)
        #self.hiddenlayer6 = torch.nn.Linear(hyperparameters.hiddenlayer5, hyperparameters.hiddenlayer6)
        self.output = torch.nn.Linear(hyperparameters.hiddenlayer4, 1)
        # Xavier/Glorot uniform initialisation for all weights, zero biases.
        torch.nn.init.xavier_uniform_(self.hiddenlayer1.weight)
        torch.nn.init.zeros_(self.hiddenlayer1.bias)
        torch.nn.init.xavier_uniform_(self.hiddenlayer2.weight)
        torch.nn.init.zeros_(self.hiddenlayer2.bias)
        torch.nn.init.xavier_uniform_(self.hiddenlayer3.weight)
        torch.nn.init.zeros_(self.hiddenlayer3.bias)
        torch.nn.init.xavier_uniform_(self.hiddenlayer4.weight)
        torch.nn.init.zeros_(self.hiddenlayer4.bias)
        #torch.nn.init.xavier_uniform_(self.hiddenlayer5.weight)
        #torch.nn.init.zeros_(self.hiddenlayer5.bias)
        #torch.nn.init.xavier_uniform_(self.hiddenlayer6.weight)
        #torch.nn.init.zeros_(self.hiddenlayer6.bias)
        torch.nn.init.xavier_uniform_(self.output.weight)
        torch.nn.init.zeros_(self.output.bias)
    def forward(self, x):
        """Forward pass: 4 ReLU-activated hidden layers, linear output head."""
        relu=torch.nn.ReLU()
        #Leakyrelu=torch.nn.LeakyReLU(negative_slope=0.01)
        #Leakyrelu=torch.nn.LeakyReLU(negative_slope=0.3)
        z = relu(self.hiddenlayer1(x))
        #z = self.drop1(z)
        z = relu(self.hiddenlayer2(z))
        #z = self.drop2(z)
        z = relu(self.hiddenlayer3(z))
        z = relu(self.hiddenlayer4(z))
        #z = relu(self.hiddenlayer5(z))
        #z = relu(self.hiddenlayer6(z))
        z = self.output(z) # no activation
        return z
################
# Training network
##############
def trainNetwork(trainingData, model, hyperparameters, timeStep):
    """Train ``model`` on ``trainingData`` with Adam + MSE loss and simple
    early stopping; the best model is saved to
    TrainedModels/modelAtTimeStep<timeStep>.pth.

    NOTE(review): when trainOnlyLastTimeStep is set, hyperparameters.epochs is
    mutated to 1 at the end, so subsequent time steps train a single epoch.
    """
    model.train() # set mode
    loss_func = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=hyperparameters.learningRate)
    bestEpoch_loss = 0.0 #Parameter to keep track of performance improvement
    notImprovedEpoch = 0 #count number of improved iterations
    trainedEpochs = 0 #count the number of epochs used for training
    path = os.path.join(".", "TrainedModels", "modelAtTimeStep" + str(timeStep) + ".pth") # where to save trained model
    for epoch in range(0, hyperparameters.epochs):
        torch.manual_seed(1 + epoch) # recovery reproduce
        epoch_loss = 0.0 # sum avg loss per item
        for (batch_idx, batch) in enumerate(trainingData):
            predictor = batch[0]
            response = batch[1]
            optimizer.zero_grad()
            output = model(predictor) #forward pass
            loss_val = loss_func(output, response) # avg loss in batch
            epoch_loss += loss_val.item() # a sum of averages
            loss_val.backward() # Compute gradient
            optimizer.step() #parameter update
        #Early stopping: checkpoint on improvement, stop after `patience`
        #epochs without one.
        if(bestEpoch_loss>epoch_loss or epoch==0):
            bestEpoch_loss = epoch_loss
            notImprovedEpoch=0
            trainedEpochs = epoch
            torch.save(model.state_dict(), path) #save model
        elif(notImprovedEpoch>=hyperparameters.patience):
            break
        else:
            notImprovedEpoch = notImprovedEpoch + 1
    if (hyperparameters.trainOnlyLastTimeStep==True):
        hyperparameters.epochs = 1
    print("Number of trained Epochs:", trainedEpochs)
##########
# Dynamic Regression Phase
##########
class Hyperparameters:
    """Bundle of network-architecture and training settings.

    Every constructor argument is stored verbatim as an instance attribute
    of the same name.
    """
    def __init__(self, learningRate, inputSize=0, hiddenlayer1=0, hiddenlayer2=0, hiddenlayer3=0, hiddenlayer4=0, hiddenlayer5=0, hiddenlayer6=0, epochs=10**4, batchSize=10**4, trainOnlyLastTimeStep=False, patience=5):
        # Copy each constructor argument onto the instance under its own name.
        for attribute, value in list(vars().items()):
            if attribute != "self":
                setattr(self, attribute, value)
def findNeuralNetworkModels(simulatedPaths, Option, MarketVariables, hyperparameters):
    """Backward induction (Longstaff-Schwartz with a neural regressor).

    Walks the exercise dates from maturity back to T_1, at each date training
    a network on the in-the-money paths to approximate the continuation
    value; each trained model is saved under TrainedModels/.
    """
    # Go backward recursively to find the regression coefficients all the way back to T_1
    # and then store all the regression coefficients
    timeStepsTotal = np.shape(simulatedPaths)[0]-1
    timeIncrement = Option.timeToMat/(timeStepsTotal)
    for timeStep in range(timeStepsTotal,0,-1):
        #Get payoff at maturity
        if(timeStep== (timeStepsTotal) ):
            ValueVec = Option.payoff(simulatedPaths[timeStep,:,:])
            # Seed the warm-start chain with an untrained network at maturity.
            path = os.path.join(".", "TrainedModels", str("modelAtTimeStep") + str(timeStep) + ".pth")
            torch.save(Net(hyperparameters).state_dict(), path)
        #Find regressionscoefficients at each exercise dates before maturity
        else:
            # Discount the downstream cashflows one step back.
            response = np.exp(-MarketVariables.r*timeIncrement)*ValueVec
            currentSpots = simulatedPaths[timeStep,:,:]
            pathsITM = np.where(Option.payoff(currentSpots)>0)
            if(np.shape(pathsITM)[1]):
                #create dataset for training (only in-the-money paths)
                trainingData = regressionDataset(currentSpots[pathsITM], response[pathsITM])
                iterableTrainingData = torch.utils.data.DataLoader(trainingData, batch_size=hyperparameters.batchSize, shuffle=True)
                # Warm-start from the model trained at the next time step.
                regressionModel = Net(hyperparameters).to(device)
                path = os.path.join(".", "TrainedModels", str("modelAtTimeStep") + str(timeStep+1) + ".pth")
                regressionModel.load_state_dict(torch.load(path))
                trainNetwork(trainingData=iterableTrainingData, model=regressionModel, hyperparameters=hyperparameters,
                    timeStep=timeStep)
                #load model after training of model
                evaluationModel = Net(hyperparameters).to(device)
                path = os.path.join(".", "TrainedModels", str("modelAtTimeStep") + str(timeStep) + ".pth")
                evaluationModel.load_state_dict(torch.load(path))
                with torch.no_grad():
                    expectedContinuationValue = evaluationModel(torch.tensor(currentSpots, dtype=torch.float32).to(device))
                intrinsicValue = Option.payoff(currentSpots)
                ValueVec = response
                #overwrite the default to keep the option alive, if it is beneficial to keep the exercise for the ITM paths.
                #CashFlow from decision wheather to stop or keep option alive
                npExpectedContinuationValue = expectedContinuationValue.numpy().flatten() #transform tensor to vector
                cashFlowChoice = np.where(intrinsicValue>npExpectedContinuationValue, intrinsicValue, response)
                ValueVec[pathsITM] = cashFlowChoice[pathsITM]
            else:
                ValueVec = response
#########
# Pricing
#########
def priceAmericanOption(simulatedPaths, Option, MarketVariables, hyperparameters):
    """Price the American option on fresh paths using the trained networks.

    Replays backward induction with the models saved by
    findNeuralNetworkModels and returns the discounted mean cashflow.
    """
    timeStepsTotal = simulatedPaths.shape[0]-1 #time 0 does not count to a timestep
    timeIncrement = Option.timeToMat/timeStepsTotal
    regressionModel = Net(hyperparameters).to(device)
    regressionModel.eval()
    for timeStep in range(timeStepsTotal,0,-1):
        #Get payoff at maturity
        if(timeStep==timeStepsTotal):
            ValueVec = Option.payoff(simulatedPaths[timeStep,:,:])
        #Use coefficientMatrix and paths to price american option
        else:
            continuationValue = np.exp(-MarketVariables.r*timeIncrement)*ValueVec
            currentSpots = simulatedPaths[timeStep,:,:]
            # Load the network trained for this exercise date.
            path = os.path.join(".", "TrainedModels", str("modelAtTimeStep") + str(timeStep) + ".pth")
            regressionModel.load_state_dict(torch.load(path))
            with torch.no_grad():
                expectedContinuationValue = regressionModel(torch.tensor(currentSpots, dtype=torch.float32).to(device))
            intrinsicValue = Option.payoff(currentSpots)
            #overwrite the default to keep the option alive, if it is beneficial to keep the exercise for the ITM paths.
            ValueVec = continuationValue #default value to keep option alive
            npExpectedContinuationValue = expectedContinuationValue.numpy().flatten()
            cashFlowChoice = np.where(intrinsicValue>npExpectedContinuationValue, intrinsicValue, continuationValue)
            pathsITM = np.where(intrinsicValue>0)
            ValueVec[pathsITM] = cashFlowChoice[pathsITM]
    return ValueVec.mean()*np.exp(-MarketVariables.r*timeIncrement)
import Products
import SimulationPaths.GBMMultiDim
import time
if __name__ == '__main__':
    # Benchmark: price a 2-asset Bermudan call-on-max (9 exercise dates)
    # 100 times and compare the Monte Carlo estimates with the reference
    # price printed below (13.9).
    timeStepsTotal = 9
    normalizeStrike=100
    pathTotal = 10**4
    callMax = Products.Option(timeToMat=3, strike=1, typeOfContract="CallMax")
    marketVariables = Products.MarketVariables(r=0.05, dividend=0.10, vol=0.2, spot=[100/normalizeStrike]*2, correlation=0.0)
    hyperparameters = Hyperparameters(learningRate=0.001, inputSize=2, hiddenlayer1=10, hiddenlayer2=10, epochs=10, batchSize=10**4)
    estimates = np.zeros(100)
    # Simulate the learning paths once and fit the regression networks.
    timeSimPathsStart = time.time()
    learningPaths = SimulationPaths.GBMMultiDim.simulatePaths(timeStepsTotal=timeStepsTotal,pathsTotal=10**6, marketVariables=marketVariables, timeToMat=callMax.timeToMat)
    timeSimPathsEnd = time.time()
    print(f"Time taken to simulate paths is: {timeSimPathsEnd-timeSimPathsStart:f}")
    timeRegressionStart = time.time()
    findNeuralNetworkModels(simulatedPaths=learningPaths, Option=callMax, MarketVariables=marketVariables, hyperparameters=hyperparameters)
    timeRegressionEnd = time.time()
    print(f"Time taken for find regressioncoefficients: {timeRegressionEnd-timeRegressionStart:f}")
    for i in range(100):
        # create empirical estimations on independent pricing paths
        pricingPaths = SimulationPaths.GBMMultiDim.simulatePaths(timeStepsTotal=timeStepsTotal,pathsTotal=pathTotal, marketVariables=marketVariables, timeToMat=callMax.timeToMat)
        timePriceStart = time.time()
        price = priceAmericanOption(simulatedPaths=pricingPaths, Option=callMax, MarketVariables=marketVariables, hyperparameters=hyperparameters)*normalizeStrike
        timePriceEnd = time.time()
        print(f"Time taken for Pricing: {timePriceEnd-timePriceStart:f}")
        print(f"The estimated price is: {price:f} and the true price is: 13.9")
        estimates[i]=price
    print("Mean: ", np.mean(estimates))
    # FIX: removed a dataset artifact (" | StarcoderdataPython |") fused onto this line.
    print("Std Error Mean: ", np.std(estimates)/10)
4908236 | <reponame>ansonb/NeuralNetwork<filename>lib/ops.py
from abc import ABC, abstractmethod
class Op(ABC):
    """Base class for computation-graph operations.

    Each Op caches its forward value in ``val`` (set by ``f``) and exposes
    ``derivative`` for backpropagation through the graph.
    """
    def __init__(self, node_name, is_trainable=False):
        super().__init__()
        self.val = None  # cached forward value, populated by f()
        self.variable_type = 'none'
        self.node_name = node_name
        # BUG FIX: is_trainable was accepted but silently discarded; store it
        # so callers can distinguish trainable nodes.
        self.is_trainable = is_trainable
    def bprop(self, input_nodes, cur_node, target_node, grad_table):
        """Chain rule: gradient at cur_node times d(cur_node)/d(target_node)."""
        # `is not None` instead of `!= None` -- identity check is the idiom.
        assert grad_table[cur_node.name] is not None, "grad_table[cur_node] should not be None"
        grad = grad_table[cur_node.name]*self.derivative(target_node)
        return grad
    def f(self, *args, **kwargs):
        """Forward computation; subclasses set and return self.val."""
        pass
    @abstractmethod
    def default_derivative(self, *args, **kwargs):
        pass
    @abstractmethod
    def derivative(self, *args, **kwargs):
        pass
# TODO: could add more non linearity
class Relu(Op):
    """ReLU activation node: f(x) = max(x, 0) over a single input node.

    NOTE(review): unlike Mult/Add/Sub, the constructor asserts len(args)==1
    and then reads args[0][0], i.e. the single positional argument is a list
    containing the input node.
    """
    def __init__(self, *args, **kwargs):
        assert len(args)==1, 'Length of inputs must be 1 to Relu op'
        super().__init__(kwargs['node_name'])
        self.inputs = args
        self.input_node_1 = args[0][0]
        # Map node names to partial-derivative functions w.r.t. them.
        self.derivative_dict = {}
        self.derivative_dict[self.input_node_1.name] = self.derivative_node_1
        self.derivative_dict[self.node_name] = lambda *args, **kwargs: 1
    def f(self, *args, **kwargs):
        """Forward pass: cache and return max(input value, 0)."""
        self.val = max(self.input_node_1.op.val,0)
        # self.val = self.input_node_1.op.val
        return self.val
    def derivative_node_1(self, *args, **kwargs):
        """Sub-gradient of ReLU w.r.t. its direct input: 0 for x<0, else 1."""
        x = args[0].op.val
        return 0 if x<0 else 1
        # return x
    def default_derivative(self, *args, **kwargs):
        # For nodes not directly connected here, delegate down the graph.
        return self.input_node_1.op.derivative(args[0])
    def derivative(self, *args, **kwargs):
        """Derivative of this node's value w.r.t. node args[0]."""
        if args[0].name == self.node_name:
            return 1
        return self.derivative_dict.get(args[0].name,self.default_derivative)(*args,**kwargs)
class Mult(Op):
    """Multiplication node: val = input_node_1.val * input_node_2.val."""
    def __init__(self, *args, **kwargs):
        assert len(args[0])==2, 'Length of inputs must be 2 to Mult op'
        super().__init__(kwargs['node_name'], is_trainable=False)
        self.inputs = args[0]
        self.input_node_1 = self.inputs[0]
        self.input_node_2 = self.inputs[1]
        # Map node names to partial-derivative functions w.r.t. them.
        self.derivative_dict = {}
        self.derivative_dict[self.input_node_1.name] = self.derivative_node_1
        self.derivative_dict[self.input_node_2.name] = self.derivative_node_2
        self.derivative_dict[self.node_name] = lambda *args, **kwargs: 1
    def f(self, *args, **kwargs):
        """Forward pass: cache and return the product of the input values."""
        self.val = self.input_node_1.op.val * self.input_node_2.op.val
        return self.val
    def derivative_node_1(self, *args, **kwargs):
        # BUG FIX: referenced bare `input_node_2` (NameError at call time).
        # d(x*y)/dx = y.
        return self.input_node_2.op.val
    def derivative_node_2(self, *args, **kwargs):
        # BUG FIX: referenced bare `input_node_1` (NameError at call time).
        # d(x*y)/dy = x.
        return self.input_node_1.op.val
    def default_derivative(self, *args, **kwargs):
        # Product rule for nodes further down the graph.
        return self.input_node_1.op.val*self.input_node_2.op.derivative_dict.get(args[0].name,self.input_node_2.op.default_derivative)(*args,**kwargs) + self.input_node_2.op.val*self.input_node_1.op.derivative_dict.get(args[0].name,self.input_node_1.op.default_derivative)(*args,**kwargs)
    def derivative(self, *args, **kwargs):
        """Derivative of this node's value w.r.t. node args[0] (product rule)."""
        if args[0].name == self.node_name:
            return 1
        return self.input_node_1.op.val*self.input_node_2.op.derivative_dict.get(args[0].name,self.input_node_2.op.default_derivative)(*args,**kwargs) + self.input_node_2.op.val*self.input_node_1.op.derivative_dict.get(args[0].name,self.input_node_1.op.default_derivative)(*args,**kwargs)
class Add(Op):
    """Addition node: val = input_node_1.val + input_node_2.val."""
    def __init__(self, *args, **kwargs):
        assert len(args[0])==2, 'Length of inputs must be 2 to Add op'
        super().__init__(kwargs['node_name'],is_trainable=False)
        self.inputs = args[0]
        self.input_node_1 = self.inputs[0]
        self.input_node_2 = self.inputs[1]
        # Map node names to partial-derivative functions w.r.t. them.
        self.derivative_dict = {}
        self.derivative_dict[self.input_node_1.name] = self.derivative_node_1
        self.derivative_dict[self.input_node_2.name] = self.derivative_node_2
        self.derivative_dict[self.node_name] = lambda *args, **kwargs: 1
    def f(self, *args, **kwargs):
        """Forward pass: cache and return the sum of the input values."""
        self.val = self.input_node_1.op.val + self.input_node_2.op.val
        return self.val
    def derivative_node_1(self, *args, **kwargs):
        return 1
    def derivative_node_2(self, *args, **kwargs):
        return 1
    def default_derivative(self, *args, **kwargs):
        # Sum rule: the derivative distributes over both inputs.
        return self.input_node_2.op.derivative_dict.get(args[0].name,self.input_node_2.op.default_derivative)(*args,**kwargs) + self.input_node_1.op.derivative_dict.get(args[0].name,self.input_node_1.op.default_derivative)(*args,**kwargs)
    def derivative(self, *args, **kwargs):
        """Derivative of this node's value w.r.t. node args[0] (sum rule)."""
        if args[0].name == self.node_name:
            return 1
        return self.input_node_2.op.derivative_dict.get(args[0].name,self.input_node_2.op.default_derivative)(*args,**kwargs) + self.input_node_1.op.derivative_dict.get(args[0].name,self.input_node_1.op.default_derivative)(*args,**kwargs)
class Sub(Op):
    """Subtraction node: val = input_node_1.val - input_node_2.val."""
    def __init__(self, *args, **kwargs):
        assert len(args[0])==2, 'Length of inputs must be 2 to Sub op'
        super().__init__(kwargs['node_name'],is_trainable=False)
        self.inputs = args[0]
        self.input_node_1 = self.inputs[0]
        self.input_node_2 = self.inputs[1]
        # Map node names to partial-derivative functions w.r.t. them.
        self.derivative_dict = {}
        self.derivative_dict[self.input_node_1.name] = self.derivative_node_1
        self.derivative_dict[self.input_node_2.name] = self.derivative_node_2
        self.derivative_dict[self.node_name] = lambda *args, **kwargs: 1
    def f(self, *args, **kwargs):
        """Forward pass: cache and return the difference of the input values."""
        self.val = self.input_node_1.op.val - self.input_node_2.op.val
        return self.val
    def derivative_node_1(self, *args, **kwargs):
        return 1
    def derivative_node_2(self, *args, **kwargs):
        # Subtrahend enters with a negative sign.
        return -1
    def default_derivative(self, *args, **kwargs):
        # Difference rule: second input's contribution is negated.
        return -1*self.input_node_2.op.derivative_dict.get(args[0].name,self.input_node_2.op.default_derivative)(*args,**kwargs) + self.input_node_1.op.derivative_dict.get(args[0].name,self.input_node_1.op.default_derivative)(*args,**kwargs)
    def derivative(self, *args, **kwargs):
        """Derivative of this node's value w.r.t. node args[0] (difference rule)."""
        if args[0].name == self.node_name:
            return 1
        return -1*self.input_node_2.op.derivative_dict.get(args[0].name,self.input_node_2.op.default_derivative)(*args,**kwargs) + self.input_node_1.op.derivative_dict.get(args[0].name,self.input_node_1.op.default_derivative)(*args,**kwargs)
class VariableOp(Op):
    """Leaf node holding a trainable variable; its derivative w.r.t. itself is 1."""
    def __init__(self, *args, **kwargs):
        assert len(args)==1, 'Length of inputs must be 1 to VariableOp op'
        super().__init__(kwargs['node_name'],is_trainable=True)
        self.inputs = args
        self.derivative_dict = {}
        self.derivative_dict[kwargs['node_name']] = self.derivative_self
        # NOTE(review): this overwrites the entry above with a lambda; both
        # return 1, so behaviour is unchanged.
        self.derivative_dict[self.node_name] = lambda *args, **kwargs: 1
        self.variable_type = kwargs['variable_type']
    def derivative_self(self, *args, **kwargs):
        return 1
    def default_derivative(self, *args, **kwargs):
        # A variable does not depend on any other node.
        return 0
    def derivative(self, *args, **kwargs):
        """Derivative w.r.t. node args[0]: 1 for itself, else 0."""
        return self.derivative_dict.get(args[0].name,self.default_derivative)(*args,**kwargs)
class PlaceholderOp(Op):
    """Leaf node for externally fed values (inputs/targets); not trainable."""
    def __init__(self, *args, **kwargs):
        assert len(args)==1, 'Length of inputs must be 1 to PlaceholderOp'
        super().__init__(kwargs['node_name'], is_trainable=False)
        self.inputs = args
        self.derivative_dict = {}
        self.derivative_dict[kwargs['node_name']] = self.derivative_self
        self.derivative_dict[self.node_name] = lambda *args, **kwargs: 1
        self.val = None
    def set_val(self, val):
        """Feed a concrete value into the placeholder."""
        self.val = val
    def derivative_self(self, *args, **kwargs):
        return 1
    def default_derivative(self, *args, **kwargs):
        # A placeholder does not depend on any other node.
        return 0
    def derivative(self, *args, **kwargs):
        """Derivative w.r.t. node args[0]: 1 for itself, else 0.

        FIX: removed a dataset artifact (" | StarcoderdataPython |") that was
        fused onto the return line, breaking the file's syntax.
        """
        return self.derivative_dict.get(args[0].name, self.default_derivative)(*args, **kwargs)
5110522 | <reponame>aschneuw/road-segmentation-unet
import code
import glob
import os
import time
from datetime import datetime
import numpy as np
import tensorflow as tf
import images
import unet
from constants import NUM_CHANNELS, IMG_PATCH_SIZE, FOREGROUND_THRESHOLD
from summary import Summary
# Command-line flags configuring data locations, the U-Net architecture and
# the training/evaluation loop; FLAGS is the parsed singleton used below.
tf.app.flags.DEFINE_integer('batch_size', 25, "Batch size of training instances")
tf.app.flags.DEFINE_boolean('dilated_layers', False, "Add dilated CNN layers")
tf.app.flags.DEFINE_float('dropout', 0.8, "Probability to keep an input")
tf.app.flags.DEFINE_boolean('ensemble_prediction', False, "Ensemble Prediction")
tf.app.flags.DEFINE_string('eval_data_dir', None, "Directory containing eval images")
tf.app.flags.DEFINE_integer('eval_every', 500, "Number of steps between evaluations")
tf.app.flags.DEFINE_boolean('eval_train', False, "Evaluate training data")
tf.app.flags.DEFINE_integer('gpu', -1, "GPU to run the model on")
tf.app.flags.DEFINE_boolean('image_augmentation', False, "Augment training set of images with transformations")
tf.app.flags.DEFINE_boolean('interactive', False, "Spawn interactive Tensorflow session")
tf.app.flags.DEFINE_string('logdir', os.path.abspath("./logdir"), "Directory where to write logfiles")
tf.app.flags.DEFINE_float('lr', 0.01, "Initial learning rate")
tf.app.flags.DEFINE_string('model_path', None, "Restore exact model path")
tf.app.flags.DEFINE_float('momentum', 0.9, "Momentum")
tf.app.flags.DEFINE_integer('num_epoch', 5, "Number of pass on the dataset during training")
tf.app.flags.DEFINE_integer('num_eval_images', 4, "Number of images to predict for an evaluation")
tf.app.flags.DEFINE_integer('num_gpu', 1, "Number of available GPUs to run the model on")
tf.app.flags.DEFINE_integer('num_layers', 5, "Number of layers of the U-Net")
tf.app.flags.DEFINE_integer('patch_size', 128, "Size of the prediction image")
tf.app.flags.DEFINE_integer('pred_batch_size', 2, "Batch size of batchwise prediction")
tf.app.flags.DEFINE_string('restore_date', None, "Restore the model from specific date")
tf.app.flags.DEFINE_integer('restore_epoch', None, "Restore the model from specific epoch")
tf.app.flags.DEFINE_boolean('restore_model', False, "Restore the model from previous checkpoint")
tf.app.flags.DEFINE_integer('root_size', 64, "Number of filters of the first U-Net layer")
tf.app.flags.DEFINE_string('rotation_angles', None, "Rotation angles")
tf.app.flags.DEFINE_string('save_path', os.path.abspath("./runs"),
                           "Directory where to write checkpoints, overlays and submissions")
tf.app.flags.DEFINE_integer('seed', 2017, "Random seed for reproducibility")
tf.app.flags.DEFINE_integer('stride', 16, "Sliding delta for patches")
tf.app.flags.DEFINE_string('train_data_dir', os.path.abspath("./data/training"),
                           "Directory containing training images/ groundtruth/")
tf.app.flags.DEFINE_integer('train_score_every', 1000, "Compute training score after the given number of iterations")
FLAGS = tf.app.flags.FLAGS
class Options(object):
    """Options used by our model.

    Snapshots every relevant FLAGS value onto the instance so the model
    code is decoupled from tf.app.flags.
    """

    # Flags copied verbatim onto the instance; ``rotation_angles`` is the
    # only flag needing a parsing step and is handled separately.
    _COPIED_FLAGS = (
        "batch_size", "dilated_layers", "dropout", "ensemble_prediction",
        "eval_data_dir", "eval_every", "eval_train", "gpu",
        "image_augmentation", "interactive", "logdir", "lr", "model_path",
        "momentum", "num_epoch", "num_eval_images", "num_gpu", "num_layers",
        "patch_size", "pred_batch_size", "restore_date", "restore_epoch",
        "restore_model", "root_size", "save_path", "seed", "stride",
        "train_data_dir", "train_score_every",
    )

    def __init__(self):
        for flag_name in self._COPIED_FLAGS:
            setattr(self, flag_name, getattr(FLAGS, flag_name))
        # "90,180" -> [90, 180]; None when the flag is unset or empty.
        if FLAGS.rotation_angles:
            self.rotation_angles = [int(angle) for angle in FLAGS.rotation_angles.split(",")]
        else:
            self.rotation_angles = None
class ConvolutionalModel:
    """U-Net based convolutional model for pixel-wise road segmentation.

    Wraps graph construction, training, full-image prediction and
    checkpointing around a single TensorFlow (1.x) session.
    """

    def __init__(self, options, session):
        """Build the model graph inside *session* using *options*.

        :param options: Options instance with all hyper-parameters/paths.
        :param session: an open tf.Session the graph is evaluated in.
        """
        self._options = options
        self._session = session
        np.random.seed(options.seed)
        tf.set_random_seed(options.seed)
        # Input patches must be larger than the predicted patch because the
        # U-Net uses valid convolutions.
        self.input_size = unet.input_size_needed(options.patch_size, options.num_layers)
        self.experiment_name = datetime.now().strftime("%Y-%m-%dT%Hh%Mm%Ss")
        summary_path = os.path.join(options.logdir, self.experiment_name)
        self._summary = Summary(options, session, summary_path)
        self.build_graph()

    def cross_entropy_loss(self, labels, pred_logits):
        """Sparse softmax cross-entropy loss, averaged over all pixels.

        (The original docstring said "BCE loss", but this is a 2-class
        softmax cross-entropy, not a sigmoid/binary cross-entropy.)
        """
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=pred_logits,
            labels=labels)
        loss = tf.reduce_mean(cross_entropy)
        return loss

    def optimize(self, loss):
        """Build the part of the graph to optimize the loss function.

        Returns the train op and the (decaying) learning-rate tensor.
        """
        opts = self._options
        # Exponential decay: lr * 0.95 every 1000 steps.
        learning_rate = tf.train.exponential_decay(opts.lr, self._global_step,
                                                   1000, 0.95, staircase=True)
        # Use simple momentum for the optimization.
        optimizer = tf.train.MomentumOptimizer(learning_rate, opts.momentum)
        train = optimizer.minimize(loss, global_step=self._global_step)
        return train, learning_rate

    def build_graph(self):
        """Build the graph for the full model (placeholders, net, loss, summaries)."""
        opts = self._options
        # Global step: scalar, i.e., shape [].
        global_step = tf.Variable(0, name="global_step")
        self._global_step = global_step
        # data placeholders
        patches_node = tf.placeholder(tf.float32,
                                      shape=(opts.batch_size, self.input_size, self.input_size, NUM_CHANNELS),
                                      name="patches")
        labels_node = tf.placeholder(tf.int64,
                                     shape=(opts.batch_size, opts.patch_size, opts.patch_size),
                                     name="groundtruth")
        # Augmentation is wired into the graph but gated by a boolean
        # placeholder, so it can be disabled at prediction time.
        patches_node, labels_node = self.stochastic_images_augmentation(patches_node, labels_node)
        dropout_keep = tf.placeholder_with_default(1.0, shape=(), name="dropout_keep")
        self._dropout_keep = dropout_keep
        predict_logits = unet.forward(patches_node, root_size=opts.root_size, num_layers=opts.num_layers,
                                      dilated_layers=opts.dilated_layers, dropout_keep=dropout_keep)
        # Keep only the probability of the "road" class (channel 1).
        predictions = tf.nn.softmax(predict_logits, dim=3)
        predictions = predictions[:, :, :, 1]
        loss = self.cross_entropy_loss(labels_node, predict_logits)
        self._train, self._learning_rate = self.optimize(loss)
        self._loss = loss
        self._predictions = predictions
        self._patches_node = patches_node
        self._labels_node = labels_node
        self._predict_logits = predict_logits
        self._summary.initialize_eval_summary()
        self._summary.initialize_train_summary()
        self._summary.initialize_overlap_summary()
        self._summary.initialize_missclassification_summary()
        summary_scalars = {"loss": loss, "learning_rate": self._learning_rate}
        self.summary_op = self._summary.get_summary_op(summary_scalars)
        # Properly initialize all variables.
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()
        self.saver = tf.train.Saver(max_to_keep=100)

    def stochastic_images_augmentation(self, imgs, masks):
        """Add stochastic transformation to imgs and masks:
        flip_ud, flip_lr, transpose, rotation by any 90 degree.

        The whole pipeline is gated by the ``image_augmentation_flag``
        placeholder so it is a no-op at prediction time.
        """
        original_imgs, original_masks = imgs, masks
        batch_size = int(imgs.shape[0])
        self._image_augmentation = tf.placeholder_with_default(False, shape=(), name='image_augmentation_flag')

        def apply_transform(transform, pim):
            # pim = (probability, image, mask): apply `transform` to both
            # image and mask iff probability > 0.5 (per-sample coin flip).
            proba, img, mask = pim
            return tf.cond(proba > 0.5, lambda: transform(img), lambda: img), \
                   tf.cond(proba > 0.5, lambda: transform(mask), lambda: mask)

        def stochastic_transform(transform, imgs, masks, name):
            proba = tf.random_uniform(shape=(batch_size,), name="should_" + name)
            # BUG FIX: the original passed tf.image.flip_up_down here instead
            # of the `transform` argument, so flip_left_right and transpose
            # were never actually applied.
            imgs, masks = tf.map_fn(lambda pim: apply_transform(transform, pim),
                                    [proba, imgs, masks],
                                    dtype=(imgs.dtype, masks.dtype))
            return imgs, masks

        with tf.variable_scope("data_augm"):
            # Masks need a channel axis for tf.image ops; removed afterwards.
            masks = tf.expand_dims(masks, -1)
            imgs, masks = stochastic_transform(tf.image.flip_up_down, imgs, masks, name="flip_up_down")
            # BUG FIX: op name was a duplicated "flip_up_down".
            imgs, masks = stochastic_transform(tf.image.flip_left_right, imgs, masks, name="flip_left_right")
            imgs, masks = stochastic_transform(tf.image.transpose_image, imgs, masks, name="transpose")
            # Random number of 90-degree rotations in {0, 1, 2, 3} per sample.
            number_rotation = tf.cast(tf.floor(tf.random_uniform(shape=(batch_size,), name="number_rotation") * 4),
                                      tf.int32)
            imgs, masks = tf.map_fn(lambda kim: (tf.image.rot90(kim[1], kim[0]), tf.image.rot90(kim[2], kim[0])),
                                    [number_rotation, imgs, masks],
                                    dtype=(imgs.dtype, masks.dtype))
            masks = tf.squeeze(masks, -1)
        # Bypass augmentation entirely unless the flag placeholder is fed True.
        imgs, masks = tf.cond(self._image_augmentation,
                              lambda: (imgs, masks),
                              lambda: (original_imgs, original_masks))
        return imgs, masks

    def train(self, patches, labels_patches, imgs, labels):
        """Train the model for one epoch.

        params:
            patches: [num_patches, input_size, input_size, num_channel]
            labels_patches: [num_patches, patch_size, patch_size]
            imgs: [num_images, img_height, img_width, num_channel]
            labels: [num_images, num_patches_side, num_patches_side]
        """
        opts = self._options
        # Binarize the groundtruth.
        labels_patches = (labels_patches >= 0.5) * 1.
        labels = (labels >= 0.5) * 1.
        num_train_patches = patches.shape[0]
        # Shuffle the patch order every epoch.
        indices = np.arange(0, num_train_patches)
        np.random.shuffle(indices)
        num_errors = 0
        total = 0
        for batch_i, offset in enumerate(range(0, num_train_patches - opts.batch_size, opts.batch_size)):
            batch_indices = indices[offset:offset + opts.batch_size]
            feed_dict = {
                self._patches_node: patches[batch_indices, :, :, :],
                self._labels_node: labels_patches[batch_indices],
                self._dropout_keep: opts.dropout,
                self._image_augmentation: opts.image_augmentation,
            }
            # FIX: the original unpacked `..., predictions, predictions, ...`,
            # assigning the logits to `predictions` and immediately
            # overwriting them; unused fetches now go to `_`.
            summary_str, _, _, _, predictions, step = self._session.run(
                [self.summary_op, self._train, self._loss, self._predict_logits, self._predictions,
                 self._global_step],
                feed_dict=feed_dict)
            print("Batch {} Step {}".format(batch_i, step), end="\r")
            self._summary.add(summary_str, global_step=step)
            # NOTE(review): `predictions` are probabilities in [0, 1], so this
            # accumulates a soft (fractional) pixel error -- confirm intended.
            num_errors += np.abs(labels_patches[batch_indices] - predictions).sum()
            total += opts.batch_size
            self._summary.add_to_pixel_missclassification_summary(num_errors, total, self._global_step)
            # from time to time do full prediction on some images
            if step > 0 and step % opts.eval_every == 0:
                print()
                images_to_predict = imgs[:opts.num_eval_images, :, :, :]
                masks = self.predict(images_to_predict)
                overlays = images.overlays(images_to_predict, masks)
                pred_masks = ((masks > 0.5) * 1).squeeze()
                true_masks = labels[:opts.num_eval_images, :, :].squeeze()
                # NOTE(review): this passes the full `labels` array while
                # `masks`/`overlays` only cover num_eval_images -- verify
                # against Summary.add_to_eval_summary.
                self._summary.add_to_eval_summary(masks, overlays, labels, self._global_step)
                self._summary.add_to_overlap_summary(true_masks, pred_masks, self._global_step)
            if step > 0 and step % opts.train_score_every == 0:
                self._summary.add_to_training_summary(self.predict(imgs), labels, self._global_step)
        self._summary.flush()

    def predict(self, imgs):
        """Run inference on `imgs` and return predicted masks.

        imgs: [num_images, image_height, image_width, num_channel]
        returns: masks [num_images, images_height, image_width] with road probabilities
        """
        opts = self._options
        num_images = imgs.shape[0]
        print("Running prediction on {} images... ".format(num_images), end="")
        if opts.ensemble_prediction:
            # Predict on all 8 dihedral transforms of each image and average.
            print("Start data augmentation for prediction...")
            imgs = images.image_augmentation_ensemble(imgs)
            print("Done")
            num_images = imgs.shape[0]
        # Mirror-pad so that every output pixel has full valid-conv context.
        offset = int((unet.input_size_needed(opts.patch_size, opts.num_layers) - opts.patch_size) / 2)
        imgs_exp = images.mirror_border(imgs, offset)
        patches = images.extract_patches(imgs_exp,
                                         patch_size=unet.input_size_needed(opts.patch_size, opts.num_layers),
                                         predict_patch_size=opts.patch_size,
                                         stride=opts.stride)
        num_patches = patches.shape[0]
        # patches padding to have full batches
        if num_patches % opts.batch_size != 0:
            num_extra_patches = opts.batch_size - (num_patches % opts.batch_size)
            # BUG FIX: the padding used (patch_size, patch_size, num_channel),
            # but extracted patches are input_size x input_size, which made
            # np.concatenate fail whenever padding was required.
            extra_patches = np.zeros((num_extra_patches,) + patches.shape[1:])
            patches = np.concatenate([patches, extra_patches], axis=0)
        num_batches = int(patches.shape[0] / opts.batch_size)
        eval_predictions = np.ndarray(shape=(patches.shape[0], opts.patch_size, opts.patch_size))
        for batch in range(num_batches):
            batch_start = batch * opts.batch_size
            feed_dict = {
                self._patches_node: patches[batch_start:batch_start + opts.batch_size, :, :, :],
            }
            eval_predictions[batch_start:batch_start + opts.batch_size, :, :] = \
                self._session.run(self._predictions, feed_dict)
        # remove padding
        eval_predictions = eval_predictions[0:num_patches]
        patches_per_image = int(num_patches / num_images)
        # construct masks by stitching overlapping patch predictions back together
        new_shape = (num_images, patches_per_image, opts.patch_size, opts.patch_size, 1)
        masks = images.images_from_patches(eval_predictions.reshape(new_shape), stride=opts.stride)
        if opts.ensemble_prediction:
            print("Invert Data augmentation and average predictions...")
            masks = images.invert_image_augmentation_ensemble(masks)
            print("Averaging done...")
        print("Prediction Done")
        return masks

    def predict_batchwise(self, imgs, pred_batch_size):
        """Predict masks for `imgs` in chunks of `pred_batch_size` images."""
        masks = []
        for i in range(int(np.ceil(imgs.shape[0] / pred_batch_size))):
            start = i * pred_batch_size
            end = start + pred_batch_size
            masks.append(self.predict(imgs[start:end]))
        if len(masks) > 1:
            masks = np.concatenate(masks, axis=0)
            return masks
        else:
            return masks[0]

    def save(self, epoch=0):
        """Save a checkpoint for the given epoch under the experiment directory."""
        opts = self._options
        model_data_dir = os.path.abspath(
            os.path.join(opts.save_path, self.experiment_name, 'model-epoch-{:03d}.chkpt'.format(epoch)))
        saved_path = self.saver.save(self._session, model_data_dir)
        # create checkpoint
        print("Model saved in file: {}".format(saved_path))

    def restore(self, date=None, epoch=None, file=None):
        """Restores model from saved checkpoint.

        date: which model should be restored (most recent if None)
        epoch: at which epoch model should be restored (most recent if None)
        file: provide directly the checkpoint file to restore
        """
        opts = self._options
        if file is not None:
            model_data_dir = file
        else:
            # get experiment name to restore from
            if date is None:
                dates = [date for date in glob.glob(os.path.join(opts.save_path, "*")) if os.path.isdir(date)]
                model_data_dir = sorted(dates)[-1]
            else:
                model_data_dir = os.path.abspath(os.path.join(opts.save_path, date))
            # get epoch construct final path
            if epoch is None:
                # Latest epoch: strip the trailing ".meta" (5 chars) from the
                # newest checkpoint metadata file.
                model_data_dir = os.path.abspath(os.path.join(model_data_dir, 'model-epoch-*.chkpt.meta'))
                model_data_dir = sorted(glob.glob(model_data_dir))[-1][:-5]
            else:
                model_data_dir = os.path.abspath(
                    os.path.join(model_data_dir, 'model-epoch-{:03d}.chkpt'.format(epoch)))
        self.saver.restore(self._session, model_data_dir)
        print("Model restored from from file: {}".format(model_data_dir))
def main(_):
    """Entry point: build the model, optionally restore, then train/evaluate.

    The single positional argument is the leftover argv list passed by
    tf.app.run() and is ignored.
    """
    opts = Options()
    # gpu == -1 means "run on CPU"; otherwise pin to the requested GPU.
    if opts.gpu == -1:
        config = tf.ConfigProto()
    else:
        config = tf.ConfigProto(device_count={'GPU': opts.num_gpu}, allow_soft_placement=True)
    with tf.Graph().as_default(), tf.Session(config=config) as session:
        device = '/device:CPU:0' if opts.gpu == -1 else '/device:GPU:{}'.format(opts.gpu)
        print("Running on device {}".format(device))
        with tf.device(device):
            model = ConvolutionalModel(opts, session)
        if opts.restore_model:
            if opts.model_path is not None:
                model.restore(file=opts.model_path)
                print("Restore model: {}".format(opts.model_path))
            else:
                print("Restore date: {}".format(opts.restore_date))
                model.restore(date=opts.restore_date, epoch=opts.restore_epoch)
        if opts.num_epoch > 0:
            # Training: extract (input_size) image patches and (patch_size)
            # groundtruth patches, optionally rotated for augmentation.
            train_images, train_groundtruth = images.load_train_data(opts.train_data_dir)
            input_size = unet.input_size_needed(opts.patch_size, opts.num_layers)
            offset = int((input_size - opts.patch_size) / 2)
            extended_images = images.expand_and_rotate(train_images, opts.rotation_angles, offset)
            patches = images.extract_patches(extended_images,
                                             patch_size=input_size,
                                             predict_patch_size=opts.patch_size,
                                             stride=opts.stride)
            print("Train on {} patches of size {}x{}".format(patches.shape[0], patches.shape[1], patches.shape[2]))
            train_groundtruth_exp = images.expand_and_rotate(train_groundtruth, opts.rotation_angles, 0)
            labels_patches = images.extract_patches(train_groundtruth_exp,
                                                    patch_size=opts.patch_size,
                                                    stride=opts.stride)
            print(
                "Train on {} groundtruth patches of size {}x{}".format(labels_patches.shape[0], labels_patches.shape[1],
                                                                       labels_patches.shape[2]))
            model._summary.add_to_eval_patch_summary(train_groundtruth)
            for i in range(opts.num_epoch):
                print("==== Train epoch: {} ====".format(i))
                tf.local_variables_initializer().run()  # Reset scores
                model.train(patches, labels_patches, train_images, train_groundtruth)  # Process one epoch
                model.save(i)  # Save model to disk
        if opts.eval_train:
            # Evaluate on the training set and dump diagnostic images.
            print("Evaluate Test")
            eval_images, eval_groundtruth = images.load_train_data(opts.train_data_dir)
            pred_masks = model.predict_batchwise(eval_images, opts.pred_batch_size)
            pred_labels = ((pred_masks > 0.5) * 1).squeeze(-1)
            pred_overlays = images.overlays(eval_images, pred_masks, fade=0.5)
            overlapped = images.overlap_pred_true(pred_labels, eval_groundtruth)
            error = images.overlapp_error(pred_labels, eval_groundtruth)
            images.save_all(pred_labels, opts.eval_data_dir, "eval_binary_pred_{:03d}.png", greyscale=True)
            images.save_all(pred_masks, opts.eval_data_dir, "eval_probability_pred_{:03d}.png", greyscale=True)
            images.save_all(pred_overlays, opts.eval_data_dir, "eval_overlays_pred_{:03d}.png")
            images.save_all(overlapped, opts.eval_data_dir, "eval_confusion_{:03d}.png")
            # BUG FIX: output filename typo "eval_orror" -> "eval_error".
            images.save_all(error, opts.eval_data_dir, "eval_error_{:03d}.png", greyscale=True)
        if opts.eval_data_dir and not opts.eval_train:
            # Inference on unseen evaluation data + submission CSV.
            print("Running inference on eval data {}".format(opts.eval_data_dir))
            eval_images = images.load(opts.eval_data_dir)
            start = time.time()
            masks = model.predict_batchwise(eval_images, opts.pred_batch_size)
            stop = time.time()
            print("Prediction time:{} mins".format((stop - start) / 60))
            masks = images.quantize_mask(masks, patch_size=IMG_PATCH_SIZE, threshold=FOREGROUND_THRESHOLD)
            overlays = images.overlays(eval_images, masks, fade=0.4)
            save_dir = os.path.abspath(os.path.join(opts.save_path, model.experiment_name))
            images.save_all(overlays, save_dir)
            images.save_submission_csv(masks, save_dir, IMG_PATCH_SIZE)
            # Save model used for prediction
            saved_path = model.saver.save(model._session, save_dir + "-model.chkpt")
            # BUG FIX: `model_info` was assigned "... " + "" and never used;
            # log the checkpoint that produced the submission instead.
            print("Model used for submission: {}".format(saved_path))
        if opts.interactive:
            code.interact(local=locals())
if __name__ == '__main__':
    # tf.app.run() parses FLAGS and then calls main(argv).
    tf.app.run()
| StarcoderdataPython |
5000281 | # Generated by Django 3.1.1 on 2020-09-25 10:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Owner, Car, a Membership through-table carrying
    ownership dates (Car.owners M2M), and Driver_License per owner.

    Auto-generated by Django; only touched to drop an invalid field option.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Car',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('model', models.CharField(max_length=50)),
                ('color', models.CharField(max_length=50)),
                ('gov_numb', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Owner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('date_of_b', models.DateField()),
            ],
        ),
        migrations.CreateModel(
            name='Membership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_of_beginning', models.DateField()),
                ('date_of_ending', models.DateField()),
                ('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project_first_app.car')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project_first_app.owner')),
            ],
        ),
        migrations.CreateModel(
            name='Driver_License',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # FIX: IntegerField does not support max_length (Django system
                # check W122 -- the argument is ignored), so it is removed;
                # the resulting database column is unchanged.
                ('number_of_license', models.IntegerField()),
                ('type', models.CharField(choices=[('A', 'Moto'), ('B', 'Auto'), ('C', 'Track')], max_length=1)),
                ('date_of_given', models.DateField()),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project_first_app.owner')),
            ],
        ),
        migrations.AddField(
            model_name='car',
            name='owners',
            field=models.ManyToManyField(through='project_first_app.Membership', to='project_first_app.Owner'),
        ),
    ]
| StarcoderdataPython |
11363135 | import pandas as pd
import abc
from copy import copy
from time import time

import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from sklearn.utils.random import check_random_state

from robusta.crossval import crossval

from ._plot import _plot_progress, _plot_subset
from ._subset import FeatureSubset
from ._verbose import _print_last
class _Selector(BaseEstimator, TransformerMixin):
    """Base feature selector: restricts a DataFrame to the selected columns."""

    def transform(self, X):
        """Reduce X to the selected features.

        Parameters
        ----------
        X : DataFrame of shape [n_samples, n_features]
            The input samples.

        Returns
        -------
        Xt : DataFrame of shape [n_samples, n_selected_features]
            The input samples restricted to the selected features.
        """
        selected_columns = self.get_subset()
        return X[selected_columns]

    @abc.abstractmethod
    def get_subset(self):
        """Return the list of columns to select.

        Returns
        -------
        use_cols : list of string, shape (k_features, )
            Columns to select.
        """
        return self.features_

    def _set_features(self, X):
        # Remember the full feature universe of the training frame.
        self.features_ = FeatureSubset(X.columns)
class _WrappedSelector(_Selector):
    """Base class for wrapper-type feature selectors.

    Candidate feature subsets are scored with cross-validation via
    ``robusta.crossval.crossval``.  Every evaluated subset is recorded in
    ``trials_`` and the best one so far is exposed through ``best_subset_``
    and ``best_score_``.
    """

    @abc.abstractmethod
    def __init__(self, estimator, cv=5, scoring=None, max_iter=20, max_time=None,
                 random_state=0, n_jobs=-1, verbose=1, n_digits=4, cv_kwargs={}):
        # NOTE: parameters are stored verbatim (sklearn convention, required
        # for get_params()/clone()).  ``cv_kwargs={}`` is a shared mutable
        # default, but it is only ever read (unpacked into crossval), never
        # mutated, so it is kept for signature compatibility.
        self.estimator = estimator
        self.scoring = scoring
        self.max_iter = max_iter
        self.max_time = max_time
        self.cv = cv
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.n_digits = n_digits
        self.cv_kwargs = cv_kwargs

    @property
    def n_features_(self):
        # Size of the full feature universe seen at fit time.
        return self.features_.n_features

    @property
    def min_features_(self):
        """Lower bound on subset size, resolved to an absolute count.

        ``self.min_features`` is expected to be set by the concrete subclass.
        """
        min_features = _check_k_features(self.min_features,
                                         self.n_features_,
                                         'min_features')
        return min_features

    @property
    def max_features_(self):
        """Upper bound on subset size, resolved to an absolute count."""
        max_features = _check_k_features(self.max_features,
                                         self.n_features_,
                                         'max_features')
        # FIX: message typo "lower then" -> "lower than".
        msg = "<min_features> must be lower than <max_features>"
        assert self.min_features_ <= max_features, msg
        return max_features

    def _get_importance(self, subset, result):
        """Attach per-feature importance mean/std (over CV folds) to *subset*."""
        if 'importance' in result:
            imp = result['importance']
            subset.importance = pd.Series(np.average(imp, axis=0), index=subset)
            subset.importance_std = pd.Series(np.std(imp, axis=0), index=subset)
        return subset

    def _eval_subset(self, subset, X, y, groups):
        """Cross-validate *subset*, recording score mean/std and importances."""
        result = crossval(self.estimator, self.cv, X[subset], y, groups,
                          scoring=self.scoring, n_jobs=self.n_jobs,
                          return_pred=False, verbose=0,
                          **self.cv_kwargs)
        subset.score = np.average(result['val_score'])
        subset.score_std = np.std(result['val_score'])
        subset = self._get_importance(subset, result)
        return subset

    def eval_subset(self, subset, X, y, groups=None):
        """Evaluate a candidate subset and update search state.

        Raises KeyboardInterrupt (by design) once the iteration or time
        budget is exhausted, so the surrounding search loop can stop.
        Returns the subset's cross-validated score.
        """
        # Convert to FeatureSubset (exact-type check on purpose: subclasses
        # may carry extra state that must not be silently reused).
        if type(subset) != type(self.features_):
            subset = self.features_.copy().set_subset(subset)
        # Evaluate
        tic = time()
        self._eval_subset(subset, X, y, groups)
        subset.eval_time = time() - tic
        # Update stats
        self.total_time_ = getattr(self, 'total_time_', .0) + subset.eval_time
        if not hasattr(self, 'best_score_') or self.best_score_ < subset.score:
            self.best_subset_ = subset
            self.best_score_ = subset.score
        # Update history
        subset.idx = self.n_iters_
        self.trials_.append(subset)
        # Verbose
        _print_last(self)
        # Check limits
        self._check_max_iter()
        self._check_max_time()
        return subset.score

    def _check_max_iter(self):
        # Stop the surrounding search loop once the iteration budget is spent.
        if hasattr(self, 'max_iter') and self.max_iter:
            if self.max_iter <= self.n_iters_:
                if self.verbose: print('Iterations limit exceed!')
                raise KeyboardInterrupt

    def _check_max_time(self):
        # Stop the surrounding search loop once the time budget is spent.
        if hasattr(self, 'max_time') and self.max_time:
            if self.max_time <= self.total_time_:
                if self.verbose: print('Time limit exceed!')
                raise KeyboardInterrupt

    def _reset_trials(self):
        # Clear the evaluation history (called at the start of a fit).
        self.trials_ = []

    @property
    def n_iters_(self):
        # Number of subsets evaluated so far.
        return len(self.trials_)

    def plot_progress(self, **kwargs):
        """Plot score progress over evaluated subsets."""
        return _plot_progress(self, **kwargs)

    def plot_subset(self, **kwargs):
        """Plot subset-size / score diagnostics."""
        return _plot_subset(self, **kwargs)

    def get_subset(self):
        """Return the best subset found so far.

        Raises
        ------
        NotFittedError
            If no subset has been evaluated yet.  (BUG FIX: NotFittedError
            was referenced but never imported; the import is added at module
            level from sklearn.exceptions.)
        """
        if hasattr(self, 'best_subset_'):
            return self.best_subset_
        else:
            model_name = self.__class__.__name__
            raise NotFittedError(f'{model_name} is not fitted')
def _check_k_features(k_features, n_features, param='k_features'):
if isinstance(k_features, int):
if k_features > 0:
k_features = k_features
else:
raise ValueError(f'Integer <{param}> must be greater than 0')
elif isinstance(k_features, float):
if 0 < k_features < 1:
k_features = max(k_features * n_features, 1)
k_features = int(k_features)
else:
raise ValueError(f'Float <{param}> must be from interval (0, 1)')
else:
raise ValueError(f'Parameter <{param}> must be int or float,'
f'got {k_features}')
return k_features
class _WrappedGroupSelector:
    """Mixin for wrapper selectors that select *groups* of columns.

    NOTE(review): assumes the training frame's columns form (group, column)
    pairs (FeatureSubset is built with group=True) -- verify against the
    FeatureSubset implementation.
    """

    def _get_importance(self, subset, result):
        # Aggregate per-column fold importances into per-group statistics.
        if 'importance' in result:
            features, imp = result['features'], result['importance']
            # Each feature is a (group, column) pair; keep the group label.
            groups = [group for group, _ in features]
            # After the transpose: rows = features (indexed by group label),
            # columns = CV folds.
            imp = pd.DataFrame(imp, columns=groups).T
            # Sum importances of columns belonging to the same group.
            imp = imp.groupby(groups).sum()
            # Mean / std across folds for each group.
            subset.importance = imp.mean(axis=1)
            subset.importance_std = imp.std(axis=1)
        return subset

    def _set_features(self, X):
        # Group-aware feature universe (overrides _Selector._set_features).
        self.features_ = FeatureSubset(X.columns, group=True)
| StarcoderdataPython |
11245636 | <reponame>erteck/textHighlighter
from abc import ABC, abstractmethod
from math import pi
class GeometricObject(ABC):
    """Abstract base class for 2-D shapes exposing perimeter() and area().

    FIX: the methods were plain ``pass`` bodies (silently returning None)
    even though ``abstractmethod`` was imported; they are now genuinely
    abstract, so incomplete subclasses fail fast at instantiation.
    """

    @abstractmethod
    def perimeter(self):
        """Return the perimeter of the shape."""

    @abstractmethod
    def area(self):
        """Return the area of the shape."""
class Circle(GeometricObject):
    """A circle defined by its radius (default 1.0)."""

    def __init__(self, radius=1.0):
        self._radius = radius

    def perimeter(self):
        # Circumference: 2 * pi * r.
        return 2 * pi * self._radius

    def area(self):
        # Area: pi * r^2.
        return pi * self._radius ** 2

    def __str__(self):
        return f'Circle with radius = {self._radius}'
class Rectangle(GeometricObject):
    """An axis-aligned rectangle with the given width and length."""

    def __init__(self, width=1.0, length=1.0):
        self._width = width
        self._length = length

    def perimeter(self):
        # Sum of twice each side.
        return 2 * self._width + 2 * self._length

    def area(self):
        return self._length * self._width

    def __str__(self):
        return f'Rectangle with length = {self._length} and width = {self._width}'
class Resizable(ABC):
    """Interface for shapes that can be rescaled by a percentage.

    FIX: the original declared ``def resize(percent)`` -- missing ``self``
    and not abstract -- so calling it on an instance would have bound the
    instance to ``percent``.  It is now a proper abstract method.
    """

    @abstractmethod
    def resize(self, percent):
        """Scale the shape's dimensions to *percent* percent of their size."""
class ResizableCircle(Circle, Resizable):
    """A circle that supports percentage-based resizing."""

    def __init__(self, radius):
        Circle.__init__(self, radius)

    def resize(self, percent):
        # Scale the radius to `percent` percent of its current value.
        self._radius = self._radius * (percent / 100)
class ResizableRectangle(Rectangle, Resizable):
    """A rectangle that supports percentage-based resizing."""

    def __init__(self, width, length):
        Rectangle.__init__(self, width, length)

    def resize(self, percent):
        # Scale both sides to `percent` percent of their current values.
        scale = percent / 100
        self._width = self._width * scale
        self._length = self._length * scale
4849328 | <reponame>UAL-RE/ReM
from typing import Union, Optional
import requests
from ldcoolp_figshare import FigshareInstituteAdmin
from fastapi import APIRouter, HTTPException
# Router mounted by the host FastAPI application.
router = APIRouter()
# Figshare API tokens; expected to be assigned by the host app at start-up.
api_key: Optional[str] = None
stage_api_key: Optional[str] = None
def figshare_metadata_readme(figshare_dict: dict) -> dict:
    """Condense a Figshare API response into README metadata.

    Curation responses nest the article under ``'item'``; plain article
    responses are used directly.

    :param figshare_dict: Figshare API response
    :return: README metadata based on Figshare response
    """
    metadata = {}
    if 'item' in figshare_dict:
        # Curation response: keep both ids, then work on the nested article.
        print("figshare_metadata_readme: Using curation responses")
        metadata['article_id'] = figshare_dict['item']['id']
        metadata['curation_id'] = figshare_dict['id']
        figshare_dict = figshare_dict['item']
    else:
        metadata['article_id'] = figshare_dict['id']

    citation = figshare_dict['citation']
    # Split "Authors (year): Title. ..." into the author part plus the
    # remaining sentences.  Assumes no "): " inside the dataset title itself.
    authors = [citation.split('):')[0] + ').']
    authors.extend(sentence + '.'
                   for sentence in citation.split('): ')[1].split('. '))

    metadata['title'] = figshare_dict['title']
    metadata['description'] = figshare_dict['description']
    metadata['doi'] = f"https://doi.org/{figshare_dict['doi']}"
    metadata['preferred_citation'] = authors
    metadata['license'] = figshare_dict['license']
    # 'summary' intentionally mirrors 'description' for README templates.
    metadata['summary'] = figshare_dict['description']
    metadata['references'] = figshare_dict['references']
    return metadata
@router.get('/figshare/{article_id}/')
def get_figshare(article_id: int, curation_id: Optional[int] = None,
                 stage: bool = False,
                 allow_approved: bool = False) -> Union[dict, HTTPException]:
    """
    API call to retrieve Figshare metadata
    \f
    :param article_id: Figshare article ID
    :param curation_id: Figshare curation ID
    :param stage: Figshare stage or production API.
                  Stage is available for Figshare institutions
    :param allow_approved: Return 200 responses even if curation is not pending
    :return: Figshare API response
    """
    # Stage and production use different Figshare hosts.
    if not stage:
        base_url = "https://api.figshare.com"
    else:
        base_url = "https://api.figsh.com"
    # Module-level tokens are expected to be set by the host application.
    token = api_key if not stage else stage_api_key
    headers = {
        'Content-Type': 'application/json',
        'Authorization': f'token {token}'
    }
    fs_admin = FigshareInstituteAdmin(token=token, stage=stage)
    if curation_id is None:
        # No curation id supplied: look one up from the article's curation
        # list (restricted to 'pending' unless approved records are allowed).
        status = ''
        if not allow_approved:
            status = 'pending'
        curation_response = \
            fs_admin.get_curation_list(article_id, status=status,
                                       process=False)
        if curation_response.status_code != 200:
            # Propagate Figshare's error code/message to the client.
            raise HTTPException(
                status_code=curation_response.status_code,
                detail=f"Figshare: {curation_response.json()['message']}",
            )
        else:
            curation_json = curation_response.json()
            if len(curation_json) != 0:
                # Use the first (most relevant) curation record.
                print(f"Retrieved curation_id for {article_id}: "
                      f"{curation_json[0]['id']} "
                      f"(status={curation_json[0]['status']})")
                curation_id = curation_json[0]['id']
            else:
                # No curation records: probe the article endpoint to decide
                # between "article missing" and "no matching review".
                art_response = requests.get(
                    f"{base_url}/v2/articles/{article_id}",
                    headers=headers)
                if art_response.status_code != 200:
                    raise HTTPException(
                        status_code=art_response.status_code,
                        detail=f"Figshare: {art_response.json()['message']}"
                    )
                else:
                    review_msg = "reviews" if allow_approved else "pending review"
                    raise HTTPException(
                        status_code=401,
                        detail=f"FastAPI: No valid {review_msg} for {article_id}"
                    )
    if curation_id is not None:
        # Fetch the full curation record (either supplied or resolved above).
        response = fs_admin.get_curation_details(curation_id, process=False)
        if response.status_code != 200:
            raise HTTPException(
                status_code=response.status_code,
                detail=response.json(),
            )
        else:
            r_json = response.json()
            if not allow_approved:
                # Only 'pending' curations count as valid in strict mode.
                if r_json['status'] == 'pending':
                    return r_json
                else:
                    raise HTTPException(
                        status_code=401,
                        detail=f'FastAPI: No valid pending review for {article_id}'
                    )
            else:
                return r_json
@router.get('/metadata/{article_id}/')
async def get_readme_metadata(article_id: int,
                              curation_id: Optional[int] = None,
                              stage: bool = False,
                              allow_approved: bool = False) -> dict:
    """
    API call for README metadata based on Figshare response
    \f
    :param article_id: Figshare article ID
    :param curation_id: Figshare curation ID
    :param stage: Figshare stage or production API.
                  Stage is available for Figshare institutions
    :param allow_approved: Return 200 responses even if curation is not pending
    :return: README metadata API response
    """
    try:
        figshare_dict = get_figshare(article_id,
                                     curation_id=curation_id,
                                     stage=stage,
                                     allow_approved=allow_approved)
        return figshare_metadata_readme(figshare_dict)
    except HTTPException:
        # Propagate Figshare/FastAPI errors unchanged to the client.
        raise
| StarcoderdataPython |
4875764 | <reponame>PetkoAndreev/Python-basics<gh_stars>0
# Greedy coin change: print the minimum number of coins for the amount read
# from stdin, using denominations 2, 1, 0.50, 0.20, 0.10, 0.05, 0.02, 0.01.
#
# FIX: the original repeatedly subtracted floats and re-rounded inside a
# while-loop, which is fragile (a stray rounding residue can loop forever).
# Working in integer cents makes the greedy loop exact and O(#denominations).
COIN_VALUES_CENTS = (200, 100, 50, 20, 10, 5, 2, 1)

change = float(input())
# Convert once to integer cents; round() absorbs float representation error.
cents = round(change * 100)

num_coins = 0
for coin in COIN_VALUES_CENTS:
    num_coins += cents // coin
    cents %= coin

print(num_coins)
1658635 | # Assuming the recorded transactions to be in the following format:
# [[[T-Nrt][Item 1, Item 2, Item 3]],[[T-3689], [1,2,2,3,8,7]]]
#from powerset import potenzmenge
from itertools import combinations, product
from create_transactions import create_transactions
import timeit
def apriori (transactions, min_trashhold, max_iterations):
    '''Run the Apriori frequent-itemset algorithm over *transactions*.

    Each transaction is a [transaction_id, item_list] pair.  The first two
    passes (1-itemsets and 2-itemsets) are done explicitly, then join /
    count / filter is repeated in a loop until no candidates remain.
    At each level k:
      1. join frequent (k-1)-itemsets into k-candidates (inner_join)
      2. count candidate support over all transactions (count_items2)
      3. keep candidates with support >= min_trashhold as frequent itemsets

    NOTE(review): ``max_iterations`` is currently unused (the bounded loop
    is commented out below); the pruning step is also disabled.
    Results are printed, not returned.
    '''
    # Drop the transaction ids; keep only the item lists.
    itemsets = [a[1] for a in transactions]
    # c[k] = candidate support counts at level k; index 0 is a placeholder
    # so that list index == itemset size.
    c=[0]
    print(max(max(itemsets)))
    # NOTE(review): `candidates` is computed but never used afterwards.
    candidates= [a for a in range(0, max(max(itemsets))+1)]
    print(candidates)
    # Level 1: count single items, keep those meeting the threshold.
    c.append(count_items(itemsets, 1))
    f=[0]
    f.append({k:v for k,v in c[1].items() if v >= min_trashhold})
    #for ind, candidates in enumerate(f):
    # Level 2: join frequent 1-itemsets, count, filter.
    combinations2 = inner_join(f[1], 1)
    #print('zweiwertige Kombinationen: ', combinations2)
    counted2 = count_items2(itemsets, combinations2)
    c.append(counted2)
    f.append({k:v for k,v in c[2].items() if v >= min_trashhold})
    print('F nach Runde 2: ', f)
    iteration = 3
    #while iteration <= max_iterations:
    # Loop while the previous level still produced any counted candidates.
    while c[iteration-1]:
        combinationX = inner_join(f[iteration-1], iteration-1)
        c.append(count_items2(itemsets, combinationX))
        #not_counted_candidates = pruning(combinationX, f[len(f)-1].values(), iteration)
        #c.append(count_items2(itemsets, not_counted_candidates))
        f.append({k:v for k,v in c[iteration].items() if v >= min_trashhold})
        iteration +=1
        print (f'aktuelle Frequent {iteration-1}-Itemsets: ', f[iteration-1])
    # iteration-2 is the last level that actually produced frequent itemsets.
    print(f'Finales f (Größtes Frequent Itemset: {iteration-2}): ', f[iteration-2])
def count_items(itemsets, size):
    """Count every *size*-item combination occurring in *itemsets*.

    Returns a dict mapping each combination (a tuple) to its support count.
    Note: each transaction is sorted in place (same as the original), so
    combinations are generated in ascending item order.
    """
    counted = {}
    for transaction in itemsets:
        transaction.sort()
        for combo in combinations(transaction, size):
            counted[combo] = counted.get(combo, 0) + 1
    return counted
def count_items2(itemsets, to_count):
    """Count, for each candidate itemset, how many transactions contain it.

    Returns a dict mapping each occurring candidate (as a tuple) to its
    support; candidates that appear in no transaction are omitted.
    """
    # Convert each transaction to a set once, instead of per candidate.
    transaction_sets = [set(transaction) for transaction in itemsets]
    counted = {}
    for candidate in to_count:
        needed = set(candidate)
        for transaction in transaction_sets:
            if needed <= transaction:
                counted[candidate] = counted.get(candidate, 0) + 1
    return {tuple(k): v for k, v in counted.items()}
def pruning(combinationX, l_minus_1, current_size):
    """Apriori pruning: drop candidates with any infrequent (k-1)-subset.

    A candidate of size *current_size* can only be frequent if every one of
    its (current_size - 1)-subsets appears in *l_minus_1*.

    BUG FIX: the original removed elements from ``combinationX`` while
    iterating over it, which silently skips the element following each
    pruned candidate.  A kept-list is built instead.
    """
    kept = []
    for candidate in combinationX:
        for sub_candidate in combinations(candidate, current_size - 1):
            if sub_candidate not in l_minus_1:
                print('pruned: ', candidate, ' because of: ', sub_candidate)
                break
        else:
            # No infrequent subset found -> candidate survives.
            kept.append(candidate)
    return kept
def inner_join(counted, current_size):
    """Apriori join step: build (k+1)-candidates from frequent k-itemsets.

    Two frequent itemsets (keys of *counted*) are merged whenever they share
    the same first (current_size - 1) items, yielding one candidate with one
    extra item.
    """
    candidates = []
    for left, right in combinations(counted.keys(), 2):
        if left[0:current_size - 1] != right[0:current_size - 1]:
            continue
        # Shared prefix: union the two itemsets into a larger candidate.
        joined = set(left)
        joined.add(right[current_size - 1])
        candidates.append(tuple(joined))
    return candidates
if __name__ == '__main__':
    #apriori([[[1], [1,2,3,4,5]], [[2],[2,3,4,5,6,7,8]], [[3], [7,8,9,10]]], 2,1000)
    # Benchmark run: 1500 random transactions, items up to 15, up to 8 items
    # each (see create_transactions), with min support 3.
    start = timeit.default_timer()
    apriori(create_transactions(1500,15,8),3, 8)
    stop= timeit.default_timer()
    print('Time: ', stop - start)
| StarcoderdataPython |
5030812 |
class Struct(object):
    """Expose a (possibly nested) dict's keys as object attributes.

    Dict values recurse into nested Structs.  List and tuple values are both
    materialised as lists, with any dict elements wrapped; deeper nesting
    inside those lists is left untouched (one container level only).
    """

    def __init__(self, d):
        for key, value in d.items():
            setattr(self, key, self._convert(value))

    @staticmethod
    def _convert(value):
        # Mirrors the original conversion rules exactly.
        if isinstance(value, (list, tuple)):
            return [Struct(item) if isinstance(item, dict) else item
                    for item in value]
        if isinstance(value, dict):
            return Struct(value)
        return value
if __name__ == "__main__":
foo = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
bar = Struct(foo)
print(foo)
print(bar)
| StarcoderdataPython |
8002342 | <filename>src/AoC_2016/d9_decompress_str_re_recursion/expand_compressed_re_recurse.py
"""
Author: Darren
Date: 11/06/2021
Solving https://adventofcode.com/2016/day/9
Solution 2 of 2:
X(8x2)(3x3)ABCY
Original solution replaced src str with target str, i.e. by expanding.
However, part 2 was taking too long. Probably would have taken several hours.
This solution doesn't create the expanded str.
It simply calculates the lengths of segments that would be in the expanded str.
Part 1:
Calculate length of str segment, by multiplying n*m.
Then recursively call for the rest of the str, until no more segments.
Part 2:
As with Part 1, but now we want to replace the call to len to instead
be a call to recursive_len(). I.e. we now recurse into each segment,
not just to retrieve the rest of the str.
This is flippin' fast at returning a count of 10bn+.
"""
import logging
import os
import time
import re
SCRIPT_DIR = os.path.dirname(__file__)
INPUT_FILE = "input/input.txt"
SAMPLE_INPUT_FILE = "input/sample_input.txt"
expand_pattern = re.compile(r"\((\d+)x(\d+)\)")
def main():
    """Read the puzzle input and log the expanded length for both parts."""
    logging.basicConfig(level=logging.DEBUG, format="%(asctime)s:%(levelname)s:\t%(message)s")
    # input_file = os.path.join(SCRIPT_DIR, SAMPLE_INPUT_FILE)
    input_file = os.path.join(SCRIPT_DIR, INPUT_FILE)
    with open(input_file, mode="rt") as f:
        src_str = f.read()
    # Part 1 - Uses the default len_func=len(), so (nxm) segments are only expanded once
    result = decompressed_len(src_str=src_str)
    logging.info(f"Part 1: Expanded str length = {result}")
    # Part 2 - Recurses into each segment, expanding markers all the way down
    result = recursive_len(src_str=src_str)
    logging.info(f"Part 2: Expanded str length = {result}")
def recursive_len(src_str: str) -> int:
    """Fully-recursive expanded length: every (NxM) segment is itself expanded.

    This is decompressed_len with itself plugged in as the segment length
    function, so nested markers are followed all the way down (part 2).

    Args:
        src_str (str): compressed source string.

    Returns:
        int: length of the fully expanded string.
    """
    return decompressed_len(src_str=src_str, len_func=recursive_len)
def decompressed_len(src_str: str, len_func=len) -> int:
    """Return the expanded length of *src_str* without building the expansion.

    Scans for (NxM) markers; each marker means "repeat the next N characters
    M times".  The repeated segment's length comes from *len_func*: pass the
    default ``len`` for a single-level expansion (part 1) or
    ``recursive_len`` to expand nested markers too (part 2).

    Iterates over literal characters instead of recursing once per character
    as the original did - that recursion overflowed the stack (and did
    quadratic slicing) on long literal runs.  Recursion still happens, but
    only through *len_func* into marker segments.

    Args:
        src_str (str): the compressed string.
        len_func (callable): length function applied to each repeated segment.

    Returns:
        int: the length of the expanded string.
    """
    # Compiled locally so the function is self-contained; re caches compiled
    # patterns, so repeated calls pay no recompilation cost.
    marker = re.compile(r"\((\d+)x(\d+)\)")
    total = 0
    pos = 0
    end_of_str = len(src_str)
    while pos < end_of_str:
        # Marker must start exactly at the current position.
        match = marker.match(src_str, pos)
        if match:
            extent, repeat = map(int, match.groups())
            seg_start = match.end()
            seg_end = seg_start + extent
            total += repeat * len_func(src_str[seg_start:seg_end])
            pos = seg_end
        else:
            total += 1  # plain literal character
            pos += 1
    return total
if __name__ == "__main__":
t1 = time.perf_counter()
main()
t2 = time.perf_counter()
print(f"Execution time: {t2 - t1:0.4f} seconds")
| StarcoderdataPython |
6685827 | from __future__ import unicode_literals
from django.core.paginator import Page, Paginator
from django.urls import reverse
from django.shortcuts import redirect
from django.utils.functional import cached_property
from .blocks import SkipState
class SkipLogicPaginator(Paginator):
    """
    Breaks a series of questions into pages based on the logic
    associated with the question. Skipped questions result in
    empty entries to the data.
    """
    def __init__(self, object_list, data=dict(), answered=dict()):
        # NOTE(review): the dict() defaults are shared between calls; they
        # are only read/copied here, but confirm no caller mutates them.
        # Create a mutatable version of the query data
        self.new_answers = data.copy()
        self.previous_answers = answered
        super(SkipLogicPaginator, self).__init__(object_list, per_page=1)
        # be sure to exclude hidden article_page field
        self.question_labels = [
            question.clean_name for question in self.object_list
            if question.pk and question.field_type != 'hidden'
        ]
        # page_breaks[i] is the question index at which page i starts; a
        # break is recorded *after* each question that skips or page-breaks.
        self.page_breaks = []
        i = 0
        while i < len(self.object_list):
            field = self.object_list[i]
            i += 1
            # Don't add breaks for unsaved or hidden fields
            if (not field.pk) or field.field_type == 'hidden':
                continue
            if field.has_skipping:
                self.page_breaks.append(i)
                if field.clean_name in self.previous_answers:
                    # If skipping to a question then ignore page breaks for any
                    # skipped questions
                    # just add one before the question we're skipping TO
                    answer = self.previous_answers[field.clean_name]
                    if field.is_next_action(answer, SkipState.QUESTION):
                        next_question = field.skip_logic[
                            field.choice_index(answer)].value['question']
                        i = next_question - 1
            elif field.page_break:
                self.page_breaks.append(i)
        num_questions = len([
            j for j in self.object_list
            if j.pk and j.field_type != 'hidden'])
        if self.page_breaks:
            # Always have a break at start to create first page
            self.page_breaks.insert(0, 0)
            if self.page_breaks[-1] != num_questions:
                # Must break for last page
                self.page_breaks.append(num_questions)
        else:
            # display one question per page
            self.page_breaks = list(range(num_questions + 1))
        # add the missing data
        # (browsers omit unchecked checkboxes from POST bodies, so record
        # an explicit 'off' for every checkbox missing from the answers)
        self.new_answers.update({
            checkbox.clean_name: 'off'
            for checkbox in self.missing_checkboxes
        })
    def _get_page(self, *args, **kwargs):
        # Hook called by Paginator.page(): return our skip-aware Page class.
        return SkipLogicPage(*args, **kwargs)
    @cached_property
    def num_pages(self):
        # One page per gap between consecutive page breaks.
        return len(self.page_breaks) - 1
    @cached_property
    def last_question_index(self):
        # The last question on the current page
        return self.page_breaks[self.current_page] - 1
    @cached_property
    def current_page(self):
        # search backwards to ensure we find correct lower bound
        reversed_breaks = reversed(self.page_breaks)
        page_break = next(
            index for index in reversed_breaks
            if index <= self.first_question_index
        )
        return self.page_breaks.index(page_break) + 1
    @cached_property
    def first_question_index(self):
        # The first question on the current page
        last_answer = self.last_question_previous_page
        if last_answer >= 0:
            # It isn't the first page
            return self.next_question_from_previous_index(
                last_answer, self.previous_answers)
        return 0
    @cached_property
    def last_question_previous_page(self):
        # Index of the most recently answered question, or -1 on page one.
        previous_answers_indexes = self.index_of_questions(
            self.previous_answers)
        try:
            return max(previous_answers_indexes)
        except ValueError:
            # There have been no previous questions, its the first page
            return -1
    def next_question_from_previous_index(self, index, data):
        """Follow the skip logic of question *index*, using its answer in
        *data*, to find the index of the question that comes next."""
        last_question = self.object_list[index]
        last_answer = data.get(last_question.clean_name)
        if last_question.is_next_action(last_answer, SkipState.QUESTION):
            # Sorted or is 0 based in the backend and 1 on the front
            next_question_id = last_question.next_page(last_answer) - 1
            question_ids = [
                question.sort_order for question in self.object_list
            ]
            return question_ids.index(next_question_id)
        return index + 1
    @cached_property
    def next_question_index(self):
        # First question of the page following the just-submitted answers.
        if self.new_answers:
            return self.next_question_from_previous_index(
                self.last_question_index,
                self.new_answers,
            )
        return 0
    @cached_property
    def next_page(self):
        # Page whose break lies just past the next question to show.
        try:
            return next(
                page for page, break_index in enumerate(self.page_breaks)
                if break_index > self.next_question_index
            )
        except StopIteration:
            # Ran past the final break: we are on the last page.
            return self.num_pages
    @cached_property
    def previous_page(self):
        # Prevent returning 0 if the on the first page
        return max(1, next(
            page for page, break_index in enumerate(self.page_breaks)
            if break_index > self.last_question_previous_page
        ))
    def index_of_questions(self, data):
        """Map the answered question names in *data* to question indexes."""
        return [
            self.question_labels.index(question) for question in data
            if question in self.question_labels
        ]
    @cached_property
    def missing_checkboxes(self):
        # Checkboxes on the current page that have no entry in the submitted
        # data (unchecked boxes never appear in POST data).
        return [
            question
            for question in self.object_list[
                # Correct for the slice
                self.first_question_index:self.last_question_index + 1
            ]
            if question.field_type == 'checkbox' and
            question.clean_name not in self.new_answers
        ]
    def page(self, number):
        """Build the SkipLogicPage for page *number*, choosing the question
        slice from the submitted answers and the precomputed page breaks."""
        number = self.validate_number(number)
        index = number - 1
        if not self.new_answers:
            # No new data: slice directly between consecutive breaks.
            top_index = index + self.per_page
            bottom = self.page_breaks[index]
            top = self.page_breaks[top_index]
        elif self.previous_page == number or self.current_page == number:
            # We are rebuilding the page with the data just submitted
            bottom = self.first_question_index
            # Correct for the slice
            top = self.last_question_index + 1
        else:
            # Advancing: start from wherever the skip logic points next.
            index = self.next_page - 1
            bottom = self.next_question_index
            top_index = index + self.per_page
            top = self.page_breaks[top_index]
        if number != 1:
            return self._get_page(self.object_list[bottom:top], number, self)
        # Page one additionally carries every hidden field so their values
        # get submitted along with the first POST.
        object_list = [
            i for i in self.object_list
            if i.field_type == 'hidden'
        ]
        object_list += self.object_list[bottom:top]
        return self._get_page(object_list, number, self)
class SkipLogicPage(Page):
    """Paginator page aware of skip logic: knows whether the form ends on
    this page and where to redirect after a successful submission."""
    def has_next(self):
        # A next page exists only if pagination says so AND the last answer
        # does not terminate the form.
        return super(SkipLogicPage, self).has_next() and not self.is_end()
    def possibly_has_next(self):
        # Pagination-only check, ignoring whether skip logic ends the form.
        return super(SkipLogicPage, self).has_next()
    def get_last_non_empty_page(self, page):
        # Recursively find the last page that had a question and return that
        if len(page.object_list) == 0:
            return self.get_last_non_empty_page(
                self.paginator.page(page.previous_page_number()))
        return page
    @cached_property
    def last_question(self):
        # Final question object on the nearest non-empty page.
        page = self.get_last_non_empty_page(self)
        return page.object_list[-1]
    @cached_property
    def last_response(self):
        # Submitted answer for that question (KeyError if unanswered).
        return self.paginator.new_answers[self.last_question.clean_name]
    def is_next_action(self, *actions):
        """Return True when the last answer's skip action is in *actions*;
        False when the question has not been answered yet."""
        try:
            question_response = self.last_response
        except KeyError:
            return False
        return self.last_question.is_next_action(question_response, *actions)
    def is_end(self):
        # The form ends here if the last answer jumps to END or to a form.
        return self.is_next_action(SkipState.END, SkipState.FORM)
    def success(self, slug, article=None):
        """Redirect after submission: to the linked follow-up form, or to
        the article-specific or plain success page."""
        if self.is_next_action(SkipState.FORM):
            return redirect(
                self.last_question.next_page(self.last_response).url
            )
        if not article:
            return redirect(
                reverse('molo.forms:success', args=(slug, )))
        return redirect(
            reverse('molo.forms:success_article_form', kwargs={
                'slug': slug, 'article': article}))
    def next_page_number(self):
        # Delegate to the paginator's skip-aware computation.
        return self.paginator.next_page
    def previous_page_number(self):
        # Delegate to the paginator's skip-aware computation.
        return self.paginator.previous_page
| StarcoderdataPython |
6487979 | <reponame>EliahKagan/old-practice-snapshot<filename>main/group-anagrams/group-anagrams.py
class Solution:
    def groupAnagrams(self, strs):
        """Group words that are anagrams of each other.

        Anagrams share the same sorted-letter key, so a defaultdict keyed on
        that collapses the original index-bookkeeping (separate `indices`
        dict plus try/except) into a single grouping pass.  Group order
        (first appearance) and word order within each group are unchanged.

        :type strs: List[str]
        :rtype: List[List[str]]
        """
        from collections import defaultdict  # local: this file has no import section

        groups = defaultdict(list)
        for word in strs:
            groups[''.join(sorted(word))].append(word)
        return list(groups.values())
| StarcoderdataPython |
4816873 | n1 = int(input('Um valor:'))
n2 = int(input('Outro valor:'))
# Basic arithmetic combinations of the two user-supplied integers.
soma = n1 + n2              # sum
multiplicacao = n1 * n2     # product
divisao = n1 / n2           # true division (NOTE(review): crashes if n2 == 0)
divisaoint = n1 // n2       # floor division
potencia = n1 ** n2         # power
print('A soma é {} , \no produto é {} e a divisão é {:.3f}'.format(soma, multiplicacao, divisao), end=' ')
print('Divisão inteira {} e potencia {}'.format(divisaoint, potencia))
262184 | <gh_stars>0
import numpy
from astropy.io import fits
# Export each SDSS band's response curve ('resnoa' column) from the FITS
# file to a plain-text two-column (wavelength, response) .db file.
hdu = fits.open('filter_curves.fits')
# WARNING: These filter curves include the telescope and detectors!
for band in ['U', 'G', 'R', 'I', 'Z']:
    data_file = 'gunn_2001_{0}_response.db'.format(band.lower())
    numpy.savetxt(data_file, numpy.transpose([hdu[band].data['wavelength'],
                                              hdu[band].data['resnoa']]),
                  fmt=['%6.1f', '%8.4f'],
                  header='SDSS {0} filter response taken from \n'.format(band.lower()) +
                         'http://www.sdss.org/wp-content/uploads/2017/04/filter_curves.fits\n' +
                         '\nSpecifically provides the data in the \'resnoa\' column; see README' +
                         ' for more details\n' +
                         '{0:>4s} {1:>8s}'.format('wave', 'res'))
| StarcoderdataPython |
4943645 | def main():
from argparse import ArgumentParser
parser = ArgumentParser(description='Gerar etiquetas para postagem nos Correios através de um arquivo CSV.')
parser.add_argument('arquivo_csv')
parser.add_argument('-o', '--arquivo-output', help='Arquivo de output. O output sairá para stdout se não especificado.')
parser.add_argument('-s', '--preencher-com-sigep', action='store_true',
help='Consultar CEP no SIGEP e preencher com os dados retornados.')
parser.add_argument('-p', '--papel', default='pimaco6184',
help='Tipo de papel para impressão. Valores disponíveis: pimaco6184 (padrão).')
args = parser.parse_args()
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3315548 | <reponame>alex-dya/security_scanner
import logging
from scanner import transports, types, controls
from scanner.detect import detect
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
def scan(config: dict) -> list:
    """Run a full scan: install the transport configuration, detect the
    target environment, execute all registered controls and return their
    results.

    Args:
        config: transport configuration mapping (see main() for the shape
            expected for the 'unix' transport).

    Returns:
        list of executed control objects (see controls.result()).
    """
    transports.config = config
    detect()
    controls.run_controls()
    return controls.result()
def main():
    """Manual smoke-test entry point: scan a local VM over SSH with sudo
    escalation and log every control result."""
    transports.config = dict(
        unix=dict(
            login='vmuser',
            password='<PASSWORD>',
            address='192.168.56.10',
            port=22,
            root_logon='SudoLogon',
            root_password='<PASSWORD>'
        )
    )
    # transport = transports.get_transport('unix')
    detect()
    # transport.send_command('poweroff')
    LOGGER.debug(f'Control list: {types.BaseContol._control_list}')
    controls.run_controls()
    for control in controls.result():
        LOGGER.debug(control)
        LOGGER.debug(control.result)
    # is_unix = transport.is_unix()
    # LOGGER.debug(f'Is unix={# is_unix}')
    # shadow = transport.send_command('cat /etc/shadow')
    # LOGGER.debug()
    # ls_result = transport.send_command('ls -al /etc')
    # LOGGER.debug(f'ls result: \n {ls_result.Output}')
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3216023 | <filename>survival/lifespan-bannedsimVsdata.py
# encoding: utf-8
import os
import csv
from pylab import *
from numpy import *
from loadData import loadData
from mymath import statistic, revcumsum
from random import sample as spl
#sim
# Random-walk simulation: each user takes +1/-1 steps (prob p of +1) and is
# "banned" once the cumulative position reaches mxP after time t0.
N = 200000 # number of users
t0 = 500 # initial time for observation
T = t0 +320
P = [0.52] # probability of joining a banned group
mxP = 2 # maximum cumulative position for banning
#randomize initial time for each user
T0 = random.randint(0,t0,N)
POS = []
LSP = []
POS0 = []
for p in P:
    Pos = zeros([N,T]) # position vs time for each user
    Lsp = [] # life span
    #do simulation
    Pos[:,1:] = (random.random([N,T-1]) < p) * 2.0 - 1.0
    for u in range(N):
        Pos[u,:T0[u]+1] = 0.0
    Pos = Pos.cumsum(1)
    # NOTE(review): Pos[t0,:] selects user t0's whole trajectory; initial
    # positions across all users at time t0 would be Pos[:,t0] - confirm.
    Pos0 = Pos[t0,:].tolist()
    for u in range(N):
        # First time step at which user u crosses the banning threshold.
        L = where(Pos[u,:]>=mxP)[0]
        if len(L)>0:
            L=L[0]
            if L>t0:
                Lsp.append(L-max(T0[u],t0))
    POS.append(Pos)
    LSP.append(Lsp)
    POS0.append(Pos0)
#data
# Empirical lifespans: for each banned user (state 0), record the first and
# last daily edge-list snapshot in which they appear.
US=loadData('userStates')
N=len(US)
Dir='sav/Edgelists-all/'
fnames=os.listdir(Dir)
fileNames=[]
for f in fnames:
    if f.startswith('Edges'):
        fileNames.append(f)
fileNames=sorted(fileNames)
T=len(fileNames)
# Ms maps user -> [first day seen, last day seen] (last starts as sentinel T).
Ms={}
t=0
for f in fileNames:
    fpath=Dir+f
    # NOTE(review): binary mode + csv.reader is a Python-2 idiom; under
    # Python 3 csv expects a text-mode file - confirm interpreter version.
    csvfile=open(fpath, 'rb')
    data = csv.reader(csvfile, delimiter=' ')
    U=set()
    for row in data:
        if US[row[0]] == 0:
            U.add(row[0])
    csvfile.close()
    del data
    for u in U:
        if not u in Ms:
            Ms[u] = [t,T]
        else:
            Ms[u][1] = t
    t+=1
# Lifespans in days; users seen only once keep the sentinel and are skipped.
Ls=[]
for u in Ms:
    if Ms[u][1] != T:
        Ls.append(Ms[u][1]-Ms[u][0]+1)
# Figure 1: empirical vs simulated lifespan distributions (log-log).
f1=figure(1,figsize=(6,4))
x,y=statistic(Ls,norm=True)
_=loglog(x,y,'ro',label='empirical',alpha=0.5)
Lsp=LSP[0]
x,y=statistic(Lsp,norm=True)
_=loglog(x,y,'g-',label='$p$='+str(P[0]),alpha=0.5)
xlim([1,330])
xlabel(r'Lifespan [day]')
ylabel(r'Fraction')
legend(loc='best')
f1.set_tight_layout(True)
savefig('figs/Lifespan-bannedUsers-data-vs-sim.pdf', format='pdf')
close(1)
# Figure(s) 2: sample trajectories (first 500 users) for each p, with the
# observation start t0 and the ban threshold mxP drawn as dashed lines.
f=figure(1,figsize=(6,4))
for i in range(len(P)):
    f.clf()
    Pos=POS[i]
    for u in range(500):
        _=plot(Pos[u,:],alpha=0.2)
    _=plot([t0,t0],[100,-100],'w--',linewidth=2.0)
    _=plot([t0,t0+T],[mxP,mxP],'w--',linewidth=2.0)
    xlabel(r'$Time$')
    ylabel(r'$Position$')
    savefig('figs/Time-Position-sim-p'+str(P[i])+'.png',bbox_inches='tight')
f.clf()
# Figure 3: distribution of positions at the observation start time.
for i in range(len(P)):
    Pos0=POS0[i]
    x,y=statistic(Pos0,norm=True)
    _=plot(x,y,label='$p$='+str(P[i]))
xlabel(r'$Position$')
ylabel(r'$Fraction$')
legend(loc='best')
savefig('figs/InitialPosition-sim.pdf',bbox_inches='tight')
close(1)
| StarcoderdataPython |
3473100 | <reponame>e-gills/running-log<gh_stars>1-10
from pymysql import connect, cursors
from utilities.creds import mysql_creds
from utilities.mysql_query import Query
def get_charts_for_page(render_page):
    """Fetch the chart definitions configured for the given page.

    Opens a fresh connection to the `blog` MySQL database and selects every
    row of the `charts` table whose render_page matches.
    """
    connection = connect(host=mysql_creds['host'], port=mysql_creds['port'],
                         user=mysql_creds['user'], passwd=mysql_creds['password'],
                         db='blog', cursorclass=cursors.DictCursor)
    query = Query(connection, table='charts')
    # NOTE(review): the where clause is built by string interpolation; safe
    # only while render_page never carries user-controlled input.
    return query.select(fields=['*'], where='render_page = "{}"'.format(render_page))
1997181 | from rest_framework import viewsets, generics
from rest_framework.generics import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from salary_calculator.models import Salary, Payout
from salary_calculator.serializers import DaySerializer, SalarySerializer, PayoutSerializer
from salary_calculator.utils.mixins import MultipleFieldLookupMixin
from salary_calculator.utils.permissions import IsOwner
class DayViewSet(viewsets.ModelViewSet):
    """Full CRUD on Day objects, restricted to the authenticated owner."""
    serializer_class = DaySerializer
    permission_classes = [IsOwner, IsAuthenticated]

    def get_queryset(self):
        """Days of the request user for the month named in the URL kwargs."""
        selected_month = self.kwargs['month_name'].capitalize()
        return self.request.user.days.filter(month__name=selected_month)
class SalaryView(generics.RetrieveUpdateAPIView):
    """Retrieve and update the logged-in user's own Salary object."""
    queryset = Salary.objects.all()
    serializer_class = SalarySerializer
    permission_classes = [IsOwner, IsAuthenticated]

    def get_object(self):
        """Return the salary owned by the request user (404 if absent)."""
        salary = get_object_or_404(self.get_queryset(), user=self.request.user)
        return salary
class PayoutView(MultipleFieldLookupMixin, generics.RetrieveAPIView):
    """Retrieve the logged-in user's payout for the month named in the URL."""
    queryset = Payout.objects.all()
    serializer_class = PayoutSerializer
    permission_classes = [IsOwner, IsAuthenticated]

    def get_object(self):
        """Payout owned by the request user for the requested month (404 if absent)."""
        month = self.kwargs['month__name'].capitalize()
        return get_object_or_404(self.get_queryset(),
                                 user=self.request.user,
                                 month__name=month)
| StarcoderdataPython |
3463713 | <filename>timelogger/urls.py
"""timelogger URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.views.generic import TemplateView
from django.contrib.auth.views import LoginView, PasswordResetView, LogoutView
from timelog.views import HomeView
from django.contrib.admin import site
from django.conf.urls.static import static
from django.conf import settings
# Branding shown in the Django admin header.
admin.site.site_header = "SPS Time Tracker"
# Project-wide custom error views (dotted paths resolved by Django).
handler404 = 'timelogger.views.handler404'
handler500 = 'timelogger.views.handler500'
handler400 = 'timelogger.views.handler400'
handler403 = 'timelogger.views.handler403'
# URL routing: home, admin, auth (login/logout/password reset) and timelog app.
urlpatterns = [
    url(r'^$',HomeView.as_view(),name='home'),
    url(r'^admin/', admin.site.urls),
    url(r'^login/$', LoginView.as_view(), name='login'),
    url(r'^logout/$', LogoutView.as_view(), name='logout'),
    url(r'^timelog/', include('timelog.urls', namespace='timelog')),
    url(r'^password_reset/$', auth_views.password_reset, name='password_reset'),
    url(r'^password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',auth_views.password_reset_confirm, name='password_reset_confirm'),
    url(r'^reset/done/$', auth_views.password_reset_complete, name='password_reset_complete'),
    url('admin/password_reset/',auth_views.PasswordResetView.as_view(),name='admin_password_reset',),
]
| StarcoderdataPython |
4871618 | GREEN = 0
# Card colour codes (indices into COLORNAMES; GREEN = 0 is defined above).
YELLOW = 1
WHITE = 2
BLUE = 3
RED = 4
ALL_COLORS = [GREEN, YELLOW, WHITE, BLUE, RED]
COLORNAMES = ["green", "yellow", "white", "blue", "red"]
# Action type codes stored in Action.type.
HINT_COLOR = 0
HINT_NUMBER = 1
PLAY = 2
DISCARD = 3
class Action(object):
    """One Hanabi move: a colour hint, a number hint, a play, or a discard.

    Attributes mirror the constructor arguments; `timing` starts empty and
    is filled in later by game bookkeeping code.
    """
    def __init__(self, type, pnr=None, col=None, num=None, cnr=None, comment=None):
        # Store every argument verbatim on the instance.
        self.type, self.pnr, self.col = type, pnr, col
        self.num, self.cnr, self.comment = num, cnr, comment
        self.timing = ""
    def __str__(self):
        # Human-readable description; implicitly None for unknown types.
        if self.type == HINT_COLOR:
            return "hints {} about all their {} cards".format(self.pnr, COLORNAMES[self.col])
        elif self.type == HINT_NUMBER:
            return "hints {} about all their {}".format(self.pnr, self.num)
        elif self.type == PLAY:
            return "plays their {}".format(self.cnr)
        elif self.type == DISCARD:
            return "discards their {}".format(self.cnr)
    def __eq__(self, other):
        # Equal iff every identifying field matches (comment/timing ignored).
        fields = ('type', 'pnr', 'col', 'num', 'cnr')
        return all(getattr(self, name) == getattr(other, name) for name in fields)
# semi-intelligently format cards in any format
def f(something):
    """Recursively pretty-format card data: 2-tuples (colour index, number)
    get the colour name substituted; lists and dict values are converted
    element-wise; anything else passes through unchanged.

    Fixes three defects in the original: the dict branch called the dict
    itself (``something(v)`` -> TypeError) instead of recursing with ``f``,
    used the Python-2-only ``iteritems``, and the list branch returned a
    lazy ``map`` object instead of a list under Python 3.
    """
    if type(something) == list:
        return [f(item) for item in something]
    elif type(something) == dict:
        return {k: f(v) for (k, v) in something.items()}
    elif type(something) == tuple and len(something) == 2:
        return (COLORNAMES[something[0]], something[1])
    return something
11286929 | <filename>HTMLReport/src/tools/log/handler_factory.py
"""
Copyright 2017 刘士
Licensed under the Apache License, Version 2.0 (the "License"): you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import logging
import logging.handlers
import sys
import threading
from io import StringIO
from logging import Handler, getLevelName
# Shared log-line layout: timestamp, thread id, level, file(line) - message.
_LOGGER_FORMAT = "%(asctime)s %(thread)7d %(levelname)8s %(filename)s(%(lineno)d) - %(message)s"
class _StreamHandler(Handler):
"""
分线程日志流记录
"""
terminator = '\n'
def __init__(self, streams=None):
"""
初始化处理程序
"""
Handler.__init__(self)
if streams is None:
streams = {}
self.streams = streams
def flush(self):
"""
刷新流
"""
self.acquire()
steam_id = str(threading.current_thread().ident)
try:
if self.streams.get(steam_id) and hasattr(self.streams[steam_id], "flush"):
self.streams[steam_id].flush()
finally:
self.release()
def emit(self, record):
"""
记录
"""
try:
msg = self.format(record)
steam_id = str(threading.current_thread().ident)
if steam_id not in self.streams:
self.streams[steam_id] = StringIO()
stream = self.streams[steam_id]
# issue 35046: merged two stream.writes into one.
stream.write(msg + self.terminator)
self.flush()
except:
self.handleError(record)
def __repr__(self):
level = getLevelName(self.level)
steam_id = str(threading.current_thread().ident)
name = getattr(self.streams[steam_id], "name", "")
if name:
name += " "
return f"<{self.__class__.__name__} {name}({level})>"
class InfoOrLessCritical(logging.Filter):
    """Pass only records strictly below WARNING (i.e. DEBUG and INFO)."""

    def filter(self, record):
        # Inverted comparison of the original; identical truth table.
        return not (record.levelno >= logging.WARNING)
class HandlerFactory(object):
    """Builds and memoises the logging handlers used by the report runner."""
    handlers = {}
    streams = {}

    @classmethod
    def get_std_out_handler(cls):
        """stdout handler carrying DEBUG/INFO only (records below WARNING)."""
        key = "std_out_handler"
        if key not in cls.handlers:
            handler = logging.StreamHandler(sys.stdout)
            handler.setFormatter(logging.Formatter(_LOGGER_FORMAT))
            handler.addFilter(InfoOrLessCritical())
            cls.handlers[key] = handler
        return cls.handlers[key]

    @classmethod
    def get_std_err_handler(cls):
        """stderr handler carrying WARNING and above."""
        key = "std_err_handler"
        if key not in cls.handlers:
            handler = logging.StreamHandler(sys.stderr)
            handler.setFormatter(logging.Formatter(_LOGGER_FORMAT))
            handler.setLevel(logging.WARNING)
            cls.handlers[key] = handler
        return cls.handlers[key]

    @classmethod
    def get_rotating_file_handler(cls, log_path, max_bytes=0, backup_count=0):
        """Size-rotating UTF-8 file handler, cached per log_path."""
        per_path = cls.handlers.setdefault("rotating_file_handler", {})
        if log_path not in per_path:
            handler = logging.handlers.RotatingFileHandler(
                log_path, "a", max_bytes, backup_count, encoding="utf8")
            handler.setLevel(logging.NOTSET)
            handler.setFormatter(logging.Formatter(_LOGGER_FORMAT))
            per_path[log_path] = handler
        return per_path[log_path]

    @classmethod
    def get_stream_handler(cls):
        """Per-thread in-memory handler sharing this factory's streams dict."""
        key = "rotating_stream_handler"
        if key not in cls.handlers:
            handler = _StreamHandler(cls.streams)
            handler.setFormatter(logging.Formatter(_LOGGER_FORMAT))
            cls.handlers[key] = handler
        return cls.handlers[key]

    @classmethod
    def get_stream_value(cls):
        """Return and clear the current thread's buffered log text ('' if none)."""
        steam_id = str(threading.current_thread().ident)
        if steam_id not in cls.streams:
            return ""
        buffer = cls.streams[steam_id]
        text = buffer.getvalue()
        buffer.truncate(0)
        buffer.seek(0)
        return text
| StarcoderdataPython |
3436983 | fname = input('Enter the file name:')
# Average the X-DSPAM-Confidence values found in the chosen mail file.
try:
    fhand = open('ch07\\' + fname)
except OSError:  # was a bare except; only file-open errors are expected here
    print('File cannot be opened:', fname)
    quit()
count = 0
conf = 0.0
prefix = 'X-DSPAM-Confidence:'
with fhand:  # guarantees the file is closed (replaces try/close/except: pass)
    for line in fhand:  # the loop variable was named `str`, shadowing the builtin
        if not line.startswith(prefix):
            continue
        count += 1
        # Slice off the header instead of lstrip(): lstrip strips a *set* of
        # characters and could eat leading characters of the value itself.
        conf += float(line[len(prefix):])
if count:
    print('Average spam confidence:', conf / count)
else:
    # Previously this divided by zero when no matching lines were found.
    print('No spam confidence lines found in', fname)
382035 | __author__ = '<NAME>'
from electricitycostcalculator.electricity_rate_manager.rate_manager import ElectricityRateManager
from electricitycostcalculator.openei_tariff.openei_tariff_analyzer import *
import pandas as pd
# ----------- TEST DEMO -------------- #
# NOTE(review): READ_FROM_JSON appears unused below - the JSON path is
# exercised directly via read_from_json(); confirm before removing.
READ_FROM_JSON = False
# useful functions
def utc_to_local(data, local_zone="America/Los_Angeles"):
    """Shift a tz-aware pandas object's index into *local_zone* and drop the
    timezone information, so it compares cleanly with naive CSV timestamps.
    """
    localized = data.tz_convert(local_zone)  # account for the local offset
    return localized.tz_localize(None)       # strip tz info -> naive index
if __name__ == '__main__':
    meter_uuid = 'e9c51ce5-4aa1-399c-8172-92073e273a0b'
    #
    ### Reading power consumption data
    #
    print("--- Loading meter data ...")
    df = pd.read_csv('meter.csv', index_col=0) # import times series energy data for meters
    df.index.name = 'Time'
    df = df.set_index(pd.to_datetime(df.index, infer_datetime_format=True, utc=True))
    df["date"] = df.index
    data_meter = df[meter_uuid]
    data_meter = utc_to_local(data_meter, local_zone="America/Los_Angeles")
    #
    ### Reading OpenEI-based tariff rates, and binding it to the ElectricityRateManager
    #
    print("--- Calling OpenEI API ...")
    # (a) Example of using the OpenEI WEB API
    tariff_openei_apidata = OpenEI_tariff(utility_id='14328',
                                          sector='Commercial',
                                          tariff_rate_of_interest='E-19',
                                          distrib_level_of_interest=None,
                                          # it is at the secondary level, so not specified in the name
                                          phasewing=None,
                                          # the word 'Poly' is to be excluded, because the names may omit this info ..
                                          tou=True,
                                          option_exclusion=['(X)', '(W)', 'Poly'])  # Need to reject the option X and W
    tariff_openei_apidata.call_api(store_as_json=True)
    # (b) Example of reading local data, encoded according to OpenEI structure
    elecrate_manager = ElectricityRateManager()
    # Binding an instance of ElectricityRateManager to a specific OpenEI tariff
    tariff_struct_from_openei_data(tariff_openei_apidata, elecrate_manager) # This analyses the raw data from the openEI request and populate the "CostCalculator" object
    # BILLING PERIOD
    start_date_bill = datetime(2017, 7, 1, hour=0, minute=0, second=0)
    end_date_bill = datetime(2017, 7, 30, hour=23, minute=59, second=59)
    mask = (data_meter.index >= start_date_bill) & (data_meter.index <= end_date_bill)
    data_meter = data_meter.loc[mask]
    data_meter = data_meter.fillna(0)
    # 1) Get the bill over the period
    print("Calculating the bill for the period {0} to {1}".format(start_date_bill, end_date_bill))
    bill = elecrate_manager.compute_bill(data_meter, monthly_detailed=True)
    t, tt, ttt = elecrate_manager.print_aggregated_bill(bill)
    print(t)
    # 2) Get the electricity price per type of metric, for a specific period
    tariff_openei_jsondata = OpenEI_tariff(utility_id='14328', sector='Commercial', tariff_rate_of_interest='B-19S')
    if tariff_openei_jsondata.read_from_json(filename="tariff_revised/u14328_Commercial_B19S_revised.json") == 0:
        print("Tariff read from JSON successful")
    else:
        print("An error occurred when reading the JSON file")
        exit()
    elecrate_manager = ElectricityRateManager()
    tariff_struct_from_openei_data(tariff_openei_jsondata, elecrate_manager) # This analyses the raw data from the openEI request and populate the "CostCalculator" object
    start_date_sig= datetime(2020, 1, 1, hour=0, minute=0, second=0)
    end_date_sig = datetime(2020, 1, 7, hour=23, minute=59, second=59)
    # NOTE(review): QUARTERLY suggests 15-minute steps, but the original
    # comment said "1h period" - confirm the intended resolution.
    timestep = TariffElemPeriod.QUARTERLY # We want a 1h period
    # NOTE(review): `map` shadows the builtin; rename if this block grows.
    price_elec, map = elecrate_manager.get_electricity_price((start_date_sig, end_date_sig), timestep)
    print(price_elec)
| StarcoderdataPython |
11248731 | from django.shortcuts import render, get_object_or_404
from .models import Mentiq
from .forms import MentiqForm
from taggit.models import Tag
from django.template.defaultfilters import slugify
def home_view_mentiq(request):
    """Landing page: all Mentiq objects, the four most common tags and a
    creation form.

    The form is bound only on POST; the original bound it to the (empty)
    request.POST on every GET too, which rendered spurious "required"
    validation errors on first page load.
    """
    mentiqs = Mentiq.objects.all()
    common_tags = Mentiq.tags.most_common()[:4]
    if request.method == 'POST':
        form = MentiqForm(request.POST)
        if form.is_valid():
            mentiq = form.save(commit=False)
            mentiq.slug = slugify(mentiq.title)
            mentiq.save()
            form.save_m2m()  # tags are m2m; saved only after the instance exists
    else:
        form = MentiqForm()
    context = {
        'mentiqs': mentiqs,
        'common_tags': common_tags,
        'form': form,
    }
    return render(request, 'home-mentiq.html', context)
def upload_view_mentiq(request):
    """Upload page: same listing data as the home view plus the creation
    form, rendered with the upload template.

    The form is bound only on POST; the original bound it to the (empty)
    request.POST on every GET, showing "required" errors on first load.
    """
    mentiqs = Mentiq.objects.all()
    common_tags = Mentiq.tags.most_common()[:4]
    if request.method == 'POST':
        form = MentiqForm(request.POST)
        if form.is_valid():
            newmentiq = form.save(commit=False)
            newmentiq.slug = slugify(newmentiq.title)
            newmentiq.save()
            form.save_m2m()  # persist tags after the instance exists
    else:
        form = MentiqForm()
    context = {
        'mentiqs': mentiqs,
        'common_tags': common_tags,
        'form': form,
    }
    return render(request, 'upload-mentiq.html', context)
def detail_view_mentiq(request, slug):
    """Render the detail page for the single Mentiq identified by *slug*
    (404 when no such object exists)."""
    context = {
        'mentiq': get_object_or_404(Mentiq, slug=slug),
    }
    return render(request, 'detail-mentiq.html', context)
def tagged_mentiq(request, slug):
    """List every Mentiq carrying the tag identified by *slug*, reusing the
    home template (404 when the tag does not exist)."""
    tag = get_object_or_404(Tag, slug=slug)
    context = {
        'tag': tag,
        'common_tags': Mentiq.tags.most_common()[:4],
        'mentiqs': Mentiq.objects.filter(tags=tag),
    }
    return render(request, 'home-mentiq.html', context)
8066981 | <gh_stars>0
dict={"Usain":1, "Me":2, "Qazi":3}
def choice_to_number(choice):
return dict[choice]
def number_to_choice(number):
for x in dict:
if dict[x]==number:
return x
# Interactive demo: look up by name, then by number.
usr_choice=input("Person of choice:")
print(choice_to_number(usr_choice))
usr_number=int(input("Number of choice:"))
print(number_to_choice(usr_number))
| StarcoderdataPython |
11368381 | <filename>pythondesafios/desafio067.py
#Faça um programa que mostre a tabuada de vários números, um de cada vez, para cada valor digitado pelo usuário. O programa será interrompido quando o número solicitado for negativo.
# Multiplication-table printer: keeps asking for a number and prints its
# 1..10 table; a negative number ends the program.
while True:
    número = int(input('Quer ver a tabuada de qual número? '))
    if número < 0:
        print('PROGRAMA ENCERRADO.')
        break
    print('-=' * 10)
    # A for-loop over 1..10 replaces the manual counter + reset.
    for contador in range(1, 11):
        total = número * contador
        print(f'{número} * {contador} = {total}')
    print('-=' * 10)
3570471 | import requests
from chalicelib import util
import time
def order():
    """POST a hard-coded exit-short trade for TRX/USDT to the local trade API.

    NOTE(review): defined but never called in this script — presumably kept
    for manual testing; confirm before removing.
    """
    datas = {
        "symbol": "TRX/USDT",
        "position": "short",
        "target_price": 1,
        "trade_condition": "exit_short"
    }
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    url = "http://localhost:8000/trade"
    # Send the order to the locally running trade service and echo the reply.
    res = requests.post(url, json=datas, headers=headers)
    print(res.text)
# Ad-hoc benchmark: fetch the trade list once, then time a second
# fetch plus a write round trip through the util module.
trade_list = util.get_trade_list()
print(trade_list)
start = time.time()
trade_list = util.get_trade_list()
util.write_trade_list()
print(trade_list)
end = time.time()
# "소요시간" is Korean for "elapsed time".
print("소요시간: ", end-start)
| StarcoderdataPython |
3586836 | <gh_stars>1-10
from setuptools import setup
setup(
    name='tripledraw',
    version='0.1',
    # BUG FIX: the setuptools keyword is ``py_modules`` (plural);
    # ``py_module`` was silently ignored, so the module was never packaged.
    py_modules=['tripledraw'],
    install_requires=[
        'click>=4.0',
        'ansicolors'
    ],
    author='<NAME>',
    author_email='<EMAIL>',
    entry_points='''
        [console_scripts]
        tripledraw=tripledraw:main
    ''',
)
| StarcoderdataPython |
1698229 | <filename>app/src/main/jni/src/test_healthd.py
#!/usr/bin/python
import sys
import dbus
import re
import dbus.service
import dbus.mainloop.glib
import os
import glib
from test_healthd_parser import *
# Prefix for every XML dump file written by dump() below (overridable
# with the --prefix command-line option).
dump_prefix = "XML"
# Cache: device object path -> System-Id string extracted from its MDS XML.
system_ids = {}
def get_system_id(path, xmldata):
    # Return the System-Id for a device path, using the module-level cache.
    # On a cache miss the id is extracted from the supplied MDS XML; callers
    # such as dump() pass xmldata=None, in which case extraction presumably
    # yields an empty/false value and "" is returned (TODO confirm against
    # get_system_id_from_mds in test_healthd_parser).
    if path in system_ids:
        return system_ids[path]
    sid = ""
    sid = get_system_id_from_mds(xmldata)
    if sid:
        system_ids[path] = sid
    return sid
def dump(path, suffix, xmldata):
    # Pretty-print the XML and write it to
    # "<dump_prefix>_<last4-of-system-id>_<suffix>.xml" in the working dir.
    xmldata = beautify(xmldata)
    rsid = get_system_id(path, None)
    # Keep only the last 4 characters of the System-Id so names stay short.
    if len(rsid) > 4:
        rsid = rsid[-4:]
    if rsid:
        rsid += "_"
    f = open(dump_prefix + "_" + rsid + suffix + ".xml", "w")
    f.write(xmldata)
    f.close()
# Get-Segment-Data response codes -> human-readable description.
gsdr = {0: "Success", 1: "Segment unknown", 2: "Fail, try later",
        3: "Fail, segment empty", 512: "Fail, other"}


def getsegmentdata_response_interpret(i):
    """Return the description for response code *i*; unknown codes map to
    "Unknown fail code"."""
    # dict.get() with a default replaces the try/except KeyError dance.
    return gsdr.get(i, "Unknown fail code")
# Defaults for the command-line options parsed at the bottom of this file.
pmstore_handle = 11  # PM-Store object handle to query (--store)
pmsegment_instance = 0  # PM-Segment instance number (--instance)
clear_segment = 0  # 1 = clear one segment, 2 = clear all (--clear-*)
get_segment = 0  # fetch segment data after association (--get-segment)
get_pmstore = 0  # fetch PM-Store attributes / segment info
get_mds = 0  # request MDS attributes after association (--mds)
interpret_data = 0  # decode received XML instead of just dumping (--interpret)
class Agent(dbus.service.Object):
    """D-Bus agent exported to healthd.

    healthd calls these methods to deliver connection/association events and
    IEEE 11073 payloads as XML strings.  Most handlers dump the XML to a file
    (see dump()) and, when --interpret was given, decode it with the classes
    imported from test_healthd_parser.
    """
    @dbus.service.method("com.signove.health.agent", in_signature="ss", out_signature="")
    def Connected(self, dev, addr):
        """Transport-level connection: resolve the device object path to a
        proxy interface and schedule do_something() on the main loop."""
        print
        print "Connected from addr %s, dev %s" % (addr, dev)
        # Convert path to an interface
        dev = bus.get_object("com.signove.health", dev)
        dev = dbus.Interface(dev, "com.signove.health.device")
        glib.timeout_add(0, do_something, dev)
    @dbus.service.method("com.signove.health.agent", in_signature="ss", out_signature="")
    def Associated(self, dev, xmldata):
        """Association established: dump the MDS XML, then schedule the
        requests selected by the command-line flags.  The staggered
        timeouts (1000/2000/3000/5000 ms) serialize the requests."""
        print
        print "Associated dev %s: XML with %d bytes" % (dev, len(xmldata))
        print "System ID: %s" % get_system_id(dev, xmldata)
        dump(dev, "associated", xmldata)
        # Convert path to an interface
        devpath = dev
        dev = bus.get_object("com.signove.health", dev)
        dev = dbus.Interface(dev, "com.signove.health.device")
        glib.timeout_add(0, getConfiguration, dev, devpath)
        # Clearing is exclusive: when either clear mode is selected, no
        # other requests are scheduled (note the early returns).
        if clear_segment == 1:
            glib.timeout_add(1000, clearSegment, dev, pmstore_handle, pmsegment_instance)
            return
        elif clear_segment == 2:
            glib.timeout_add(1000, clearAllSegments, dev, pmstore_handle)
            return
        if get_mds:
            glib.timeout_add(1000, requestMdsAttributes, dev)
        if get_pmstore:
            glib.timeout_add(2000, getPMStore, dev, pmstore_handle)
            glib.timeout_add(3000, getSegmentInfo, dev, pmstore_handle)
        if get_segment:
            glib.timeout_add(5000, getSegmentData, dev, pmstore_handle, pmsegment_instance)
    @dbus.service.method("com.signove.health.agent", in_signature="ss", out_signature="")
    def MeasurementData(self, dev, xmldata):
        """Measurement received: decode (with --interpret) or print raw,
        then dump to file."""
        print
        print "MeasurementData dev %s" % dev
        if interpret_data:
            Measurement(DataList(xmldata)).describe()
        else:
            print "=== Data: ", xmldata
        dump(dev, "measurement", xmldata)
    @dbus.service.method("com.signove.health.agent", in_signature="sis", out_signature="")
    def PMStoreData(self, dev, pmstore_handle, xmldata):
        """PM-Store attribute data received: decode or dump."""
        print
        print "PMStore dev %s handle %d" % (dev, pmstore_handle)
        if interpret_data:
            PMStore(DataList(xmldata)).describe()
        else:
            print "=== Data: XML with %d bytes" % len(xmldata)
        dump(dev, "pmstore_%d" % pmstore_handle, xmldata)
    @dbus.service.method("com.signove.health.agent", in_signature="sis", out_signature="")
    def SegmentInfo(self, dev, pmstore_handle, xmldata):
        """Segment info received: decode or dump.

        NOTE: the method name shadows the parser class SegmentInfo at class
        scope, but inside this body the bare name resolves to the
        module-level class imported from test_healthd_parser."""
        print
        print "SegmentInfo dev %s PM-Store handle %d" % (dev, pmstore_handle)
        if interpret_data:
            SegmentInfo(DataList(xmldata)).describe()
        else:
            print "=== XML with %d bytes" % len(xmldata)
        dump(dev, "segmentinfo_%d" % pmstore_handle, xmldata)
    @dbus.service.method("com.signove.health.agent", in_signature="siii", out_signature="")
    def SegmentDataResponse(self, dev, pmstore_handle, pmsegment, response):
        """Status reply to GetSegmentData; on failure, retry with the next
        segment instance number (up to instance 7)."""
        print
        print "SegmentDataResponse dev %s PM-Store handle %d" % (dev, pmstore_handle)
        print "=== InstNumber %d" % pmsegment
        print "=== Response %s" % getsegmentdata_response_interpret(response)
        if response != 0 and pmsegment < 7:
            dev = bus.get_object("com.signove.health", dev)
            dev = dbus.Interface(dev, "com.signove.health.device")
            glib.timeout_add(0, getSegmentData, dev, pmstore_handle, pmsegment + 1)
    @dbus.service.method("com.signove.health.agent", in_signature="siis", out_signature="")
    def SegmentData(self, dev, pmstore_handle, pmsegment, xmldata):
        """Actual PM-Segment contents received: decode or dump.
        (Same name-shadowing note as SegmentInfo applies.)"""
        print
        print "SegmentData dev %s PM-Store handle %d inst %d" % \
            (dev, pmstore_handle, pmsegment)
        if interpret_data:
            SegmentData(DataList(xmldata)).describe()
        else:
            print "=== Data: %d bytes XML" % len(xmldata)
        dump(dev, "segmentdata_%d_%d" % (pmstore_handle, pmsegment), xmldata)
    @dbus.service.method("com.signove.health.agent", in_signature="siii", out_signature="")
    def SegmentCleared(self, dev, pmstore_handle, pmsegment, retstatus):
        """Confirmation of a ClearSegment / ClearAllSegments request."""
        print
        print "SegmentCleared dev %s PM-Store handle %d" % (dev, pmstore_handle)
        print "=== InstNumber %d retstatus %d" % (pmsegment, retstatus)
        print
    @dbus.service.method("com.signove.health.agent", in_signature="ss", out_signature="")
    def DeviceAttributes(self, dev, xmldata):
        """MDS attribute XML received (reply to RequestDeviceAttributes)."""
        print
        print "DeviceAttributes dev %s" % dev
        if interpret_data:
            DeviceAttributes(DataList(xmldata)).describe()
        else:
            print "=== Data: XML with %d bytes" % len(xmldata)
        dump(dev, "attributes", xmldata)
    @dbus.service.method("com.signove.health.agent", in_signature="s", out_signature="")
    def Disassociated(self, dev):
        """Association released by the device or by healthd."""
        print
        print "Disassociated dev %s" % dev
    @dbus.service.method("com.signove.health.agent", in_signature="s", out_signature="")
    def Disconnected(self, dev):
        """Transport disconnected."""
        print
        print "Disconnected %s" % dev
def requestMdsAttributes (dev):
    # Ask the device for its MDS attributes; the reply arrives via
    # Agent.DeviceAttributes.  Returns False so glib does not reschedule.
    dev.RequestDeviceAttributes()
    return False
def getConfiguration(dev, devpath):
    # Fetch the device configuration XML synchronously, dump it, and
    # optionally decode it.  Returns False so glib does not reschedule.
    config = dev.GetConfiguration()
    print
    print "Configuration: XML with %d bytes" % len(config)
    print
    dump(devpath, "config", config)
    if interpret_data:
        Configuration(DataList(config)).describe()
    return False
def getSegmentInfo(dev, handle):
    # Request segment info for a PM-Store; the data itself arrives via
    # Agent.SegmentInfo.  Returns False so glib does not reschedule.
    ret = dev.GetSegmentInfo(handle)
    print
    print "GetSegmentInfo ret %d" % ret
    print
    return False
def getSegmentData(dev, handle, instance):
    # Request the contents of one PM-Segment; data arrives via
    # Agent.SegmentData / SegmentDataResponse.  Returns False (one-shot).
    ret = dev.GetSegmentData(handle, instance)
    print
    print "GetSegmentData ret %d" % ret
    print
    return False
def clearSegment(dev, handle, instance):
    # Clear a single PM-Segment; confirmation arrives via
    # Agent.SegmentCleared.  Returns False (one-shot glib callback).
    ret = dev.ClearSegment(handle, instance)
    print
    print "ClearSegment ret %d" % ret
    print
    return False
def clearAllSegments(dev, handle):
    # Clear every PM-Segment of the PM-Store; confirmation arrives via
    # Agent.SegmentCleared.  Returns False (one-shot glib callback).
    ret = dev.ClearAllSegments(handle)
    print
    print "ClearAllSegments ret %d" % ret
    print
    return False
def getPMStore(dev, handle):
    # Request PM-Store attributes; data arrives via Agent.PMStoreData.
    # Returns False (one-shot glib callback).
    ret = dev.GetPMStore(handle)
    print
    print "GetPMStore ret %d" % ret
    print
    return False
def do_something(dev):
    # Scheduled on transport connection (Agent.Connected).  Currently a
    # no-op; the commented calls below are left as a menu of manual
    # experiments.  Returns False so glib does not reschedule.
    # print dev.AbortAssociation()
    # print dev.Connect()
    # print dev.RequestMeasurementDataTransmission()
    # print dev.RequestActivationScanner(55)
    # print dev.RequestDeactivationScanner(55)
    # print dev.ReleaseAssociation()
    # print dev.Disconnect()
    return False
# Hand-rolled command-line parsing; options override the module-level
# defaults declared above.  Options taking a value consume args[i + 1].
args = sys.argv[1:]
i = 0
while i < len(args):
    arg = args[i]
    if arg == '--mds':
        get_mds = 1
    elif arg == "--interpret" or arg == "--interpret-xml":
        interpret_data = 1
    elif arg == '--prefix':
        dump_prefix = args[i + 1]
        i += 1
    elif arg == '--get-segment':
        get_segment = 1
        get_pmstore = 1
    elif arg == '--clear-segment':
        clear_segment = 1
        get_pmstore = 1
    elif arg == '--clear-all-segments':
        clear_segment = 2
        get_pmstore = 1
    elif arg == '--store' or arg == '--pmstore' or arg == '--pm-store':
        pmstore_handle = int(args[i + 1])
        get_pmstore = 1
        i += 1
    elif arg == '--instance' or arg == '--inst' or arg == '--segment' or \
        arg == '--pm-segment' or arg == '--pmsegment':
        pmsegment_instance = int(args[i + 1])
        i += 1
    else:
        raise Exception("Invalid argument %s" % arg)
    i += 1
# Install the glib main loop as the default for dbus-python, then watch
# the system bus so we (re)configure whenever the healthd service
# appears, and detach when it goes away.
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
def bus_nameownerchanged(service, old, new):
    # old=="" -> name just appeared; new=="" -> name just vanished.
    if service == "com.signove.health":
        if old == "" and new != "":
            start()
        elif old != "" and new == "":
            stop()
bus.add_signal_receiver(bus_nameownerchanged,
                        "NameOwnerChanged",
                        "org.freedesktop.DBus",
                        "org.freedesktop.DBus",
                        "/org/freedesktop/DBus")
def stop():
    # Called when healthd vanishes from the bus: forget the cached
    # System-Ids so a restarted daemon starts fresh.
    global system_ids
    print "Detaching..."
    system_ids = {}
def start():
    # Called at script start and whenever healthd (re)appears on the bus:
    # register this agent for passive connections.
    print "Starting..."
    try:
        obj = bus.get_object("com.signove.health", "/com/signove/health")
    except:
        # NOTE(review): bare except — deliberate best-effort here; the
        # NameOwnerChanged watcher will call start() again later.
        print "healthd service not found, waiting..."
        return
    srv = dbus.Interface(obj, "com.signove.health.manager")
    print "Configuring..."
    # Data types: 0x1004 pulse oximeter, 0x1007 blood pressure,
    # 0x1029 glucose, 0x100f weighing scale -- TODO confirm against the
    # IEEE 11073 device specialization codes healthd expects.
    srv.ConfigurePassive(agent, [0x1004, 0x1007, 0x1029, 0x100f])
    print "Waiting..."
# Export the agent object at a PID-unique path, try to attach to healthd
# immediately, then hand control to the glib main loop.
agent = Agent(bus, "/com/signove/health/agent/%d" % os.getpid())
start();
mainloop = glib.MainLoop()
mainloop.run()
| StarcoderdataPython |
9661762 | import numpy as np
import cv2
# Haar-cascade face detector shipped with OpenCV, plus the default camera.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
# overlay_img = cv2.imread('laughingMan.png', cv2.IMREAD_UNCHANGED)
# ratio = overlay_img.shape[1] / overlay_img.shape[0]
# Last face rectangle seen; reused to keep drawing when detection
# momentarily loses the face.
boundingBox = {
    "x":0,
    "y":0,
    "w":0,
    "h":0
}
while True:
    ret, img = cap.read()
    if ret:
        # Haar cascades operate on grayscale frames.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        if len(faces) != 0:
            for (x, y, w, h) in faces:
                # Remember the most recent detection for the miss case below.
                boundingBox["x"] = x
                boundingBox["y"] = y
                boundingBox["w"] = w
                boundingBox["h"] = h
                # Attempt at adding image
                # overlay_img = cv2.resize(overlay_img, (w,h))
                # bg = img[y:y+h, x:x+w]
                # np.multiply(bg, np.atleast_3d(255 - overlay_img[:, :, 3])/255.0, out=bg, casting="unsafe")
                # np.add(bg, overlay_img[:, :, 0:3] * np.atleast_3d(overlay_img[:, :, 3]), out=bg)
                # newImg = img[y:y+h, x:x+w] = bg
                # thickness -1 -> filled blue rectangle over the face.
                img = cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), -1)
                roi_gray = gray[y:y+h, x:x+w]
                roi_color = img[y:y+h, x:x+w]
        else:
            # No detection this frame: redraw the last known rectangle
            # (negative thickness also draws filled).
            img = cv2.rectangle(img, (boundingBox['x'], boundingBox['y']), (boundingBox['x']+boundingBox['w'], boundingBox['y']+boundingBox['h']), (255, 0, 0), -10)
        cv2.imshow('img', img)
    else:
        # Camera read failed; stop the loop.
        break
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
| StarcoderdataPython |
5119461 | <reponame>PawWitGit/bentley-ottmann-api
from typing import Any, Optional, Union
from pydantic import BaseSettings, PostgresDsn, validator
class Settings(BaseSettings):
    """
    Settings for `Geometry api` project.

    Values are read from the environment (or the `.env` file, see Config).
    ``database_uri`` may be supplied directly; otherwise it is assembled
    from the individual db_* fields by the validator below.
    """

    db_user: str
    db_password: str
    db_name: str
    db_port: str
    db_host: str

    database_uri: Optional[PostgresDsn] = None

    @validator("database_uri", pre=True)
    def get_database_uri(cls, v: Optional[str], values: dict[str, Any]) -> Union[PostgresDsn, str]:
        """
        Method returns database uri.

        Args:
            v: value
            values: another value in class

        Returns:
            database uri
        """
        if v is None:
            return PostgresDsn.build(
                scheme="postgresql",
                user=values.get("db_user"),
                # BUG FIX: this line had been mangled by credential
                # scrubbing ("password=<PASSWORD>(...)"); restore the
                # db_password lookup, mirroring the sibling values.get()
                # calls.
                password=values.get("db_password"),
                path=f"/{values.get('db_name')}",
                port=values.get("db_port"),
                host=values.get("db_host"),
            )
        return v

    class Config:
        # Load environment variables from a local .env file.
        env_file = ".env"
settings = Settings()
| StarcoderdataPython |
3223643 | <filename>python/701.insert-into-a-binary-search-tree.py
#
# @lc app=leetcode.cn id=701 lang=python3
#
# [701] 二叉搜索树中的插入操作
#
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node: a value plus optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution:
    # Iterative approach: walk down from the root, comparing at each node,
    # and attach the new leaf where the search falls off the tree.  For the
    # duplicate-free input guaranteed by the problem, this produces exactly
    # the same tree as the recursive insertion.
    def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:
        if root is None:
            # Empty tree: the new node becomes the root.
            return TreeNode(val)
        node = root
        while True:
            if val > node.val:
                if node.right is None:
                    node.right = TreeNode(val)
                    break
                node = node.right
            else:
                if node.left is None:
                    node.left = TreeNode(val)
                    break
                node = node.left
        return root
| StarcoderdataPython |
12857746 | <filename>app.py<gh_stars>1-10
# Import from system libraries
from flask import Flask
from flask_bcrypt import Bcrypt
from flask_cors import CORS
from flask_jwt_extended import JWTManager
from flask_restful import Api
# Import from application modules
from errors import errors
from models.User import User
from models.db import initialize_db
from routes.api import initialize_routes
# Flask application instance for this service.
app = Flask(__name__)

# Flask-RESTful wrapper with the shared error mapping applied.
api = Api(app, errors=errors)

# Configuration is loaded from the file named by the ENV_FILE_LOCATION
# environment variable (must at least provide the JWT/Mongo settings).
app.config.from_envvar('ENV_FILE_LOCATION')

# BCrypt instances
bcrypt = Bcrypt(app)

# JWT instances
jwt = JWTManager(app)

# CORS enabled
CORS(app)

# Embed the user's roles as claims in every issued access token.
@jwt.user_claims_loader
def add_claims_to_access_token(user):
    return {'roles': user.roles}

# Use the username as the JWT identity.
@jwt.user_identity_loader
def user_identity_lookup(user):
    return user.username

# Database Configuration Initialization
initialize_db(app)

# API (Routing) Configuration Initialization
initialize_routes(api)

# Bootstrap an admin account on first run.
# NOTE(review): '<EMAIL>'/'<PASSWORD>' are scrubbed placeholders from the
# original source; real credentials must be supplied (ideally via config,
# not hard-coded) before deployment.  This runs at import time.
user = User.objects(username='<EMAIL>')
if not user:
    login = User(username='<EMAIL>', password='<PASSWORD>', roles=['admin'])
    login.hash_password()
    login.save()

# Running Flask Application when main class executed
if __name__ == '__main__':
    app.run()
| StarcoderdataPython |
9783874 | import sys
import copy
import json
import numpy as np
import numpy.linalg
#import scipy
#import scipy.linalg
from ..geometry import vertex_all_in_set
use_geom_accel=True
if use_geom_accel:
from ..geometry_accel import point_in_polygon_2d
from ..geometry_accel import polygon_intersects_box_2d
from ..geometry_accel import box_inside_polygon_2d
else:
from ..geometry import point_in_polygon_2d
from ..geometry import polygon_intersects_box_2d
from ..geometry import box_inside_polygon_2d
pass
use_pstp_accel=True
if use_pstp_accel:
from .polygonalsurface_texcoordparameterization_accel import enclosed_or_intersecting_polygons_2d_accel
from .polygonalsurface_texcoordparameterization_accel import _evaluate_curvature
from .polygonalsurface_texcoordparameterization_accel import _identify_polynum_uv
#from .polygonalsurface_texcoordparameterization_accel import _test_polynum_uv
from .polygonalsurface_texcoordparameterization_accel import _linelength_avgcurvature
from .polygonalsurface_texcoordparameterization_accel import _linelength_avgcurvature_meshbased
from .polygonalsurface_texcoordparameterization_accel import _linelength_avgcurvature_mirroredbox_meshbased
pass
def vecnorm(a, axis=-1):
    """Euclidean norm of *a* along *axis* (the summed axis is removed).

    Kept as an explicit sqrt-of-sum-of-squares for compatibility with old
    NumPy versions, per the original comment.
    """
    squared = a ** 2.0
    return np.sqrt(squared.sum(axis))
def vecnormkeepshape(a, axis=-1):
    """Euclidean norm of *a* along *axis*, keeping that axis as length 1.

    E.g. for shape (4, 8, 9, 3, 2) and axis=3 the result has shape
    (4, 8, 9, 1, 2).  ``keepdims=True`` performs the normalize-axis and
    reshape-with-length-one steps of the original in a single call.
    """
    return np.sqrt(np.sum(a ** 2.0, axis=axis, keepdims=True))
def enclosed_or_intersecting_polygons_2d(polypool,vertexpool,texcoord,vertexidx_indices,texcoordidx,numvertices,texcoordredundant_numcopies,texcoordredundant_polystartindexes,texcoordredundant_polystartpolynum,texcoordredundant_texcoordidx,minu,minv,maxu,maxv):
    # Classify each polygon of polypool against the UV box
    # (minu,minv)-(maxu,maxv): collect those fully enclosed or intersecting.
    #
    # polypool: int array of candidate polygon numbers; entries < 0 are
    #   masked out.  Polygons found fully enclosed are masked out IN PLACE
    #   (set to -1) so sibling boxes skip them.
    # vertexpool: texcoord-vertex indices known to lie inside the box;
    #   a polygon whose texcoord indices are all in this set is enclosed.
    # Polygon numbers >= vertexidx_indices.shape[0] refer to redundant
    #   texture-coordinate copies (see the texcoordredundant_* arrays).
    # Returns (num_fully_enclosed, array of enclosed/intersecting polynums).
    #res=copy.copy(polypool)
    #res=np.zeros(polypool.shape[0],dtype=np.bool)
    res=[]
    vertexset=frozenset(vertexpool)
    num_fully_enclosed=0
    for poolidx in np.arange(len(polypool)):
        idx=polypool[poolidx]
        if idx < 0:
            # masked out polygon
            continue
        if idx < vertexidx_indices.shape[0]:
            # Ordinary polygon: texcoord indices via texcoordidx.
            firstidx=vertexidx_indices[idx]
            vertexidxs=texcoordidx[firstidx:(firstidx+numvertices[idx])]
            polygon_fully_enclosed = vertex_all_in_set(vertexidxs,vertexset)
            pass
        else:
            # redundant texcoords
            firstidx=texcoordredundant_polystartindexes[idx-vertexidx_indices.shape[0]]
            polynum=texcoordredundant_polystartpolynum[idx-vertexidx_indices.shape[0]]
            vertexidxs = texcoordredundant_texcoordidx[firstidx:(firstidx+numvertices[polynum])]
            polygon_fully_enclosed = vertex_all_in_set(vertexidxs,vertexset)
            pass
        if polygon_fully_enclosed:
            res.append(idx)
            # if it's fully enclosed, nothing else need look at at, so we filter it here from the broader sibling pool
            polypool[poolidx] = -1 # mask out polygon
            num_fully_enclosed +=1
            pass
        # ***Could optimize further here by checking if all of the vertices of a
        # polygon are beyond each bound of the box.
        if not polygon_fully_enclosed:
            box_v0=np.array((minu,minv),dtype='d')
            box_v1=np.array((maxu,maxv),dtype='d')
            # does it intersect?
            if polygon_intersects_box_2d(box_v0,
                                         box_v1,
                                         texcoord[vertexidxs,:]):
                res.append(idx)
                # Don't filter it out in this case because it must
                # intersect with a sibiling too
                pass
            # What if the box is entirely inside the polygon?
            # Then we should return it also
            elif box_inside_polygon_2d(box_v0,box_v1,texcoord[vertexidxs,:]):
                res.append(idx)
                # Don't filter it out in this case because it must
                # intersect with sibilings too
                pass
            pass
        pass
    # return updated pool as array
    return (num_fully_enclosed,np.array(res,dtype=np.int32))
class polygonalsurface_texcoordparameterization(object):
    """Texture-coordinate (UV) parameterization of a polygonal surface.

    Holds the per-polygon texture coordinates (including redundant copies
    of polygons mapped more than once in UV space), the affine transforms
    between each facet's in-plane 2D frame and UV space, and a lazily
    built quadtree of UV bounding boxes for fast point/box queries.
    """
    cadpartparams=None
    lowerleft_meaningfulunits=None # Coordinates of the lower left corner of the texture, in meaningful units.... these are the coordinates of the lower left corner of the lower left pixel
    meaningfulunits_per_texcoord=None
    # These next members are based on the polygons from the surface
    # They should probably be modified to index on which surface, so that
    # multiple surfaces parameterized into a single space
    # can share this parameterization object
    # p is number of independent texture coordinate vertices
    texcoord=None # p x 3 texture coordinate array.... Only valid for valid vertices.
    texcoordidx=None # indexes into texcoord for each polygon, terminated by -1, must be structured identically to polygonalsurface.vertexidx, and indices can be identified with polygonalsurface.vertexidx_indices and polygonalsurface.numvertices
    # Support for redundant texture mappings
    texcoordredundant_firstpolynum=None # indexed by which polygon, gives index into texcoordredundant_polystartindexes
    texcoordredundant_numcopies=None # indexed by which polygon, gives number of redundant copies of the polygon, i.e. number of elements in texcoordredundant_polystartindexes that correspond to this polygon
    texcoordredundant_polystartindexes=None # single index indicates which redundant copy of which polygon. Value gives starting index into texcoordredundant_texcoordidx (# of indices into texcoordredundant_texcoordidx is given by polysurf.numvertices[polynum]. The length of this vector is the total of texcoordredundant_numcopies.
    texcoordredundant_polystartpolynum=None # indexed like texcoordredundant_polystartindexes, but gives the original polynum ( < vertexidx_indices.shape[0] )
    texcoordredundant_texcoordidx=None # this is a vector of indices into the texcoord array. Entries for the same physical polygon (facet) are separated by -2. Entries for different polygons are separated by -1. If there is a polygon that has no redundant texcoords then there will still an extra -1 in the vector.
    ### ***!!! Will need redundant copies of these too... or
    ### More precisely, they need to be as big as the total of
    ### original + redundant polygons.
    inplane2texcoords = None # 2x3 matrix that will convert in-plane (based on polysurf.inplanemats)
    # coordinates to texture coordinates
    texcoords2inplane = None # 3x3 matrix that will convert texture coordinates
    # to in-plane (polysurf.inplanemats) coords.
    boxes = None # Array of bounding boxes. Each box is identified by integers:
    # 4 box children, or -1, identified as indices into
    # this array, then an index into boxpolys array
    # then the number of entries in boxpolys array
    # So this is a num_boxes x 6 array
    #
    # Note that current boxing is inefficient because polygons on the boundary of big boxes tend to get tested a lot
    # auto-set by buildboxes
    boxpolys=None # array of polygon indices for each leaf box,
    # each terminated by -1
    boxcoords=None # Array of box coordinates, same structure as boxes, 4 values per box: 4 box bounds, Box bounds are (minu,minv,maxu,maxv)
    def _buildbox(self,polysurf,boxlist,boxcoordlist,polys,boxpolys,vertexpool,cnt,depth,minu,minv,maxu,maxv):
        # Recursively build one quadtree node covering the UV box
        # (minu,minv)-(maxu,maxv).
        #
        # boxlist/boxcoordlist/boxpolys are accumulator lists appended to in
        # place; polys is the parent's candidate polygon pool (entries may be
        # masked -1 by the enclosed/intersecting filter); vertexpool is the
        # parent's texcoord-vertex pool (unused in accelerated mode); cnt is
        # the next free box index; returns the updated count.
        #import pdb
        # pdb.set_trace()
        # polys is a truth value array
        # boxpolys is accumulated array of where
        # polys is true.
        # Box record layout: [child0..child3, boxpolys start, count], -1 = none.
        thisbox=np.ones(6,dtype='i4')*-1
        #fullvertexpool=np.where((self.texcoord[:,0] >= minu) & (self.texcoord[:,0] <= maxu) & (self.texcoord[:,1] >= minv) & (self.texcoord[:,1] <= maxv))[0]
        #assert((fullvertexpool==ourvertexpool).all())
        # filter down polys according to what is in this box
        if depth != 0: # all pass for depth = 0
            #debugpolys = copy.copy(polys)
            if use_pstp_accel:
                (num_fully_enclosed,ourpolys)=enclosed_or_intersecting_polygons_2d_accel(polys,self.texcoord,polysurf.vertexidx_indices,self.texcoordidx,polysurf.numvertices,self.texcoordredundant_numcopies,self.texcoordredundant_polystartindexes,self.texcoordredundant_polystartpolynum,self.texcoordredundant_texcoordidx,minu,minv,maxu,maxv)
                ourvertexpool=None
                pass
            else:
                vertices=self.texcoord[vertexpool,:]
                # Filter down vertexpool from parent
                ourvertexpool=vertexpool[np.where((vertices[:,0] >= minu) &
                                                  (vertices[:,0] <= maxu) &
                                                  (vertices[:,1] >= minv) &
                                                  (vertices[:,1] <= maxv))[0]]
                (num_fully_enclosed,ourpolys)=enclosed_or_intersecting_polygons_2d(polys,ourvertexpool,self.texcoord,polysurf.vertexidx_indices,self.texcoordidx,polysurf.numvertices,self.texcoordredundant_numcopies,self.texcoordredundant_polystartindexes,self.texcoordredundant_polystartpolynum,self.texcoordredundant_texcoordidx,minu,minv,maxu,maxv)
                pass
            ##debugpolys=np.arange(polysurf.vertexidx_indices.shape[0])
            #allvertexpool=np.where((self.texcoord[:,0] >= minu) &
            #                       (self.texcoord[:,0] <= maxu) &
            #                       (self.texcoord[:,1] >= minv) &
            #                       (self.texcoord[:,1] <= maxv))[0]
            #(junk,allpolys)=enclosed_or_intersecting_polygons_2d(debugpolys,allvertexpool,self.texcoord,polysurf.vertexidx_indices,self.texcoordidx,polysurf.numvertices,self.texcoordredundant_numcopies,self.texcoordredundant_polystartindexes,self.texcoordredundant_polystartpolynum,self.texcoordredundant_texcoordidx,minu,minv,maxu,maxv)
            #assert((ourpolys==allpolys).all())
            pass
        else:
            # Root box: every polygon and vertex is a candidate.
            ourpolys=polys
            num_fully_enclosed=ourpolys.shape[0]
            ourvertexpool=vertexpool
            pass
        boxlist.append(thisbox)
        boxcoordlist.append((minu,minv,maxu,maxv))
        newcnt=cnt+1
        # Split while more than 6 polygons are fully enclosed and we are not
        # too deep; children overlap slightly (eps) to avoid boundary misses.
        if num_fully_enclosed > 6 and depth <= 18:
            # split up box
            distu=maxu-minu
            distv=maxv-minv
            eps=1e-4*np.sqrt(distu**2 + distv**2)
            thisbox[0]=newcnt
            newcnt=self._buildbox(polysurf,boxlist,boxcoordlist,ourpolys,boxpolys,ourvertexpool,newcnt,depth+1,minu,minv,minu+distu/2.0+eps,minv+distv/2.0+eps)
            thisbox[1]=newcnt
            newcnt=self._buildbox(polysurf,boxlist,boxcoordlist,ourpolys,boxpolys,ourvertexpool,newcnt,depth+1,minu+distu/2.0-eps,minv,maxu,minv+distv/2.0+eps)
            thisbox[2]=newcnt
            newcnt=self._buildbox(polysurf,boxlist,boxcoordlist,ourpolys,boxpolys,ourvertexpool,newcnt,depth+1,minu,minv+distv/2.0-eps,minu+distu/2.0+eps,maxv)
            thisbox[3]=newcnt
            newcnt=self._buildbox(polysurf,boxlist,boxcoordlist,ourpolys,boxpolys,ourvertexpool,newcnt,depth+1,minu+distu/2.0-eps,minv+distv/2.0-eps,maxu,maxv)
            pass
        #
        else:
            # This is a leaf node
            # Record our polygons... these are those which are fully enclosed
            # or intersecting us, the index where they start is boxpolys[4]
            goodpolys=ourpolys >= 0
            thisbox[4]=len(boxpolys)
            thisbox[5]=np.count_nonzero(goodpolys)
            boxpolys.extend(ourpolys[goodpolys])
            boxpolys.append(-1)
            pass
        return newcnt
def invalidateboxes(self):
self.boxes=None
self.boxcoords=None
self.boxpolys=None
pass
    def buildboxes(self,polysurf):
        # Build the UV quadtree (self.boxes / self.boxcoords / self.boxpolys)
        # over the unit UV square via the recursive _buildbox().  No-op if
        # the tree already exists; call invalidateboxes() to force a rebuild.
        if self.boxes is not None:
            return # already built
        boxlist=[]
        boxcoordlist=[]
        # Candidate polygons: all originals plus all redundant UV copies.
        numpolys=polysurf.vertexidx_indices.shape[0]
        if self.texcoordredundant_numcopies is not None:
            numpolys += self.texcoordredundant_numcopies.sum()
            pass
        # polys: indices of polygons which are inside this box
        polys=np.arange(numpolys,dtype=np.int32) # -1 will be used as a flag to indicate this poly is no longer present
        if use_pstp_accel:
            vertexpool=None # not used in accelerated mode
            pass
        else:
            vertexpool=np.arange(self.texcoord.shape[0],dtype=np.uint32) # indexes of texture vertices within a given box
            pass
        boxpolys=[]
        boxcnt=0
        # UV space is parameterized over the unit square [0,1]x[0,1].
        minu=0.0
        maxu=1.0
        minv=0.0
        maxv=1.0
        #(minx,miny,minz)=np.min(self.vertices,axis=0)
        #(maxx,maxy,maxz)=np.max(self.vertices,axis=0)
        distu=maxu-minu
        distv=maxv-minv
        # Pad the root box by eps so boundary vertices are not missed.
        eps=1e-4*np.sqrt(distu**2 + distv**2)
        self._buildbox(polysurf,boxlist,boxcoordlist,polys,boxpolys,vertexpool,0,0,minu-eps,minv-eps,maxu+eps,maxv+eps)
        self.boxes=np.array(boxlist,dtype='i4')
        self.boxcoords=np.array(boxcoordlist,dtype='d')
        self.boxpolys=np.array(boxpolys,dtype='i4')
        pass
#def eval_uv(self,polysurf,polysurf_vertexids):
# coords=polysurf.vertices[polysurf_vertexids,:] # n x 3 matrix
# relcoords=coords-self.centercoords.reshape(1,3)
#
# return np.array((np.inner(relcoords,self.u),np.inner(relcoords,self.v)),dtype='d').T
# def _determine_tex_xform(self,polysurf,polysurf_polynum):
# # See also scope_coin3d.cpp:DetermineTexXform
# # See also polygonalsurface.py: buildprojinfo()
#
# # NOTE: AijMat is 2x3, but AijMatInv is 3x3 for historical reasons
#
# # OK. So we extract the numpoints points. These are in
# # 3-space and nominally on a plane. How to flatten them
# # into 2D?
# # 1. Subtract out the centroid (best-fit plane
# # will pass through centroid)
# # (Also subtract same location from ObjectSpace intersection
# # point coordinates, from above)
# # 2. Assemble point vectors into a matrix of vectors from centroid
# # 3. Apply SVD. Resulting two basis vectors corresponding to
# # largest-magnitude two singular values will span the plane
# # 4. Re-evaluate point vectors and intersection location in
# # terms of these basis vectors. Now our point coordinates
# # and intersection coordinates are in 2-space.
# # 5. Evaluate a transform
# # [ A11 A12 A13 ][ x ] = [ tex ]
# # [ A21 A22 A23 ][ y ] = [ tey ]
# # [ 0 0 1 ][ 1 ] = [ 1 ]
# # or
# # [ x y 1 0 0 0 ] [ A11 ] = [ tex ]
# # [ 0 0 0 x y 1 ] [ A12 ] = [ tey ]
# # [ A13 ]
# # [ A21 ]
# # [ A22 ]
# # [ A23 ]
# # With rows repeated for each point.
# # Solve for Axx values from the known coordinates
# # Then substitute the 2D intersection coordinates as (x,y)
# # and multiply to get (tex,tey), the desired texture coordinates.
# numpoints=np.count_nonzero(polysurf.vertexids[polysurf_polynum,:] >= 0)
# centroid = np.mean(polysurf.vertices[polysurf.vertexids[polysurf_polynum,:numpoints],:],axis=0)
# coordvals = (polysurf.vertices[polysurf.vertexids[polysurf_polynum,:numpoints],:]-centroid.reshape(1,3)).T # coordvals is the coordinates relative to centroid, 3 x numpoints
# texcoordvals = self.texcoord[polysurf_polynum,:numpoints].T # texcoordvals is the texture coordinates, 2 rows by numpoints cols... # Note that textures are in range 0...1 by convention
#
# # calculate SVD
# (U,s,Vt)=scipy.linalg.svd(coordvals,full_matrices=True,compute_uv=True)
#
# # extract columns for 2d coordinate basis vectors
# # want columns that correspond to the largest two
# # singular values
# xcolindex=0
# ycolindex=1
#
# if abs(s[0]) < abs(s[1]) and abs(s[0]) < abs(s[2]):
# # element 0 is smallest s.v.
# xcolindex=2
# pass
# if abs(s[1]) < abs(s[2]) and abs(s[1]) < abs(s[0]):
# # element 1 is smallest s.v.
# ycolindex=2
# pass
#
# To2D=U[:,np.array((xcolindex,ycolindex))].T # 2x3... Rows of To2D are x and y basis vectors, respectively
#
# coordvals2d = np.dot(To2D,coordvals) # 2 rows by numpoints cols... in 2D basis relative to centroid
#
# TexXformMtx=np.zeros((2*numpoints,6),dtype='d')
# TexXformMtx[:(2*numpoints):2,0]=coordvals2d[0,:] # assign 'x' elements
# TexXformMtx[:(2*numpoints):2,1]=coordvals2d[1,:] # assign 'y' elements
# TexXformMtx[:(2*numpoints):2,2]=1 # assign '1' entries
# TexXformMtx[1:(2*numpoints):2,3]=coordvals2d[0,:] # assign 'x' elements
# TexXformMtx[1:(2*numpoints):2,4]=coordvals2d[1,:] # assign 'y' elements
# TexXformMtx[1:(2*numpoints):2,5]=1 # assign '1' entries
#
# TexCoordVec=np.zeros((2*numpoints),dtype='d')
# TexCoordVec[:(2*numpoints):2] = texcoordvals[0,:] # assign tex
# TexCoordVec[1:(2*numpoints):2] = texcoordvals[1,:] # assign tey
#
# (AijVals,residuals,rank,lstsq_s) = np.linalg.lstsq(TexXformMtx,TexCoordVec)
# AijMat=AijVals.reshape(2,3) # reshape to 2x3
# AijMatExt = np.concatenate((AijMat,np.array((0.0,0.0,1.0),dtype='d').reshape(1,3)),axis=0) # Add 0.0, 0.0, 1.0 row to bottom of matrix
#
# AijMatInv=np.linalg.inv(AijMatExt)
#
# return (centroid,s,xcolindex,ycolindex,To2D, AijMat,AijMatInv)
def eval_texcoord_polygonvertex(self,polysurf,polysurf_polynum,polysurf_vertexnum):
# Can supply vectors as polysurf_polynum and/or polysurf_vertexnum
#texcoords = self.texcoord[polysurf_polynum,polysurf_vertexnum,:]
firstidx=polysurf.vertexidx_indices[polysurf_polynum]
texcoords = self.texcoord[self.texcoordidx[firstidx+polysurf_vertexnum],:]
return texcoords
def invalidateprojinfo(self):
self.inplane2texcoords = None
self.texcoords2inplane = None
pass
    def buildprojinfo(self,polysurf):
        # For each polygon, fit the affine transform between its in-plane 2D
        # frame (polysurf.inplanemats, relative to the facet centroid) and
        # UV space, storing the forward 2x3 matrix in self.inplane2texcoords
        # and the inverse (homogeneous 3x3) in self.texcoords2inplane.
        # No-op if already built; invalidateprojinfo() forces a rebuild.
        # See also scope_coin3d.cpp:DetermineTexXform
        # see also polygonalsurface_intrinsicparameterization.py/_determine_tex_xform()
        # and preceding steps in polygonalsurface.py:buildprojinfo()
        # 5. Evaluate a transform
        #    [ A11 A12 A13 ][ x ]   = [ tex ]
        #    [ A21 A22 A23 ][ y ]   = [ tey ]
        #    [  0   0   1  ][ 1 ]   = [  1  ]
        # or
        #    [ x y 1 0 0 0 ] [ A11 ]   [ tex ]
        #    [ 0 0 0 x y 1 ] [ A12 ] = [ tey ]
        #                    [ A13 ]
        #                    [ A21 ]
        #                    [ A22 ]
        #                    [ A23 ]
        # With rows repeated for each point.
        # Solve for Axx values from the known coordinates
        # Then substitute the 2D intersection coordinates as (x,y)
        # and multiply to get (tex,tey), the desired texture coordinates.
        if self.inplane2texcoords is not None:
            return # already built
        numpolys=polysurf.vertexidx_indices.shape[0]
        self.inplane2texcoords = np.zeros((numpolys,2,3),dtype='d')
        self.texcoords2inplane = np.zeros((numpolys,3,3),dtype='d')
        for polynum in range(numpolys):
            firstidx=polysurf.vertexidx_indices[polynum]
            numpoints=polysurf.numvertices[polynum]
            centroid = polysurf.refpoints[polynum,:]
            coordvals = (polysurf.vertices[polysurf.vertexidx[firstidx:(firstidx+numpoints)],:]-centroid.reshape(1,3)).T # coordvals is the coordinates relative to centroid, 3 x numpoints
            To2D = polysurf.inplanemats[polynum,:,:]
            coordvals2d = np.dot(To2D,coordvals) # 2 rows by numpoints cols... in 2D basis relative to centroid
            texcoordvals = self.texcoord[self.texcoordidx[firstidx:(firstidx+numpoints)],:].T # texcoordvals is the texture coordinates, 2 rows by numpoints cols... # Note that textures are in range 0...1 by convention
            # Assemble the least-squares system: two rows per vertex
            # (one for tex, one for tey) as in the comment block above.
            TexXformMtx=np.zeros((2*numpoints,6),dtype='d')
            TexXformMtx[:(2*numpoints):2,0]=coordvals2d[0,:] # assign 'x' elements
            TexXformMtx[:(2*numpoints):2,1]=coordvals2d[1,:] # assign 'y' elements
            TexXformMtx[:(2*numpoints):2,2]=1  # assign '1' entries
            TexXformMtx[1:(2*numpoints):2,3]=coordvals2d[0,:] # assign 'x' elements
            TexXformMtx[1:(2*numpoints):2,4]=coordvals2d[1,:] # assign 'y' elements
            TexXformMtx[1:(2*numpoints):2,5]=1  # assign '1' entries
            TexCoordVec=np.zeros((2*numpoints),dtype='d')
            TexCoordVec[:(2*numpoints):2] = texcoordvals[0,:]  # assign tex
            TexCoordVec[1:(2*numpoints):2] = texcoordvals[1,:]  # assign tey
            (AijVals,residuals,rank,lstsq_s) = np.linalg.lstsq(TexXformMtx,TexCoordVec,rcond=-1)
            AijMat=AijVals.reshape(2,3) # reshape to 2x3
            AijMatExt = np.concatenate((AijMat,np.array((0.0,0.0,1.0),dtype='d').reshape(1,3)),axis=0) # Add 0.0, 0.0, 1.0 row to bottom of matrix
            # NOTE: Possible bug: This matrix inversion (next line) will
            # fail if the polygon has zero area in texture space due to
            # (for example) limited precision in writing down the
            # texture coordinates in the data file.
            #
            # Not sure what to do in this case...
            AijMatInv=np.linalg.inv(AijMatExt)
            # Assign AijMat
            self.inplane2texcoords[polynum,:,:]=AijMat
            self.texcoords2inplane[polynum,:,:]=AijMatInv
            pass
        pass
def _evaluate_curvature(self,polysurf,polynum,u,v):
    """Evaluate the surface curvature within polygon # polynum at
    (u,v) texture coordinates, with (u,v) in the [0...1] range.

    Returns a 2x2 curvature matrix expressed in the texture-coordinate
    frame of this facet (units of texcoords), or a 2x2 matrix of NaN if
    any vertex of the polygon is missing curvature data.  The result is
    a distance-weighted average of the per-vertex curvature matrices,
    symmetrized.

    ... C accelerated version available."""
    if polynum >= polysurf.vertexidx_indices.shape[0]:
        # This polynum corresponds to a redundant texture; map it
        # back to the underlying geometric polygon number
        polysurf_polynum=self.texcoordredundant_polystartpolynum[polynum]
        pass
    else:
        polysurf_polynum=polynum
        pass
    To2D=polysurf.inplanemats[polysurf_polynum,:,:] # To2D is 2x3; projects 3D (centroid-relative) vectors into the facet's in-plane 2D basis
    #AijMat=self.inplane2texcoords[polynum,:,:]
    AijMatInv=self.texcoords2inplane[polynum,:,:]
    # Note Capital UV represent the texture parameterization
    # of the in-plane 3D space of this facet.
    TexUVExt = np.inner(AijMatInv,np.array((u,v,1.0)))
    TexUVExt /= TexUVExt[2] # normalize inhomogeneous coordinates
    # These coordinates of this (u,v) of this facet are relative to its
    # centroid, and are in terms of the basis vectors in To2D
    TexUV = TexUVExt[:2]
    # Get 3D coordinates relative to centroid
    Tex3D = np.inner(To2D.T,TexUV)
    # Need to evaluate 3D vertex coords, relative to centroid;
    # use them to weight the vertex curvatures
    # according to distance from our point.
    centroid = polysurf.refpoints[polysurf_polynum,:] # Centroid in 3d coords
    firstidx=polysurf.vertexidx_indices[polysurf_polynum]
    numpoints=polysurf.numvertices[polysurf_polynum]
    # Check to see if we have curvatures at all vertices:
    if np.isnan(polysurf.principal_curvatures[polysurf.vertexidx[firstidx:(firstidx+numpoints)],0]).any():
        # abort if we are missing a curvature
        return np.array(((np.NaN,np.NaN),(np.NaN,np.NaN)),dtype='d')
    # For this facet, the 3D coords of the vertices are
    coordvals = (polysurf.vertices[polysurf.vertexidx[firstidx:(firstidx+numpoints)],:]-centroid.reshape(1,3)).T # coordvals is the coordinates relative to centroid, 3 x numpoints
    # Now coordvals is 3 x numvertices, coordinates of the vertices
    # relative to centroid; Tex3D is a 3 vector, coordinates of our
    # (u,v) location relative to centroid.
    #
    # Perform weighted average: each vertex is weighted by the inverse
    # of its distance from our point (eps avoids division by zero when
    # the point coincides with a vertex)
    dists = vecnorm(Tex3D.reshape(3,1) - coordvals,axis=0)
    eps = np.max(dists)/10000.0 # small number, so we don't divide by 0
    rawweights=1.0/(dists+eps)
    totalweights=np.sum(rawweights)
    weights=rawweights/totalweights
    # 2D (in-plane) coords of the curvature_tangent_axes
    CTA_2D = np.inner(To2D,polysurf.curvature_tangent_axes[polysurf.vertexidx[firstidx:(firstidx+numpoints)],:,:]).transpose(1,0,2) # Transpose to keep broadcast axis to the left. Pre-transpose axes lengths are: 2 (2D axes) by # of vertices by 2 (principal curvature)
    # CTA_2D axes: # of vertices by 2 (2D axes) by 2 (principal curvature)
    # Normalize curvature_tangent_axes (should be unit length)
    CTA_2D /= vecnormkeepshape(CTA_2D,1) # Axis is axis 1 after the transpose above
    # Construct per-vertex curvature matrices V*K*V',
    # broadcasting over which vertex
    curvmatrices=np.einsum('...ij,...j,...jk->...ik', CTA_2D,polysurf.principal_curvatures[polysurf.vertexidx[firstidx:(firstidx+numpoints)],:],CTA_2D.transpose(0,2,1)) # result is # of vertices by 2x2 curvature matrix
    # Weighting of vertices relative to our point (u,v)
    weightedcurvmatrices = weights.reshape(numpoints,1,1)*curvmatrices
    # meancurvmatrix (weighted average)
    meancurvmatrix = weightedcurvmatrices.sum(axis=0)
    # meancurvmatrix is a 2x2 which should be close to symmetric
    asymmetry = meancurvmatrix[1,0]-meancurvmatrix[0,1]
    if abs(asymmetry) > 0.1*np.linalg.norm(meancurvmatrix):
        sys.stderr.write("_evaluate_curvature: WARNING Large asymmetry in mean curvature matrix at (u,v) = (%g,%g). Matrix = %s\n" % (u,v,str(meancurvmatrix)))
        pass
    # correct asymmetry: replace both off-diagonal entries by their mean
    meancurvmatrix[1,0] -= asymmetry/2.0
    meancurvmatrix[0,1] += asymmetry/2.0
    # (historical commented-out eigen-decomposition into principal
    # curvatures / tangent axes removed; see version control history)
    #
    # meancurvmatrix is in the polysurf.inplanemats (i.e. To2D)
    # orthonormal basis, with units of meters.
    # We want to return 2D vectors in the AijMat (i.e. self.inplane2texcoords)
    # frame with units of texcoords, i.e. AijMat * meancurv * inv(AijMat).
    #
    # NOTE: AijMat is in inhomogeneous coordinates
    # ... meancurvmatrix represents vectors, not coords,
    # so use only the first two rows/columns of each mapping
    return np.dot(self.inplane2texcoords[polynum,:,:2],np.dot(meancurvmatrix,self.texcoords2inplane[polynum,:2,:2]))
# For testing, run x3d_add_curvature.py <_UV.x3d> <.brep> <_UV_curvature.x3d>
# then curvmat=obj.implpart.surfaces[0].intrinsicparameterization.interpolate_curvature(obj.implpart.surfaces[0],100,100)
# pl.imshow(curvmat[::-1,:,0,0]) # Gives curvature along X
# pl.imshow(curvmat[::-1,:,1,1]) # Gives curvature along Y
# pl.imshow(curvmat[::-1,:,0,1]) # Gives curvature along XY
def interpolate_curvature(self,polysurf,ny,nx):
    """Sample the surface curvature on an ny (rows, v) by nx (cols, u)
    pixel grid spanning texture coordinates 0..1.

    Returns an (ny, nx, 2, 2) float32 array of 2x2 curvature matrices;
    entries are NaN for pixels that fall outside every polygon."""
    if self.boxes is None:
        # lazily construct the box search tree used for point location
        self.buildboxes(polysurf)
        pass
    curvmat = np.empty((ny,nx,2,2),dtype='f')
    # Track the most recently matched polygon; adjacent pixels usually
    # land in the same polygon, so it is tried first as a candidate.
    poly = None
    for row in range(ny):
        # pixel centers: v = (vcoord_pixels + 0.5)/ny, in [0,1]
        v = (row+0.5)/ny
        for col in range(nx):
            # u = (ucoord_pixels + 0.5)/nx, in [0,1]
            u = (col+0.5)/nx
            # Locate the polygon containing (u,v)
            if use_pstp_accel:
                poly = _identify_polynum_uv(polysurf.vertexidx_indices,
                                            polysurf.numvertices,
                                            self.texcoordidx,
                                            self.texcoordredundant_polystartindexes,
                                            self.texcoordredundant_polystartpolynum,
                                            self.texcoordredundant_texcoordidx,
                                            self.texcoord,
                                            self.boxes,
                                            self.boxpolys,
                                            self.boxcoords,
                                            u,v,candidate_polynum=poly)
                pass
            else:
                poly = self._identify_polynum_uv(polysurf,u,v,candidate_polynum=poly)
                pass
            if poly is None:
                # pixel lies outside all polygons
                curvmat[row,col,:,:] = np.NaN
                continue
            # Evaluate the 2x2 curvature matrix at this pixel
            if use_pstp_accel: # use Cython accelerated version
                curvmat[row,col,:,:] = _evaluate_curvature(polysurf.vertexidx_indices,
                                                           polysurf.numvertices,
                                                           polysurf.vertexidx,
                                                           polysurf.vertices,
                                                           polysurf.refpoints,
                                                           self.texcoordredundant_polystartpolynum,
                                                           polysurf.inplanemats,
                                                           self.inplane2texcoords,
                                                           self.texcoords2inplane,
                                                           polysurf.principal_curvatures,
                                                           polysurf.curvature_tangent_axes,
                                                           poly,u,v)
                pass
            else:
                curvmat[row,col,:,:] = self._evaluate_curvature(polysurf,poly,u,v)
                pass
            pass
        pass
    return curvmat
def _evaluate_stepsize(self,polysurf,polynum,nv,nu):
    """Return (dRdu, dRdv): the physical (in-plane, meters) distances
    corresponding to one parameterization pixel in u and in v within
    polygon # polynum, for an nu x nv pixel grid spanning texcoords 0..1.

    self.texcoords2inplane[polynum] is the (extended, inhomogeneous)
    inverse of the inplane2texcoords mapping:
        [ dX/du  dX/dv  Xoffs ]
        [ dY/du  dY/dv  Yoffs ]
        [   0      0      1   ]
    where (X,Y) is the polygon's arbitrary in-plane 2D frame in
    physical units.  So the physical step for one u pixel is the norm
    of the first column (rows 0..1) divided by nu, and likewise the
    second column divided by nv for one v pixel."""
    jacobian = self.texcoords2inplane[polynum,:2,:2]  # [[dX/du, dX/dv],[dY/du, dY/dv]]
    dRdu = np.linalg.norm(jacobian[:,0]/nu)
    dRdv = np.linalg.norm(jacobian[:,1]/nv)
    return (dRdu,dRdv)
def interpolate_stepsizes(self,polysurf,ny,nx):
    """Sample the physical per-pixel step sizes (du, dv) on an
    ny (rows, v) by nx (cols, u) pixel grid covering texcoords 0..1.

    Returns an (ny, nx, 2) float32 array of (dRdu, dRdv); entries are
    NaN where the pixel lies outside every polygon."""
    if self.boxes is None:
        # lazily construct the box search tree used for point location
        self.buildboxes(polysurf)
        pass
    stepsizemat = np.empty((ny,nx,2),dtype='f')
    # most recently matched polygon, tried first for the next pixel
    poly = None
    for row in range(ny):
        # pixel centers: v = (vcoord_pixels + 0.5)/ny, in [0,1]
        v = (row+0.5)/ny
        for col in range(nx):
            u = (col+0.5)/nx
            # Locate the polygon containing (u,v)
            if use_pstp_accel:
                poly = _identify_polynum_uv(polysurf.vertexidx_indices,
                                            polysurf.numvertices,
                                            self.texcoordidx,
                                            self.texcoordredundant_polystartindexes,
                                            self.texcoordredundant_polystartpolynum,
                                            self.texcoordredundant_texcoordidx,
                                            self.texcoord,
                                            self.boxes,
                                            self.boxpolys,
                                            self.boxcoords,
                                            u,v,candidate_polynum=poly)
                pass
            else:
                poly = self._identify_polynum_uv(polysurf,u,v,candidate_polynum=poly)
                pass
            if poly is None:
                # pixel lies outside all polygons
                stepsizemat[row,col,:] = np.NaN
                continue
            stepsizemat[row,col,:] = self._evaluate_stepsize(polysurf,poly,ny,nx)
            pass
        pass
    return stepsizemat
def _test_polynum_uv(self,polysurf,u,v,polynum):
    """Return whether the [0,1]-range texture point (u,v) lies inside
    polygon # polynum.  (An accelerated Cython version exists.)"""
    num_real_polys = polysurf.vertexidx_indices.shape[0]
    if polynum < num_real_polys:
        # ordinary polygon: texture-coordinate indices parallel the
        # geometric vertex index table
        start = polysurf.vertexidx_indices[polynum]
        count = polysurf.numvertices[polynum]
        vertexidxs = self.texcoordidx[start:(start+count)]
        pass
    else:
        # redundant-texture polygon: look up via the redundant tables
        redundant_idx = polynum - num_real_polys
        start = self.texcoordredundant_polystartindexes[redundant_idx]
        geom_polynum = self.texcoordredundant_polystartpolynum[redundant_idx]
        vertexidxs = self.texcoordredundant_texcoordidx[start:(start+polysurf.numvertices[geom_polynum])]
        pass
    # shift so the query point is at the origin, then run the 2D test
    shifted = self.texcoord[vertexidxs,:] - np.array(((u,v),),dtype='d')
    return point_in_polygon_2d(shifted)
def _identify_polynum_uv(self,polysurf,u,v,candidate_polynum=None):
    """Find the polygon containing the [0,1]-range texture point (u,v).

    If candidate_polynum is given it is tested first (cheap win for
    spatially coherent queries).  Otherwise the box tree is searched
    depth-first for boxes containing (u,v), and each polygon listed in
    those boxes is tested in turn.  Returns the polygon number, or
    None if no polygon contains the point.
    NOTE: Accelerated Cython version available."""
    if self.boxes is None:
        self.buildboxes(polysurf)
        pass
    # Fast path: check the caller-provided polygon first
    if candidate_polynum is not None and self._test_polynum_uv(polysurf,u,v,candidate_polynum):
        return candidate_polynum
    # Depth-first search of the box tree, rooted at box 0
    candidatepolys = []
    pending = [0]
    while pending:
        curbox = pending.pop()
        inside = (u >= self.boxcoords[curbox,0] and
                  v >= self.boxcoords[curbox,1] and
                  u <= self.boxcoords[curbox,2] and
                  v <= self.boxcoords[curbox,3])
        if not inside:
            continue
        children = self.boxes[curbox,:4]
        if children[0] >= 0:
            # interior node: descend into its children
            pending.extend(children)
            pass
        if self.boxes[curbox,4] >= 0:
            # this box carries a polygon list stored in self.boxpolys
            firstpoly = self.boxes[curbox,4]
            npolys = self.boxes[curbox,5]
            candidatepolys.extend(self.boxpolys[firstpoly:(firstpoly+npolys)])
            pass
        pass
    for polynum in candidatepolys:
        if self._test_polynum_uv(polysurf,u,v,polynum):
            return polynum
        pass
    # If we got this far, the search failed
    return None
def linelength_avgcurvature_meshbased(self,
                                      polysurf,
                                      curvmats,
                                      stepsizearray,
                                      param_lowerleft_meaningfulunits_u,
                                      param_lowerleft_meaningfulunits_v,
                                      param_stepsize_u,
                                      param_stepsize_v,
                                      du,dv,u1,v1,u2,v2):
    """Delegate to the accelerated _linelength_avgcurvature_meshbased.

    param_lowerleft_meaningfulunits_u/_v give the (u,v) of the
    lower-left corner of the lower-left element of curvmats and
    stepsizearray; param_stepsize_u/_v are the step sizes of that
    array.

    NOTE: Unlike the non-meshbased routines, the meshbased linelength
    routines take scaled (i.e. meaningful-units) u,v coordinates."""
    return _linelength_avgcurvature_meshbased(curvmats,
                                              stepsizearray,
                                              param_lowerleft_meaningfulunits_u,
                                              param_lowerleft_meaningfulunits_v,
                                              param_stepsize_u,
                                              param_stepsize_v,
                                              du, dv,
                                              u1, v1, u2, v2)
def linelength_avgcurvature_mirroredbox_meshbased(self,
                                                  polysurf,
                                                  curvmats,
                                                  stepsizearray,
                                                  param_lowerleft_meaningfulunits_u,
                                                  param_lowerleft_meaningfulunits_v,
                                                  param_stepsize_u,
                                                  param_stepsize_v,
                                                  boxu1,boxv1,boxu2,boxv2,du,dv,u1,v1,u2,v2):
    """Delegate to the accelerated
    _linelength_avgcurvature_mirroredbox_meshbased.

    param_lowerleft_meaningfulunits_u/_v give the (u,v) of the
    lower-left corner of the lower-left element of curvmats and
    stepsizearray; param_stepsize_u/_v are the step sizes of that
    array.  (boxu1,boxv1)-(boxu2,boxv2) bound the mirrored box.

    NOTE: Unlike the non-meshbased routines, the meshbased linelength
    routines take scaled (i.e. meaningful-units) u,v coordinates."""
    return _linelength_avgcurvature_mirroredbox_meshbased(curvmats,
                                                          stepsizearray,
                                                          param_lowerleft_meaningfulunits_u,
                                                          param_lowerleft_meaningfulunits_v,
                                                          param_stepsize_u,
                                                          param_stepsize_v,
                                                          boxu1, boxv1,
                                                          boxu2, boxv2,
                                                          du, dv,
                                                          u1, v1, u2, v2)
def linelength_avgcurvature(self,polysurf,du,dv,u1,v1,u2,v2):
    """Delegate to the accelerated _linelength_avgcurvature.
    NOTE: This takes meaningfully scaled u,v coordinates."""
    if self.boxes is None:
        self.buildboxes(polysurf)
        pass
    # convert meaningful units -> [0,1] texcoord range
    off_u = self.lowerleft_meaningfulunits[0]
    off_v = self.lowerleft_meaningfulunits[1]
    scale_u = self.meaningfulunits_per_texcoord[0]
    scale_v = self.meaningfulunits_per_texcoord[1]
    return _linelength_avgcurvature(
        polysurf.vertexidx_indices,
        polysurf.numvertices,
        polysurf.vertexidx,
        polysurf.vertices,
        polysurf.refpoints,
        self.texcoordidx,
        self.texcoordredundant_polystartindexes,
        self.texcoordredundant_polystartpolynum,
        self.texcoordredundant_texcoordidx,
        self.texcoord,
        self.boxes,
        self.boxpolys,
        self.boxcoords,
        polysurf.inplanemats,
        self.inplane2texcoords,
        self.texcoords2inplane,
        polysurf.principal_curvatures,
        polysurf.curvature_tangent_axes,
        du/scale_u,
        dv/scale_v,
        (u1-off_u)/scale_u,
        (v1-off_v)/scale_v,
        (u2-off_u)/scale_u,
        (v2-off_v)/scale_v)
def eval_xyz_uv(self,polysurf,u,v):
    """Evaluate (x,y,z) surface coordinates for the given (u,v).
    NOTE: This takes meaningfully scaled u,v coordinates.
    Raises ValueError if (u,v) lies outside every polygon.
    See also scope_coin3d.cpp:Get3DCoordsGivenTexCoords()."""
    if self.boxes is None:
        self.buildboxes(polysurf)
        pass
    # Convert meaningfully scaled (u,v) to the [0,1] texcoord range
    tex_u = (u - self.lowerleft_meaningfulunits[0]) / self.meaningfulunits_per_texcoord[0]
    tex_v = (v - self.lowerleft_meaningfulunits[1]) / self.meaningfulunits_per_texcoord[1]
    # Locate the polygon containing the point
    if use_pstp_accel:
        polynum = _identify_polynum_uv(polysurf.vertexidx_indices,
                                       polysurf.numvertices,
                                       self.texcoordidx,
                                       self.texcoordredundant_polystartindexes,
                                       self.texcoordredundant_polystartpolynum,
                                       self.texcoordredundant_texcoordidx,
                                       self.texcoord,
                                       self.boxes,
                                       self.boxpolys,
                                       self.boxcoords,
                                       tex_u,tex_v)
        pass
    else:
        polynum = self._identify_polynum_uv(polysurf,tex_u,tex_v)
        pass
    if polynum is None:
        raise ValueError("polygonalsurface_intrinsicparameterization: polygonalsurface_texcoordparameterization.eval_uv_xyz(): failed to find polygon for (U,V) point\n")
    # Delegate the in-polygon evaluation (takes the scaled coordinates)
    return polysurf._eval_xyz_polygonuv(self,polynum,u,v)
def __init__(self,**kwargs):
    """Assign the given keyword arguments onto pre-declared
    attributes; reject any keyword that is not already an attribute
    of the class."""
    for (name, value) in kwargs.items():
        if not hasattr(self, name):
            raise AttributeError("Unknown attribute %s" % (name))
        setattr(self, name, value)
        pass
    pass
# intrinsicparameterizationparams
# are ( (lowerleft_meaningfulunits_u, lowerleft_meaningfulunits_v), meaningfulunits_per_texcoord_u, meaningfulunits_per_texcoord_v )
@classmethod
def new(cls,polysurface,texcoord,texcoordidx,appearance,cadpartparams=None):
    """Construct a parameterization from raw texture coordinates.

    appearance is a spatialnde.cadpart.appearance.vrml_x3d_appearance
    subclass.  The (lowerleft, units-per-texcoord) scaling is looked up
    in cadpartparams["UV_ScalingParamsByTexURL"], keyed by the
    appearance's texture_url, when available; otherwise an unscaled
    (0,0)-origin, unit-scale parameterization is used."""
    if cadpartparams is not None and "UV_ScalingParamsByTexURL" in cadpartparams and hasattr(appearance,"texture_url"):
        scalingparams = cadpartparams["UV_ScalingParamsByTexURL"]
        (lowerleft_meaningfulunits,meaningfulunits_per_texcoord) = scalingparams[appearance.texture_url]
        pass
    elif cadpartparams is not None and "UV_ScalingParamsBySurfaceNum" in cadpartparams:
        # Obsolete structure; believed safe to remove.  The assert
        # fires if this path is ever actually taken.
        assert(0) # (if not would be constantly crashing here)
        scalingparams = cadpartparams["UV_ScalingParamsBySurfaceNum"]
        (lowerleft_meaningfulunits,meaningfulunits_per_texcoord) = scalingparams[str(polysurface.surfaceid)]
        pass
    else:
        # no scaling info: identity scaling
        lowerleft_meaningfulunits=(0.0,0.0)
        meaningfulunits_per_texcoord=(1.0,1.0)
        pass
    return cls(texcoord=texcoord,
               texcoordidx=texcoordidx,
               lowerleft_meaningfulunits=lowerleft_meaningfulunits,
               meaningfulunits_per_texcoord=meaningfulunits_per_texcoord,
               cadpartparams=cadpartparams)
    # intrinsicparameterizationparams=intrinsicparameterizationparams)
pass
| StarcoderdataPython |
3312989 | from ._pixel_classifier import PixelClassifier
import numpy as np
class ObjectClassifier():
    """Random-forest classifier for labelled objects (not pixels).

    Wraps a PixelClassifier: per-object feature vectors (intensity,
    size, shape, neighborhood) are computed with pyclesperanto, the
    forest is trained on them, and the trained forest is serialized as
    OpenCL code so prediction runs as a generated OpenCL kernel."""

    def __init__(self, opencl_filename="temp_object_classifier.cl", max_depth: int = 2, num_ensembles: int = 10):
        """
        A RandomForestClassifier for label classification that converts itself to OpenCL after training.

        Parameters
        ----------
        opencl_filename : str (optional)
            file the trained forest is written to as OpenCL code
        max_depth : int (optional)
        num_ensembles : int (optional)

        See Also
        --------
        https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
        """
        # prefix used when storing the feature list inside the OpenCL file
        self.FEATURE_SPECIFICATION_KEY = "feature_specification = "
        # delegate training/prediction to a PixelClassifier, but write
        # the generated OpenCL under this class' name
        self.classifier = PixelClassifier(opencl_filename=opencl_filename, max_depth=max_depth,
                                          num_ensembles=num_ensembles, overwrite_classname=self.__class__.__name__)

    def train(self, features: str, labels, sparse_annotation, image=None, continue_training : bool = False):
        """
        Train a classifier that can differentiate label types according to intensity, size and shape.

        Parameters
        ----------
        features: Space separated string containing those:
            'area',
            'min_intensity', 'max_intensity', 'sum_intensity', 'mean_intensity', 'standard_deviation_intensity',
            'mass_center_x', 'mass_center_y', 'mass_center_z',
            'centroid_x', 'centroid_y', 'centroid_z',
            'max_distance_to_centroid', 'max_distance_to_mass_center',
            'mean_max_distance_to_centroid_ratio', 'mean_max_distance_to_mass_center_ratio',
            'touching_neighbor_count', 'average_distance_of_touching_neighbors', 'average_distance_of_n_nearest_neighbors'
        labels: label image
        sparse_annotation: label image with annotations. If one label is annotated with multiple classes, the
            maximum is considered while training.
        image: intensity image (optional)
        continue_training: bool (optional)
            passed through to the underlying PixelClassifier.train
        """
        # commas are tolerated in the user-supplied list; normalize to spaces
        self.classifier.feature_specification = features.replace(",", " ")
        selected_features, gt = self._make_features(self.classifier.feature_specification , labels, sparse_annotation, image)
        self.classifier.train(selected_features, gt, continue_training=continue_training)
        # persist the trained forest as OpenCL code under this class' name
        self.classifier.to_opencl_file(self.classifier.opencl_file, overwrite_classname=self.__class__.__name__)

    def predict(self, labels, image=None):
        """Predict object class from label image and optional intensity image.

        Parameters
        ----------
        labels: label image
        image: intensity image

        Returns
        -------
        label image representing a semantic segmentation: pixel intensities represent label class
        """
        import pyclesperanto_prototype as cle
        labels = cle.push(labels)
        # no annotation here: the returned gt is None and unused
        selected_features, gt = self._make_features(self.classifier.feature_specification, labels, None, image)
        # NOTE(review): create_like is handed a shape tuple rather than an
        # image; presumably pyclesperanto accepts that — confirm against API
        output = cle.create_like(selected_features[0].shape)
        # bind one input buffer per feature vector, then run the generated
        # OpenCL "predict" kernel
        parameters = {}
        for i, f in enumerate(selected_features):
            parameters['in' + str(i)] = cle.push(f)
        parameters['out'] = output
        cle.execute(None, self.classifier.opencl_file, "predict", selected_features[0].shape, parameters)
        # set background to zero
        cle.set_column(output, 0, 0)
        # map the per-label predicted class back onto the label image
        result_labels = cle.create_labels_like(labels)
        cle.replace_intensities(labels, output, result_labels)
        return result_labels

    def _make_features(self, features: str, labels, annotation=None, image=None):
        """Determine requested features. If annotation is provided, also a ground-truth vector will be returned.

        Parameters
        ----------
        features: str
            see train() function for explanation
        labels: ndimage (int)
        annotation: ndimage(int), optional
            sparse annotation label image
        image: ndimage, optional
            intensity image for e.g. mean intensity calculation

        Returns
        -------
        table: dict of vectors
        gt: vector
        """
        import pyclesperanto_prototype as cle
        pixel_statistics = cle.statistics_of_background_and_labelled_pixels(image, labels)

        if annotation is not None:
            # determine ground truth; if one label carries multiple
            # annotated classes, the maximum class wins
            annotation_statistics = cle.statistics_of_background_and_labelled_pixels(annotation, labels)
            classification_gt = annotation_statistics['max_intensity']
            # background carries no class
            classification_gt[0] = 0
        else:
            classification_gt = None

        feature_list = features.split(' ')

        table, gt = self._select_features(pixel_statistics, feature_list, labels, classification_gt)

        return table, gt

    def _make_touch_matrix(self, labels, touch_matrix = None):
        """Generate an adjacency graph matrix representing touching object.

        Parameters
        ----------
        labels: ndimage
        touch_matrix: ndimage, optional
            will be returned in case not none (acts as a cache)

        Returns
        -------
        touch_matrix, see [1]

        See Also
        --------
        ..[1] https://github.com/clEsperanto/pyclesperanto_prototype/blob/master/demo/neighbors/mesh_between_touching_neighbors.ipynb
        """
        if touch_matrix is None:
            import pyclesperanto_prototype as cle
            touch_matrix = cle.generate_touch_matrix(labels)
        return touch_matrix

    def _make_distance_matrix(self, labels, distance_matrix = None):
        """Generate a matrix with (n+1)*(n+1) elements for a label image with n labels. In this matrix, element (x,y)
        corresponds to the centroid distance between label x and label y.

        Parameters
        ----------
        labels: ndimage(int)
        distance_matrix: ndimage, optional
            will be returned in case not none (acts as a cache)

        Returns
        -------
        distance_matrix, see [1]

        ..[1] https://github.com/clEsperanto/pyclesperanto_prototype/blob/master/demo/neighbors/mesh_with_distances.ipynb
        """
        if distance_matrix is None:
            import pyclesperanto_prototype as cle
            centroids = cle.centroids_of_labels(labels)
            distance_matrix = cle.generate_distance_matrix(centroids, centroids)
            # zero out distances to/from the background entry
            cle.set_column(distance_matrix, 0, 0)
            cle.set_row(distance_matrix, 0, 0)
        return distance_matrix

    def _select_features(self, all_features, features_to_select, labels, ground_truth=None):
        """Provided with all easy-to-determine features, select requested features and calculate the more complicated
        features.

        Parameters
        ----------
        all_features: dict[vector]
        features_to_select: list[str]
        labels: ndimage
        ground_truth: ndimage, optional

        Returns
        -------
        result:list[vector]
            list of vectors corresponding to the requested features. The vectors are shaped (n) for n labels that
            were annotated. Labels without annotation are removed from the vectors.
            Background measurements are removed, because background cannot be classified.
        ground_truth: ndimage
            selected elements of provided ground truth where it's not 0
        """
        import pyclesperanto_prototype as cle

        result = []

        # touch/distance matrices are computed lazily and shared by features
        touch_matrix = None
        distance_matrix = None

        mask = None
        if ground_truth is not None:
            # only annotated labels take part in training
            mask = ground_truth > 0

        for key in features_to_select:
            vector = None
            if key in all_features.keys():
                # NOTE(review): assumes all_features[key] is a plain list, so
                # "[0] + ..." prepends a background entry; if it were an
                # ndarray this would add 0 elementwise instead — confirm the
                # type returned by statistics_of_background_and_labelled_pixels
                vector = np.asarray([0] + all_features[key])
            elif key == "touching_neighbor_count":
                touch_matrix = self._make_touch_matrix(labels, touch_matrix)
                vector = cle.pull(cle.count_touching_neighbors(touch_matrix))[0]
            elif key == "average_distance_of_touching_neighbors":
                touch_matrix = self._make_touch_matrix(labels, touch_matrix)
                distance_matrix = self._make_distance_matrix(labels, distance_matrix)
                vector = cle.pull(cle.average_distance_of_touching_neighbors(distance_matrix, touch_matrix))[0]
            elif key.startswith("average_distance_of_n_nearest_neighbors="):
                # parameterized feature, e.g. "average_distance_of_n_nearest_neighbors=6"
                n = int(key.replace("average_distance_of_n_nearest_neighbors=", ""))
                distance_matrix = self._make_distance_matrix(labels, distance_matrix)
                vector = cle.pull(cle.average_distance_of_n_shortest_distances(distance_matrix, n=n))[0]

            if vector is not None:
                if ground_truth is not None:
                    # keep only entries for annotated labels
                    result.append(np.asarray([vector[mask]]))
                else:
                    result.append(np.asarray([vector]))
                # print(key, result[-1])

        if ground_truth is not None:
            return result, ground_truth[mask]
        else:
            return result, None

    def statistics(self):
        """Return the statistics of the underlying pixel classifier."""
        return self.classifier.statistics()
11351725 | #!/bin/python3
###############################################################################
# Copyright 2020 UChicago Argonne, LLC.
# (c.f. AUTHORS, LICENSE)
# SPDX-License-Identifier: BSD-3-Clause
##############################################################################
import argparse
import re
import os
from starbind import MPI, OpenMPI, MPICH, OpenMP, Ptrace
from tmap.topology import Topology
from tmap.permutation import Permutation
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--method',
                    choices = ['OpenMPI', 'MPICH', 'OpenMP', 'ptrace', 'auto'],
                    default = 'auto',
                    help='''
OpenMPI, MPICH: starbind is used inside a mpi command
line to bind the local process. Depending on weather
it is MPICH or OpenMPI, binding is made via the
command line or via the interception of subprocesses
and their environment variables 'MPI_LOCALRANKID',
'OMPI_COMM_WORLD_LOCAL_RANK'.
Only MPI processes will be bound. Starbind can be used
inside MPI command line or outside and it will use
mpirun.
------------------------------------------------------
OpenMP: starbind is used to launch an OpenMP
application and bind its threads. Envrionment variable
OMP_PLACES of child application will be set reflecting
the resource list. If more threads than locations are
used, then threads are continuously packed on
locations processing units from first location to the
last one.
------------------------------------------------------
ptrace: starbind is used to launch an application and
bind child threads and processes. ptrace uses ptrace()
system call to catch vfork(), fork(), clone() syscalls
and bind child processes to the next resource in
resource list. Bindings are applied in a round-robin
order of resources and will cycle if more processes
than available resources need to be bound.
------------------------------------------------------
auto: starbind has to figure out one of above methods.
MPI is tried first. If target environment variables
are not set and no MPI library was found in the binary
, then OpenMP is tried. OpenMP will look into
executable linked dynamic libraries with ldd and will
try to match a name with openmp, omp. If no match is
found, ptrace is used.''')
parser.add_argument('-t', '--type',
                    help="Topology object type used to bind threads",
                    default='Core', type=str)
parser.add_argument('-s', '--singlify',
                    help="Restrict topology to one processing unit per binding object.",
                    default=False, action='store_true')
parser.add_argument('-p', '--permutation',
                    help="A permutation id to reorder topology objects.",
                    default=0, type=int)
parser.add_argument('-c', '--command',
                    help="The command line to run",
                    required=True, type=str)
parser.add_argument('-n', '--num',
                    help="The number of threads (OpenMP) or processes (MPI) to set.",
                    default=None, type=int)
parser.add_argument('-v', '--verbose',
                    help="Print resource permutattion",
                    default=False, action='store_true')
args = parser.parse_args()

# Get the list of topology resources matching the requested object type
topology = Topology(structure=False)
resources = [ n for n in topology if hasattr(n, 'type') and args.type.lower() in n.type.lower() ]
if args.singlify:
    # keep only the first processing unit of each binding object
    for r in resources:
        r.PUs = [ r.PUs[0] ]
if len(resources) == 0:
    raise ValueError('Invalid topology type {}. Valid types are: {}'\
                     .format(args.type, set(n.type for n in topology)))

# Apply permutation on resources
permutation = Permutation(len(resources), args.permutation)
resources = [ resources[i] for i in permutation.elements ]

# first word of the command line is the executable to inspect
# (renamed from `bin`, which shadowed the builtin)
executable = args.command.split()[0]

# Assign bind method.
# BUG FIX: this used to be a lone `if args.method == 'OpenMPI'` followed by a
# *separate* `if/elif` chain, so an explicit --method OpenMPI fell through
# into the auto-detection branches below and could be silently overridden
# (e.g. by OpenMP or Ptrace).  The whole selection is now one chain:
# explicit methods first, then auto-detection.
if args.method == 'OpenMPI':
    binder = OpenMPI(resources, num_procs=args.num)
elif args.method == 'MPICH':
    binder = MPICH(resources, num_procs=args.num)
elif args.method == 'OpenMP':
    binder = OpenMP(resources, num_threads=args.num)
elif args.method == 'ptrace':
    binder = Ptrace(resources)
elif MPI.is_MPI_process() or MPI.is_MPI_application(executable):
    # auto: MPI first ...
    binder = OpenMPI(resources, num_procs=args.num)
elif OpenMP.is_OpenMP_application(executable):
    # ... then OpenMP ...
    binder = OpenMP(resources, num_threads=args.num)
else:
    # ... ptrace as the fallback
    binder = Ptrace(resources)

# Print info (only once under MPI: rank 0)
if args.verbose and (not MPI.is_MPI_process() or MPI.get_rank() == 0):
    print('Bind to: {!s}'.format(resources))
    print('Bind with {}'.format(binder.__class__.__name__))

# Run command
binder.run(args.command)
| StarcoderdataPython |
232032 | <filename>src/__init__.py
from .view import *
from .service import *
| StarcoderdataPython |
1944084 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 28 19:59:24 2019
@author: avelinojaver
"""
import pandas as pd
from pathlib import Path
import matplotlib.pylab as plt
import numpy as np
#from py4j.java_gateway import JavaGateway
from openslide import OpenSlide
#%%
if __name__ == '__main__':
    # Visual sanity check: overlay per-slide detection CSVs (produced by
    # the UNet model named in the directory) on the whole-slide images.
    pred_dir = Path.home() / 'workspace/localization/predictions/histology_detection/bladder-cancer-tils_unet_l1smooth_20190406_000552_adam_lr0.00064_wd0.0_batch64'
    #slides_dir = '/Users/avelinojaver/OneDrive - Nexus365/bladder_cancer_tils/raw'
    slides_dir = Path.home() / 'workspace/localization/data/histology_bladder/bladder_cancer_tils/raw/'

    pred_dir = Path(pred_dir)
    slides_dir = Path(slides_dir)

    pred_files = pred_dir.glob('*.csv')
    # map slide filename stem -> slide path so predictions match by stem
    slide_files_d = {x.stem:x for x in slides_dir.rglob('*.svs')}
    #slide_files_d = {x.stem:x for x in slides_dir.rglob('101TUR1-HE.m*')}
    #%%
    for pred_file in pred_files:
        # predictions CSV: one row per detection, columns (label, x, y);
        # coordinates are in level-0 (full-resolution) pixels
        coords = pd.read_csv(pred_file)
        coords.columns = ['label', 'x', 'y']

        slide_file = slide_files_d[pred_file.stem]
        reader = OpenSlide(str(slide_file))

        # use the lowest-resolution pyramid level for a whole-slide overview
        level_n = reader.level_count - 1
        level_dims = reader.level_dimensions[level_n]
        downsample = reader.level_downsamples[level_n]
        corner = (0,0)

        if False:
            # debug toggle: inspect a full-resolution crop instead of the
            # downsampled overview (pick one of the corners below)
            level_n = 0
            level_dims = reader.level_dimensions[level_n]
            downsample = reader.level_downsamples[level_n]

            corner = (18880,11200)
            #corner = (24000,11200)
            #corner = (550*32, 1180*32)
            #corner = (2850*32, 200*32)
            #corner = (1725*32, 80*32)
            level_dims = (3200, 3200)

        img = reader.read_region(corner, level_n, level_dims)

        # rescale detection coordinates to this pyramid level and keep
        # only the ones inside the viewed region
        coords_r = coords.copy()
        coords_r[['x', 'y']] /= downsample

        good = (coords_r['x'] >= corner[0]) & (coords_r['x'] <= corner[0] + level_dims[0])
        good &= (coords_r['y'] >= corner[1]) & (coords_r['y'] <= corner[1] + level_dims[1])
        coords_r = coords_r[good]

        img = np.array(img)
        #%%
        # plot detections over the image, color-coded by predicted class
        plt.figure()
        plt.imshow(img)

        colors = {'L' : 'r', 'E' : 'g'}
        for lab, dat in coords_r.groupby('label'):
            # shift into region-local pixel coordinates
            x = dat['x'] - corner[0]
            y = dat['y'] - corner[1]
            plt.plot(x, y, '.', color = colors[lab])
6692385 | <gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) IBM Corporation 2020
# Apache License, Version 2.0 (see https://opensource.org/licenses/Apache-2.0)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
from typing import Optional, Dict
from urllib.parse import urlencode, quote
import re
import traceback
try:
import requests
except ImportError:
requests = None
REQUESTS_IMP_ERR = traceback.format_exc()
try:
import xmltodict
except ImportError:
xmltodict = None
XMLTODICT_IMP_ERR = traceback.format_exc()
# Names of the module arguments / internal parameter keys.
CMCI_HOST = 'cmci_host'
CMCI_PORT = 'cmci_port'
CMCI_USER = 'cmci_user'
# Fixed: this held the placeholder '<PASSWORD>'; every other CMCI_* constant
# holds the lower-case argument name it labels, so restore 'cmci_password'.
CMCI_PASSWORD = 'cmci_password'
CMCI_CERT = 'cmci_cert'
CMCI_KEY = 'cmci_key'
SECURITY_TYPE = 'security_type'
CONTEXT = 'context'
SCOPE = 'scope'
RESOURCES = 'resources'
TYPE = 'type'
ATTRIBUTES = 'attributes'
PARAMETERS = 'parameters'
NAME = 'name'
VALUE = 'value'
FILTER = 'filter'
COMPLEX_FILTER = 'complex_filter'
# Shared Ansible option-spec fragments used by the complex-filter arguments.
attribute_dict = dict(
    type='str',
    required=False
)
# Filter comparison operator; both symbolic and CMCI mnemonic spellings are
# accepted (see _convert_filter_operator for the mapping applied later).
operator_dict = dict(
    type='str',
    required=False,
    default='EQ',
    # Fixed: the original listed '>=' twice and omitted '>', so the symbolic
    # greater-than form could never be chosen even though 'GT' is supported.
    choices=['<', '<=', '=', '>', '>=', '¬=', '==', '!=', 'EQ', 'NE', 'LT', 'LE', 'GE', 'GT', 'IS']
)
value_dict = dict(
    type='str',
    required=False
)
def _nest_and_or_dicts():
    """Build the option spec for an 'and'/'or' filter list nested four levels deep."""
    nested = _create_and_or_dicts()
    for _ in range(3):
        nested = _create_and_or_dicts(nested)
    return _get_and_or_dict(nested)
def _create_and_or_dicts(children=None):
    """Wrap *children* in both an 'and' and an 'or' list option spec."""
    inner = children or {}
    return {key: _get_and_or_dict(inner) for key in ('and', 'or')}
def _get_and_or_dict(nested=None):
    """Return the option spec for one nesting level of a complex filter list."""
    extra = nested or {}
    options = {
        'attribute': attribute_dict,
        'operator': operator_dict,
        'value': value_dict,
    }
    options.update(extra)
    return {
        'type': 'list',
        'required': False,
        'elements': 'dict',
        'options': options,
        'required_together': [('attribute', 'value')]
    }
# Option spec for the 'parameters' list: each entry is a name plus an
# optional value (flag-style parameters such as CSD carry no value).
PARAMETERS_ARGUMENT = {
    PARAMETERS: {
        'type': 'list',
        'required': False,
        'elements': 'dict',
        'options': {
            NAME: {
                'type': 'str',
                'required': True
            },
            # Value is not required for flag-type parameters like CSD
            VALUE: {
                'type': 'str'
            }
        }
    }
}
# Option spec for the 'resources' dict: a simple equality filter, an
# arbitrarily nested complex filter, and optional request parameters.
RESOURCES_ARGUMENT = {
    RESOURCES: {
        'type': 'dict',
        'required': False,
        'options': {
            FILTER: {
                'type': 'dict',
                'required': False
            },
            COMPLEX_FILTER: {
                'type': 'dict',
                'required': False,
                'options': {
                    'attribute': attribute_dict,
                    'operator': operator_dict,
                    'value': value_dict,
                    'and': _nest_and_or_dicts(),
                    'or': _nest_and_or_dicts()
                },
                # Fixed: this was the *string* "[('attribute', 'value')]",
                # which Ansible cannot interpret; it must be a real list of
                # tuples, matching the usage in _get_and_or_dict.
                'required_together': [('attribute', 'value')]
            },
            **PARAMETERS_ARGUMENT
        }
    }
}
# Option spec for free-form resource 'attributes'.
ATTRIBUTES_ARGUMENT = {
    ATTRIBUTES: {
        'type': 'dict',
        'required': False
    }
}
class AnsibleCMCIModule(object):
    """Base class for CMCI (CICS management client interface) Ansible modules.

    Handles argument parsing and validation, HTTP session setup, request URL
    and XML body construction, and translation of the CMCI XML response into
    Ansible module results.  Subclasses override init_body() and
    init_request_params() and drive everything through main().
    """

    def __init__(self, method):
        """Prepare the request; *method* is the HTTP verb (e.g. 'GET')."""
        self._module = AnsibleModule(argument_spec=self.init_argument_spec())  # type: AnsibleModule
        self.result = dict(changed=False)  # type: dict
        if not requests:
            self._fail_tb(missing_required_lib('requests'), REQUESTS_IMP_ERR)
        if not xmltodict:
            # Fixed: report the library that actually failed to import
            # ('xmltodict'), not 'encoder'.
            self._fail_tb(missing_required_lib('xmltodict'), XMLTODICT_IMP_ERR)
        self._method = method  # type: str
        self._p = self.init_p()  # type: dict
        self._session = self.init_session()  # type: requests.Session
        self._url = self.init_url()  # type: str
        # full_document=False suppresses the xml prolog, which CMCI doesn't like
        body_dict = self.init_body()
        # Fixed: reuse body_dict rather than calling init_body() a second
        # time, which duplicated any side effects in subclass overrides.
        self._body = xmltodict.unparse(body_dict, full_document=False) if body_dict else None  # type: str
        request_params = self.init_request_params()
        if request_params:
            self._url = self._url + \
                "?" + \
                urlencode(requests.utils.to_key_val_list(request_params), quote_via=quote)
        # Echo the request back in the result for debuggability.
        result_request = {
            'url': self._url,
            'method': self._method,
            'body': self._body
        }
        self.result['request'] = result_request

    def init_argument_spec(self):  # type: () -> Dict
        """Return the base Ansible argument spec shared by all CMCI modules."""
        return {
            CMCI_HOST: {
                'required': True,
                'type': 'str'
            },
            CMCI_PORT: {
                'required': True,
                'type': int
            },
            CMCI_USER: {
                'type': 'str',
                'fallback': (env_fallback, ['CMCI_USER'])
            },
            CMCI_PASSWORD: {
                'type': 'str',
                'no_log': True,
                # Fixed: fall back to the CMCI_PASSWORD environment variable,
                # mirroring the other credential options (was a redacted
                # placeholder).
                'fallback': (env_fallback, ['CMCI_PASSWORD'])
            },
            CMCI_CERT: {
                'type': 'str',
                'no_log': True,
                'fallback': (env_fallback, ['CMCI_CERT'])
            },
            CMCI_KEY: {
                'type': 'str',
                'no_log': True,
                'fallback': (env_fallback, ['CMCI_KEY'])
            },
            SECURITY_TYPE: {
                'type': 'str',
                'default': 'none',
                'choices': ['none', 'basic', 'certificate']
            },
            CONTEXT: {
                'required': True,
                'type': 'str'
            },
            SCOPE: {
                'type': 'str'
            },
            TYPE: {
                'type': 'str',
                'required': True
            }
        }

    def main(self):
        """Execute the prepared request and exit the module with the result."""
        response = self._do_request()  # type: Dict
        self.handle_response(response)
        self._module.exit_json(**self.result)

    def init_p(self):
        """Validate host/port/context/scope parameters and return the params dict."""
        self.validate(
            CMCI_HOST,
            '^((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.)'
            '{3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|((([a-zA-Z0-9]|[a-zA-Z0-9]'
            '[a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*'
            '[A-Za-z0-9]))$',
            'an IP address or host name.'
        )
        port = self._module.params.get(CMCI_PORT)
        if port < 0 or port > 65535:
            self._fail(
                'Parameter "{0}" with value "{1}" was not valid. Expected a port number 0-65535.'
                .format(CMCI_PORT, str(port)))
        self.validate(
            CONTEXT,
            '^([A-Za-z0-9]{1,8})$',
            'a CPSM context name. CPSM context names are max 8 characters. Valid characters are A-Z a-z 0-9.'
        )
        self.validate(
            SCOPE,
            '^([A-Za-z0-9]{1,8})$',
            'a CPSM scope name. CPSM scope names are max 8 characters. Valid characters are A-Z a-z 0-9.'
        )
        return self._module.params

    def validate(self, name, regex, message):  # type: (str, str, str) -> None
        """Fail the module unless param *name* (if set) fully matches *regex*."""
        value = self._module.params.get(name)
        if value:
            pattern = re.compile(regex)
            if not pattern.fullmatch(value):
                self._fail('Parameter "{0}" with value "{1}" was not valid. Expected {2}'.format(name, value, message))

    def init_body(self):  # type: () -> Optional[Dict]
        """Hook: subclasses return the request body as a dict, or None for no body."""
        return None

    def handle_response(self, response_dict):  # type: (Dict) -> None
        """Copy CPSM result codes and records into self.result; fail on non-OK."""
        try:
            response_node = response_dict['response']
            self.result['connect_version'] = response_node.get('@connect_version')
            result_summary = response_node['resultsummary']
            cpsm_response_code = int(result_summary['@api_response1'])
            cpsm_response = result_summary['@api_response1_alt']
            cpsm_reason = result_summary['@api_response2_alt']
            cpsm_reason_code = int(result_summary['@api_response2'])
            self.result['cpsm_response'] = cpsm_response
            self.result['cpsm_response_code'] = cpsm_response_code
            self.result['cpsm_reason'] = cpsm_reason
            self.result['cpsm_reason_code'] = cpsm_reason_code
            if '@recordcount' in result_summary:
                self.result['record_count'] = int(result_summary['@recordcount'])
            if '@successcount' in result_summary:
                self.result['success_count'] = int(result_summary['@successcount'])
            # TODO: maybe only allow this bit in results that will definitely include records
            if 'records' in response_node:
                records_node = response_node['records']
                resource_type = self._p[TYPE].lower()
                if resource_type in records_node:
                    records = records_node[resource_type]
                    # Copy records in result, stripping the '@' xmltodict
                    # prefixes from attribute names
                    self.result['records'] = \
                        [
                            {k[1:]: v for k, v in record.items()}
                            for record in records
                        ]
            # Non-OK CPSM responses fail the module.
            # NOTE(review): when cpsm_reason is empty, the *response* code is
            # substituted in the reason slot -- confirm that is intended.
            if cpsm_response_code != 1024:
                self._fail('CMCI request failed with response "{0}" reason "{1}"'.format(
                    cpsm_response, cpsm_reason if cpsm_reason else cpsm_response_code
                ))
            if self._method != 'GET':
                self.result['changed'] = True
        except KeyError as e:
            # CMCI response parse error
            self._fail('Could not parse CMCI response: missing node "{0}"'.format(e.args[0]))

    def init_url(self):  # type: () -> str
        """Build the base CMCI URL from host, port, resource type, context and scope."""
        t = self._p.get(TYPE).lower()
        security_type = self._p.get(SECURITY_TYPE)
        if security_type == 'none':
            scheme = 'http://'
        else:
            scheme = 'https://'
        url = scheme + self._p.get(CMCI_HOST) + ':' + str(self._p.get(CMCI_PORT)) + '/CICSSystemManagement/' \
            + t + '/' + self._p.get(CONTEXT) + '/'
        if self._p.get(SCOPE):
            url = url + self._p.get(SCOPE)
        return url

    def init_request_params(self):  # type: () -> Optional[Dict[str, str]]
        """Hook: subclasses return the query-string parameters, or None."""
        return None

    def get_resources_request_params(self):  # type: () -> Dict[str, str]
        """Translate the 'resources' option into CRITERIA/PARAMETER query params."""
        # get, delete, put will all need CRITERIA{}
        request_params = {}
        resources = self._p.get(RESOURCES)
        if resources:
            f = resources.get(FILTER)
            if f:
                # AND basic filters together, and use the = operator for each one
                filter_string = ''
                for key, value in f.items():
                    filter_string = _append_filter_string(filter_string, key + '=' + '\'' + value + '\'',
                                                          joiner=' AND ')
                request_params['CRITERIA'] = filter_string
            complex_filter = resources.get(COMPLEX_FILTER)
            if complex_filter:
                # Note: overwrites any CRITERIA built from the simple filter.
                complex_filter_string = ''
                and_item = complex_filter['and']
                or_item = complex_filter['or']
                attribute_item = complex_filter['attribute']
                # Exactly one of 'and'/'or'/'attribute' may be present at the top level.
                if ((and_item is not None and or_item is not None) or
                        (or_item is not None and attribute_item is not None) or
                        (attribute_item is not None and and_item is not None)):
                    self._fail("complex_filter can only have 'and', 'or', or 'attribute' dictionaries at the top level")
                if and_item is not None:
                    complex_filter_string = _get_filter(and_item, complex_filter_string, ' AND ')
                if or_item is not None:
                    complex_filter_string = _get_filter(or_item, complex_filter_string, ' OR ')
                if attribute_item is not None:
                    operator = _convert_filter_operator(complex_filter['operator'])
                    value = complex_filter['value']
                    complex_filter_string = _append_filter_string(complex_filter_string,
                                                                  attribute_item + operator + '\'' + value + '\'')
                request_params['CRITERIA'] = complex_filter_string
            parameters = resources.get(PARAMETERS)
            if parameters:
                # Flag-style parameters have no value: "NAME"; others: "NAME(VALUE)".
                def mapper(p):
                    return p.get('name') + '(' + p.get('value') + ')' if p.get('value') else p.get('name')
                request_params['PARAMETER'] = ' '.join(map(mapper, parameters))
        return request_params

    def init_session(self):  # type: () -> requests.Session
        """Create the HTTP session, applying certificate or basic credentials."""
        session = requests.Session()
        security_type = self._p.get(SECURITY_TYPE)
        if security_type == 'certificate':
            cmci_cert = self._p.get(CMCI_CERT)
            cmci_key = self._p.get(CMCI_KEY)
            if cmci_cert is not None and cmci_cert.strip() != '' and cmci_key is not None and cmci_key.strip() != '':
                session.cert = cmci_cert.strip(), cmci_key.strip()
            else:
                self._fail('HTTP setup error: cmci_cert/cmci_key are required ')
        # TODO: there's no clear distinction between unauthenticated HTTPS and authenticated HTTP
        if security_type == 'basic':
            cmci_user = self._p.get(CMCI_USER)
            cmci_password = self._p.get(CMCI_PASSWORD)
            if cmci_user is not None and cmci_user.strip() != '' and \
                    cmci_password is not None and cmci_password.strip() != '':
                session.auth = cmci_user.strip(), cmci_password.strip()
            else:
                self._fail('HTTP setup error: cmci_user/cmci_password are required')
        return session  # type: requests.Session

    def _do_request(self):  # type: () -> Dict
        """Send the request and return the CMCI XML response parsed into a dict."""
        try:
            response = self._session.request(
                self._method,
                self._url,
                verify=False,
                timeout=30,
                data=self._body
            )
            self.result['http_status_code'] = response.status_code
            self.result['http_status'] = response.reason if response.reason else str(response.status_code)
            # TODO: in OK responses CPSM sometimes returns error feedback information.
            # TODO: in non-OK responses CPSM returns a body with error information
            #  Can recreate this by supplying a malformed body with a create request.
            #  We should surface this error information somehow. Not sure what content type we get.
            if response.status_code != 200:
                # TODO: <?xml version=\"1.0\" encoding=\"UTF-8\"?> \r\n<error message_id=\"DFHWU4007\" connect_version=
                #  \"0560\">\r\n\t<title> 400 CICS management client interface HTTP Error</title>\r\n\t<short>An error
                #  has occurred in the CICS management client interface. The request cannot be processed.</short>\r\n\t
                #  <full> The body of the HTTP request was not specified correctly.</full> \r\n</error>
                #  This sort of thing's probably relevant for warning count errors too
                self._fail('CMCI request returned non-OK status: {0}'.format(self.result.get('http_status')))
            # Try and parse the XML response body into a dict
            content_type = response.headers.get('content-type')
            # Content type header may include the encoding. Just look at the first segment if so
            content_type = content_type.split(';')[0]
            if content_type != 'application/xml':
                self._fail('CMCI request returned a non application/xml content type: {0}'.format(content_type))
            # Missing content
            if not response.content:
                self._fail('CMCI response did not contain any data')
            namespaces = {
                'http://www.ibm.com/xmlns/prod/CICS/smw2int': None,
                'http://www.w3.org/2001/XMLSchema-instance': None
            }  # namespace information to strip during parsing
            r = xmltodict.parse(
                response.content,
                process_namespaces=True,
                namespaces=namespaces,
                # Make sure we always return a list for the resource node
                force_list=(self._p.get(TYPE).lower(),)
            )
            return r
        except requests.exceptions.RequestException as e:
            # Unwrap connection errors to surface the root cause in the message.
            cause = e
            if isinstance(cause, requests.exceptions.ConnectionError):
                cause = cause.args[0]
            if isinstance(cause, requests.packages.urllib3.exceptions.MaxRetryError):
                cause = cause.reason
            # Can't use self._fail_tb here, because we'll end up with tb for RequestException, not the cause
            # which invalidates our attempts to clean up the message
            self._fail('Error performing CMCI request: {0}'.format(cause))
        except xmltodict.expat.ExpatError as e:
            # Content couldn't be parsed as XML
            self._fail_tb(
                'CMCI response XML document could not be successfully parsed: {0}'.format(e),
                traceback.format_exc()
            )

    def append_parameters(self, element):
        """Append the 'parameters' option to *element* as <parameter .../> nodes."""
        # Parameters are <parameter name="pname" value="pvalue" />
        parameters = self._p.get(PARAMETERS)
        if parameters:
            ps = []
            for p in parameters:
                np = {'@name': p.get('name')}
                value = p.get('value')
                if value:
                    np['@value'] = value
                ps.append(np)
            element['parameter'] = ps

    def append_attributes(self, element):
        """Append the 'attributes' option to *element* as an <attributes .../> node."""
        # Attributes are <attributes name="value" name2="value2"/>
        attributes = self._p.get(ATTRIBUTES)
        if attributes:
            element['attributes'] = {'@' + key: value for key, value in attributes.items()}

    def _fail(self, msg):  # type: (str) -> None
        """Fail the module with *msg*, keeping any result gathered so far."""
        self._module.fail_json(msg=msg, **self.result)

    def _fail_tb(self, msg, tb):  # type: (str, str) -> None
        """Fail the module with *msg* plus a formatted traceback."""
        self._module.fail_json(msg=msg, exception=tb, **self.result)
def _convert_filter_operator(operator):
if operator in ['<', 'LT']:
return '<'
if operator in ['<=', 'LE']:
return '<='
if operator in ['=', 'EQ']:
return '='
if operator in ['>=', 'GE']:
return '>='
if operator in ['>', 'GT']:
return '>'
if operator in ['¬=', '!=', 'NE']:
return '¬='
if operator in ['==', 'IS']:
return '=='
def _get_filter(list_of_filters, complex_filter_string, joiner):
    """Recursively render a list of filter dicts into a CMCI CRITERIA string.

    Each entry may carry nested 'and'/'or' lists (rendered recursively with
    ' AND '/' OR ' joiners) and/or an 'attribute'/'operator'/'value' triple.
    Every rendered clause is parenthesised and appended to
    *complex_filter_string* with *joiner* via _append_filter_string.
    Returns the accumulated string.
    """
    for i in list_of_filters:
        and_item = i.get('and')
        or_item = i.get('or')
        attribute = i.get('attribute')
        if and_item is not None:
            # Nested AND group: render its children joined by ' AND '.
            and_filter_string = _get_filter(and_item, '', ' AND ')
            complex_filter_string = _append_filter_string(complex_filter_string, and_filter_string, joiner)
        if or_item is not None:
            # Nested OR group: render its children joined by ' OR '.
            or_filter_string = _get_filter(or_item, '', ' OR ')
            complex_filter_string = _append_filter_string(complex_filter_string, or_filter_string, joiner)
        if attribute is not None:
            # Leaf clause: ATTRIBUTE<op>'value'.
            operator = _convert_filter_operator(i['operator'])
            value = i['value']
            attribute_filter_string = attribute + operator + '\'' + value + '\''
            complex_filter_string = _append_filter_string(complex_filter_string, attribute_filter_string, joiner)
    return complex_filter_string
def _append_filter_string(existing_filter_string, filter_string_to_append, joiner=' AND '):
# joiner is ' AND ' or ' OR '
if not existing_filter_string:
# if the existing string is empty, just return the new filter string
return '(' + filter_string_to_append + ')'
if existing_filter_string.endswith(joiner):
return existing_filter_string + '(' + filter_string_to_append + ')'
else:
return existing_filter_string + joiner + '(' + filter_string_to_append + ')'
| StarcoderdataPython |
3346763 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Python Snake CLI Game
This is an implementation of the classic snake game.
This game has been ported from the PHP version of the game:
https://github.com/joakimwinum/php-snake
Author: <NAME> <<EMAIL>>
License: https://opensource.org/licenses/mit-license.html MIT License
Version: 1.0.0
Repository: https://github.com/joakimwinum/python-snake
"""
import os
import copy
import random
import select
import subprocess
import sys
import time
# functions
# create functions
def create_player():
    """Return the initial three-segment snake (head first), facing east."""
    sprite = '&'
    return [[x, 12, sprite] for x in (40, 39, 38)]
def create_frame_wall(width=None, height=None):
    """Return wall cells [x, y, '#'] forming the board's outer frame.

    Generalized: dimensions default to the module-level board size
    (board_x, board_y), so the original zero-argument call sites are
    unchanged, but the board can now be sized explicitly.
    """
    if width is None:
        width = board_x
    if height is None:
        height = board_y
    wall_sprite = '#'
    # A cell is part of the frame when it lies on any edge of the board.
    return [[i, j, wall_sprite]
            for i in range(width)
            for j in range(height)
            if i == 0 or i == width - 1 or j == 0 or j == height - 1]
def create_background(width=None, height=None):
    """Return background cells [x, y, ' '] covering the whole board.

    Generalized: dimensions default to the module-level board size
    (board_x, board_y), keeping the original zero-argument callers working.
    """
    if width is None:
        width = board_x
    if height is None:
        height = board_y
    background_sprite = ' '
    # Same column-major order (x outer, y inner) as the original loops.
    return [[i, j, background_sprite] for i in range(width) for j in range(height)]
def draw(entities):
    """Render *entities* onto the cached board and return it as a string.

    Each entity is either a single cell [x, y, sprite] or a list of such
    cells.  Cells are keyed into a dict by the string "x,y".  When the
    module-level cache_draw flag is set, the rendered board replaces the
    cached blank board (used once to bake in background + walls).
    """
    global blank_board
    global cache_draw
    board = ''
    # create a blank board array ('%' placeholder per cell) if not already done
    if '0,0' not in blank_board:
        j = 0
        while j < board_y:
            i = 0
            while i < board_x:
                blank_board[''+str(i)+','+str(j)+''] = '%'
                i += 1
            j += 1
    # shallow copy: cells are strings, so sharing is safe
    board_array = copy.copy(blank_board)
    # draw all the entities onto the board array
    for entity in entities:
        # Probe whether the entity is a list of cells or a single cell.
        entity_is_multidimensional = True
        try:
            entity[0][0]
        except (NameError, KeyError, TypeError):
            entity_is_multidimensional = False
        if entity_is_multidimensional:
            for coo in entity:
                board_array[''+str(coo[0])+','+str(coo[1])+''] = coo[2]
        else:
            board_array[''+str(entity[0])+','+str(entity[1])+''] = entity[2]
    # store the current entities in the draw cache (one-shot, cleared here)
    if cache_draw:
        blank_board = board_array
        cache_draw = False
    # convert the board array to string
    j = 0
    while j < board_y:
        i = 0
        while i < board_x:
            # add margin on the left side of the board
            if i == 0:
                board += left_margin
            # draw the board array
            board += board_array[''+str(i)+','+str(j)+'']
            # add a line break on end of each line
            if i == board_x - 1:
                board += '\n'
            i += 1
        j += 1
    board = board.rstrip()
    # return the board string
    return board
# other functions
def player_function(player):
    """Advance the snake one step, steering from the global *key* press.

    Updates the global snake_len, works out which way the head currently
    faces from its first two segments, and only accepts a turn that is
    perpendicular to the current heading (so the snake can't reverse into
    itself).  Returns the (possibly new) segment list.

    NOTE(review): when no key was read (key is None) the snake does not move
    at all this frame -- confirm whether it should keep moving in
    head_direction instead, as an unrecognized key press does.
    """
    global snake_len
    snake_len = len(player)
    head_direction = None
    north = 'north'
    south = 'south'
    west = 'west'
    east = 'east'
    # determine the direction of the players head
    if player[0][0] > player[1][0]:
        head_direction = east
    elif player[0][0] < player[1][0]:
        head_direction = west
    elif player[0][1] < player[1][1]:
        head_direction = north
    elif player[0][1] > player[1][1]:
        head_direction = south
    # move player with or without input
    if key is not None:
        if key == 'w' and (head_direction == west or head_direction == east):
            player = move_player(player, north)
        elif key == 'a' and (head_direction == north or head_direction == south):
            player = move_player(player, west)
        elif key == 's' and (head_direction == west or head_direction == east):
            player = move_player(player, south)
        elif key == 'd' and (head_direction == north or head_direction == south):
            player = move_player(player, east)
        else:
            player = move_player(player, head_direction)
    return player
def move_player(player, direction):
    """Move the snake one cell in *direction*; return the new segment list.

    The tail segment is removed unless increase_player() reports pending
    growth (then the snake lengthens by one).  Also switches the engine fps
    between its horizontal and vertical rates, because terminal cells are
    taller than they are wide.
    """
    north = 'north'
    south = 'south'
    west = 'west'
    east = 'east'
    # take off the tail
    if not increase_player():
        player.pop()
    # create the new head
    new_head = copy.copy(player[0])
    # move the new head
    if direction == north:
        new_head[1] -= 1
        engine.fps = engine.fps_vertical
    elif direction == west:
        new_head[0] -= 1
        engine.fps = engine.fps_horizontal
    elif direction == south:
        new_head[1] += 1
        engine.fps = engine.fps_vertical
    elif direction == east:
        new_head[0] += 1
        engine.fps = engine.fps_horizontal
    # add the new head on
    player = [new_head] + player
    return player
def increase_player(set_variable=False, int_variable=None):
    """Track snake growth; return True while the snake should keep growing.

    set_variable=True records the current length as the growth baseline
    (called when a dot is eaten); int_variable overrides the global
    increase_interval (how many segments to grow per dot).  Also refreshes
    the global score (current length minus the starting length of 3).
    """
    global snake_old_len
    global do_increase_player
    global increase_interval
    global score
    # score is derived from length: 3 segments at game start means score 0
    score = snake_len - 3
    if int_variable is not None:
        increase_interval = int_variable
    if set_variable:
        snake_old_len = snake_len
    # grow until the snake is increase_interval longer than the baseline
    if snake_len >= snake_old_len + increase_interval:
        do_increase_player = False
    else:
        do_increase_player = True
    return do_increase_player
def collision_testing(player, point_dot):
    """Check head collisions: wall or own tail end the game; a dot grows the snake."""
    global update_point_dot
    # players head
    player_head = player[0]
    # check for collision with wall
    for wall in frame_wall:
        if wall[0] == player_head[0] and wall[1] == player_head[1]:
            game_over()
    # player eats point dot
    if player_head[0] == point_dot[0] and player_head[1] == point_dot[1]:
        increase_player(True)
        update_point_dot = True
    # check if player head touches its own tail
    # (note: the loop variable shadows the module-level 'key', which is
    # harmless here because it is reassigned by the game loop each frame)
    for key, part in enumerate(player, start=0):
        if key == 0:
            # skip head
            continue
        if player_head[0] == part[0] and player_head[1] == part[1]:
            game_over()
def game_over():
    """Print the final board with a Game Over banner, restore the TTY and exit."""
    screen = left_margin
    screen += global_game_title
    screen += ' Game Over '
    # zero-pad the score to a fixed width of 4
    pad_score = str(score).rjust(4, '0')
    right_pointing_triangle_sprite = '>'
    screen += right_pointing_triangle_sprite
    screen += ' Score: '+pad_score
    if dev_mode:
        screen += ' [DevMode]'
    screen += '\n'
    # append the last rendered board (module-level 'board')
    screen += board
    # clear the screen
    engine.clear_screen()
    # print the screen
    print(screen)
    # restore the terminal before terminating the process
    engine.reset_tty()
    exit()
def generate_new_coordinates(point_dot, player):
    """Return random in-board [x, y] avoiding the snake and the old dot.

    Coordinates are drawn inside the frame walls (1..board-2).  Retries
    until a free cell is found, so it assumes the board is not full.
    """
    while True:
        # get random coordinates
        rand_x = random.randint(1, (board_x-2))
        rand_y = random.randint(1, (board_y-2))
        # check if the player already is on the new coordinates
        do_continue = False
        for part in player:
            if part[0] == rand_x and part[1] == rand_y:
                do_continue = True
                break
        if do_continue:
            continue
        # check if the new coordinates are in the old place of the point dot
        if point_dot is not None and point_dot[0] == rand_x and point_dot[1] == rand_y:
            continue
        break
    return [rand_x, rand_y]
def point_dot_function(player, point_dot=None):
    """Return the current point dot, (re)placing it when needed.

    Creates the first dot when *point_dot* is None, and moves it to a fresh
    free cell when the global update_point_dot flag was set (dot eaten or
    dev-mode key pressed).
    """
    global update_point_dot
    point_dot_sprite = '*'
    # generate the first dot
    if point_dot is None:
        coordinates = generate_new_coordinates(None, player)
        point_dot = [coordinates[0], coordinates[1], point_dot_sprite]
    # update the dot
    if update_point_dot:
        coordinates = generate_new_coordinates(point_dot, player)
        point_dot = [coordinates[0], coordinates[1], point_dot_sprite]
        update_point_dot = False
    return point_dot
def print_stats():
    """Return the one-line status header (title, score; extra stats in dev mode)."""
    # add left margin
    string = left_margin
    # display game name
    string += global_game_title
    # display score, zero-padded to 4 digits
    pad_score = str(score).rjust(4, '0')
    string += ' points: '+pad_score
    # display extra stats in dev mode
    if dev_mode:
        # display snake length
        pad_snake_len = str(snake_len).rjust(4, '0')
        string += ', length: '+pad_snake_len
        # display total number of frames
        pad_frames = str(total_number_of_frames).rjust(4, '0')
        string += ', total frames: '+pad_frames
        # display frames per second
        pad_fps = str(engine.fps).rjust(4, '0')
        string += ', FPS: '+pad_fps
    # add new line
    string += '\n'
    return string
def key_actions():
    """Handle non-movement keys: quit, dev-mode toggle and dev-mode cheats.

    Reads the global *key* set by the game loop.  't' enables dev mode;
    the remaining shortcuts only work once dev mode is active.
    """
    global dev_mode
    global update_point_dot
    # do actions upon certain key presses
    if key is not None:
        if key == 'q':
            # exit the game
            engine.reset_tty()
            exit()
        elif key == 'i':
            # increase length (grow 40 segments)
            if dev_mode:
                increase_player(True, 40)
        elif key == 'u':
            # increase length (grow 140 segments)
            if dev_mode:
                increase_player(True, 140)
        elif key == 'r':
            # reset length increase
            if dev_mode:
                increase_player(False, 1)
        elif key == 'e':
            # increase fps to a fixed fast rate
            if dev_mode:
                engine.fps_horizontal = 25
                engine.fps_vertical = int(engine.fps_horizontal*engine.fps_factor)
        elif key == 'y':
            # increase fps by 1 fps
            if dev_mode:
                engine.fps_horizontal = engine.fps_horizontal + 1
                engine.fps_vertical = int(engine.fps_horizontal*engine.fps_factor)
        elif key == 'n':
            # replace point dot
            if dev_mode:
                update_point_dot = True
        elif key == 't':
            # activate dev mode
            if not dev_mode:
                dev_mode = True
class PythonGameEngine:
    """Minimal terminal game engine: screen clearing, FPS syncing and
    non-blocking key reading.

    Call reset_tty() before exiting if read_key_press() has been used,
    otherwise the terminal is left in non-canonical mode.

    Ported from PHP-Snake's engine; see the module docstring for provenance.
    """

    def __init__(self):
        # All state lives in plain public attributes.  The original version
        # wrapped each one in a pass-through property pair that added no
        # behaviour; attribute access by callers (engine.fps = ...) is
        # unchanged.
        self.game_time_beginning = None  # microtime_now() at last loop start
        self.game_time_end = None        # microtime_now() at last loop end
        self.fps = None                  # current target frames per second
        self.fps_horizontal = None       # fps while moving horizontally
        self.fps_vertical = None         # fps while moving vertically
        self.fps_factor = None           # vertical/horizontal speed ratio
        self.os_variable = None          # cached OS family ('windows'/'other')
        self.key_read_timeout = None     # key poll timeout, microseconds
        self.tty_settings = None         # saved stty config, if modified

    @staticmethod
    def microtime_now():
        """Return [sub-second component, unix timestamp] for 'now'.

        NOTE(review): the first element is int(fractional digits)/100, whose
        scale depends on how many fractional digits str(time.time()) yields;
        fps_sync treats it as microseconds -- confirm before reuse.
        """
        microtime = time.time()
        time_variable = str(microtime).split('.')
        timestamp = int(time_variable[0])
        microseconds = int(int(time_variable[1])/100)
        return [microseconds, timestamp]

    def fps_sync(self):
        """Sleep so the loop runs at self.fps; return True if a sleep happened.

        Put this at the end of a game loop to sync with the fps you have
        chosen.  Also adapts key_read_timeout so that key polling consumes
        part of the idle time.
        """
        # get the time from the bottom of the code
        self.game_time_end = self.microtime_now()
        if self.game_time_beginning is not None:
            time_beginning = self.game_time_beginning[0]
        else:
            time_beginning = None
        time_end = self.game_time_end[0]
        # first call: nothing to sync against yet
        if time_beginning is None:
            self.key_read_timeout = 100
            self.game_time_beginning = self.microtime_now()
            return False
        # the loop is taking longer than 1 second
        if self.game_time_end[1] - self.game_time_beginning[1] > 1:
            self.key_read_timeout = 100
            self.game_time_beginning = self.microtime_now()
            return False
        fps = self.fps  # frames per second
        microsecond = 10**6  # 1 second = 1*10^6 microseconds
        # elapsed time within the current second (handles wrap-around)
        if time_end > time_beginning:
            time_variable = time_end - time_beginning
        else:
            time_variable = microsecond + time_end - time_beginning
        if time_variable > microsecond:
            # the code is going too slow, no wait
            self.key_read_timeout = 100
            self.game_time_beginning = self.microtime_now()
            return False
        frames_per_microsecond = int(microsecond/fps)
        pause = frames_per_microsecond - time_variable
        if pause < 0:
            # the code is going too slow, no wait
            self.key_read_timeout = 100
            self.game_time_beginning = self.microtime_now()
            return False
        # actively adjust the key reading timeout
        self.key_read_timeout = int(pause/10)
        # sleep
        time.sleep(pause/microsecond)
        # get the time from the beginning of the code
        self.game_time_beginning = self.microtime_now()
        return True

    def clear_screen(self):
        """Clear the terminal, picking cls/clear per detected OS (cached)."""
        os_variable = self.os_variable
        # check which os the host is running (once)
        if os_variable is None:
            if os.name == 'nt':
                # windows
                self.os_variable = 'windows'
            else:
                # other (linux)
                self.os_variable = 'other'
            os_variable = self.os_variable
        # clear the screen
        if os_variable == 'windows':
            # windows
            os.system('cls')
        else:
            # other (linux)
            os.system('clear')

    def read_key_press(self):
        """Return one typed character, or None if nothing arrives in time.

        Can cause high CPU usage.  The timeout is auto-tuned by fps_sync().
        """
        self.modify_tty()
        timeout = self.key_read_timeout  # microseconds
        microsecond = 10**6  # 1 second = 1*10^6 microseconds
        # set the timeout variable if it has not already been set
        if timeout is None:
            timeout = 200*10**3  # recommended value
            self.key_read_timeout = timeout
        stdin = sys.stdin
        read = [stdin]
        read_timeout = timeout/microsecond  # timeout variable in seconds
        # check if any key is pressed within the timeout period
        rlist, wlist, xlist = select.select(read, [], [], read_timeout)
        if len(rlist) == 0:
            return None
        # return the key pressed
        return stdin.read(1)

    def modify_tty(self):
        """Put the TTY in non-canonical mode (once); save settings for reset."""
        tty_settings = self.tty_settings
        if tty_settings is not None:
            return False
        # save current tty config
        command = ['stty', '-g']
        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output, error = process.communicate()
        tty_settings = output.decode('ascii')
        self.tty_settings = tty_settings
        # change tty to be able to read in characters
        os.system('stty -icanon')
        return True

    def reset_tty(self):
        """Restore the TTY settings saved by modify_tty(); no-op if unchanged."""
        tty_settings = self.tty_settings
        if tty_settings is None:
            return False
        # reset tty back to its original state
        tty_settings = tty_settings.rstrip()
        os.system("stty '"+tty_settings+"'")
        return True
# init
engine = PythonGameEngine()
# settings: horizontal fps and the vertical slowdown factor (terminal cells
# are taller than wide, so vertical movement runs slower to look uniform)
frames_per_second_horizontal = 16
diff_constant = .65
engine.fps_horizontal = frames_per_second_horizontal
engine.fps_factor = diff_constant
engine.fps_vertical = int(engine.fps_horizontal*engine.fps_factor)
engine.fps = engine.fps_horizontal
point_dot = None
snake_sprite = '&'
right_pointing_triangle_sprite = '>'
# global variables
board_x = 80
board_y = 24
score = 0
snake_len = 0
snake_old_len = 0
total_number_of_frames = 0
increase_interval = 1
global_game_title = snake_sprite+' Python Snake '+right_pointing_triangle_sprite
key = None
left_margin = ' '
screen = None
blank_board = {}
do_increase_player = False
update_point_dot = False
dev_mode = False
# game setup (to be run once)
# create the background and frame wall
background = create_background()
frame_wall = create_frame_wall()
# draw the background and frame onto the board and store it in the draw cache
# (cache_draw makes draw() bake these into blank_board so they need not be
# redrawn each frame)
cache_draw = True
draw([
    background,
    frame_wall
])
# create the player
player = create_player()
# game loop (runs until game_over() or the 'q' key exits the process)
while True:
    # add stats to the screen
    screen = print_stats()
    # update the player
    player = player_function(player)
    # update the point dot
    point_dot = point_dot_function(player, point_dot)
    # collision testing
    collision_testing(player, point_dot)
    # draw the board with all the entities on it and add it to the screen
    board = draw([
        point_dot,
        player
    ])
    screen += board
    # clear the screen
    engine.clear_screen()
    # print the screen
    print(screen)
    # take key input (blocks up to the engine's adaptive timeout)
    print(left_margin)
    key = engine.read_key_press()
    # perform key actions
    key_actions()
    # count frames
    total_number_of_frames += 1
    # sync game loop to the saved fps value
    engine.fps_sync()
| StarcoderdataPython |
1647937 |
# Generate a report of all new staged test centers, optionally push to Unverified table.
import requests
import os
import importlib
from helpers import preprocessing_utils, gtc_auth
from dotenv import load_dotenv
import time
import click
import json
import boto3
# Milliseconds per day; used to compute the "since" timestamp for staging queries.
DAY_IN_MILLIS = 60 * 60 * 24 * 1000
# Destination bucket for job-report dumps.
S3_BUCKET = 'staging-gtc-data-batches'
# Pull GTC_API_URL (and any other settings) from a local .env file.
load_dotenv(override=True)
GTC_API_URL = os.getenv('GTC_API_URL')
# Authenticate once at import time; every request below reuses this header.
auth_token = gtc_auth.authenticate_gtc()
headers = {'Authorization': 'Bearer ' + auth_token}
def post_unverified_test_center(test_center_obj):
    """POST one test-center dict to the internal unverified-test-centers API.

    Returns the raw requests.Response (callers print it rather than check it).
    NOTE(review): the payload is passed positionally, i.e. as form-encoded
    ``data=`` — confirm the API does not expect a JSON body (``json=``).
    """
    res = requests.post(GTC_API_URL + "/api/v1/internal/unverified-test-centers/", test_center_obj, headers=headers)
    return res
def get_recent_staged_test_center_rows(from_timestamp):
    """Fetch staging rows created since *from_timestamp* (epoch millis, as str)
    and return them normalized (address formatted via the preprocessing helper)."""
    recent_staged_response = requests.get(GTC_API_URL + "/api/v1/internal/test-centers-staging/fresh?since=" + from_timestamp, headers=headers)
    staged_rows = recent_staged_response.json()
    normalized_rows = [normalize_test_center_row(staged_row) for staged_row in staged_rows]
    return normalized_rows
def get_unverified_test_centers():
    """Return all rows from the unverified-test-centers API, normalized."""
    query_response = requests.get(GTC_API_URL + "/api/v1/internal/unverified-test-centers/", headers=headers)
    rows = query_response.json()
    normalized_rows = [normalize_test_center_row(row) for row in rows]
    return normalized_rows
def get_verified_test_centers():
    """Return all rows from the verified-test-centers API, normalized."""
    query_response = requests.get(GTC_API_URL + "/api/v1/internal/verified-test-centers/", headers=headers)
    rows = query_response.json()
    normalized_rows = [normalize_test_center_row(row) for row in rows]
    return normalized_rows
# All tables have same 'address' field and 'name' field, so normalization of data
# is not currently required.
def normalize_test_center_row(row):
    """Attach a geocoded 'formatted_address_obj' to *row* (mutates and returns it)."""
    # TODO: handle formatting failures by discarding row, log bad rows in report.
    row['formatted_address_obj'] = preprocessing_utils.get_formatted_address(row['address'])
    return row
def format_unverified_test_center_row(decorated_staging_row):
    """Shape one normalized staging row into the Unverified API payload."""
    address = decorated_staging_row['formatted_address_obj']
    coords = address['lat_lng']
    return {
        'full_formatted_address': address['formatted_address'],
        'latitude': coords['lat'],
        'longitude': coords['lng'],
        'google_place_id': address['google_place_id'],
        # No back-link to the staging table is recorded yet.
        'staging_row_id': None,
        'name': decorated_staging_row['name'],
        'phone_number': decorated_staging_row['phone_number'],
        'website': decorated_staging_row['website'],
        'public_description': decorated_staging_row['description'],
        'appointment_required': decorated_staging_row['appointment_required'],
        'doctor_screen_required_beforehand': decorated_staging_row['doctor_screen_required_beforehand'],
        'drive_thru_site': decorated_staging_row['drive_thru_site'],
        'estimated_daily_test_capacity': decorated_staging_row['estimated_daily_test_capacity']
    }
# Check whether two normalized test center rows refer to the same test center
def check_test_center_match(row1, row2):
    """Compare two normalized rows; return (ident_flags, warn_flags).

    ident_flags mark strong identity signals, warn_flags mark weaker
    proximity signals that merit a manual look.
    """
    ident_flags = []
    warn_flags = []
    addr1 = row1['formatted_address_obj']
    addr2 = row2['formatted_address_obj']
    if row1['name'] == row2['name']:
        ident_flags.append('NAME_MATCH')
    if addr1['google_place_id'] == addr2['google_place_id']:
        ident_flags.append('GOOG_PLACEID_MATCH')
    if addr1['formatted_address'] == addr2['formatted_address']:
        ident_flags.append('FORMATTED_ADDR_MATCH')
    if check_test_centers_near(addr1, addr2):
        warn_flags.append('CLOSE_GEO_RANGE')
    return ident_flags, warn_flags
# This is rough math, may need to revise due to Python's round() behavior
# Want to return test centers within 100 meters
# Rough proximity test: coordinates identical after rounding to 3 decimal
# places (~100 m at the equator) count as "near". Rounding-boundary behavior
# is inherited from Python's round().
def check_test_centers_near(row1_formatted_address_obj, row2_formatted_address_obj):
    first = row1_formatted_address_obj['lat_lng']
    second = row2_formatted_address_obj['lat_lng']
    rounded_first = (round(first['lat'], 3), round(first['lng'], 3))
    rounded_second = (round(second['lat'], 3), round(second['lng'], 3))
    return rounded_first == rounded_second
def check_row_against_ver_unver(staged_row, unverified_rows, verified_rows):
    """Annotate *staged_row* (in place) with a 'matches' list against both
    the unverified and verified datasets, then return it."""
    staged_row['matches'] = []
    matches = staged_row['matches']
    for candidate in unverified_rows:
        ident_flags, warn_flags = check_test_center_match(staged_row, candidate)
        if ident_flags or warn_flags:
            matches.append({'ident_flags': ident_flags, 'warn_flags': warn_flags,
                            'type': 'UNVERIFIED', 'unverified_row_id': candidate['id']})
    for candidate in verified_rows:
        ident_flags, warn_flags = check_test_center_match(staged_row, candidate)
        if ident_flags or warn_flags:
            matches.append({'ident_flags': ident_flags, 'warn_flags': warn_flags,
                            'type': 'VERIFIED', 'verified_row_id': candidate['id']})
    return staged_row
def get_mapping_stats(mapped_rows):
    """Summarize match results per row.

    A row counts in exactly one bucket: matched in both datasets, only
    unverified, only verified, or unmatched (candidates for upload).
    """
    ver_unver_match_count = 0
    unverified_match_count = 0
    verified_match_count = 0
    unmatched_rows = []
    for row in mapped_rows:
        match_types = [match['type'] for match in row['matches']]
        has_verified = 'VERIFIED' in match_types
        has_unverified = 'UNVERIFIED' in match_types
        if has_unverified and has_verified:
            ver_unver_match_count += 1
        elif has_unverified:
            unverified_match_count += 1
        elif has_verified:
            verified_match_count += 1
        else:
            unmatched_rows.append(row)
    return {
        'ver_unver_match_count': ver_unver_match_count,
        'unverified_match_count': unverified_match_count,
        'verified_match_count': verified_match_count,
        'unmatched_rows': unmatched_rows,
        'unmatched_row_count': len(unmatched_rows)
    }
def group_staging_rows(staging_rows):
    """Group staging rows by Google place id and pick one representative each.

    Returns (groups_by_place_id, deduplicated_rows). The representative for a
    duplicate group is simply the first row seen — the choice is arbitrary.
    """
    staging_row_groups = {}
    for staged in staging_rows:
        place_id = staged['formatted_address_obj']['google_place_id']
        staging_row_groups.setdefault(place_id, []).append(staged)
    deduplicated_staging_rows = [group[0] for group in staging_row_groups.values()]
    return staging_row_groups, deduplicated_staging_rows
# Meant to give quick feedback about status of job during manual usage of tool
def pretty_print_results(dump_obj, commit_job_filename):
print('Proposed test center rows to be added to UnverifiedTestCenters table: ', dump_obj['post_processing_stats']['unmatched_row_count'])
print('Rows: ')
unmatched_rows = dump_obj['post_processing_stats']['unmatched_rows']
for row in unmatched_rows:
print('Staging ID: ', row['id'], ' Name: ', row['name'], ' OrigAddr: ', row['address'], ' FormtAddr: ', row['formatted_address_obj']['formatted_address'])
print('\n\nCommit Job Filename: ', commit_job_filename)
print('(pass this commit_job back into the tool to upload to Unverified API)')
# Command line interface
# Get all recent staged test center rows that aren't already in our verified or unverified datasets
@click.command()
@click.option('--days', default=7, help='Use staging table rows from up to X days ago.')
@click.option('--commit_job', default=None, help='Pass in the file name of a JSON output dump from a prior run of this script.'
              + ' The specified file will be loaded and the results will be pushed to the Unverified API.')
def exec_tool(days, commit_job):
    """CLI entry point: commit a prior job dump, or run a fresh mapping pass."""
    # Surface which AWS account the boto3 session resolved to before doing work.
    aws_ident = boto3.client('sts').get_caller_identity().get('Account')
    print('Using AWS Identity: ', aws_ident)
    if commit_job:
        load_job_dump_and_push_to_api(commit_job)
    else:
        map_test_centers(days)
def load_job_dump_and_push_to_api(commit_job_filename):
    """Load a saved report from ./logs and POST each unmatched row to the API."""
    with open('./logs/' + commit_job_filename) as json_file:
        dump_obj = json.load(json_file)
        new_test_center_rows = dump_obj['post_processing_stats']['unmatched_rows']
        for test_center in new_test_center_rows:
            unverified_test_center_row = format_unverified_test_center_row(test_center)
            print('Formatted test center row: ', unverified_test_center_row)
            post_status = post_unverified_test_center(unverified_test_center_row)
            # NOTE(review): post_status is a requests.Response, not a boolean —
            # this prints its repr rather than a success flag; confirm intent.
            print('Row POSTed successfully? ', post_status)
def map_test_centers(days):
    """Run one mapping pass: fetch, dedupe, match, and dump a report.

    The report is written both to ./logs (for a later --commit_job run) and
    to S3 for passive analysis. Nothing is POSTed to the Unverified API here.
    """
    s3 = boto3.client('s3')
    # Epoch-millis cutoff: now minus *days* days.
    ms = str(int(round(time.time() * 1000)) - DAY_IN_MILLIS * days)
    recent_staged_rows = get_recent_staged_test_center_rows(ms)
    grouped_staging_row_dict, deduplicated_staging_rows = group_staging_rows(recent_staged_rows)
    unverified_rows = get_unverified_test_centers()
    verified_rows = get_verified_test_centers()
    print('Staged test centers since: ', ms, '. Row count: ', len(recent_staged_rows))
    print('Staged test centers (deduplicated): ', len(deduplicated_staging_rows))
    print('Total unverified rows: ', len(unverified_rows))
    print('Total verified rows: ', len(verified_rows))
    #simple brute force for visibility/traceability
    processed_rows = [check_row_against_ver_unver(staged_row, unverified_rows, verified_rows) for staged_row in deduplicated_staging_rows]
    stats = get_mapping_stats(processed_rows)
    # Dump results of processing, currently dumps to standard I/O and also writes to a file in /logs - for passive analysis.
    dump_obj = {
        'staging_row_deduplication_groups': grouped_staging_row_dict,
        'staging_row_deduplicated_count': len(deduplicated_staging_rows),
        'post_processing_stats': stats,
        'processed_rows': processed_rows
    }
    # Timestamped filename doubles as the commit-job handle for a later run.
    commit_job_filename = 'su_' + str(time.time()) + '_report.json'
    pretty_print_results(dump_obj, commit_job_filename)
    with open('./logs/' + commit_job_filename, 'w') as outfile:
        json.dump(dump_obj, outfile, indent=4)
    S3_OBJECT_KEY = 'unver-staged-jobs/' + commit_job_filename
    s3.put_object(Bucket=S3_BUCKET,
                  Body=(bytes(json.dumps(dump_obj, indent=4).encode('UTF-8'))),
                  Key= S3_OBJECT_KEY)
    S3_OBJECT_URL = 'https://' + S3_BUCKET + '.s3.amazonaws.com/' + S3_OBJECT_KEY
    print('s3 object URL: ', S3_OBJECT_URL)
if __name__ == '__main__':
    # Click parses sys.argv itself when invoked with no explicit arguments.
    # (Fix: removed a stray dataset artifact fused onto this line, which made
    # it a syntax error.)
    exec_tool()
49098 | #!/usr/bin/env python
"""Creates or updates distribution files"""
import subprocess
print "Updates JavaScript and Type Definition files..."
subprocess.call(['rm', 'dist', '-rf'])
subprocess.call(['tsc', '--declaration'])
| StarcoderdataPython |
9686569 | import logging
import os
from src.python_discord_logger.utils import get_discord_logger
def initialize_logger() -> logging.Logger:
    """Create the Discord-backed logger configured from webhook env vars."""
    env = os.environ
    discord_logger = get_discord_logger(__name__, env["WEBHOOK_URL"], env["WEBHOOK_USER_ID"])
    # DEBUG so every level used by the tests below is forwarded.
    discord_logger.setLevel(logging.DEBUG)
    return discord_logger
# Module-level logger shared by the tests below; built once at import time.
logger = initialize_logger()
def test_log():
    """Smoke-test that info/warning/error messages reach the Discord webhook."""
    logger.info("sample info message.")
    logger.warning("sample warning message.")
    logger.error("sample error message.")
def test_exception_log():
    """Smoke-test that a traceback (exc_info) is forwarded with the message."""
    try:
        raise ValueError("sample error happened")
    except ValueError as e:
        logger.error("sample exception error message", exc_info=e)
| StarcoderdataPython |
190218 | <reponame>cristianmtr/improved-initiative
import json

# Load the raw spell list, normalize each entry, and re-serialize it.
# Fix: both files are now opened with context managers — the original leaked
# the input handle and passed an unclosed handle to json.dump, so the output
# was only flushed/closed at interpreter exit.
with open("spell-list.json", 'r', encoding="utf8") as spell_file:
    spells = json.load(spell_file)

for s in spells:
    try:
        # Description is stored as a one-element list; unwrap it.
        s["Description"] = s["Description"][0]
        # Promote the learner's level (last one wins) to a top-level field.
        for learner in s["learnedBy"]:
            if "level" in learner.keys():
                s["Level"] = learner["level"]
    except Exception as e:
        # Best-effort normalization: report the bad entry and keep going.
        print(e)
        print(s)
    if "Level" not in s.keys():
        print(s)

with open("ogl_spells.json", "w") as out_file:
    json.dump(spells, out_file)
3564395 | <filename>info/utils/common.py
import functools
import qiniu
from flask import current_app
from flask import g
from flask import session
def do_index_class(index):
    """Custom template filter: map a 1-based ranking index to its CSS class.

    Indices 1-3 get "first"/"second"/"third"; anything else gets "".
    """
    css_classes = {1: "first", 2: "second", 3: "third"}
    return css_classes.get(index, "")
# Decorator that looks up the logged-in user's information for a view.
def user_login_data(u):
    """Wrap view *u* so that g.user holds the logged-in User (or None).

    functools.wraps restores the wrapped view's __name__ — without it every
    decorated view would register under the name "wrapper" and Flask's
    routing would raise on duplicate endpoints.
    """
    @functools.wraps(u)
    def wrapper(*args,**kwargs):
        # Get the user's id from the session cookie.
        user_id = session.get("user_id")
        user = None
        if user_id:
            try:
                # Imported here to avoid a circular import with info.models.
                from info.models import User
                # Look the user up by id in the database.
                user = User.query.get(user_id)
            except Exception as e:
                current_app.logger.error(e)
        # Stash the user on g so the view (and templates) can read it.
        g.user = user
        return u(*args,**kwargs)
    return wrapper
def file_storage(data):
    """Upload file content to Qiniu cloud storage and return its object key.

    data: raw file bytes (the content produced by read()).
    """
    # Application credentials (redacted placeholders in this copy).
    access_key = "<KEY>"
    secret_key = "<KEY>"
    # Bucket name — one bucket per project.
    bucket_name = "gz-python6"
    # Build the Qiniu auth client.
    q = qiniu.Auth(access_key, secret_key)
    # key is the stored filename; None lets Qiniu generate a unique random one.
    key = None
    # Create an upload token for the bucket, then upload the bytes.
    token = q.upload_token(bucket_name)
    ret, info = qiniu.put_data(token, key, data)
    return ret["key"]  # the generated object key for the uploaded file
8023693 | from datetime import datetime
from typing import Any
from unittest.mock import ANY, MagicMock
import pytest
from starlette import status
from starlite import TestClient
from app import models, repositories
from app.config import app_settings
from app.types import BeforeAfter, LimitOffset
from tests.utils import USERS_PATH, check_response
@pytest.mark.parametrize("patch_repo_scalars", ["db_users"], indirect=True)
def test_get_users(
    db_users: list[models.User],
    users_path: str,
    test_client: TestClient,
    patch_repo_scalars: None,
) -> None:
    """GET /users returns exactly the users the (patched) repository yields."""
    with test_client as client:
        response = client.get(users_path)
    check_response(response, status.HTTP_200_OK)
    db_ids = {str(user.id) for user in db_users}
    for user in response.json():
        assert user["id"] in db_ids
@pytest.mark.parametrize(
    "params, call_arg",
    [
        ({}, BeforeAfter("updated_date", None, None)),
        (
            {"updated-before": str(datetime.max)},
            BeforeAfter("updated_date", datetime.max, None),
        ),
        (
            {"updated-after": str(datetime.min)},
            BeforeAfter("updated_date", None, datetime.min),
        ),
        (
            {
                "updated-before": str(datetime.max),
                "updated-after": str(datetime.min),
            },
            BeforeAfter("updated_date", datetime.max, datetime.min),
        ),
    ],
)
@pytest.mark.parametrize("patch_repo_scalars", ["db_users"], indirect=True)
def test_get_users_filter_by_updated(
    params: dict[str, str],
    call_arg: BeforeAfter,
    users_path: str,
    test_client: TestClient,
    patch_repo_scalars: None,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """updated-before/after query params become a BeforeAfter repository filter."""
    filter_on_datetime_field_mock = MagicMock()
    monkeypatch.setattr(
        repositories.UserRepository,
        "filter_on_datetime_field",
        filter_on_datetime_field_mock,
    )
    with test_client as client:
        response = client.get(users_path, params=params)
    check_response(response, status.HTTP_200_OK)
    filter_on_datetime_field_mock.assert_called_once_with(call_arg)
@pytest.mark.parametrize(
    "params, call_arg",
    [
        ({}, LimitOffset(app_settings.DEFAULT_PAGINATION_LIMIT, 0)),
        (
            {"page": 11},
            LimitOffset(
                app_settings.DEFAULT_PAGINATION_LIMIT,
                app_settings.DEFAULT_PAGINATION_LIMIT * 10,
            ),
        ),
        ({"page-size": 11}, LimitOffset(11, 0)),
        ({"page": 11, "page-size": 11}, LimitOffset(11, 110)),
    ],
)
@pytest.mark.parametrize("patch_repo_scalars", ["db_users"], indirect=True)
def test_get_users_pagination(
    params: dict[str, str],
    call_arg: LimitOffset,
    users_path: str,
    test_client: TestClient,
    patch_repo_scalars: None,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """page/page-size query params become a LimitOffset for the repository."""
    apply_limit_offset_pagination_mock = MagicMock()
    monkeypatch.setattr(
        repositories.UserRepository,
        "apply_limit_offset_pagination",
        apply_limit_offset_pagination_mock,
    )
    with test_client as client:
        response = client.get(users_path, params=params)
    check_response(response, status.HTTP_200_OK)
    apply_limit_offset_pagination_mock.assert_called_once_with(call_arg)
@pytest.mark.parametrize(
    "params, call_arg",
    [
        ({}, {"is_active": True}),
        ({"is-active": True}, {"is_active": True}),
        ({"is-active": False}, {"is_active": False}),
    ],
)
@pytest.mark.parametrize("patch_repo_scalars", ["db_users"], indirect=True)
def test_get_users_filter_by_is_active(
    params: dict[str, str],
    call_arg: Any,
    users_path: str,
    test_client: TestClient,
    patch_repo_scalars: None,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """is-active query param (default True) becomes a kwargs repository filter."""
    _filter_select_by_kwargs_mock = MagicMock()
    monkeypatch.setattr(
        repositories.UserRepository,
        "_filter_select_by_kwargs",
        _filter_select_by_kwargs_mock,
    )
    with test_client as client:
        response = client.get(users_path, params=params)
    check_response(response, status.HTTP_200_OK)
    _filter_select_by_kwargs_mock.assert_called_once_with(call_arg)
@pytest.mark.parametrize("patch_repo_scalar", ["db_user"], indirect=True)
def test_get_user(
    db_user: models.User,
    unstructured_user: dict[str, Any],
    test_client: TestClient,
    patch_repo_scalar: None,
) -> None:
    """GET /users/{id} returns the user with the password field stripped."""
    with test_client as client:
        response = client.get(f"{USERS_PATH}/{db_user.id}")
    check_response(response, status.HTTP_200_OK)
    del unstructured_user["password"]
    assert response.json() == unstructured_user
def test_post_user(
    unstructured_user: dict[str, str],
    users_path: str,
    test_client: TestClient,
    patch_repo_add_flush_refresh: None,
) -> None:
    """POST /users creates a user; the server assigns the id."""
    del unstructured_user["id"]
    with test_client as client:
        response = client.post(users_path, json=unstructured_user)
    check_response(response, status.HTTP_201_CREATED)
    assert response.json() == {
        "id": ANY,
        "username": "A User",
        "is_active": True,
    }
def test_post_user_invalid_payload(
    unstructured_user: dict[str, str], users_path: str, test_client: TestClient
) -> None:
    """A payload missing the required password field is rejected with 400."""
    del unstructured_user["password"]
    with test_client as client:
        response = client.post(users_path, json=unstructured_user)
    check_response(response, status.HTTP_400_BAD_REQUEST)
def test_get_user_404(
    user_detail_path: str, test_client: TestClient, patch_repo_scalar_404: None
) -> None:
    """GET on an unknown user id yields 404."""
    with test_client as client:
        response = client.get(user_detail_path)
    check_response(response, status.HTTP_404_NOT_FOUND)
@pytest.mark.parametrize("patch_repo_scalar", ["db_user"], indirect=True)
def test_put_user(
    db_user: models.User,
    unstructured_user: dict[str, Any],
    user_detail_path: str,
    test_client: TestClient,
    patch_repo_add_flush_refresh: None,
    patch_repo_scalar: None,
) -> None:
    """PUT /users/{id} updates the user and echoes the new representation."""
    del unstructured_user["password"]
    unstructured_user["username"] = "Morty"
    with test_client as client:
        response = client.put(user_detail_path, json=unstructured_user)
    check_response(response, status.HTTP_200_OK)
    assert response.json() == unstructured_user
def test_put_user_404(
    unstructured_user: dict[str, Any],
    user_detail_path: str,
    patch_repo_scalar_404: None,
    test_client: TestClient,
) -> None:
    """PUT on an unknown user id yields 404."""
    with test_client as client:
        response = client.put(user_detail_path, json=unstructured_user)
    check_response(response, status.HTTP_404_NOT_FOUND)
@pytest.mark.parametrize("patch_repo_scalar", ["db_user"], indirect=True)
def test_delete_user(
    unstructured_user: dict[str, Any],
    user_detail_path: str,
    test_client: TestClient,
    patch_repo_delete: None,
    patch_repo_scalar: None,
) -> None:
    """DELETE /users/{id} returns the deleted user (without the password)."""
    with test_client as client:
        response = client.delete(user_detail_path)
    check_response(response, status.HTTP_200_OK)
    del unstructured_user["password"]
    assert response.json() == unstructured_user
def test_delete_user_404(
    user_detail_path: str, test_client: TestClient, patch_repo_scalar_404: None
) -> None:
    """DELETE on an unknown user id yields 404."""
    with test_client as client:
        response = client.delete(user_detail_path)
    check_response(response, status.HTTP_404_NOT_FOUND)
| StarcoderdataPython |
351272 | <gh_stars>10-100
import numpy as np
import scipy.ndimage as ndimage
import scipy.signal
def bahorich_coherence(data, zwin):
    """Cross-correlation (Bahorich-style) coherence over a 3-D seismic volume.

    data: 3-D array indexed (inline, crossline, time); zwin: vertical
    correlation window length in samples. Returns an array shaped like
    *data*; the last index along each axis is left at zero.
    """
    ni, nj, nk = data.shape
    out = np.zeros_like(data)
    # Pad the time axis so a zwin-long window can be taken near the edges.
    padded = np.pad(data, ((0, 0), (0, 0), (zwin//2, zwin//2)), mode='reflect')
    for i, j, k in np.ndindex(ni - 1, nj - 1, nk - 1):
        center_trace = data[i,j,:]
        center_std = center_trace.std()
        # Windows from the adjacent inline (+i) and crossline (+j) traces.
        x_trace = padded[i+1, j, k:k+zwin]
        y_trace = padded[i, j+1, k:k+zwin]
        xcor = np.correlate(center_trace, x_trace)
        ycor = np.correlate(center_trace, y_trace)
        # Peak correlation, normalized by window size and trace std-devs.
        px = xcor.max() / (xcor.size * center_std * x_trace.std())
        py = ycor.max() / (ycor.size * center_std * y_trace.std())
        # Geometric mean of the two directional similarities.
        out[i,j,k] = np.sqrt(px * py)
    return out
def moving_window(data, window, func):
    """Apply *func* to each *window*-shaped neighborhood of *data*.

    generic_filter hands the callback a flat array, so it is reshaped back
    to the window shape before *func* sees it. Edges use reflect padding.
    """
    def _apply_on_region(flat_region):
        return func(flat_region.reshape(window))
    return ndimage.generic_filter(data, _apply_on_region, window, mode='reflect')
def marfurt_semblance(region):
    """Marfurt-style semblance of a trace window; 1.0 for identical traces."""
    traces = region.reshape(-1, region.shape[-1])
    num_traces = traces.shape[0]
    cov = traces @ traces.T
    return cov.sum() / cov.diagonal().sum() / num_traces
def semblance2(region):
    """Classic stack-power semblance: (sum of traces)^2 vs sum of squares."""
    traces = region.reshape(-1, region.shape[-1])
    num_traces = traces.shape[0]
    stacked = traces.sum(axis=0)
    numerator = (stacked ** 2).sum()
    denominator = (traces ** 2).sum(axis=0).sum()
    return numerator / denominator / num_traces
def eig(region):
    """Eigenstructure coherence: dominant-eigenvalue fraction of trace covariance."""
    traces = region.reshape(-1, region.shape[-1])
    eigenvalues = np.linalg.eigvalsh(traces.dot(traces.T))
    return eigenvalues.max() / eigenvalues.sum()
def complex_semblance(region):
    """Semblance computed on the analytic (Hilbert-transformed) traces.

    Real and imaginary parts are stacked side by side so the covariance
    stays real-valued; abs() guards against tiny negative round-off.
    """
    region = region.reshape(-1, region.shape[-1])
    ntraces, nsamples = region.shape
    region = scipy.signal.hilbert(region)
    region = np.hstack([region.real, region.imag])
    cov = region.dot(region.T)
    return np.abs(cov.sum() / cov.diagonal().sum()) / ntraces
def complex_eig(region):
    """Eigenstructure coherence on the analytic (Hilbert-transformed) traces.

    Uses eigvals (not eigvalsh); the abs() collapses any residual complex
    round-off in the dominant-eigenvalue ratio.
    """
    region = region.reshape(-1, region.shape[-1])
    region = scipy.signal.hilbert(region)
    region = np.hstack([region.real, region.imag])
    cov = region.dot(region.T)
    vals = np.linalg.eigvals(cov)
    return np.abs(vals.max() / vals.sum())
def flatten(data, surface, window):
    """Extract a *window*-thick slab of *data* centered on *surface*.

    surface: (ni, nj) array of time-indices; it is Gaussian-smoothed first.
    Each output trace is the input trace linearly re-sampled around the
    (fractional) surface depth, so the horizon becomes flat.
    """
    surface = ndimage.gaussian_filter(surface.astype(float), 3)
    ni, nj, nk = data.shape
    ik = np.arange(nk)
    # Relative sample offsets, centered on zero.
    out_ik = np.arange(window) - window // 2
    out = np.zeros((ni, nj, window))
    for i, j in np.ndindex(ni, nj):
        trace = data[i,j,:]
        k = surface[i, j]
        # Interpolate the trace at (surface depth + offsets).
        shifted = np.interp(out_ik + k, ik, trace)
        out[i,j,:] = shifted
    return out
def unflatten(data, surface, orig_shape):
    """Place a flattened slab back into a volume of *orig_shape* along *surface*.

    Inverse of flatten() (up to edge clipping): each slab trace is copied
    back centered at the surface depth; samples falling outside the volume
    are clipped on both the output and input sides.
    """
    out = np.zeros(orig_shape)
    surface = np.clip(surface, 0, orig_shape[-1] - 1)
    win = data.shape[-1] // 2
    for i, j in np.ndindex(orig_shape[0], orig_shape[1]):
        k = surface[i,j]
        # Output range clipped to the volume's time axis.
        outmin, outmax = max(0, k - win), min(orig_shape[-1], k + win + 1)
        # Matching input range within the slab, trimmed by the same clipping.
        inmin, inmax = outmin - (k - win), k + win + 1 - outmax
        inmax = data.shape[-1] - abs(inmax)
        out[i, j, outmin:outmax] = data[i, j, inmin:inmax]
    return out
def gradients(seismic, sigma):
    """Build a 4-d array (ni, nj, nk, 3) of Gaussian gradients of *seismic*.

    A Gaussian filter with order=1 along one axis is a smoothed gradient
    operator for that axis; the three directional gradients are stacked
    along a new trailing dimension.
    """
    directional = [
        scipy.ndimage.gaussian_filter1d(seismic, sigma, axis=ax, order=1)[..., np.newaxis]
        for ax in range(3)
    ]
    return np.concatenate(directional, axis=3)
def moving_window4d(grad, window, func):
    """Applies the given function *func* over a moving *window*, reducing
    the input *grad* array from 4D to 3D."""
    # Pad in the spatial dimensions, but leave the gradient dimension unpadded.
    half_window = [(x // 2, x // 2) for x in window] + [(0, 0)]
    padded = np.pad(grad, half_window, mode='reflect')
    out = np.empty(grad.shape[:3], dtype=float)
    for i, j, k in np.ndindex(out.shape):
        # Window of gradient vectors around (i, j, k); padding makes the
        # slice valid at the volume edges.
        region = padded[i:i+window[0], j:j+window[1], k:k+window[2], :]
        out[i,j,k] = func(region)
    return out
def gst_coherence_calc(region):
    """GST coherence of one window: (l1 - l2) / (l1 + l2) of the 3x3
    gradient structure tensor, where l1 >= l2 are its top eigenvalues.

    Intended to be applied with *moving_window4d*.
    """
    grad_vectors = region.reshape(-1, 3)
    tensor = grad_vectors.T.dot(grad_vectors)
    # eigvalsh returns ascending eigenvalues; flip to descending.
    lam = np.sort(np.linalg.eigvalsh(tensor))[::-1]
    return (lam[0] - lam[1]) / (lam[0] + lam[1])
def gst_coherence(seismic, window, sigma=1):
    """Randen, et al's (2000) Gradient Structure Tensor based coherence.

    Computes Gaussian gradients of the volume, then reduces each moving
    *window* of gradient vectors with gst_coherence_calc.
    """
    # 4-d gradient array (ni x nj x nk x 3)
    grad = gradients(seismic, sigma)
    return moving_window4d(grad, window, gst_coherence_calc)
| StarcoderdataPython |
3517861 | <filename>LearningPython.py
datalist = [1, 3, 4, 7, 2, 9, 55]

# Fix: seed the scan with the FIRST element. The original started from
# datalist[2], which only happened to work for this particular list — any
# list whose maximum precedes index 2 would be reported incorrectly.
biggest = datalist[0]
for val in datalist:
    if val > biggest:
        biggest = val
print(biggest)
3479709 | <filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import sys
try:
from setuptools.core import setup
except ImportError:
from distutils.core import setup
# Read package metadata (version, author, ...) shipped alongside the package.
with open('SimpleHOHMM/package_info.json') as f:
    _info = json.load(f)
def setup_package():
    """Run setup(); sphinx is required only for documentation commands."""
    # Pull in sphinx only when a doc-related command appears on the CLI.
    needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)
    sphinx = ['sphinx'] if needs_sphinx else []
    setup(
        setup_requires=sphinx,
        name='SimpleHOHMM',
        version=_info["version"],
        author=_info["author"],
        author_email=_info["author_email"],
        packages=['SimpleHOHMM'],
        package_data={'SimpleHOHMM': ['package_info.json']},
        url='https://simple-hohmm.readthedocs.io',
        license='LICENSE.txt',
        description='High Order Hidden Markov Model for sequence classification',
        test_suite='test.test_suite',
    )
if __name__ == "__main__":
setup_package()
| StarcoderdataPython |
6415607 | <reponame>silentos1/OpenCV
import cv2
import numpy as np
# Output files for the final roll value and the full reading history.
plik1 = open('test1.txt', 'w')
plik2 = open('test2.txt', 'w')
# Blob-detector tuning thresholds.
MinProg = 5
MaxProg = 120
MinObszar = 60
MinKolistosc = .1
MinInertia = .3
kamera = cv2.VideoCapture(0)  # camera bound to a variable
kamera.set(15, -8)  # camera setting: property 15 (brightness), value -8
zliczanie = 0  # frame/loop counter (readings are sampled every 4th frame)
listaodczyt = [0, 0]  # raw per-frame pip-count readings
listaoczko = [0, 0]  # accepted (stable) roll results
while True:
    ret, klatka = kamera.read()  # frame from the camera
    params = cv2.SimpleBlobDetector_Params()  # detector parameters
    params.filterByArea = True
    params.filterByCircularity = True
    params.filterByInertia = True
    params.minThreshold = MinProg
    params.maxThreshold = MaxProg
    params.minArea = MinObszar
    params.minCircularity = MinKolistosc
    params.minInertiaRatio = MinInertia
    detektor = cv2.SimpleBlobDetector_create(params)  # blob detector object
    oczka = detektor.detect(klatka)  # detect keypoints (die pips)
    klatka_z_oczkami = cv2.drawKeypoints(klatka, oczka, np.array([]), (0, 255, 0),
                                         cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imshow("Kostka", klatka_z_oczkami)
    liczba_oczek = len(oczka)
    # Sample only every 4th frame to debounce the detector.
    if zliczanie % 4 == 0:
        if liczba_oczek < 7 :
            listaodczyt.append(liczba_oczek)
            # Accept a reading only after five identical consecutive samples.
            if listaodczyt[-1] == listaodczyt[-2] == listaodczyt[-3] == listaodczyt[-4]== listaodczyt[-5]:
                listaoczko.append(listaodczyt[-1])
                # Announce only when the stable value changed and is non-zero.
                if listaoczko[-1] != listaoczko[-2] and listaoczko[-1] != 0:
                    msg = "Wypadlo: " + str(listaoczko[-1]) + "\n"
                    print(msg)
    zliczanie += 1
    k = cv2.waitKey(30) & 0xff
    # ESC exits the capture loop.
    if k == 27:
        break
# Persist the last accepted roll and the full history, then close the files.
plik1.write(str(listaoczko[-1]))
plik1.close()
print(listaodczyt)
plik2.writelines(str(listaoczko))
plik2.close()
| StarcoderdataPython |
367685 | # wujian@2018
import os
import json
def dump_json(obj, fdir, name):
    """Dump python object *obj* as indented JSON to ``fdir/name``.

    Creates *fdir* (including parents) when it does not exist; an empty
    *fdir* writes into the current directory.
    """
    if fdir:
        # exist_ok avoids the check-then-create race of the original
        # `if not os.path.exists(...)` pattern.
        os.makedirs(fdir, exist_ok=True)
    with open(os.path.join(fdir, name), "w") as f:
        json.dump(obj, f, indent=4, sort_keys=False)
5142421 | <reponame>dansuh17/deep-supervised-hashing<filename>model.py
import torch
from torch import nn
import torch.nn.functional as F
class ResBlock(nn.Module):
    """Two-conv residual block (conv-BN-ReLU-conv-BN + identity skip).

    When the channel count or stride changes, the skip path is projected
    with a strided 3x3 conv + BN so shapes match for the addition.
    Attribute names (net, downsample_layer, do_downsample) are part of the
    serialized state-dict and must not be renamed.
    """
    def __init__(self, in_channels: int, out_channels: int, stride=1):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(
                in_channels=in_channels, out_channels=out_channels,
                kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_channels),
        )
        self.downsample_layer = None
        self.do_downsample = False
        if in_channels != out_channels or stride != 1:
            # Projection shortcut to reconcile channel count and/or stride.
            self.do_downsample = True
            self.downsample_layer = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 3, stride, 1, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        # initialize weights
        self.apply(self.init_weights)
    def forward(self, x):
        """Residual forward pass: relu(net(x) + (projected) identity)."""
        identity = x
        out = self.net(x)
        if self.do_downsample:
            identity = self.downsample_layer(x)
        return F.relu(out + identity, inplace=True)
    @staticmethod
    def init_weights(m):
        # Xavier init for conv/linear layers; other modules keep defaults.
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
            nn.init.xavier_normal_(m.weight)
class ResNet(nn.Module):
    """Small 3-block residual network for 1x28x28 inputs (MNIST-sized).

    The final 16-channel 14x14 feature map (16*14*14 = 3136) is flattened
    into a linear classification head. LiuDSH swaps `linear` for a hashing
    head, so both `linear` and `linear_input_size` are part of the interface.
    """
    def __init__(self, num_classes: int):
        super().__init__()
        self.net = nn.Sequential(
            ResBlock(in_channels=1, out_channels=16),
            ResBlock(in_channels=16, out_channels=16),
            ResBlock(in_channels=16, out_channels=16, stride=2),
        )
        # 16 channels x 14 x 14 after the stride-2 block on 28x28 input.
        self.linear_input_size = 3136
        self.linear = nn.Linear(self.linear_input_size, num_classes)
        # initialize weights
        self.apply(self.init_weights)
    def forward(self, x):
        """Run the residual trunk, flatten, and project to class logits."""
        x = self.net(x)
        x = x.view(-1, self.linear_input_size)
        return self.linear(x)
    @staticmethod
    def init_weights(m):
        # Xavier init for conv/linear layers; other modules keep defaults.
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
            nn.init.xavier_normal_(m.weight)
class LiuDSH(nn.Module):
    """Liu et al. deep supervised hashing model.

    Reuses the ResNet trunk but replaces its classification head with a
    code_size-dimensional hashing head.
    """
    def __init__(self, code_size: int):
        super().__init__()
        resnet = ResNet(num_classes=10)
        # Swap the classifier for a hash-code projection of size code_size.
        resnet.linear = nn.Linear(
            in_features=resnet.linear_input_size, out_features=code_size)
        self.net = resnet
        # initialize weights
        # NOTE(review): this re-applies Xavier init over the already-initialized
        # ResNet as well as the new head — redundant but harmless.
        self.apply(self.init_weights)
    def forward(self, x):
        """Return the (unbinarized) hash codes for a batch of images."""
        return self.net(x)
    @staticmethod
    def init_weights(m):
        # Xavier init for conv/linear layers; other modules keep defaults.
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
            nn.init.xavier_normal_(m.weight)
if __name__ == '__main__':
    # Smoke test: run a dummy MNIST-shaped batch through the hashing network
    # and print the model plus the output size (expected: 10 x 11).
    dummy_tensor = torch.randn((10, 1, 28, 28))
    dsh = LiuDSH(code_size=11)
    print(dsh)
    print(dsh(dummy_tensor).size())
| StarcoderdataPython |
156596 | #!/usr/bin/env python
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@summary: Provides utilties to install/uninstall the given procedural language using gppkg utility.
"""
import sys
import os
import traceback
import time
import re
import tinctest
from mpp.lib.gpdbSystem import GpdbSystem
from mpp.lib.gpSystem import GpSystem
from mpp.lib.PSQL import PSQL
from mpp.lib.mppUtil import hasExpectedStr
from tinctest.lib import local_path, run_shell_command
class ProceduralLanguage:
    """
    @summary: Base test class for procedural languages (gppkg install/uninstall).
    """

    def __init__(self):
        self.__setSystemInfo()

    def __setSystemInfo(self):
        """
        @summary: Internal. Caches gppkg-related attributes (OS, platform,
        GPDB branch/build) from the current GPDB system. Called by __init__.
        """
        system = GpSystem()
        gpdb = GpdbSystem()
        gpdb_version = gpdb.GetGpdbVersion()
        self.gppkg_os = system.GetOS().lower() + system.GetOSMajorVersion()
        self.gppkg_platform = system.GetArchitecture()
        self.gppkg_branch = gpdb_version[0]
        self.gppkg_build = gpdb_version[1]

    def language_in_db(self, language_name, dbname=os.environ.get('PGDATABASE', 'gptest')):
        """
        @summary: Checks whether *language_name* is defined in *dbname*.
        @param language_name: The name of the procedural language, e.g. plperl
        @param dbname: Optional. Defaults to PGDATABASE or 'gptest'.
        @return: True if the language is found in pg_language, False otherwise
        """
        sql = "select lanname from pg_language where lanname = '%s'" % language_name
        result = PSQL.run_sql_command(sql_cmd = sql, flags = '-q -t', dbname=dbname)
        tinctest.logger.info("Running command - %s" %sql)
        # Any non-whitespace output means the language row was returned.
        return len(result.strip()) > 0

    def create_language_in_db(self, language_name, dbname=os.environ.get('PGDATABASE', 'gptest')):
        """
        @summary: Creates a procedural language in *dbname*.
        @param language_name: The name of the procedural language to add, e.g. plperl
        @param dbname: Optional. Defaults to PGDATABASE or 'gptest'.
        @return: True when the language was added, False when psql reported an error
        """
        sql = 'create language %s' % language_name
        result = PSQL.run_sql_command(sql_cmd = sql, flags = '-q -t', dbname=dbname)
        tinctest.logger.info("Running command - %s" %sql)
        return 'error' not in result.lower()
| StarcoderdataPython |
4913536 | <reponame>dukundejeanne/neighbourhood_django<gh_stars>0
from django.db import models
from django.contrib.auth.models import User
from tinymce.models import HTMLField
from django.core.validators import MaxValueValidator
# Create your models here.
class Neighbour(models.Model):
    """A neighbourhood: name, location text, display image, occupant count."""
    name=models.CharField(max_length=30)
    location=models.TextField(max_length=300)
    image=models.ImageField(upload_to='images_galleries/')
    # user=models.ForeignKey(User,on_delete=models.CASCADE, blank=True,related_name="images")
    count=models.IntegerField(default=0,blank=True)
    def create_neighbourhood(self):
        # Thin wrapper over Model.save().
        self.save()
    def delete_neighbourhood(self):
        # Thin wrapper over Model.delete().
        self.delete()
    @classmethod
    def filter_by_busines_id(cls,id):
        # NOTE(review): despite the name ("busines"), this filters Neighbour
        # rows by their own primary key — confirm callers' expectations.
        hood=cls.objects.filter(id=id)
        return hood
    def __str__(self):
        return self.name
class Profile(models.Model):
    """One-to-one user profile with contact details and home neighbourhood."""
    class Meta:
        db_table='profile'
    # profile_pic=models.ImageField(upload_to='picture/',null=True,blank=True)
    # user=models.OneToOneField(User, on_delete=models.CASCADE,blank=True,related_name="profile")
    # bio=models.TextField(max_length=200,null=True,default="bio")
    # contact=models.TextField(max_length=200,null=True)
    profile_pic=models.ImageField(upload_to='picture/',null=True,blank=True)
    user=models.OneToOneField(User, on_delete=models.CASCADE,blank=True,related_name="profile")
    # NOTE(review): `name` defaults to the string "bio" - looks like a leftover
    # from the commented-out bio field above; confirm the intended default.
    name=models.CharField(max_length=200,null=True,default="bio")
    email=models.EmailField(max_length=200,null=True)
    location=models.CharField(max_length=100,null=True)
    neighborhood=models.ForeignKey(Neighbour, on_delete=models.CASCADE,null=True)
    def save_prof(self):
        """Persist this profile instance."""
        self.save()
    def delete_prof(self):
        """Remove this profile from the database."""
        self.delete()
    @classmethod
    def get_by_id(cls,id):
        """Return the single profile for user *id* (raises DoesNotExist if missing)."""
        profile=cls.objects.get(user=id)
        return profile
    @classmethod
    def find_by_id(cls,id):
        """Return the first profile for user *id*, or None if there is none."""
        profile=cls.objects.filter(user=id).first()
        return profile
    def __str__(self):
        return self.user.username
class NewsLetterRecients(models.Model):
    """Newsletter subscriber (sic: class name typo kept for migration compatibility)."""
    name=models.CharField(max_length=30)
    email=models.EmailField()
class Post(models.Model):
    """A post published by a user within a neighbourhood."""

    title=models.CharField(max_length=100)
    content=models.TextField(max_length=100)
    location=models.ForeignKey(Neighbour,on_delete=models.CASCADE)
    user=models.ForeignKey(User,on_delete=models.CASCADE)
    date_posted=models.DateTimeField(auto_now_add=True)

    def save_post(self):
        # BUG FIX: the parameter was named `Self` while the body used `self`,
        # so calling save_post() always raised NameError.
        self.save()

    @classmethod
    def get_location_contacts(cls,location):
        """Return all posts for the neighbourhood with primary key *location*."""
        contacts=Post.objects.filter(location__pk=location)
        return contacts

    def __str__(self):
        # BUG FIX: previously referenced the non-existent `self.post_hood`
        # attribute (always raised AttributeError); use the actual
        # `location` foreign key's name instead.
        return f'{self.title},{self.location.name}'
class Business(models.Model):
    """A business owned by a user, located in a neighbourhood."""

    owner=models.CharField(max_length=40)
    # sic: "bussiness" field names kept as-is for DB/migration compatibility.
    bussiness=models.CharField(max_length=200)
    location=models.ForeignKey(Neighbour, on_delete=models.CASCADE,null=True)
    user=models.ForeignKey(User, on_delete=models.CASCADE,null=True)
    bussiness_email=models.EmailField(max_length=200,null=True)
    post_date=models.DateTimeField(auto_now_add=True)

    def create_bussiness(self):
        """Persist this business instance."""
        self.save()

    def delete_bussiness(self):
        # BUG FIX: previously called self.save(), so "deleting" a business
        # actually re-saved it instead of removing it.
        self.delete()

    @classmethod
    def search_by_business(cls,search_term):
        """Return businesses whose name contains *search_term* (case-insensitive)."""
        return cls.objects.filter(bussiness__icontains=search_term)

    def __str__(self):
        return self.owner
class Rates(models.Model):
    """Per-user ratings (0-10, capped by MaxValueValidator) of design, usability and content."""
    design=models.PositiveIntegerField(default=0,validators=[MaxValueValidator(10)])
    usability=models.PositiveIntegerField(default=0,validators=[MaxValueValidator(10)])
    content=models.PositiveIntegerField(default=0,validators=[MaxValueValidator(10)])
    user =models.ForeignKey(User,on_delete=models.CASCADE)
project=models.IntegerField(default=0) | StarcoderdataPython |
8131727 | <gh_stars>0
import typing as T
from contextlib import contextmanager
from pathlib import Path
from click.utils import LazyFile
OpenFileLike = T.Union[T.TextIO, LazyFile]
FileLike = T.Union[OpenFileLike, Path, str]
@contextmanager
def open_file_like(
    file_like: T.Optional[FileLike], mode, **kwargs
) -> T.ContextManager[T.Tuple[str, OpenFileLike]]:
    """Open a path-like or file-like object, yielding ``(path, file)``.

    ``path`` is the given path OR a best-guess of it (``None`` when it cannot
    be determined) and ``file`` is the open file object.  Files opened here
    (i.e. when a path was passed in) are closed when the context exits;
    objects that arrived already open are left open.

    :param file_like: a ``str``/``Path`` to open, an already-open file-like
        object, or a falsy value (in which case ``(None, None)`` is yielded).
    :param mode: mode string forwarded to ``Path.open`` when a path is given.
    :param kwargs: extra keyword arguments forwarded to ``Path.open``.
    """
    if not file_like:
        yield None, None
        # BUG FIX: without this return, a falsy-but-non-None value such as ""
        # fell through on context exit and attempted Path("").open(), raising
        # from inside __exit__.
        return
    if isinstance(file_like, str):
        file_like = Path(file_like)
    if isinstance(file_like, Path):
        with file_like.open(mode, **kwargs) as f:
            yield file_like, f
    elif hasattr(file_like, "name"):
        # Open file objects (and click.LazyFile) expose their path as .name.
        yield file_like.name, file_like
    elif hasattr(file_like, "read"):
        # Anonymous streams (StringIO etc.): no path to report.
        yield None, file_like
| StarcoderdataPython |
1607384 | <gh_stars>0
"""
Given an array of integers, find the pair of adjacent elements that has the largest product and return that product.
Example
For inputArray = [3, 6, -2, -5, 7, 3], the output should be
adjacentElementsProduct(inputArray) = 21.
7 and 3 produce the largest product.
Input/Output
[execution time limit] 4 seconds (py3)
[input] array.integer inputArray
An array of integers containing at least two elements.
Guaranteed constraints:
2 ≤ inputArray.length ≤ 10,
-1000 ≤ inputArray[i] ≤ 1000.
[output] integer
The largest product of adjacent elements.
[Python 3] Syntax Tips
"""
def adjacentElementsProduct(inputArray):
    """Return the largest product of two adjacent elements of *inputArray*.

    :param inputArray: list of at least two integers
    :return: the maximum of inputArray[i] * inputArray[i+1]
    """
    # Built-in max() over a generator of adjacent products replaces the
    # manual loop, which also shadowed the built-in name `max`.
    return max(a * b for a, b in zip(inputArray, inputArray[1:]))
inputArray = [3, 6, -2, -5, 7, 3]
print(adjacentElementsProduct(inputArray))# = 21. | StarcoderdataPython |
154651 | """
LC89. Gray Code
The gray code is a binary numeral system where two successive values differ in only one bit.
Given a non-negative integer n representing the total number of bits in the code, print the sequence of gray code. A gray code sequence must begin with 0.
Example 1:
Input: 2
Output: [0,1,3,2]
Explanation:
00 - 0
01 - 1
11 - 3
10 - 2
For a given n, a gray code sequence may not be uniquely defined.
For example, [0,2,3,1] is also a valid gray code sequence.
00 - 0
10 - 2
11 - 3
01 - 1
Example 2:
Input: 0
Output: [0]
Explanation: We define the gray code sequence to begin with 0.
A gray code sequence of n has size = 2n, which for n = 0 the size is 20 = 1.
Therefore, for n = 0 the gray code sequence is [0].
"""
# Runtime: 40 ms, faster than 22.57% of Python3 online submissions for Gray Code.
# Memory Usage: 14.7 MB, less than 5.26% of Python3 online submissions for Gray Code.
class Solution:
    def grayCode(self, n: int) -> List[int]:
        """Return a gray-code sequence of length 2**n starting at 0.

        Performs a greedy depth-first walk over n-bit strings, always moving
        to the first (leftmost-flip) unvisited single-bit neighbour, then
        converts the visited codes back to integers in visit order.
        """
        if n == 0:
            return [0]
        order = {}  # bit-string -> position at which it was visited
        self._walk(order, "0" * n, n, 0)
        in_visit_order = sorted(order.items(), key=lambda kv: kv[1])
        return [int(bits, 2) for bits, _ in in_visit_order]

    def _walk(self, order, bits, n, pos):
        # Record the current code, then recurse into the first unvisited
        # neighbour reachable by flipping a single bit (leftmost bit first).
        order[bits] = pos
        for i in range(n):
            flipped = "1" if bits[i] == "0" else "0"
            neighbour = bits[:i] + flipped + bits[i + 1:]
            if neighbour in order:
                continue
            self._walk(order, neighbour, n, pos + 1)
            break
| StarcoderdataPython |
6567646 | from __future__ import absolute_import
from sentry.testutils import AcceptanceTestCase
FEATURE_NAME = "organizations:events"
class OrganizationEventsTest(AcceptanceTestCase):
    """Browser snapshot tests for the organization-wide events page."""
    def setUp(self):
        """Create an org/team/project owned by a logged-in user and build the page path."""
        super(OrganizationEventsTest, self).setUp()
        self.user = self.create_user("<EMAIL>")
        self.org = self.create_organization(owner=None, name="<NAME>")
        self.team = self.create_team(organization=self.org, name="Mariachi Band")
        self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
        self.create_member(user=self.user, organization=self.org, role="owner", teams=[self.team])
        self.login_as(self.user)
        self.path = u"/organizations/{}/events/".format(self.org.slug)
    def test_no_access(self):
        """Snapshot the page without the events feature flag enabled."""
        self.browser.get(self.path)
        self.browser.wait_until_not(".loading-indicator")
        self.browser.snapshot("global events - no access")
    def test_events_empty(self):
        """Snapshot the page with the feature flag on but no events ingested."""
        with self.feature(FEATURE_NAME):
            self.browser.get(self.path)
            self.browser.wait_until_not(".loading-indicator")
            # Wait for the async events request to finish before snapshotting.
            self.browser.wait_until_not('[data-test-id="events-request-loading"]')
            self.browser.snapshot("global events - empty")
| StarcoderdataPython |
5068422 | <filename>xfdnn/tools/compile/network/__init__.py
#!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
import os, sys
# Make each compiler sub-package importable as a top-level module by
# prepending its directory to sys.path.  The package directory is
# loop-invariant, so resolve it once instead of on every iteration.
_PKG_DIR = os.path.dirname(os.path.realpath(__file__))
for d in ["codegeneration", "graph", "memory", "network", "optimizations", "weights", "version", "tests"]:
    sys.path.insert(0, "%s/../%s" % (_PKG_DIR, d))
| StarcoderdataPython |
8095794 | from telethon.tl.custom.message import Message
from ..Filter import Filter
class All(Filter):
    """Filter that accepts every message (catch-all)."""
    def valid(self, msg: Message ) -> bool:
        # Unconditionally accept the message.
        return True
| StarcoderdataPython |
6603384 | import gi
gi.require_version('Ahoviewer', '1.0')
gi.require_version('Gtk', '3.0')
from gi.repository import Ahoviewer, GObject, Gtk
class PythonHelloPlugin(Ahoviewer.WindowAbstract):
    """Example ahoviewer window plugin: opens a user-chosen file via a dialog."""
    # This is just an example of using the open_file member function, using a dialog
    # here is obviously redundant because ahoviewer has its own file chooser dialog.
    # A more practical plugin that uses the WindowAbstract class could be something
    # like a local (or online) manga library, similar to mcomix's library.
    def do_activate(self):
        """Show a GTK file chooser and open the selected file in ahoviewer."""
        self.dialog = Gtk.FileChooserDialog(
            title="Please choose a file",
            action=Gtk.FileChooserAction.OPEN)
        self.dialog.add_buttons(
            Gtk.STOCK_CANCEL,
            Gtk.ResponseType.CANCEL,
            Gtk.STOCK_OPEN,
            Gtk.ResponseType.OK)
        # run() blocks until the user responds; only OK opens the file.
        response = self.dialog.run()
        if response == Gtk.ResponseType.OK:
            self.open_file(self.dialog.get_filename())
        # Destroy the dialog regardless of the response (OK or cancel).
        self.dialog.destroy()
    def do_deactivate(self):
        """No teardown needed for this example plugin."""
        pass
| StarcoderdataPython |
11346743 | <filename>controle_colaboradores_api/apps/usuarios/management/commands/criar_grupos_do_projeto.py
from django.core.management.base import BaseCommand
from django.db import transaction
from django.conf import settings
from django.contrib.auth.models import Group
class Command(BaseCommand):
    """Management command that ensures every project user group exists."""

    help = "Cria ou confirma a criação dos grupos de usuários necessários ao funcionamento do projeto."

    @transaction.atomic
    def handle(self, *args, **options):
        # Create each configured group if missing; report either outcome.
        for nome_do_grupo in settings.USER_GROUPS_DO_PROJETO:
            _, foi_criado = Group.objects.get_or_create(name=nome_do_grupo)
            if foi_criado:
                self.stdout.write(self.style.SUCCESS(f"Grupo '{nome_do_grupo}' criado."))
            else:
                self.stdout.write(f"Grupo '{nome_do_grupo}' não precisou ser criado pois já existe.")
        return "Fim da execução bem sucedida."
| StarcoderdataPython |
1707850 | from utils import timer_decorator
@timer_decorator
def find_min_1(array: list) -> int:
    """
    Naive minimum search: an element is the minimum iff no other element is
    smaller.  Kept deliberately quadratic for timing comparison with find_min_2.
    O(n^2)
    :param array: non-empty list of integers
    :return: integer (the minimum element)
    """
    overallmin = array[0]
    for i in array:
        is_smallest = True
        for j in array:
            if i > j:
                is_smallest = False
        if is_smallest:
            overallmin = i
    return overallmin
@timer_decorator
def find_min_2(array: list) -> int:
    """
    Single-pass minimum search, tracking the smallest value seen so far.
    Best Case: O(n)
    Worst Case: O(n)
    :param array: non-empty list of integers
    :return: integer (the minimum element)
    """
    min_so_far = array[0]
    for i in array:
        if i < min_so_far:
            min_so_far = i
    return min_so_far
| StarcoderdataPython |
9771868 | <gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
#Created on Tue Jul 29 10:12:58 2014
#@author: mcollado
"""
import Adafruit_DHT
import time
import sqlite3 as lite
import sys
import ConfigParser
import os
# If ConfigParser code fails this values are hardcoded
# To be removed when code works
# Hardcoded fallbacks (overwritten by the config file just below).
sensor = Adafruit_DHT.AM2302
pin = 4
config = ConfigParser.RawConfigParser()
config.read(os.path.join(os.curdir,'tempsensor.cfg'))
# getfloat() raises an exception if the value is not a float
# getint() and getboolean() also do this for their respective types
# NOTE(review): config.get returns a *string*, which replaces the
# Adafruit_DHT.AM2302 constant set above - confirm read_retry accepts it.
sensor = config.get('Sensor', 'sensor')
pin = config.getint('Sensor', 'pin')
# Try to grab a sensor reading. Use the read_retry method which will retry up
# to 15 times to get a sensor reading (waiting 2 seconds between each retry).
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
# Note that sometimes you won't get a reading and
# the results will be null (because Linux can't
# guarantee the timing of calls to read the sensor).
# If this happens try again!
if humidity is not None and temperature is not None:
    #print time.strftime("%Y-%m-%d %H:%M:%S") + ' Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity)
    try:
        con = lite.connect('temps.db')
        cur = con.cursor()
        # insert into temps(data, temp,uum) values("2014-08-01 11:10:00",'27,4','40,3');
        # NOTE(review): values are interpolated into the SQL string; a
        # parameterized query (cur.execute(sql, params)) would be safer.
        data = "INSERT INTO temps(data, temp, hum) VALUES (\"" + time.strftime("%Y-%m-%d %H:%M:%S") + "\",'{0:0.1f}','{1:0.1f}')".format(temperature, humidity)
        print data
        cur.execute(data)
        con.commit()
    except lite.Error, e:
        # NOTE(review): if lite.connect() itself raises, `con` is unbound here
        # and `if con` raises NameError - confirm/guard.
        if con:
            con.rollback()
        print "Error %s:" % e.args[0]
        sys.exit(1)
    finally:
        if con:
            con.close()
else:
    print 'Failed to get reading. Try again!'
| StarcoderdataPython |
6692164 | <reponame>TIFOSI528/icefall
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: <NAME>
# <NAME>)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
To run this file, do:
cd icefall/egs/tedlium3/ASR
python ./pruned_transducer_stateless/test_decoder.py
"""
import torch
from decoder import Decoder
def test_decoder():
    """Smoke-test Decoder output shapes in training and inference modes."""
    vocab_size = 3
    blank_id = 0
    unk_id = 2
    embedding_dim = 128
    context_size = 4

    decoder = Decoder(
        vocab_size=vocab_size,
        embedding_dim=embedding_dim,
        blank_id=blank_id,
        unk_id=unk_id,
        context_size=context_size,
    )

    batch, seq_len = 100, 20

    # Training mode: full label sequences in, per-position logits out.
    labels = torch.randint(low=0, high=vocab_size, size=(batch, seq_len))
    out = decoder(labels)
    assert out.shape == (batch, seq_len, vocab_size)

    # Inference mode: only the last `context_size` labels are fed and
    # padding is disabled, producing a single output frame per utterance.
    context = torch.randint(low=0, high=vocab_size, size=(batch, context_size))
    out = decoder(context, need_pad=False)
    assert out.shape == (batch, 1, vocab_size)
def main():
    """Entry point: run the decoder smoke test."""
    test_decoder()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
6418281 | #!/usr/bin/env python
import os
import re
from setuptools import setup
DIRNAME = os.path.abspath(os.path.dirname(__file__))


def rel(*parts):
    """Absolute path assembled from components relative to this file."""
    # PEP 8 (E731): a def instead of the previous `rel = lambda ...`.
    return os.path.abspath(os.path.join(DIRNAME, *parts))


# Read via context managers so the file handles are closed promptly
# (the previous open(...).read() calls leaked the file objects).
with open(rel('README.md')) as f:
    README = f.read()
with open(rel('flask_cqlengine.py')) as f:
    INIT_PY = f.read()
# Single-source the version from the module's __version__ attribute.
VERSION = re.findall("__version__ = '([^']+)'", INIT_PY)[0]

setup(
    name='flask-cqlengine',
    version=VERSION,
    description='Flask with cqlengine.',
    long_description=README,
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/chilldotcome/Flask-CQLEngine',
    dependency_links=[
        'git+git://github.com/chilldotcom/CQLEngine-Session#egg=CQLEngine-Session-0.1'
    ],
    install_requires=[
        'CQLEngine-Session>=0.1',
        'Flask',
        'cqlengine>=0.18'
    ],
    py_modules=[
        'flask_cqlengine',
    ],
    platforms='any',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Operating System :: OS Independent',
        'Topic :: Utilities',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: BSD License',
    ],
    keywords='flask cqlengine',
    license='BSD License',
)
| StarcoderdataPython |
70845 | <gh_stars>1-10
from datetime import date
import pytest
from quickbase_client.orm.field import QB_DATE
from quickbase_client.orm.field import QB_TEXT
from quickbase_client.orm.field import QuickBaseField
from quickbase_client.query import ast
from quickbase_client.query.ast import eq_
from quickbase_client.query.ast import qb_query_ast
from quickbase_client.query.query_base import QuickBaseQuery
# Shared fixture fields: a text field (fid 18) and a date field (fid 19).
mock_field = QuickBaseField(fid=18, field_type=QB_TEXT)
mock_field_2 = QuickBaseField(fid=19, field_type=QB_DATE)


class TestAstQueryBuilding:
    """Tests for building QuickBase query strings via the query `ast` module.

    Note: `ast` here is quickbase_client.query.ast, not the stdlib module.
    """
    def test_decorator_wraps_in_obj(self):
        """qb_query_ast wraps a string-returning function into a QuickBaseQuery."""
        @qb_query_ast
        def foo():
            return "{'18'.EX.19}"

        x = foo()
        assert isinstance(x, QuickBaseQuery)
        assert x.where == "{'18'.EX.19}"

    def test_makes_string(self):
        """eq_ renders a single {'fid'.EX.value} clause."""
        q = eq_(mock_field, 19)
        assert q.where == "{'18'.EX.19}"

    def test_simple_conjunction(self):
        """and_ joins clauses with AND; dates render as MM-DD-YYYY."""
        q = ast.and_(
            ast.eq_(mock_field, 19),
            ast.on_or_before_(mock_field_2, date(year=2020, day=7, month=2)),
        )
        assert "{'18'.EX.19}AND{'19'.OBF.'02-07-2020'}" in q.where

    def test_combine_conjunctions(self):
        """Nested or_ within and_ is parenthesized correctly."""
        q = ast.and_(
            ast.or_(ast.eq_(mock_field, 19), ast.eq_(mock_field, 21)),
            ast.on_or_before_(mock_field_2, date(year=2020, day=7, month=2)),
        )
        assert q.where == "(({'18'.EX.19}OR{'18'.EX.21})AND{'19'.OBF.'02-07-2020'})"

    # Every ast builder maps to its QuickBase comparison operator code.
    @pytest.mark.parametrize(
        "f,op",
        [
            ("contains_", "CT"),
            ("not_contains_", "XCT"),
            ("has_", "HAS"),
            ("not_has_", "XHAS"),
            ("eq_", "EX"),
            ("not_eq_", "XEX"),
            ("true_", "TV"),
            ("starts_with_", "SW"),
            ("not_starts_width_", "XSW"),
            ("before_", "BF"),
            ("on_or_before_", "OBF"),
            ("after_", "AF"),
            ("on_or_after_", "OAF"),
            ("during_", "IR"),
            ("not_during_", "XIR"),
            ("lt_", "LT"),
            ("lte_", "LTE"),
            ("gt_", "GT"),
            ("gte_", "GTE"),
        ],
    )
    def test_all(self, f, op):
        f = getattr(ast, f)
        q = f(mock_field, "oops")
        assert q.where == f"{{'18'.{op}.'oops'}}"
| StarcoderdataPython |
1764043 | #!/usr/bin/python
import os, re
import numpy as np
import matplotlib.pyplot as plt
debug = True
def compute_rcr_parameters(area, Q_goal, P_min, P_max, P_mean, Q_mean, ratio_prox_to_distal_resistors, decay_time, C_prefactor=1.0):
    """Compute RCR (Windkessel) boundary-condition parameters per outlet.

    :param Q_goal: target mean flow at each outlet
    :param P_min/P_max/P_mean: pressure extremes and mean driving the fit
    :param ratio_prox_to_distal_resistors: fixed R_p / R_d split ratio
    :param decay_time: time over which pressure decays from P_max to P_min
    :param C_prefactor: optional scaling of the fitted capacitance
    :return: (R_p, C, R_d, R_total) lists, one entry per outlet
    """
    tol = 1e-12

    # Total resistance at each outlet: mean pressure over target flow.
    R_total = [P_mean / flow for flow in Q_goal]
    if debug:
        print(("R_total = ", R_total))

    # Split each total into proximal + distal parts with a fixed ratio;
    # the parts must sum back to the total.
    R_d = [total / (1.0 + ratio_prox_to_distal_resistors) for total in R_total]
    R_p = [total - distal for distal, total in zip(R_d, R_total)]
    for prox, distal, total in zip(R_p, R_d, R_total):
        assert abs(distal + prox - total) < tol

    # Capacitance chosen so pressure decays from P_max to P_min over decay_time.
    C = [-C_prefactor * decay_time / (distal * np.log(P_min / P_max)) for distal in R_d]

    return R_p, C, R_d, R_total
def read_flow_file(file_name):
    """Parse a whitespace-delimited (time, flow) file describing one beat.

    The file must contain exactly one periodic beat: the first and last flow
    samples have to match.  The duplicated closing sample is dropped before
    statistics are computed.

    :param file_name: path to the flow file (time in column 1, flow in column 2)
    :return: (beat_time, Q_min, Q_max, Q_mean, t_at_min_flow, t_at_max_flow)
    :raises ValueError: if the flow waveform is not periodic
    """
    times = []
    flows = []
    with open(file_name, 'r') as f:
        for line in f:
            columns = line.split()
            times.append(float(columns[0]))
            flows.append(float(columns[1]))

    beat_time = times[-1]
    if flows[0] != flows[-1]:
        raise ValueError('Expecting periodic flow, did not get it')
    # Drop the duplicated closing sample so it is not counted twice.
    times = times[:-1]
    flows = flows[:-1]

    plots = False
    if plots:  # debug-only visualisation, left disabled
        print("should be plotting... ")
        plt.plot(times, flows)
        plt.xlabel('time (s)')
        plt.ylabel('flow (ml/s)')
        plt.show()

    t_at_min_flow = times[np.argmin(flows)]
    t_at_max_flow = times[np.argmax(flows)]
    assert t_at_max_flow > t_at_min_flow  # peak flow must follow the minimum

    Q_min = np.min(flows)
    Q_max = np.max(flows)
    Q_mean = np.mean(flows)
    return beat_time, Q_min, Q_max, Q_mean, t_at_min_flow, t_at_max_flow
if __name__== "__main__":
    if debug:
        print("Debug info:")
    tol = 1e-12
    # Conversion factor from mmHg to CGS pressure units (dyn/cm^2).
    MMHG_TO_CGS = 1333.22368
    # four outlets
    P_sys = 82.66666667 * MMHG_TO_CGS
    P_dia = 63.5 * MMHG_TO_CGS
    # NOTE(review): 0.7*diastolic + 0.3*systolic weighting - tune this value.
    P_mean = 0.7*P_dia + 0.3*P_sys
    C_prefactor = 0.1  # tune this value:
                       # 1 for no adjustment
                       # lower for faster response
                       # higher for slower response
    P_min = P_dia
    P_max = P_sys
    file_name = '97_test.flow'
    beat_time, Q_min, Q_max, Q_mean, t_at_min_flow, t_at_max_flow = read_flow_file(file_name)
    decay_time = 0.6 * beat_time  # tune this parameter, should be duration of diastole
    heart_rate = 1/beat_time  # beats per second
    if debug:
        print(("heart_rate = ", heart_rate, "beats per second, ", 60*heart_rate, " bpm"))
        print(("P_dia, P_sys, P_mean = ", P_dia, P_sys, P_mean))
        print(("Q_min, Q_max, Q_mean = ", Q_min, Q_max, Q_mean))
        print(("t_at_min_flow = ", t_at_min_flow, ", t_at_max_flow = ", t_at_max_flow))
    dt_transition = t_at_max_flow - t_at_min_flow
    timescale_rd_c = 2.0*dt_transition
    names = ['innominate', 'L_carotid', 'L_subclavian', 'aorta']
    area = np.array([2.03203, 0.327643, 0.633362, 2.17127])
    # Flow split fractions between the four outlets (sum to 1 before scaling).
    Q_goal = np.zeros(4)
    Q_goal[3] = .7   # https://doi.org/10.1161/JAHA.115.002657, fig 4 bottom right
                     # .7 of total flow to descending aorta
    Q_goal[0] = .15  # right gets half of remaining flow to upper body
    area_L = area[1] + area[2]  # total area of L_carotid, L_subclavian
    # Remaining .15 split between left carotid/subclavian by area fraction.
    Q_goal[1] = (area[1]/area_L) * .15
    Q_goal[2] = (area[2]/area_L) * .15
    # scale by total flow
    Q_goal *= Q_mean
    if debug:
        print(("Q_goal = ", Q_goal))
    # quick sanity check: outlet flows must sum to the total mean flow
    assert abs(Q_mean - np.sum(Q_goal)) < tol
    ratio_prox_to_distal_resistors = 77.0 / 1185.0  # constant from https://www.physiology.org/doi/10.1152/jappl.1990.69.1.112
    R_p, C, R_d, R_total = compute_rcr_parameters(area, Q_goal, P_min, P_max, P_mean, Q_mean, ratio_prox_to_distal_resistors, decay_time, C_prefactor)
    if debug:
        print("\n\n\n")
        print("name,\tr_p,\tC,\t,R_d,")
        for name, R_p_tmp, C_tmp, R_d_tmp in zip(names, R_p, C, R_d):
            print((name + "\t" + str('{:.6f}'.format(R_p_tmp)) + "," + str('{:.6f}'.format(C_tmp)) + "," + str('{:.6f}'.format(R_d_tmp))))
| StarcoderdataPython |
174233 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 10 09:48:18 2018
@author: a002028
"""
import yaml
import numpy as np
import pandas as pd
class YAMLwriter(dict):
    """Serialise dict-like or tabular data to a YAML file."""

    # TODO Ever used?
    def __init__(self):
        """Initialize."""
        super().__init__()

    def _check_format(self, data):
        """Coerce *data* into a plain dict, or None if the type is unsupported.

        Args:
            data: Preferably a dict or pd.DataFrame.
        """
        if isinstance(data, dict):
            return data
        if isinstance(data, pd.DataFrame):
            return data.to_dict()
        if isinstance(data, np.ndarray):
            raise NotImplementedError('Array to dictionary?')
            # FIXME possible in-format?
        return None

    def write_yaml(self, data, out_path='', indent=4):
        """Write *data* to a YAML file.

        Args:
            data: Preferably a dict or pd.DataFrame.
            out_path: Full path to the output file.
            indent (int): Indent length.
        """
        payload = self._check_format(data)
        with open(out_path, 'w') as path:
            yaml.safe_dump(
                payload,
                path,
                indent=indent,
                default_flow_style=False
            )
| StarcoderdataPython |
3325715 | <filename>molecool/io/__init__.py<gh_stars>0
"""
IO subpackage
molssi workshop: A python package for analyzing and visualizing xyz file.
"""
# Add imports here
from .pdb import open_pdb
from .xyz import open_xyz, write_xyz | StarcoderdataPython |
4930317 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-10-24 17:59
from __future__ import unicode_literals
import django.core.files.storage
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration creating the TemporaryUpload model.
    # NOTE(review): generated migrations should normally not be hand-edited.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='TemporaryUpload',
            fields=[
                # 22-char primary key (filepond-style upload id).
                ('file_id',
                 models.CharField(
                     max_length=22, primary_key=True, serialize=False,
                     validators=[
                         django.core.validators.MinLengthValidator(22)])),
                ('file',
                 models.FileField(
                     storage=django.core.files.storage.FileSystemStorage(
                         location=b'/tmp/filepond_uploads'), upload_to=b'')),
                ('upload_name', models.CharField(max_length=512)),
                ('uploaded', models.DateTimeField(auto_now_add=True)),
                ('upload_type',
                 models.CharField(
                     choices=[('F', 'Uploaded file data'),
                              ('U', 'Remote file URL')],
                     max_length=1)),
            ],
        ),
    ]
| StarcoderdataPython |
1813046 | # Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import warnings
from typing import Dict, List, Optional, Union
import pandas as pd
import mlrun
import mlrun.api.schemas
from ..config import config as mlconf
from ..datastore import get_store_uri
from ..datastore.targets import (
TargetTypes,
default_target_names,
get_offline_target,
get_target_driver,
validate_target_list,
validate_target_placement,
)
from ..features import Entity, Feature
from ..model import (
DataSource,
DataTarget,
DataTargetBase,
ModelObj,
ObjectList,
VersionedObjMetadata,
)
from ..runtimes.function_reference import FunctionReference
from ..serving.states import BaseStep, RootFlowStep, previous_step
from ..utils import StorePrefix
from .common import verify_feature_set_permissions
aggregates_step = "Aggregates"
class FeatureAggregation(ModelObj):
    """feature aggregation requirements"""

    def __init__(
        self, name=None, column=None, operations=None, windows=None, period=None
    ):
        # name of the aggregation; column: the source column to aggregate over
        self.name = name
        self.column = column
        # NOTE(review): presumably operation names (e.g. "sum") and the time
        # windows they apply over - confirm against the aggregation engine.
        self.operations = operations or []
        self.windows = windows or []
        # optional aggregation period/interval
        self.period = period
class FeatureSetSpec(ModelObj):
    """Spec portion of a feature set: entities, features, targets and graph."""

    def __init__(
        self,
        owner=None,
        description=None,
        entities=None,
        features=None,
        partition_keys=None,
        timestamp_key=None,
        label_column=None,
        relations=None,
        source=None,
        targets=None,
        graph=None,
        function=None,
        analysis=None,
        engine=None,
        output_path=None,
    ):
        # Private holders; populated through the validating property setters below.
        self._features: ObjectList = None
        self._entities: ObjectList = None
        self._targets: ObjectList = None
        self._graph: RootFlowStep = None
        self._source = None
        self._engine = None
        self._function: FunctionReference = None

        self.owner = owner
        self.description = description
        self.entities: List[Union[Entity, str]] = entities or []
        self.features: List[Feature] = features or []
        self.partition_keys = partition_keys or []
        self.timestamp_key = timestamp_key
        self.relations = relations or {}
        self.source = source
        self.targets = targets or []
        self.graph = graph
        self.label_column = label_column
        self.function = function
        self.analysis = analysis or {}
        self.engine = engine
        # Fall back to the configured artifact path when none is given.
        self.output_path = output_path or mlconf.artifact_path

    @property
    def entities(self) -> List[Entity]:
        """feature set entities (indexes)"""
        return self._entities

    @entities.setter
    def entities(self, entities: List[Union[Entity, str]]):
        if entities:
            # if the entity is a string, convert it to Entity class
            for i, entity in enumerate(entities):
                if isinstance(entity, str):
                    entities[i] = Entity(entity)
        self._entities = ObjectList.from_list(Entity, entities)

    @property
    def features(self) -> List[Feature]:
        """feature set features list"""
        return self._features

    @features.setter
    def features(self, features: List[Feature]):
        self._features = ObjectList.from_list(Feature, features)

    @property
    def targets(self) -> List[DataTargetBase]:
        """list of desired targets (material storage)"""
        return self._targets

    @targets.setter
    def targets(self, targets: List[DataTargetBase]):
        self._targets = ObjectList.from_list(DataTargetBase, targets)

    @property
    def engine(self) -> str:
        """feature set processing engine (storey, pandas, spark)"""
        return self._engine

    @engine.setter
    def engine(self, engine: str):
        engine_list = ["pandas", "spark", "storey"]
        if engine and engine not in engine_list:
            raise mlrun.errors.MLRunInvalidArgumentError(
                f"engine must be one of {','.join(engine_list)}"
            )
        # pandas/spark run the graph synchronously; storey leaves it unset.
        self.graph.engine = "sync" if engine and engine in ["pandas", "spark"] else None
        self._engine = engine

    @property
    def graph(self) -> RootFlowStep:
        """feature set transformation graph/DAG"""
        return self._graph

    @graph.setter
    def graph(self, graph):
        self._graph = self._verify_dict(graph, "graph", RootFlowStep)
        # Keep the graph's engine mode consistent with the selected engine.
        self._graph.engine = (
            "sync" if self.engine and self.engine in ["pandas", "spark"] else None
        )

    @property
    def function(self) -> FunctionReference:
        """reference to template graph processing function"""
        return self._function

    @function.setter
    def function(self, function):
        self._function = self._verify_dict(function, "function", FunctionReference)

    @property
    def source(self) -> DataSource:
        """feature set data source definitions"""
        return self._source

    @source.setter
    def source(self, source: DataSource):
        self._source = self._verify_dict(source, "source", DataSource)

    def require_processing(self):
        """True if the transformation graph has any steps to run."""
        return len(self._graph.steps) > 0
class FeatureSetStatus(ModelObj):
    """Status portion of a feature set: state, materialized targets, stats."""

    def __init__(
        self,
        state=None,
        targets=None,
        stats=None,
        preview=None,
        function_uri=None,
        run_uri=None,
    ):
        self.state = state or "created"
        self._targets: ObjectList = None
        self.targets = targets or []
        self.stats = stats or {}
        self.preview = preview or []
        self.function_uri = function_uri
        self.run_uri = run_uri

    @property
    def targets(self) -> List[DataTarget]:
        """list of material storage targets + their status/path"""
        return self._targets

    @targets.setter
    def targets(self, targets: List[DataTarget]):
        self._targets = ObjectList.from_list(DataTarget, targets)

    def update_target(self, target: DataTarget):
        """Add or replace a target entry in the status."""
        self._targets.update(target)

    def update_last_written_for_target(
        self, target_path: str, last_written: datetime.datetime
    ):
        """Record the last-written time on the target matching *target_path*.

        The comparison tolerates a trailing slash on the stored target path.
        """
        for target in self._targets:
            if target.path == target_path or target.path.rstrip("/") == target_path:
                target.last_written = last_written
class FeatureSet(ModelObj):
"""Feature set object, defines a set of features and their data pipeline"""
kind = mlrun.api.schemas.ObjectKind.feature_set.value
_dict_fields = ["kind", "metadata", "spec", "status"]
def __init__(
self,
name: str = None,
description: str = None,
entities: List[Union[Entity, str]] = None,
timestamp_key: str = None,
engine: str = None,
):
"""Feature set object, defines a set of features and their data pipeline
example::
import mlrun.feature_store as fstore
ticks = fstore.FeatureSet("ticks", entities=["stock"], timestamp_key="timestamp")
fstore.ingest(ticks, df)
:param name: name of the feature set
:param description: text description
:param entities: list of entity (index key) names or :py:class:`~mlrun.features.FeatureSet.Entity`
:param timestamp_key: timestamp column name
:param engine: name of the processing engine (storey, pandas, or spark), defaults to storey
"""
self._spec: FeatureSetSpec = None
self._metadata = None
self._status = None
self._api_client = None
self._run_db = None
self.spec = FeatureSetSpec(
description=description,
entities=entities,
timestamp_key=timestamp_key,
engine=engine,
)
self.metadata = VersionedObjMetadata(name=name)
self.status = None
self._last_state = ""
self._aggregations = {}
@property
def spec(self) -> FeatureSetSpec:
return self._spec
@spec.setter
def spec(self, spec):
self._spec = self._verify_dict(spec, "spec", FeatureSetSpec)
@property
def metadata(self) -> VersionedObjMetadata:
return self._metadata
@metadata.setter
def metadata(self, metadata):
self._metadata = self._verify_dict(metadata, "metadata", VersionedObjMetadata)
@property
def status(self) -> FeatureSetStatus:
return self._status
@status.setter
def status(self, status):
self._status = self._verify_dict(status, "status", FeatureSetStatus)
@property
def uri(self):
"""fully qualified feature set uri"""
return get_store_uri(StorePrefix.FeatureSet, self.fullname)
    @property
    def fullname(self):
        """full name in the form project/name[:tag]"""
        # Fall back to the configured default project when none is set.
        fullname = (
            f"{self._metadata.project or mlconf.default_project}/{self._metadata.name}"
        )
        if self._metadata.tag:
            fullname += ":" + self._metadata.tag
        return fullname
def _override_run_db(
self, session,
):
# Import here, since this method only runs in API context. If this import was global, client would need
# API requirements and would fail.
from ..api.api.utils import get_run_db_instance
self._run_db = get_run_db_instance(session)
def _get_run_db(self):
if self._run_db:
return self._run_db
else:
return mlrun.get_run_db()
    def get_target_path(self, name=None):
        """get the url/path for an offline or specified data target

        Returns None when no matching offline target exists.
        """
        target = get_offline_target(self, name=name)
        if target:
            return target.path
    def set_targets(
        self,
        targets=None,
        with_defaults=True,
        default_final_step=None,
        default_final_state=None,
    ):
        """set the desired target list or defaults

        :param targets:  list of target type names ('csv', 'nosql', ..) or target objects
                         CSVTarget(), ParquetTarget(), NoSqlTarget(), ..
        :param with_defaults: add the default targets (as defined in the central config)
        :param default_final_step: the final graph step after which we add the
                                   target writers, used when the graph branches and
                                   the end cant be determined automatically
        :param default_final_state: *Deprecated* - use default_final_step instead
        """
        if default_final_state:
            warnings.warn(
                "The default_final_state parameter is deprecated. Use default_final_step instead",
                # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
                PendingDeprecationWarning,
            )
            default_final_step = default_final_step or default_final_state

        if targets is not None and not isinstance(targets, list):
            raise mlrun.errors.MLRunInvalidArgumentError(
                "targets can only be None or a list of kinds or DataTargetBase derivatives"
            )
        targets = targets or []
        # NOTE(review): when with_defaults=True this extends the caller-supplied
        # list in place - confirm that mutation is intended.
        if with_defaults:
            targets.extend(default_target_names())

        validate_target_list(targets=targets)

        for target in targets:
            kind = target.kind if hasattr(target, "kind") else target
            if kind not in TargetTypes.all():
                raise mlrun.errors.MLRunInvalidArgumentError(
                    f"target kind is not supported, use one of: {','.join(TargetTypes.all())}"
                )
            # Plain string kinds are wrapped into DataTargetBase named after the kind.
            if not hasattr(target, "kind"):
                target = DataTargetBase(target, name=str(target))
            self.spec.targets.update(target)
        if default_final_step:
            self.spec.graph.final_step = default_final_step
def purge_targets(self, target_names: List[str] = None, silent: bool = False):
    """Delete data of specific targets

    :param target_names: list of names of targets to delete
                         (default: delete all ingested targets)
    :param silent: fail silently if a target doesn't exist in the
                   feature-set status
    """
    # deleting target data is a destructive operation -- check permissions first
    verify_feature_set_permissions(
        self, mlrun.api.schemas.AuthorizationAction.delete
    )
    try:
        # refresh the status from the DB so we purge the persisted target list
        self.reload(update_spec=False)
    except mlrun.errors.MLRunNotFoundError:
        # If the feature set doesn't exist in DB there shouldn't be any target to delete
        if silent:
            return
        else:
            raise
    if target_names:
        # collect only the explicitly requested targets
        purge_targets = ObjectList(DataTarget)
        for target_name in target_names:
            try:
                purge_targets[target_name] = self.status.targets[target_name]
            except KeyError:
                if silent:
                    pass
                else:
                    raise mlrun.errors.MLRunNotFoundError(
                        "Target not found in status (fset={0}, target={1})".format(
                            self.name, target_name
                        )
                    )
    else:
        # no names given -> purge every target recorded in the status
        purge_targets = self.status.targets
    purge_target_names = list(purge_targets.keys())
    for target_name in purge_target_names:
        target = purge_targets[target_name]
        driver = get_target_driver(target_spec=target, resource=self)
        try:
            driver.purge()
        except FileNotFoundError:
            # data already gone on storage; still drop it from the status below
            pass
        del self.status.targets[target_name]
        self.save()
def has_valid_source(self):
    """check if object's spec has a valid (non empty) source definition

    A source is considered invalid when it is missing, has no path, or the
    path is the literal string ``"None"`` (a serialization artifact).
    """
    source_spec = self.spec.source
    if source_spec is None:
        return False
    path = source_spec.path
    return path is not None and path != "None"
def add_entity(
    self,
    name: str,
    value_type: mlrun.data_types.ValueType = None,
    description: str = None,
    labels: Optional[Dict[str, str]] = None,
):
    """add/set an entity (dataset index)

    :param name: entity name
    :param value_type: type of the entity (default to ValueType.STRING)
    :param description: description of the entity
    :param labels: label tags dict
    """
    entity = Entity(name, value_type, description=description, labels=labels)
    # register under the entity name (replaces an existing entry of that name)
    self._spec.entities.update(entity, name)
def add_feature(self, feature, name=None):
    """add/set a feature

    :param feature: feature object to register
    :param name: optional name to register the feature under
                 (presumably derived from the feature itself when None --
                 verify against ObjectList.update)
    """
    self._spec.features.update(feature, name)
def link_analysis(self, name, uri):
    """add a linked file/artifact (chart, data, ..)

    :param name: key to store the link under
    :param uri: url/path of the linked artifact
    """
    self._spec.analysis[name] = uri
@property
def graph(self):
    """feature set transformation graph/DAG (read-only view of spec.graph)"""
    return self.spec.graph
def _add_agregation_to_existing(self, new_aggregation):
name = new_aggregation["name"]
if name in self._aggregations:
current_aggr = self._aggregations[name]
if current_aggr["windows"] != new_aggregation["windows"]:
raise mlrun.errors.MLRunInvalidArgumentError(
f"Aggregation with name {name} already exists but with window {current_aggr['windows']}. "
f"Please provide name for the aggregation"
)
if current_aggr["period"] != new_aggregation["period"]:
raise mlrun.errors.MLRunInvalidArgumentError(
f"Aggregation with name {name} already exists but with period {current_aggr['period']}. "
f"Please provide name for the aggregation"
)
if current_aggr["column"] != new_aggregation["column"]:
raise mlrun.errors.MLRunInvalidArgumentError(
f"Aggregation with name {name} already exists but for different column {current_aggr['column']}. "
f"Please provide name for the aggregation"
)
current_aggr["operations"] = list(
set(current_aggr["operations"] + new_aggregation["operations"])
)
return
self._aggregations[name] = new_aggregation
def add_aggregation(
    self,
    column,
    operations,
    windows,
    period=None,
    name=None,
    step_name=None,
    after=None,
    before=None,
    state_name=None,
):
    """add feature aggregation rule

    example::

        myset.add_aggregation("ask", ["sum", "max"], "1h", "10m", name="asks")

    :param column:     name of column/field aggregate
    :param operations: aggregation operations, e.g. ['sum', 'std']
    :param windows:    time windows, can be a single window, e.g. '1h', '1d',
                       or a list of same unit windows e.g ['1h', '6h']
                       windows are transformed to fixed windows or
                       sliding windows depending whether period parameter
                       provided.

                       - Sliding window is fixed-size overlapping windows
                         that slides with time.
                         The window size determines the size of the sliding window
                         and the period determines the step size to slide.
                         Period must be integral divisor of the window size.
                         If the period is not provided then fixed windows is used.
                       - Fixed window is fixed-size, non-overlapping, gap-less window.
                         The window is referred to as a tumbling window.
                         In this case, each record on an in-application stream belongs
                         to a specific window. It is processed only once
                         (when the query processes the window to which the record belongs).
    :param period:     optional, sliding window granularity, e.g. '20s' '10m' '3h' '7d'
    :param name:       optional, aggregation name/prefix. Must be unique per feature set.
                       If not passed, the column will be used as name.
    :param step_name:  optional, graph step name
    :param state_name: *Deprecated* - use step_name instead
    :param after:      optional, after which graph step it runs
    :param before:     optional, comes before graph step
    """
    if isinstance(operations, str):
        raise mlrun.errors.MLRunInvalidArgumentError(
            "Invalid parameters provided - operations must be a list."
        )
    if state_name:
        warnings.warn(
            "The state_name parameter is deprecated. Use step_name instead",
            # TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
            PendingDeprecationWarning,
        )
        step_name = step_name or state_name
    name = name or column
    if isinstance(windows, str):
        windows = [windows]
    # (operations is guaranteed to be a non-str iterable here -- a plain
    # string already raised above, so no conversion is needed)
    aggregation = FeatureAggregation(
        name, column, operations, windows, period
    ).to_dict()

    def upsert_feature(name):
        # mark an existing feature as aggregated, or register a new one
        if name in self.spec.features:
            self.spec.features[name].aggregate = True
        else:
            self.spec.features[name] = Feature(name=column, aggregate=True)

    step_name = step_name or aggregates_step
    graph = self.spec.graph
    if step_name in graph.steps:
        # merge into the existing aggregation step and refresh its args
        step = graph.steps[step_name]
        self._add_agregation_to_existing(aggregation)
        step.class_args["aggregates"] = list(self._aggregations.values())
    else:
        self._aggregations[aggregation["name"]] = aggregation
        step = graph.add_step(
            name=step_name,
            after=after or previous_step,
            before=before,
            class_name="storey.AggregateByKey",
            aggregates=[aggregation],
            table=".",
        )
    # register one output feature per (operation, window) pair,
    # e.g. "asks_sum_1h"
    for operation in operations:
        for window in windows:
            upsert_feature(f"{name}_{operation}_{window}")
    return step
def get_stats_table(self):
    """get feature statistics table (as dataframe)

    Returns ``None`` when no statistics have been computed yet.
    """
    stats = self.status.stats
    if not stats:
        return None
    return pd.DataFrame.from_dict(stats, orient="index")
def __getitem__(self, name):
    """return the feature definition registered under *name*"""
    return self._spec.features[name]
def __setitem__(self, key, item):
    """add/replace the feature *item* under the name *key*"""
    self._spec.features.update(item, key)
def plot(self, filename=None, format=None, with_targets=False, **kw):
    """generate graphviz plot

    :param filename: optional output file for the rendered graph
    :param format: graphviz output format (e.g. "png", "svg")
    :param with_targets: also draw the configured target writers
    :param kw: extra keyword args forwarded to graph.plot()
    """
    graph = self.spec.graph
    _, default_final_step, _ = graph.check_and_process_graph(allow_empty=True)
    targets = None
    if with_targets:
        # NOTE(review): `targets` is still None at this point, so this call
        # validates None rather than self.spec.targets -- looks like a bug,
        # confirm the intended argument.
        validate_target_list(targets=targets)
        validate_target_placement(graph, default_final_step, self.spec.targets)
        # render each target as a cylinder node attached after its step
        targets = [
            BaseStep(
                target.kind,
                after=target.after_step or default_final_step,
                shape="cylinder",
            )
            for target in self.spec.targets
        ]
    return graph.plot(filename, format, targets=targets, **kw)
def to_dataframe(
    self,
    columns=None,
    df_module=None,
    target_name=None,
    start_time=None,
    end_time=None,
    time_column=None,
    **kwargs,
):
    """return featureset (offline) data as dataframe

    :param columns: list of columns to select (if not all)
    :param df_module: py module used to create the DataFrame (pd for Pandas, dd for Dask, ..)
    :param target_name: select a specific target (material view)
    :param start_time: filter by start time
    :param end_time: filter by end time
    :param time_column: specify the time column name in the file
    :param kwargs: additional reader (csv, parquet, ..) args
    :return: DataFrame
    """
    entities = list(self.spec.entities.keys())
    if columns:
        # always prepend the timestamp key and the entity columns so the
        # returned frame keeps its index/time information
        if self.spec.timestamp_key and self.spec.timestamp_key not in entities:
            columns = [self.spec.timestamp_key] + columns
        columns = entities + columns
    driver = get_offline_target(self, name=target_name)
    if not driver:
        raise mlrun.errors.MLRunNotFoundError(
            "there are no offline targets for this feature set"
        )
    return driver.as_df(
        columns=columns,
        df_module=df_module,
        entities=entities,
        start_time=start_time,
        end_time=end_time,
        time_column=time_column,
        **kwargs,
    )
def save(self, tag="", versioned=False):
    """save to mlrun db

    :param tag: version tag to store under (falls back to the object's own
                tag, then "latest")
    :param versioned: store as a new version in the DB
    """
    db = self._get_run_db()
    self.metadata.project = self.metadata.project or mlconf.default_project
    resolved_tag = tag or self.metadata.tag or "latest"
    obj_dict = self.to_dict()
    # bypass DB bug: make sure spec.features is always present
    obj_dict["spec"].setdefault("features", [])
    db.store_feature_set(obj_dict, tag=resolved_tag, versioned=versioned)
def reload(self, update_spec=True):
    """reload/sync the feature set status and spec from the DB

    :param update_spec: when False only the status is refreshed and the
                        local spec is kept as-is
    """
    feature_set = self._get_run_db().get_feature_set(
        self.metadata.name, self.metadata.project, self.metadata.tag
    )
    # the DB client may return a plain dict; normalize to a FeatureSet
    if isinstance(feature_set, dict):
        feature_set = FeatureSet.from_dict(feature_set)
    self.status = feature_set.status
    if update_spec:
        self.spec = feature_set.spec
| StarcoderdataPython |
11222072 | <reponame>billyrrr/onto
from unittest import mock
from unittest.mock import Mock, patch
import flask
import pytest
from flask import Flask
from onto import auth
import flask_restful
from flask_restful import Resource, ResponseBase
from firebase_admin import auth as firebase_admin_auth
import firebase_admin
from .fixtures import CTX
def get_mock_auth_headers(uid="testuid1"):
    """Build request headers whose Authorization token is the raw uid.

    In testing mode the auth layer treats the bearer token as the uid
    itself, so no real Firebase ID token needs to be minted.
    """
    return {"Authorization": uid}
# @pytest.mark.usefixtures("CTX")
def test_auth(CTX):
    """An authenticated GET should reach the resource and decode the uid."""
    assert CTX.debug

    class IntegerResource(Resource):
        @auth.authenticate
        def get(self, uid):
            assert uid == "test_user_id_2"

    flask_app = flask.app.Flask(__name__)
    api = flask_restful.Api(flask_app)
    api.add_resource(IntegerResource, "/int_resource")
    app = flask_app.test_client()
    # the route is registered as "/int_resource"; the previous path
    # "int_resource/" got a 404 and the handler's assert never ran,
    # so the test passed vacuously
    res = app.get(
        path="/int_resource",
        headers=get_mock_auth_headers(uid="test_user_id_2"))
    assert res.status_code == 200
def test_auth_no_headers(CTX):
    """A request without an Authorization header must be rejected (401)."""
    assert CTX.debug

    class IntegerResource(Resource):
        @auth.authenticate
        def get(self, uid):
            # should never run: the request carries no credentials
            assert uid == "test_user_id_2"

    flask_app = flask.app.Flask(__name__)
    api = flask_restful.Api(flask_app)
    api.add_resource(IntegerResource, "/int_resource")
    app = flask_app.test_client()
    res = app.get(path="/int_resource")
    assert res.status_code == 401
@pytest.fixture
def vit(monkeypatch):
    """Patch firebase verify_id_token to always resolve to uid "uid_1".

    Returns the Mock so tests can inspect call args / call count.
    """
    # mock_auth = Mock()
    verify_id_token = Mock(
        return_value={
            "uid": "uid_1"
        })
    monkeypatch.setattr(
        firebase_admin_auth,
        "verify_id_token",
        verify_id_token
    )
    return verify_id_token
@pytest.fixture
def MockProductionCTX(CTX, monkeypatch):
    """Flip the context config to non-testing so the real auth path runs."""
    monkeypatch.setattr(
        CTX.config, "TESTING", False
    )
def test_authenticate(MockProductionCTX, vit):
    """In production mode the raw header token is passed to verify_id_token."""
    class IntegerResource(Resource):
        @auth.authenticate
        def get(self, uid):
            # uid comes from the patched verify_id_token return value
            assert uid == "uid_1"

    flask_app = flask.app.Flask(__name__)
    api = flask_restful.Api(flask_app)
    api.add_resource(IntegerResource, "/int_resource")
    app = flask_app.test_client()
    res = app.get(
        path="/int_resource",
        headers={
            'Authorization': "correct_id_token_for_uid_1"
        }
    )
    # the raw Authorization header value must be forwarded for verification
    assert vit.call_args[0][0] == "correct_id_token_for_uid_1"
def test_authenticate_testing_config(CTX, vit):
    """ Tests that auth is skipped when CTX.testing is True

    :param CTX:
    :param vit: mocked verify_id_token; must NOT be called in testing mode
    :return:
    """
    class IntegerResource(Resource):
        @auth.authenticate
        def get(self, uid):
            # in testing mode the token itself is used as the uid
            assert uid == "uid_1"

    flask_app = flask.app.Flask(__name__)
    api = flask_restful.Api(flask_app)
    api.add_resource(IntegerResource, "/int_resource")
    app = flask_app.test_client()
    res = app.get(
        path="/int_resource",
        headers={
            'Authorization': "uid_1"
        }
    )
    # Firebase verification must be bypassed entirely
    assert vit.call_count == 0
@pytest.fixture
def vit_bomb(monkeypatch):
    """Patch verify_id_token to raise RevokedIdTokenError on every call."""
    def auth_bomb(*args, **kwargs):
        raise firebase_admin_auth.RevokedIdTokenError("mock message")
    # wraps= keeps Mock bookkeeping (call_count etc.) while still raising
    verify_id_token = Mock(wraps=auth_bomb)
    monkeypatch.setattr(
        firebase_admin_auth,
        "verify_id_token",
        verify_id_token
    )
    return verify_id_token
def test_authenticate_error(MockProductionCTX, vit_bomb):
    """A revoked/invalid token must produce a 401 response."""
    class IntegerResource(Resource):
        @auth.authenticate
        def get(self, uid):
            # should never run: token verification raises
            assert uid == "uid_1"

    flask_app = flask.app.Flask(__name__)
    api = flask_restful.Api(flask_app)
    api.add_resource(IntegerResource, "/int_resource")
    app = flask_app.test_client()
    res = app.get(
        path="/int_resource",
        headers={
            'Authorization': "correct_id_token_for_uid_1"
        }
    )
    assert res.status_code == 401
| StarcoderdataPython |
6412320 | from flask import Flask, jsonify, request
# from flask.ext.store import Store
import json
from .DataUploadAPI import data_upload_api
from .DataReqAPI import data_req_api
from .NewTaskAPI import new_task_api
from .GetModelAPI import get_model_api
from .InferAPI import infer_api
from .ModelUploadAPI import model_upload_api
import os
app = Flask(__name__)

# Store extension is currently disabled:
# app.config['STORE_DOMAIN'] = 'http://127.0.0.1:5000'
# app.config['STORE_PATH'] = '/some/path/to/somewhere'
# store = Store(app)

# mount every API blueprint on the application
app.register_blueprint(data_upload_api)
app.register_blueprint(data_req_api)
app.register_blueprint(new_task_api)
app.register_blueprint(get_model_api)
app.register_blueprint(infer_api)
app.register_blueprint(model_upload_api)
@app.route("/", methods=['GET', 'POST', 'PUT', 'DELETE'])
def home():
    """Root endpoint; answers every common HTTP verb with a short banner."""
    banner = "21 APIs"
    return banner
@app.route("/livecheck", methods=['GET'])
def livecheck():
    """Liveness-probe endpoint used to confirm the server is reachable."""
    status_message = "Server is online"
    return status_message
if __name__ == "__main__":
    # NOTE(review): debug=True plus host 0.0.0.0 exposes the Werkzeug
    # debugger to the whole network -- never run this config in production.
    app.run(debug=True, host='0.0.0.0')
1863856 | from django.conf.urls import url
from django.views.generic import TemplateView
from django.views.generic import RedirectView
from rest_framework_jwt.views import (obtain_jwt_token,
verify_jwt_token,
refresh_jwt_token)
from restLogin import views
# JWT token endpoints (rest_framework_jwt) plus the project's login view.
urlpatterns = [
    url(r'^api-token-auth', obtain_jwt_token),      # issue a token for credentials
    url(r'^api-token-refresh', refresh_jwt_token),  # refresh a still-valid token
    url(r'^api-token-verify', verify_jwt_token),    # check a token's validity
    # url(r'^api-register-user', views.CreateUserView.as_view()),
    url(r'^api-login-user', views.LoginUserView.as_view()),
    # url(r'^(?P<path>.*\..*)$', RedirectView.as_view(url='/static/%(path)s')),
    # url(r'^', TemplateView.as_view(template_name='angular/index.html')),
]
3370015 | """
A Collection of custom types for static type checking
"""
from typing import Union
RealNumber = Union[int, float]
Number = Union[RealNumber, complex]
| StarcoderdataPython |
11288411 | <filename>server/stylegan2_hypotheses_explorer/logic/evaluator/evaluator.py
from pathlib import Path
from typing import List, Type
import torch
from ...models import Evaluator as EvaluatorModel
from ..backend_lazy_loader import BackendLazyLoader
from .evaluator_backend import EvaluatorBackendT
class Evaluator(BackendLazyLoader[EvaluatorBackendT]):
    """Lazily-loaded wrapper around an image-evaluator backend.

    Stores the backend's model metadata and construction parameters; the
    actual backend instance is created on demand through BackendLazyLoader.
    """

    def __init__(self,
                 model: EvaluatorModel,
                 backend_class: Type[EvaluatorBackendT],
                 backend_file: Path,
                 width: int,
                 height: int):
        super().__init__()
        self._model = model
        self._backend_class = backend_class  # class instantiated lazily per backend
        self._backend_file = backend_file    # serialized backend (weights) on disk
        self._width = width
        self._height = height

    @property
    def model(self) -> EvaluatorModel:
        """The API model describing this evaluator."""
        return self._model

    def rate(self, images: List[torch.Tensor]) -> List[float]:
        """Rate a list of image tensors, returning one score per image.

        Stacks the images into a single batch, runs the backend once, then
        squeezes the trailing rating dimension into a flat list of floats
        (assumes the backend returns shape (N, 1) -- TODO confirm).
        """
        return self.backend.rate_image_batch(torch.stack(images)).squeeze(1).tolist()

    ############################################

    @property
    def max_allocated_backends(self) -> int:
        # upper bound on concurrently allocated backend instances
        # (consumed by BackendLazyLoader)
        return 5

    def construct_backend(self) -> EvaluatorBackendT:
        # factory hook invoked by BackendLazyLoader when a backend is needed
        return self._backend_class(self._width, self._height, self._backend_file)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.