input
stringlengths
2.65k
237k
output
stringclasses
1 value
""" ******************************************************************************** * Name: gen_commands.py * Author: <NAME> * Created On: 2015 * Copyright: (c) Brigham Young University 2015 * License: BSD 2-Clause ******************************************************************************** """ import os import string import random from tethys_apps.utilities import get_tethys_home_dir, get_tethys_src_dir from distro import linux_distribution from django.conf import settings from jinja2 import Template os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tethys_portal.settings") GEN_SETTINGS_OPTION = 'settings' GEN_APACHE_OPTION = 'apache' GEN_ASGI_SERVICE_OPTION = 'asgi_service' GEN_NGINX_OPTION = 'nginx' GEN_NGINX_SERVICE_OPTION = 'nginx_service' GEN_PORTAL_OPTION = 'portal' GEN_SERVICES_OPTION = 'services' GEN_INSTALL_OPTION = 'install' GEN_SITE_YAML_OPTION = 'site_content' FILE_NAMES = { GEN_SETTINGS_OPTION: 'settings.py', GEN_APACHE_OPTION: 'tethys-default.conf', GEN_ASGI_SERVICE_OPTION: 'asgi_supervisord.conf', GEN_NGINX_OPTION: 'tethys_nginx.conf', GEN_NGINX_SERVICE_OPTION: 'nginx_supervisord.conf', GEN_PORTAL_OPTION: 'portal.yml', GEN_SERVICES_OPTION: 'services.yml', GEN_INSTALL_OPTION: 'install.yml', GEN_SITE_YAML_OPTION: 'site_content.yml', } VALID_GEN_OBJECTS = ( GEN_SETTINGS_OPTION, # GEN_APACHE_OPTION, GEN_ASGI_SERVICE_OPTION, GEN_NGINX_OPTION, GEN_NGINX_SERVICE_OPTION, GEN_PORTAL_OPTION, GEN_SERVICES_OPTION, GEN_INSTALL_OPTION, GEN_SITE_YAML_OPTION ) TETHYS_SRC = get_tethys_src_dir() def add_gen_parser(subparsers): # Setup generate command gen_parser = subparsers.add_parser('gen', help='Aids the installation of Tethys by automating the ' 'creation of supporting files.') gen_parser.add_argument('type', help='The type of object to generate.', choices=VALID_GEN_OBJECTS) gen_parser.add_argument('-d', '--directory', help='Destination directory for the generated object.') gen_parser.add_argument('--allowed-hosts', dest='allowed_hosts', nargs='+', help='Add 
one or more hostnames or IP addresses to ALLOWED_HOSTS in the settings file. ' 'e.g.: 127.0.0.1 localhost') gen_parser.add_argument('--client-max-body-size', dest='client_max_body_size', help='Populate the client_max_body_size parameter for nginx config. Defaults to "75M".') gen_parser.add_argument('--asgi-processes', dest='asgi_processes', help='The maximum number of asgi worker processes. Defaults to 4.') gen_parser.add_argument('--db-name', dest='db_name', help='Name for the Tethys database to be set in the settings file.') gen_parser.add_argument('--db-username', dest='db_username', help='Username for the Tethys Database server to be set in the settings file.') gen_parser.add_argument('--db-password', dest='db_password', help='Password for the Tethys Database server to be set in the settings file.') gen_parser.add_argument('--db-host', dest='db_host', help='Host for the Tethys Database server to be set in the settings file.') gen_parser.add_argument('--db-port', dest='db_port', help='Port for the Tethys Database server to be set in the settings file.') gen_parser.add_argument('--db-dir', dest='db_dir', help='Directory where the local Tethys Database server is created.') gen_parser.add_argument('--production', dest='production', action='store_true', help='Generate a new settings file for a production server.') gen_parser.add_argument('--open-portal', dest='open_portal', action='store_true', help='Enable open portal mode. Defaults to False') gen_parser.add_argument('--open-signup', dest='open_signup', action='store_true', help='Enable open account signup. Defaults to False') gen_parser.add_argument('--tethys-port', dest='tethys_port', help='Port for the Tethys Server to run on in production. This is used when generating the ' 'Daphne and nginx configuration files. 
Defaults to 8000.') gen_parser.add_argument('--overwrite', dest='overwrite', action='store_true', help='Overwrite existing file without prompting.') gen_parser.add_argument('--add-apps', dest='add_apps', nargs='+', help='Enable additional Django apps by adding them to the INSTALLED_APPS in settings.py. ' 'e.g.: grappelli django_registration') gen_parser.add_argument('--session-persist', dest='session_persist', action='store_true', help='Disable forced user logout once the browser has been closed. Defaults to False') gen_parser.add_argument('--session-warning', dest='session_warning', help='Warn user of forced logout after indicated number of seconds. Defaults to 840') gen_parser.add_argument('--session-expire', dest='session_expire', help='Force user logout after a specified number of seconds. Defaults to 900') gen_parser.add_argument('--static-root', dest='static_root', help='Path to static files directory for production configuration. ' 'Defaults to ${TETHYS_HOME}/static. ' 'Applies default if directory does not exist.') gen_parser.add_argument('--workspaces-root', dest='workspaces_root', help='Path to workspaces directory for production configuration. ' 'Defaults to ${TETHYS_HOME}/workspaces. Applies default if directory does not exist.') gen_parser.add_argument('--bypass-portal-home', dest='bypass_portal_home', action='store_true', help='Enable bypassing the Tethys Portal home page. When the home page is accessed, ' 'users are redirected to the Apps Library page. Defaults to False') gen_parser.add_argument('--add-quota-handlers', dest='add_quota_handlers', nargs='+', help='Add one or more dot-formatted paths to custom ResourceQuotaHandler classes. ' 'Defaults to tethys_quotas.handlers.workspace.WorkspaceQuotaHandler. ' 'e.g.: tethysapp.dam_inventory.dam_quota_handler.DamQuotaHandler') gen_parser.add_argument('--django-analytical', dest='django_analytical', nargs='+', help='Provide one or more KEY:VALUE pairs for django analytical options in settings.py. 
' 'All VALUEs default to False. Available KEYs: CLICKMAP_TRACKER_ID, CLICKY_SITE_ID, ' 'CRAZY_EGG_ACCOUNT_NUMBER, GAUGES_SITE_ID, GOOGLE_ANALYTICS_JS_PROPERTY_ID, ' 'GOSQUARED_SITE_TOKEN, HOTJAR_SITE_ID, HUBSPOT_PORTAL_ID, INTERCOM_APP_ID, ' 'KISSINSIGHTS_ACCOUNT_NUMBER, KISSINSIGHTS_SITE_CODE, KISS_METRICS_API_KEY, ' 'MIXPANEL_API_TOKEN, OLARK_SITE_ID, OPTIMIZELY_ACCOUNT_NUMBER, PERFORMABLE_API_KEY, ' 'PIWIK_DOMAIN_PATH, PIWIK_SITE_ID, RATING_MAILRU_COUNTER_ID, SNAPENGAGE_WIDGET_ID, ' 'SPRING_METRICS_TRACKING_ID, USERVOICE_WIDGET_KEY, WOOPRA_DOMAIN, ' 'YANDEX_METRICA_COUNTER_ID. ' 'e.g.: CLICKMAP_TRACKER_ID:123456 CLICKY_SITE_ID:789123') gen_parser.add_argument('--add-backends', dest='add_backends', nargs='+', help='Add one or more authentication backends to settings.py. Provide the dot-formatted ' 'python path to a custom backend or one of the following keys: hydroshare, linkedin, ' 'google, facebook. The default backends are django.contrib.auth.backends.ModelBackend ' 'and guardian.backends.ObjectPermissionBackend. ' 'e.g.: project.backend.CustomBackend hydroshare') gen_parser.add_argument('--oauth-options', dest='oauth_options', nargs='+', help='Provide KEY:VALUE pairs of parameters for oauth providers in the settings.py. ' 'Available Keys are: SOCIAL_AUTH_GOOGLE_OAUTH2_KEY, ' 'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET, SOCIAL_AUTH_FACEBOOK_KEY, ' 'SOCIAL_AUTH_FACEBOOK_SECRET, SOCIAL_AUTH_FACEBOOK_SCOPE, ' 'SOCIAL_AUTH_LINKEDIN_OAUTH2_KEY, SOCIAL_AUTH_LINKEDIN_OAUTH2_SECRET, ' 'SOCIAL_AUTH_HYDROSHARE_KEY, SOCIAL_AUTH_HYDROSHARE_SECRET. ' 'e.g.: SOCIAL_AUTH_GOOGLE_OAUTH2_KEY:123456 SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET:789123') gen_parser.add_argument('--channel-layer', dest='channel_layer', help='Specify a backend to handle communication between apps via websockets. ' 'The default backend is channels.layers.InMemoryChannelLayer. ' 'For production, it is recommended to install channels_redis and use ' 'channels_redis.core.RedisChannelLayer instead. 
' 'A custom backend can be added using a dot-formatted path.') gen_parser.add_argument('--captcha', dest='captcha', action='store_true', help='Enable captcha verification. Choices include an image captcha or Google recaptcha. ' 'The default is True when the --production flag is used and False when it is omitted. ' 'If no recaptcha keys are provided, the image captcha will be used. See ' '--recaptcha-private-key and --recaptcha-public-key arguments.') gen_parser.add_argument('--recaptcha-private-key', dest='recaptcha_private_key', help='Provide a private key to enable Google Recaptcha. ' 'The Default is None. A private key can be obtained ' 'from https://www.google.com/recaptcha/admin') gen_parser.add_argument('--recaptcha-public-key', dest='recaptcha_public_key', help='Provide a public key to enable Google Recaptcha. ' 'The Default is None. A public key can be obtained ' 'from https://www.google.com/recaptcha/admin') gen_parser.set_defaults(func=generate_command, allowed_hosts=None, client_max_body_size='75M', asgi_processes=4, db_name='tethys_platform', db_username='tethys_default', db_password='<PASSWORD>', db_host='127.0.0.1', db_port=5436, db_dir='psql', production=False, open_portal=False, open_signup=False, tethys_port=8000, overwrite=False, add_apps=None, session_persist=False, session_warning=840, session_expire=900, bypass_portal_home=False, channel_layer='', recaptcha_private_key=None, recaptcha_public_key=None) def get_environment_value(value_name): value = os.environ.get(value_name) if value is not None: return value else: raise EnvironmentError(f'Environment value "{value_name}" must be set before generating this file.') def get_settings_value(value_name): value = getattr(settings, value_name, None) if value is not None: return value else: raise ValueError(f'Settings value "{value_name}" must be set before generating this file.') def gen_settings(args): TETHYS_HOME = get_tethys_home_dir() # Generate context variables secret_key = 
''.join([random.choice(string.ascii_letters + string.digits) for _ in range(50)]) installed_apps = ['django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django_gravatar', 'bootstrap3', 'termsandconditions', 'tethys_config', 'tethys_apps', 'tethys_gizmos', 'tethys_services', 'tethys_compute', 'tethys_quotas', 'social_django', 'guardian', 'session_security', 'captcha', 'snowpenguin.django.recaptcha2', 'rest_framework', 'rest_framework.authtoken', 'analytical', 'channels'] resource_quota_handlers = ['tethys_quotas.handlers.workspace.WorkspaceQuotaHandler'] django_analytical = dict(CLICKMAP_TRACKER_ID=False, CLICKY_SITE_ID=False, CRAZY_EGG_ACCOUNT_NUMBER=False, GAUGES_SITE_ID=False, GOOGLE_ANALYTICS_JS_PROPERTY_ID=False, GOSQUARED_SITE_TOKEN=False, HOTJAR_SITE_ID=False, HUBSPOT_PORTAL_ID=False, INTERCOM_APP_ID=False, KISSINSIGHTS_ACCOUNT_NUMBER=False, KISSINSIGHTS_SITE_CODE=False, KISS_METRICS_API_KEY=False, MIXPANEL_API_TOKEN=False, OLARK_SITE_ID=False, OPTIMIZELY_ACCOUNT_NUMBER=False, PERFORMABLE_API_KEY=False, PIWIK_DOMAIN_PATH=False, PIWIK_SITE_ID=False, RATING_MAILRU_COUNTER_ID=False, SNAPENGAGE_WIDGET_ID=False, SPRING_METRICS_TRACKING_ID=False, USERVOICE_WIDGET_KEY=False, WOOPRA_DOMAIN=False, YANDEX_METRICA_COUNTER_ID=False) backends = ['django.contrib.auth.backends.ModelBackend', 'guardian.backends.ObjectPermissionBackend'] custom_backends = dict(hydroshare='tethys_services.backends.hydroshare.HydroShareOAuth2', linkedin='social_core.backends.linkedin.LinkedinOAuth2', google='social_core.backends.google.GoogleOAuth2', facebook='social_core.backends.facebook.FacebookOAuth2') oauth_options = dict(SOCIAL_AUTH_GOOGLE_OAUTH2_KEY='', SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET='', SOCIAL_AUTH_FACEBOOK_KEY='', SOCIAL_AUTH_FACEBOOK_SECRET='', SOCIAL_AUTH_FACEBOOK_SCOPE='email', SOCIAL_AUTH_LINKEDIN_OAUTH2_KEY='', SOCIAL_AUTH_LINKEDIN_OAUTH2_SECRET='', 
SOCIAL_AUTH_HYDROSHARE_KEY='', SOCIAL_AUTH_HYDROSHARE_SECRET='') if args.add_apps and args.add_apps != ['None']: installed_apps += [i for i in args.add_apps if i not in installed_apps] if args.add_quota_handlers: resource_quota_handlers += [i for i in args.add_quota_handlers if i not in resource_quota_handlers and i != 'None'] try: session_warning = int(args.session_warning) except Exception: session_warning = 840 try: session_expire = int(args.session_expire) except Exception: session_expire = 900 if args.static_root and os.path.exists(args.static_root): static_root = args.static_root else: static_root = os.path.join(TETHYS_HOME, 'static') if args.workspaces_root and os.path.exists(args.workspaces_root): workspaces_root = args.workspaces_root else: workspaces_root = os.path.join(TETHYS_HOME, 'workspaces') if args.django_analytical: for pair in args.django_analytical: if pair != 'None': try: key, value = pair.split(':') django_analytical[key.upper()] = value except ValueError: raise ValueError('Provide key-value pairs in the form of KEY:VALUE') if args.add_backends: c = 0 for item in args.add_backends: if item != 'None': if item in custom_backends: backends.insert(c, custom_backends[item]) else: backends.insert(c, item) c += 1 if args.oauth_options: for pair in args.oauth_options: if pair != 'None': try: key, value = pair.split(':') oauth_options[key.upper()] = value except ValueError: raise ValueError('Provide key-value pairs in the form of KEY:VALUE') context = { 'secret_key': secret_key, 'allowed_hosts': args.allowed_hosts, 'db_name': args.db_name, 'db_username': args.db_username, 'db_password': args.db_password, 'db_host': args.db_host, 'db_port': args.db_port, 'db_dir': args.db_dir, 'tethys_home': TETHYS_HOME, 'production': args.production, 'open_portal': args.open_portal, 'open_signup': args.open_signup, 'installed_apps': installed_apps, 'session_expire_browser': not args.session_persist, 'session_warning': session_warning, 'session_expire': session_expire, 
'static_root': static_root, 'workspaces_root': workspaces_root, 'bypass_portal_home': args.bypass_portal_home, 'resource_quota_handlers': resource_quota_handlers, 'django_analytical': django_analytical, 'backends': backends, 'oauth_options': oauth_options, 'channel_layer': args.channel_layer, 'captcha': args.captcha if args.captcha else args.production, 'recaptcha_private_key': args.recaptcha_private_key, 'recaptcha_public_key': args.recaptcha_public_key } return context def gen_nginx(args): hostname = str(settings.ALLOWED_HOSTS[0]) if len(settings.ALLOWED_HOSTS) > 0 else '127.0.0.1' workspaces_root = get_settings_value('TETHYS_WORKSPACES_ROOT') static_root = get_settings_value('STATIC_ROOT') context = { 'hostname': hostname, 'workspaces_root': workspaces_root, 'static_root': static_root, 'client_max_body_size': args.client_max_body_size, 'port': args.tethys_port } return context def gen_asgi_service(args): nginx_user = '' nginx_conf_path = '/etc/nginx/nginx.conf' if os.path.exists(nginx_conf_path): with open(nginx_conf_path, 'r') as nginx_conf: for line in nginx_conf.readlines(): tokens = line.split() if len(tokens) > 0 and tokens[0] == 'user': nginx_user = tokens[1].strip(';') break conda_home = get_environment_value('CONDA_HOME') conda_env_name = get_environment_value('CONDA_ENV_NAME') user_option_prefix = '' try: linux_distro = linux_distribution(full_distribution_name=0)[0] if linux_distro in ['redhat', 'centos']: user_option_prefix = 'http-' except Exception: pass context = { 'nginx_user': nginx_user, 'port': args.tethys_port, 'asgi_processes': args.asgi_processes, 'conda_home': conda_home, 'conda_env_name': conda_env_name, 'tethys_src': TETHYS_SRC, 'user_option_prefix': user_option_prefix } return context def gen_nginx_service(args): context = {} return context def gen_portal_yaml(args): context = {} return context def gen_services_yaml(args): context = {} return context def gen_install(args): print('Please review the generated install.yml file and fill in 
the
possible values are ``host-header`` and ``path-pattern`` . - **Values** *(list) --* The condition value. If the field name is ``host-header`` , you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters. * A-Z, a-z, 0-9 * - . * * (matches 0 or more characters) * ? (matches exactly 1 character) If the field name is ``path-pattern`` , you can specify a single path pattern (for example, /img/*). A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters. * A-Z, a-z, 0-9 * _ - . $ / ~ \" \' @ : + * & (using &amp;) * * (matches 0 or more characters) * ? (matches exactly 1 character) - *(string) --* - **HostHeaderConfig** *(dict) --* - **Values** *(list) --* - *(string) --* - **PathPatternConfig** *(dict) --* - **Values** *(list) --* - *(string) --* - **HttpHeaderConfig** *(dict) --* - **HttpHeaderName** *(string) --* - **Values** *(list) --* - *(string) --* - **QueryStringConfig** *(dict) --* - **Values** *(list) --* - *(dict) --* - **Key** *(string) --* - **Value** *(string) --* - **HttpRequestMethodConfig** *(dict) --* - **Values** *(list) --* - *(string) --* - **SourceIpConfig** *(dict) --* - **Values** *(list) --* - *(string) --* :type Priority: integer :param Priority: **[REQUIRED]** The rule priority. A listener can\'t have multiple rules with the same priority. :type Actions: list :param Actions: **[REQUIRED]** The actions. Each rule must include exactly one of the following types of actions: ``forward`` , ``fixed-response`` , or ``redirect`` . If the action type is ``forward`` , you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP or TLS for a Network Load Balancer. 
[HTTPS listeners] If the action type is ``authenticate-oidc`` , you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant. [HTTPS listeners] If the action type is ``authenticate-cognito`` , you authenticate users through the user pools supported by Amazon Cognito. [Application Load Balancer] If the action type is ``redirect`` , you redirect specified client requests from one URL to another. [Application Load Balancer] If the action type is ``fixed-response`` , you drop specified client requests and return a custom HTTP response. - *(dict) --* Information about an action. - **Type** *(string) --* **[REQUIRED]** The type of action. Each rule must include exactly one of the following types of actions: ``forward`` , ``fixed-response`` , or ``redirect`` . - **TargetGroupArn** *(string) --* The Amazon Resource Name (ARN) of the target group. Specify only when ``Type`` is ``forward`` . - **AuthenticateOidcConfig** *(dict) --* [HTTPS listeners] Information about an identity provider that is compliant with OpenID Connect (OIDC). Specify only when ``Type`` is ``authenticate-oidc`` . - **Issuer** *(string) --* **[REQUIRED]** The OIDC issuer identifier of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path. - **AuthorizationEndpoint** *(string) --* **[REQUIRED]** The authorization endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path. - **TokenEndpoint** *(string) --* **[REQUIRED]** The token endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path. - **UserInfoEndpoint** *(string) --* **[REQUIRED]** The user info endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path. - **ClientId** *(string) --* **[REQUIRED]** The OAuth 2.0 client identifier. - **ClientSecret** *(string) --* The OAuth 2.0 client secret. This parameter is required if you are creating a rule. 
If you are modifying a rule, you can omit this parameter if you set ``UseExistingClientSecret`` to true. - **SessionCookieName** *(string) --* The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie. - **Scope** *(string) --* The set of user claims to be requested from the IdP. The default is ``openid`` . To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP. - **SessionTimeout** *(integer) --* The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days). - **AuthenticationRequestExtraParams** *(dict) --* The query parameters (up to 10) to include in the redirect request to the authorization endpoint. - *(string) --* - *(string) --* - **OnUnauthenticatedRequest** *(string) --* The behavior if the user is not authenticated. The following are possible values: * deny- Return an HTTP 401 Unauthorized error. * allow- Allow the request to be forwarded to the target. * authenticate- Redirect the request to the IdP authorization endpoint. This is the default value. - **UseExistingClientSecret** *(boolean) --* Indicates whether to use the existing client secret when modifying a rule. If you are creating a rule, you can omit this parameter or set it to false. - **AuthenticateCognitoConfig** *(dict) --* [HTTPS listeners] Information for using Amazon Cognito to authenticate users. Specify only when ``Type`` is ``authenticate-cognito`` . - **UserPoolArn** *(string) --* **[REQUIRED]** The Amazon Resource Name (ARN) of the Amazon Cognito user pool. - **UserPoolClientId** *(string) --* **[REQUIRED]** The ID of the Amazon Cognito user pool client. - **UserPoolDomain** *(string) --* **[REQUIRED]** The domain prefix or fully-qualified domain name of the Amazon Cognito user pool. - **SessionCookieName** *(string) --* The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie. 
- **Scope** *(string) --* The set of user claims to be requested from the IdP. The default is ``openid`` . To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP. - **SessionTimeout** *(integer) --* The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days). - **AuthenticationRequestExtraParams** *(dict) --* The query parameters (up to 10) to include in the redirect request to the authorization endpoint. - *(string) --* - *(string) --* - **OnUnauthenticatedRequest** *(string) --* The behavior if the user is not authenticated. The following are possible values: * deny- Return an HTTP 401 Unauthorized error. * allow- Allow the request to be forwarded to the target. * authenticate- Redirect the request to the IdP authorization endpoint. This is the default value. - **Order** *(integer) --* The order for the action. This value is required for rules with multiple actions. The action with the lowest value for order is performed first. The final action to be performed must be a ``forward`` or a ``fixed-response`` action. - **RedirectConfig** *(dict) --* [Application Load Balancer] Information for creating a redirect action. Specify only when ``Type`` is ``redirect`` . - **Protocol** *(string) --* The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP. - **Port** *(string) --* The port. You can specify a value from 1 to 65535 or #{port}. - **Host** *(string) --* The hostname. This component is not percent-encoded. The hostname can contain #{host}. - **Path** *(string) --* The absolute path, starting with the leading \"/\". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}. - **Query** *(string) --* The query parameters, URL-encoded when necessary, but not percent-encoded. 
Do not include the leading \"?\", as it is automatically added. You can specify any of the reserved keywords. - **StatusCode** *(string) --* **[REQUIRED]** The HTTP redirect code. The redirect is either permanent (HTTP 301) or temporary (HTTP 302). - **FixedResponseConfig** *(dict) --* [Application Load Balancer] Information for creating an action that returns a custom HTTP response.
from the original dataset and returns a transformed version. :param target_transform: A function/transform that takes in the target and transforms it. :param transform_groups: A dictionary containing the transform groups. Transform groups are used to quickly switch between training and eval (test) transformations. This becomes useful when in need to test on the training dataset as test transformations usually don't contain random augmentations. ``AvalancheDataset`` natively supports the 'train' and 'eval' groups by calling the ``train()`` and ``eval()`` methods. When using custom groups one can use the ``with_transforms(group_name)`` method instead. Defaults to None, which means that the current transforms will be used to handle both 'train' and 'eval' groups (just like in standard ``torchvision`` datasets). :param initial_transform_group: The name of the initial transform group to be used. Defaults to None, which means that if all AvalancheDatasets in the input datasets list agree on a common group (the "current group" is the same for all datasets), then that group will be used as the initial one. If the list of input datasets does not contain an AvalancheDataset or if the AvalancheDatasets do not agree on a common group, then 'train' will be used. :param targets: The label of each pattern. Can either be a sequence of labels or, alternatively, a sequence containing sequences of labels (one for each dataset to be concatenated). Defaults to None, which means that the targets will be retrieved from the datasets (if possible). :param task_labels: The task labels for each pattern. Must be a sequence of ints, one for each pattern in the dataset. Alternatively, task labels can be expressed as a sequence containing sequences of ints (one for each dataset to be concatenated) or even a single int, in which case that value will be used as the task label for all instances. 
Defaults to None, which means that the dataset will try to obtain the task labels from the original datasets. If no task labels could be found for a dataset, a default task label "0" will be applied to all patterns of that dataset. :param dataset_type: The type of the dataset. Defaults to None, which means that the type will be inferred from the list of input datasets. When `dataset_type` is None and the list of datasets contains incompatible types, an error will be raised. A list of datasets is compatible if they all have the same type. Datasets that are not instances of `AvalancheDataset` and instances of `AvalancheDataset` with type `UNDEFINED` are always compatible with other types. When the `dataset_type` is different than UNDEFINED, a proper value for `collate_fn` and `targets_adapter` will be set. If the `dataset_type` is different than UNDEFINED, then `collate_fn` and `targets_adapter` must not be set. :param collate_fn: The function to use when slicing to merge single patterns. In the future this function may become the function used in the data loading process, too. If None, the constructor will check if a `collate_fn` field exists in the first dataset. If no such field exists, the default collate function will be used. Beware that the chosen collate function will be applied to all the concatenated datasets even if a different collate is defined in different datasets. :param targets_adapter: A function used to convert the values of the targets field. Defaults to None. Note: the adapter will not change the value of the second element returned by `__getitem__`. The adapter is used to adapt the values of the targets field only. 
""" dataset_list = list(datasets) dataset_type, collate_fn, targets_adapter = \ self._get_dataset_type_collate_and_adapter( dataset_list, dataset_type, collate_fn, targets_adapter) self._dataset_list = dataset_list self._datasets_lengths = [len(dataset) for dataset in dataset_list] self._datasets_cumulative_lengths = ConcatDataset.cumsum(dataset_list) self._overall_length = sum(self._datasets_lengths) if initial_transform_group is None: uniform_group = None for d_set in self._dataset_list: if isinstance(d_set, AvalancheDataset): if uniform_group is None: uniform_group = d_set.current_transform_group else: if uniform_group != d_set.current_transform_group: uniform_group = None break if uniform_group is None: initial_transform_group = 'train' else: initial_transform_group = uniform_group if task_labels is not None: task_labels = self._concat_task_labels(task_labels) if targets is not None: targets = self._concat_targets(targets) self._adapt_concat_datasets() super().__init__(ClassificationDataset(), # not used transform=transform, target_transform=target_transform, transform_groups=transform_groups, initial_transform_group=initial_transform_group, task_labels=task_labels, targets=targets, dataset_type=dataset_type, collate_fn=collate_fn, targets_adapter=targets_adapter) def _get_dataset_type_collate_and_adapter( self, datasets, dataset_type, collate_fn, targets_adapter): if dataset_type is not None: return dataset_type, collate_fn, targets_adapter identified_types = set() first_collate_fn = None for dataset in datasets: if isinstance(dataset, AvalancheDataset): if dataset.dataset_type != AvalancheDatasetType.UNDEFINED: identified_types.add(dataset.dataset_type) if first_collate_fn is None: first_collate_fn = getattr(dataset, 'collate_fn', None) if len(identified_types) > 1: raise ValueError( 'Error trying to infer a common dataset type while ' 'concatenating different datasets. 
' 'Incompatible types: {}'.format(list(identified_types))) elif len(identified_types) == 0: dataset_type = AvalancheDatasetType.UNDEFINED else: # len(identified_types) == 1 dataset_type = next(iter(identified_types)) if dataset_type != AvalancheDatasetType.UNDEFINED and \ (collate_fn is not None or targets_adapter is not None): raise ValueError( 'dataset_type {} was inferred from the list of ' 'concatenated dataset. This dataset type can\'t be used ' 'with custom collate_fn or targets_adapter ' 'parameters. Only the UNDEFINED type supports ' 'custom collate_fn or targets_adapter ' 'parameters.'.format(dataset_type)) if collate_fn is None and \ dataset_type == AvalancheDatasetType.UNDEFINED: collate_fn = first_collate_fn return dataset_type, collate_fn, targets_adapter def __len__(self) -> int: return self._overall_length def _get_single_item(self, idx: int): dataset_idx, internal_idx = find_list_from_index( idx, self._datasets_lengths, self._overall_length, cumulative_sizes=self._datasets_cumulative_lengths) single_element = self._dataset_list[dataset_idx][internal_idx] return self._process_pattern(single_element, idx) def _fork_dataset(self: TAvalancheDataset) -> TAvalancheDataset: dataset_copy = super()._fork_dataset() dataset_copy._dataset_list = list(dataset_copy._dataset_list) # Note: there is no need to duplicate _datasets_lengths return dataset_copy def _initialize_targets_sequence( self, dataset, targets, dataset_type, targets_adapter) -> \ Sequence[TTargetType]: if targets is not None: if len(targets) != self._overall_length: raise ValueError( 'Invalid amount of target labels. It must be equal to the ' 'number of patterns in the dataset. Got {}, expected ' '{}!'.format(len(targets), self._overall_length)) return targets targets_list = [] # Could be easily done with a single line of code # This however, allows the user to better check which was the # problematic dataset by using a debugger. 
for dataset_idx, single_dataset in enumerate(self._dataset_list): targets_list.append(super()._initialize_targets_sequence( single_dataset, None, dataset_type, targets_adapter )) return LazyConcatTargets(targets_list) def _initialize_task_labels_sequence( self, dataset, task_labels: Optional[Sequence[int]]) \ -> Sequence[int]: if task_labels is not None: # task_labels has priority over the dataset fields if isinstance(task_labels, int): return ConstantSequence(task_labels, self._overall_length) elif len(task_labels) != self._overall_length: raise ValueError( 'Invalid amount of task labels. It must be equal to the ' 'number of patterns in the dataset. Got {}, expected ' '{}!'.format(len(task_labels), self._overall_length)) return task_labels concat_t_labels = [] for dataset_idx, single_dataset in enumerate(self._dataset_list): concat_t_labels.append(super()._initialize_task_labels_sequence( single_dataset, None)) return LazyConcatTargets(concat_t_labels) def _initialize_collate_fn(self, dataset, dataset_type, collate_fn): if collate_fn is not None: return collate_fn if len(self._dataset_list) > 0 and \ hasattr(self._dataset_list[0], 'collate_fn'): return getattr(self._dataset_list[0], 'collate_fn') return default_collate def _set_original_dataset_transform_group( self, group_name: str) -> None: for dataset_idx, dataset in enumerate(self._dataset_list): if isinstance(dataset, AvalancheDataset): if dataset.current_transform_group == group_name: # Prevents a huge slowdown in some corner cases # (apart from being actually more performant) continue self._dataset_list[dataset_idx] = \ dataset.with_transforms(group_name) def _freeze_original_dataset( self, group_name: str) -> None: for dataset_idx, dataset in enumerate(self._dataset_list): if isinstance(dataset, AvalancheDataset): self._dataset_list[dataset_idx] = \ dataset.freeze_group_transforms(group_name) def _replace_original_dataset_group( self, transform: XTransform, target_transform: YTransform) -> None: for 
dataset_idx, dataset in enumerate(self._dataset_list): if isinstance(dataset, AvalancheDataset): self._dataset_list[dataset_idx] = \ dataset.replace_transforms(transform, target_transform) def _add_original_dataset_group( self, group_name: str) -> None: for dataset_idx, dataset in enumerate(self._dataset_list): if isinstance(dataset, AvalancheDataset): self._dataset_list[dataset_idx] = \ dataset.add_transforms_group(group_name, None, None) def _add_groups_from_original_dataset( self, dataset, transform_groups) -> None: for dataset_idx, dataset in enumerate(self._dataset_list): if isinstance(dataset, AvalancheDataset): for original_dataset_group in dataset.transform_groups.keys(): if original_dataset_group not in transform_groups: transform_groups[original_dataset_group] = (None, None) def _adapt_concat_datasets(self): all_groups = set() for dataset in self._dataset_list: if isinstance(dataset, AvalancheDataset): all_groups.update(dataset.transform_groups.keys()) for dataset_idx, dataset in enumerate(self._dataset_list): if isinstance(dataset, AvalancheDataset): for group_name in all_groups: if group_name not in dataset.transform_groups: self._dataset_list[dataset_idx] = \ dataset.add_transforms_group(group_name, None, None) @staticmethod def _concat_task_labels(task_labels: Union[int, Sequence[int], Sequence[Sequence[int]]]): if isinstance(task_labels, int): # A single value has been passed -> use it for all instances # The value is returned as is because it's already managed when in # this form (in _initialize_task_labels_sequence). return task_labels elif isinstance(task_labels[0], int): # Flat list of task labels -> just return it. # The
def lambda_max(self):
    """Largest eigenvalue of the corresponding covariance matrix.

    From the interface "Eigenvalues". The value is computed once with a
    rank-one sparse SVD and then cached on the instance, since lmax(A)
    does not change for a fixed A.
    """
    if self._lambda_max is None:
        from parsimony.algorithms.nipals import RankOneSparseSVD

        # Dominant right singular vector of A; ||A v||^2 equals the
        # largest eigenvalue of A'A.
        # TODO: Add max_iter here!
        singular_vec = RankOneSparseSVD().run(self.A)
        projected = self.A.dot(singular_vec)
        self._lambda_max = np.sum(projected ** 2)

    return self._lambda_max
def grad(self, beta):
    """Gradient of the function.

    From the interface "Gradient".

    Parameters
    ----------
    beta : Numpy array, p-by-1. The point at which to evaluate the
            gradient. Only rows from ``penalty_start`` on are penalised.

    Returns
    -------
    Numpy array, p-by-1. The gradient
    2 * (tau * I + ((1 - tau) / n) * X'X) * beta over the penalised rows;
    the first ``penalty_start`` rows are zero.
    """
    if self.penalty_start > 0:
        beta_ = beta[self.penalty_start:, :]
    else:
        beta_ = beta

    if self.unbiased:
        n = float(self.X.shape[0] - 1.0)
    else:
        n = float(self.X.shape[0])

    if self.tau < 1.0:
        XtXbeta = np.dot(self.X.T, np.dot(self.X, beta_))
        grad = (self.tau * 2.0) * beta_ \
            + ((1.0 - self.tau) * 2.0 / n) * XtXbeta
    else:
        # tau == 1.0: the covariance term vanishes.
        grad = (self.tau * 2.0) * beta_

    if self.penalty_start > 0:
        # BUG FIX: np.vstack takes a single sequence of arrays. The previous
        # code passed np.zeros(...) and grad as two positional arguments,
        # which raises a TypeError whenever penalty_start > 0.
        grad = np.vstack((np.zeros((self.penalty_start, 1)), grad))

    # approx_grad = utils.approx_grad(self.f, beta, eps=1e-4)
    # print maths.norm(grad - approx_grad)

    return grad
self._V, self._S, self._U = np.linalg.svd(self.X.T, full_matrices=0) self._V = self._V.T self._U = self._U.T self._S = ((1.0 - self.tau) / n_) * (self._S ** 2) + self.tau self._S = self._S.reshape((min(n, p), 1)) atilde = np.dot(self._V, beta_) atilde2 = atilde ** 2 ssdiff = np.dot(beta_.T, beta_)[0, 0] - np.sum(atilde2) atilde2lambdas = atilde2 * self._S atilde2lambdas2 = atilde2lambdas * self._S tau2 = self.tau ** 2 from parsimony.algorithms.utils import NewtonRaphson newton = NewtonRaphson(force_negative=True, parameter_positive=True, parameter_negative=False, parameter_zero=True, eps=consts.TOLERANCE, max_iter=30) class F(properties.Function, properties.Gradient): def __init__(self, tau, S, c): self.tau = tau self.S = S self.c = c self.precomp = None self.precomp_mu = None def f(self, mu): term1 = (self.tau / ((1.0 + 2.0 * mu * self.tau) ** 2)) \ * ssdiff self.precomp = 1.0 + (2.0 * mu) * self.S self.precomp_mu = mu term2 = np.sum(atilde2lambdas * (self.precomp ** -2)) return term1 + term2 - self.c def grad(self, mu): term1 = (-4.0 * tau2 \ / ((1.0 + 2.0 * mu * self.tau) ** 3.0)) * ssdiff if self.precomp is None or self.precomp_mu != mu: self.precomp = 1.0 + (2.0 * mu) * self.S term2 = -4.0 * np.sum(atilde2lambdas2 \ * (self.precomp ** -3.0)) self.precomp = None self.precomp_mu = None return term1 + term2 # if max(n, p) >= 1000: # # A rough heuristic for finding a start value. Works well in # # many cases, and when it does not work we have only lost one # # iteration and restart at 0.0. # start_mu = np.sqrt(min(n, p)) \ # / max(1.0, self.c) \ # / max(0.1, self.tau) # elif max(n, p) >= 100: # start_mu = 1.0 # else: start_mu = 0.0 mu = newton.run(F(self.tau, self._S, self.c), start_mu) # Seems to be possible because of machine precision. 
if mu <= consts.FLOAT_EPSILON: return beta if p > n: l2 = ((self._S - self.tau) \ * (1.0 / ((1.0 - self.tau) / n_))).reshape((n,)) a = 1.0 + 2.0 * mu * self.tau b = 2.0 * mu * (1.0 - self.tau) / n_ y = (beta_ - np.dot(self.X.T, np.dot(self._U, (np.reciprocal(l2 + (a / b)) \ * np.dot(self._U.T, np.dot(self.X, beta_)).T).T))) * (1. / a) else: # The case when n >= p l2 = ((self._S - self.tau) * (1.0 / ((1.0 - self.tau) / n_))).reshape((p,)) a = 1.0 + 2.0 * mu * self.tau b = 2.0 * mu * (1.0 - self.tau) / n_ y = np.dot(self._V.T, (np.reciprocal(a + b * l2) * atilde.T).T) if self.penalty_start > 0: y = np.vstack((beta[:self.penalty_start, :], y)) return y def _compute_value(self, beta): """Helper function to compute the function value. Note that beta must already be sliced! """ if self.unbiased: n = float(self.X.shape[0] - 1.0) else: n = float(self.X.shape[0]) Xbeta = np.dot(self.X, beta) val = self.tau * np.dot(beta.T, beta) \ + ((1.0 - self.tau) / n) * np.dot(Xbeta.T, Xbeta) return val[0, 0] class RidgeSquaredError(properties.CompositeFunction, properties.Gradient, properties.StronglyConvex, properties.Penalty, properties.ProximalOperator): """Represents a ridge squared error penalty, i.e. a representation of f(x) = l.((1 / (2 * n)) * ||Xb - y||²_2 + (k / 2) * ||b||²_2), where ||.||²_2 is the L2 norm. Parameters ---------- l : Non-negative float. The Lagrange multiplier, or regularisation constant, of the function. X
<filename>src/keri/app/agenting.py<gh_stars>0 # -*- encoding: utf-8 -*- """ KERI keri.app.agenting module """ import random from hio.base import doing from hio.core import http from hio.core.tcp import clienting from hio.help import decking from . import httping from .. import help from .. import kering from ..app import obtaining from ..core import eventing, parsing, coring from ..db import dbing logger = help.ogler.getLogger() class WitnessReceiptor(doing.DoDoer): """ Sends messages to all current witnesses of given identifier (from hab) and waits for receipts from each of those witnesses and propagates those receipts to each of the other witnesses after receiving the complete set. Removes all Doers and exits as Done once all witnesses have been sent the entire receipt set. Could be enhanced to have a `once` method that runs once and cleans up and an `all` method that runs and waits for more messages to receipt. """ def __init__(self, hab, msg=None, klas=None, **kwa): """ For the current event, gather the current set of witnesses, send the event, gather all receipts and send them to all other witnesses Parameters: hab: Habitat of the identifier to populate witnesses msg: is the message to send to all witnesses. Defaults to sending the latest KEL event if msg is None """ self.hab = hab self.msg = msg self.klas = klas if klas is not None else HttpWitnesser super(WitnessReceiptor, self).__init__(doers=[doing.doify(self.receiptDo)], **kwa) def receiptDo(self, tymth=None, tock=0.0, **opts): """ Returns doifiable Doist compatible generator method (doer dog) Usage: add result of doify on this method to doers list Parameters: tymth is injected function wrapper closure returned by .tymen() of Tymist instance. Calling tymth() returns associated Tymist .tyme. 
tock is injected initial tock value opts is dict of injected optional additional parameters """ self.wind(tymth) self.tock = tock _ = (yield self.tock) sn = self.hab.kever.sn wits = self.hab.kever.wits if len(wits) == 0: return True msg = self.msg if self.msg is not None else self.hab.makeOwnEvent(sn=sn) ser = coring.Serder(raw=msg) witers = [] for wit in wits: witer = self.klas(hab=self.hab, wit=wit) witers.append(witer) witer.msgs.append(bytearray(msg)) # make a copy self.extend([witer]) _ = (yield self.tock) dgkey = dbing.dgKey(ser.preb, ser.saidb) while True: wigs = self.hab.db.getWigs(dgkey) if len(wigs) == len(wits): break _ = yield self.tock # generate all rct msgs to send to all witnesses wigers = [coring.Siger(qb64b=bytes(wig)) for wig in wigs] rserder = eventing.receipt(pre=ser.pre, sn=sn, said=ser.said) rctMsg = eventing.messagize(serder=rserder, wigers=wigers) # this is a little brute forcey and can be improved by gathering receipts # along the way and passing them out as we go and only sending the # required ones here for witer in witers: witer.msgs.append(bytearray(rctMsg)) _ = (yield self.tock) total = len(witers) * 2 count = 0 while count < total: for witer in witers: count += len(witer.sent) _ = (yield self.tock) self.remove(witers) return True class WitnessInquisitor(doing.DoDoer): """ Sends messages to all current witnesses of given identifier (from hab) and waits for receipts from each of those witnesses and propagates those receipts to each of the other witnesses after receiving the complete set. Removes all Doers and exits as Done once all witnesses have been sent the entire receipt set. Could be enhanced to have a `once` method that runs once and cleans up and an `all` method that runs and waits for more messages to receipt. 
""" def __init__(self, hab, reger=None, msgs=None, wits=None, klas=None, **kwa): """ For all msgs, select a random witness from Habitat's current set of witnesses send the msg and process all responses (KEL replays, RCTs, etc) Parameters: hab: Habitat of the identifier to use to identify witnesses msgs: is the message buffer to process and send to one random witness. """ self.hab = hab self.reger = reger self.wits = wits self.klas = klas if klas is not None else HttpWitnesser self.msgs = msgs if msgs is not None else decking.Deck() super(WitnessInquisitor, self).__init__(doers=[doing.doify(self.msgDo)], **kwa) def msgDo(self, tymth=None, tock=1.0, **opts): """ Returns doifiable Doist compatible generator method (doer dog) Usage: add result of doify on this method to doers list """ self.wind(tymth) self.tock = tock _ = (yield self.tock) wits = self.wits if self.wits is not None else self.hab.kever.wits if len(wits) == 0: raise kering.ConfigurationError("Must be used with an identifier that has witnesses") witers = [] for wit in wits: witer = self.klas(hab=self.hab, wit=wit, lax=True, local=False) witers.append(witer) self.extend(witers) while True: while not self.msgs: yield self.tock msg = self.msgs.popleft() witer = random.choice(witers) witer.msgs.append(bytearray(msg)) yield self.tock def query(self, pre, r="logs", sn=0, **kwa): msg = self.hab.query(pre, route=r, query=dict(), **kwa) # Query for remote pre Event self.msgs.append(bytes(msg)) # bytes not bytearray so set membership compare works def telquery(self, ri, i=None, r="tels", **kwa): msg = self.hab.query(i, route=r, query=dict(ri=ri), **kwa) # Query for remote pre Event self.msgs.append(bytes(msg)) # bytes not bytearray so set membership compare works def backoffQuery(self, pre, sn=None, anc=None): backoff = BackoffWitnessQuery(hab=self.hab, pre=pre, sn=sn, anc=anc) self.extend([backoff]) def backoffTelQuery(self, ri=None, i=None): backoff = BackoffWitnessTelQuery(hab=self.hab, reger=self.reger, ri=ri, 
class WitnessPublisher(doing.DoDoer):
    """
    Sends messages to all current witnesses of given identifier (from hab) and exits.

    Removes all Doers and exits as Done once all witnesses have been sent the message.

    Could be enhanced to have a `once` method that runs once and cleans up
    and an `all` method that runs and waits for more messages to receipt.

    """

    def __init__(self, hab, msg, wits=None, klas=None, **kwa):
        """
        For the current event, gather the current set of witnesses, send the event,
        gather all receipts and send them to all other witnesses

        Parameters:
            hab: Habitat of the identifier to populate witnesses
            msg: is the message to send to all witnesses.
                 Defaults to sending the latest KEL event if msg is None
            wits: optional explicit witness list; defaults to the Habitat's
                  current witnesses
            klas: witnesser class used to talk to each witness; defaults to
                  HttpWitnesser (defined later in this module)

        """
        self.hab = hab
        self.msg = msg
        self.wits = wits if wits is not None else self.hab.kever.wits
        self.klas = klas if klas is not None else HttpWitnesser
        super(WitnessPublisher, self).__init__(doers=[doing.doify(self.sendDo)], **kwa)

    def sendDo(self, tymth=None, tock=0.0, **opts):
        """
        Returns doifiable Doist compatible generator method (doer dog)

        Usage:
            add result of doify on this method to doers list
        """
        self.wind(tymth)
        self.tock = tock
        _ = (yield self.tock)

        # Nothing to do when the identifier has no witnesses.
        if len(self.wits) == 0:
            return True

        witers = []
        for wit in self.wits:
            # One witnesser doer per witness; each gets its own copy of msg.
            witer = self.klas(hab=self.hab, wit=wit)
            witers.append(witer)
            witer.msgs.append(bytearray(self.msg))  # make a copy so everyone munges their own
            self.extend([witer])

        _ = (yield self.tock)

        # Wait until every witnesser reports the message as sent.
        # NOTE(review): `count` accumulates the cumulative `sent` lengths
        # across passes of the while loop rather than being reset each pass —
        # confirm this termination condition is intended.
        total = len(witers)
        count = 0
        while count < total:
            for witer in witers:
                count += len(witer.sent)
            _ = (yield self.tock)

        self.remove(witers)

        return True
def msgDo(self, tymth=None, tock=0.0, **opts):
    """
    Returns doifiable Doist compatible generator method (doer dog) to process
    incoming message stream of .kevery

    Parameters:
        tymth is injected function wrapper closure returned by .tymen() of
            Tymist instance. Calling tymth() returns associated Tymist .tyme.
        tock is injected initial tock value
        opts is dict of injected optional additional parameters

    Usage:
        add result of doify on this method to doers list
    """
    # Delegate entirely to the parser's generator: it consumes the incoming
    # byte stream continuously.
    parser_gen = self.parser.parsator()
    yield from parser_gen
<reponame>ciresdem/noslib<filename>noslib.py #!/usr/bin/env python ### noslib.py ## ## Copyright (c) 2012, 2013, 2014, 2016, 2017, 2018 <NAME> <<EMAIL>> ## ## Permission is hereby granted, free of charge, to any person obtaining a copy ## of this software and associated documentation files (the "Software"), to deal ## in the Software without restriction, including without limitation the rights ## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies ## of the Software, and to permit persons to whom the Software is furnished to do so, ## subject to the following conditions: ## ## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. ## ## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, ## INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ## PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE ## FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ## ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ## ### Code: import os import sys import urllib2 from xml.dom import minidom import nos_bounds _version = '1.0.4' _license = """ version %s Copyright (c) 2012, 2013, 2014, 2016, 2017, 2018 <NAME> <<EMAIL>> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ %(_version) _nos_dtypes = ['BAG', 'DR', 'Smooth_Sheets', 'TIDES', 'project_sketches', 'Bottom_Samples', 'XML', 'GEODAS', 'GeoImagePDF', 'XYZ'] _nos_directories = ["B00001-B02000/", "D00001-D02000/", "F00001-F02000/", "H00001-H02000/", "H02001-H04000/", "H04001-H06000/", "H06001-H08000/", "H08001-H10000/", "H10001-H12000/", "H12001-H14000/", "L00001-L02000/", "L02001-L04000/", "T00001-T02000/", "W00001-W02000/"] _nos_extentions = {'BAG':[".bag.gz"], 'GEODAS': [".xyz.gz", ".a93.gz"], 'XYZ': [".xyz.gz", ".a93.gz"], 'DR':[".pdf"], 'Smooth_Sheets':[".pdf",".sid.gz",".tif.gz"], 'TIDES':[".pdf"], 'project_sketches':[".jpg"], 'Bottom_Samples':[".kml", ".txt.", ".pdf"], 'XML':[".xml"], 'GeoImagePDF':[".pdf"], 'ALL':[".xyz.gz",".bag.gz",".pdf",".sid.gz",".tif.gz",".jpg",".kml",".a93"]} _nos_xml_base_url = "https://www.ngdc.noaa.gov/metadata/published/NOAA/NESDIS/NGDC/MGG/NOS/" _nos_xml_end_url = "iso/xml/" _nos_bd_url = "https://data.ngdc.noaa.gov/platforms/ocean/nos/coast/" _nos_data_url = "https://www.ngdc.noaa.gov/nos/" _out_dir = os.path.dirname(os.path.realpath(__file__)) def _set_out_dir(out_dir): global _out_dir _out_dir = out_dir class nosBounds: def __init__(self, append=True, nb_file="nos_bounds.py"): self._append = append self._nb_file = nb_file self.nos_directories = _nos_directories self._reset() def _reset(self): reload(nos_bounds) if self._append: self.s_list = nos_bounds.nos_surveys else: self.s_list = [] def _reload(self): reload(nos_bounds) def _readDir(self, nosdir): nos_dir = 
def _itemInSurveys(self, survey_id):
    """Return True if survey_id already appears in any entry of
    nos_bounds.nos_surveys, else False."""
    return any(survey_id in entry for entry in nos_bounds.nos_surveys)
# output is [survey Letter, survey Number, survey NLetter]
def processSurveyID(self):
    """Split the survey ID into [letter, scaled number, suffix].

    The leading character is the survey letter, characters 1-5 are the
    survey number scaled by 1e-5, and the remainder is the suffix; the
    'IA'/'IB' suffixes are normalised to the empty string.
    """
    survey_id = self._id
    letter = survey_id[0]
    scaled_number = int(survey_id[1:6]) * 0.00001
    suffix = survey_id[6:]
    if suffix in ("IA", "IB"):
        suffix = ""
    return [letter, scaled_number, suffix]
_nos_dtypes: if _dt in item: _dturls, _dtnames = [], [] _dtlines = urllib2.urlopen(self._dir_url+_dt).readlines() for _dtline in _dtlines: for exts in _nos_extentions[_dt]: if exts in _dtline: _dtfile = _dtline[_dtline.index(exts+"\">"):_dtline.index("</a>")][len(exts)+2:] _dturls.append(self._dir_url+_dt+"/"+_dtfile) _dtnames.append(_dtfile) survey_types[_dt] = [_dturls,_dtnames] return survey_types def get_extents(self): # Extent wl, el, sl, nl = -9999, -9999, -9999, -9999 if self._valid: bounding = self._xml_doc.getElementsByTagName("gmd:EX_GeographicBoundingBox") else: bounding = [] if len(bounding) > 1: for node in bounding: wl_xgc = node.getElementsByTagName("gmd:westBoundLongitude") for i in wl_xgc: wl = i.getElementsByTagName("gco:Decimal")[0].firstChild.nodeValue el_xgc = node.getElementsByTagName("gmd:eastBoundLongitude") for i in el_xgc: el = i.getElementsByTagName("gco:Decimal")[0].firstChild.nodeValue sl_xgc = node.getElementsByTagName("gmd:southBoundLatitude") for i in sl_xgc: sl = i.getElementsByTagName("gco:Decimal")[0].firstChild.nodeValue nl_xgc = node.getElementsByTagName("gmd:northBoundLatitude") for i in nl_xgc: nl = i.getElementsByTagName("gco:Decimal")[0].firstChild.nodeValue return [wl,el,sl,nl] def get_title(self): xmlt = "None" if self._valid: xml_ident = self._xml_doc.getElementsByTagName("gmd:MD_DataIdentification") else: xml_ident = [] for t in xml_ident: xml_citation = t.getElementsByTagName("gmd:CI_Citation") xml_title = xml_citation[0].getElementsByTagName("gmd:title") xmlt = xml_title[0].getElementsByTagName("gco:CharacterString")[0].firstChild.nodeValue return xmlt def get_date(self): xmld = "Unknown" if self._valid: xml_ident = self._xml_doc.getElementsByTagName("gmd:MD_DataIdentification") else: xml_ident = [] for t in xml_ident: xml_citation = t.getElementsByTagName("gmd:CI_Citation") xml_date = xml_citation[0].getElementsByTagName("gmd:date") for n in xml_date: xmld = n.getElementsByTagName("gco:Date")[0].firstChild.nodeValue 
return xmld def get_datums(self): hdatum, vdatum = "Unknown", "Unknown" if self._valid: datums = self._xml_doc.getElementsByTagName("gmd:MD_ReferenceSystem") else: datums = [] if len(datums) >= 1: datum_strings=[] for node in datums: datum_strings.append(node.getElementsByTagName("gco:CharacterString")[0].firstChild.nodeValue) if len(datum_strings) == 1: hdatum = datum_strings[0].strip() if len(datum_strings) == 2: hdatum = datum_strings[0].strip() vdatum = datum_strings[1].strip() return [hdatum, vdatum] def get_resolution(self): # Resolution xmres = "Unknown" if self._valid: xml_resolution = self._xml_doc.getElementsByTagName("gmd:spatialResolution") else: xml_resolution = [] if len(xml_resolution) >= 1: for node in xml_resolution: if node.nodeValue: xmres = node.getElementsByTagName("gco:Integer")[0].firstChild.nodeValue return xmres def fetch(self, dtype): if self._cancel: self._fcanceled() if dtype in self._dtypes: for h,i in enumerate(self._dtypes[dtype][0]): if self._cancel: self._fcanceled() break f = urllib2.urlopen(i) #print("Fetching %s" %(self._dtypes[dtype][1][h])) outf = open(os.path.join(_out_dir,self._dtypes[dtype][1][h]), 'wb') outf.write(f.read()) f.close() outf.close() class nosLib: def __init__(self): self._verbose = False self._dtypes = _nos_dtypes self.surveys = nos_bounds.nos_surveys self._cancel = False def _fcancel(self): self._cancel = True def _fcanceled(self): self._cancel = False def dic_key_list(self, dic): key_list = [] for i in dic: key_list.append(i) return key_list def process_ns(self): ol = [] for i in self.surveys: if "_" in i[0]: ol.append(i[0].split("_")[0]) elif "." in i[0]: ol.append(i[0].split(".")[0]) return ol def _reload_bounds(self): reload(nos_bounds) def _reset(self): self.surveys = nos_bounds.nos_surveys def _set_dtypes(self, args): self._dtypes = [] for
    helper for :func:`convertFromString` that adds attributes from ``m21Attr`` to the
    appropriate elements in ``documentRoot``.

    The input is a :class:`MeiToM21Converter` with data about the file currently being processed.
    This function reads from ``theConverter.m21Attr`` and writes into
    ``theConverter.documentRoot``.

    :param theConverter: The object responsible for storing data about this import.
    :type theConverter: :class:`MeiToM21Converter`.

    **Example of ``m21Attr``**

    The ``m21Attr`` argument must be a defaultdict that returns an empty (regular) dict for
    non-existent keys. The defaultdict stores the @xml:id attribute of an element; the dict holds
    attribute names and their values that should be added to the element with the given @xml:id.

    For example, if the value of ``m21Attr['fe93129e']['tie']`` is ``'i'``, then this means the
    element with an @xml:id of ``'fe93129e'`` should have the @tie attribute set to ``'i'``.

    **This Preprocessor**

    The slur preprocessor adds all attributes from the ``m21Attr`` to the appropriate element in
    ``documentRoot``. In effect, it finds the element corresponding to each key in ``m21Attr``,
    then iterates the keys in its dict, *appending* the ``m21Attr``-specified value to any
    existing value.
    '''
    environLocal.printDebug('*** concluding pre-processing')

    # for readability, we use a single-letter variable
    c = theConverter  # pylint: disable=invalid-name

    # conclude pre-processing by adding music21-specific attributes to their respective elements
    for eachObject in c.documentRoot.iterfind('*//*'):
        # we have a defaultdict, so this "if" isn't strictly necessary; but without it, every
        # single element with an @xml:id creates a new, empty dict, which would consume a lot
        # of memory
        if eachObject.get(_XMLID) in c.m21Attr:
            for eachAttr in c.m21Attr[eachObject.get(_XMLID)]:
                eachObject.set(eachAttr, (eachObject.get(eachAttr, '')
                                          + c.m21Attr[eachObject.get(_XMLID)][eachAttr]))


# Helper Functions
# -----------------------------------------------------------------------------
def _processEmbeddedElements(
    elements: List[Element],
    mapping,
    callerTag=None,
    slurBundle=None
):
    # noinspection PyShadowingNames
    '''
    From an iterable of MEI ``elements``, use functions in the ``mapping`` to convert each
    element to its music21 object. This function was designed for use with elements that may
    contain other elements; the contained elements will be converted as appropriate.

    If an element itself has embedded elements (i.e., its converter function in ``mapping``
    returns a sequence), those elements will appear in the returned sequence in order---there
    are no hierarchic lists.

    :param elements: A list of :class:`Element` objects to convert to music21 objects.
    :type elements: iterable of :class:`~xml.etree.ElementTree.Element`
    :param mapping: A dictionary where keys are the :attr:`Element.tag` attribute and values are
        the function to call to convert that :class:`Element` to a music21 object.
    :type mapping: mapping of str to function
    :param str callerTag: The tag of the element on behalf of which this function is processing
        sub-elements (e.g., 'note' or 'staffDef'). Do not include < and >. This is used in a
        warning message on finding an unprocessed element.
    :param slurBundle: A slur bundle, as used by the other :func:`*fromElements` functions.
    :type slurBundle: :class:`music21.spanner.SlurBundle`
    :returns: A list of the music21 objects returned by the converter functions, or an empty
        list if no objects were returned.
    :rtype: sequence of :class:`~music21.base.Music21Object`

    **Examples:**

    Because there is no ``'rest'`` key in the ``mapping``, that :class:`Element` is ignored.

    >>> from xml.etree.ElementTree import Element
    >>> from music21 import *
    >>> elements = [Element('note'), Element('rest'), Element('note')]
    >>> mapping = {'note': lambda x, y: note.Note('D2')}
    >>> mei.base._processEmbeddedElements(elements, mapping, 'doctest')
    [<music21.note.Note D>, <music21.note.Note D>]

    If debugging is enabled for the previous example, this warning would be displayed:
    ``mei.base: Found an unprocessed <rest> element in a <doctest>.``

    The "beam" element holds "note" elements. All elements appear in a single level of the list:

    >>> elements = [Element('note'), Element('beam'), Element('note')]
    >>> mapping = {'note': lambda x, y: note.Note('D2'),
    ...            'beam': lambda x, y: [note.Note('E2') for _ in range(2)]}
    >>> mei.base._processEmbeddedElements(elements, mapping)
    [<music21.note.Note D>, <music21.note.Note E>, <music21.note.Note E>, <music21.note.Note D>]
    '''
    processed = []

    for eachElem in elements:
        if eachElem.tag in mapping:
            result = mapping[eachElem.tag](eachElem, slurBundle)
            # Converters may return one object or a flat sequence of objects.
            if isinstance(result, (tuple, list)):
                for eachObject in result:
                    processed.append(eachObject)
            else:
                processed.append(result)
        elif eachElem.tag not in _IGNORE_UNPROCESSED:
            environLocal.printDebug(_UNPROCESSED_SUBELEMENT.format(eachElem.tag, callerTag))

    return processed


def _timeSigFromAttrs(elem):
    '''
    From any tag with @meter.count and @meter.unit attributes, make a :class:`TimeSignature`.

    :param :class:`~xml.etree.ElementTree.Element` elem: An :class:`Element` with @meter.count
        and @meter.unit attributes.
    :returns: The corresponding time signature.
    :rtype: :class:`~music21.meter.TimeSignature`
    '''
    return meter.TimeSignature(f"{elem.get('meter.count')!s}/{elem.get('meter.unit')!s}")


def _keySigFromAttrs(elem: Element) -> Union[key.Key, key.KeySignature]:
    '''
    From any tag with (at minimum) either @key.pname or @key.sig attributes, make a
    :class:`KeySignature` or :class:`Key`, as possible.

    elem is an :class:`Element` with either the @key.pname or @key.sig attribute.

    Returns the key or key signature.
    '''
    if elem.get('key.pname') is not None:
        # @key.accid, @key.mode, @key.pname
        # noinspection PyTypeChecker
        mode = elem.get('key.mode', '')
        step = elem.get('key.pname')
        accidental = _accidentalFromAttr(elem.get('key.accid'))
        if accidental is None:
            tonic = step
        else:
            tonic = step + accidental
        return key.Key(tonic=tonic, mode=mode)
    else:
        # @key.sig, @key.mode
        # If @key.mode is null, assume it is a 'major' key (default for ks.asKey)
        ks = key.KeySignature(sharps=_sharpsFromAttr(elem.get('key.sig')))
        # noinspection PyTypeChecker
        return ks.asKey(mode=elem.get('key.mode', 'major'))


def _transpositionFromAttrs(elem):
    '''
    From any element with the @trans.diat and @trans.semi attributes, make an :class:`Interval`
    that represents the interval of transposition from written to concert pitch.

    :param :class:`~xml.etree.ElementTree.Element` elem: An :class:`Element` with the @trans.diat
        and @trans.semi attributes.
    :returns: The interval of transposition from written to concert pitch.
    :rtype: :class:`music21.interval.Interval`
    '''
    # noinspection PyTypeChecker
    transDiat = int(elem.get('trans.diat', 0))
    # noinspection PyTypeChecker
    transSemi = int(elem.get('trans.semi', 0))

    # If the difference between transSemi and transDiat is greater than five per octave...
    # noinspection SpellCheckingInspection
    if abs(transSemi - transDiat) > 5 * (abs(transSemi) // 12 + 1):
        # ... we need to add octaves to transDiat so it's the proper size. Otherwise,
        # intervalFromGenericAndChromatic() tries to create things like AAAAAAAAA5. Except it
        # actually just fails.
        # NB: we test this against transSemi because transDiat could be 0 when transSemi is a
        # multiple of 12 *either* greater or less than 0.
        if transSemi < 0:
            transDiat -= 7 * (abs(transSemi) // 12)
        elif transSemi > 0:
            transDiat += 7 * (abs(transSemi) // 12)

    # NB: MEI uses zero-based unison rather than 1-based unison, so for music21 we must make
    # every diatonic interval one greater than it was. E.g., '@trans.diat="2"' in MEI means to
    # "transpose up two diatonic steps," which music21 would rephrase as "transpose up by a
    # diatonic third."
    if transDiat < 0:
        transDiat -= 1
    elif transDiat > 0:
        transDiat += 1

    return interval.intervalFromGenericAndChromatic(interval.GenericInterval(transDiat),
                                                    interval.ChromaticInterval(transSemi))


# noinspection SpellCheckingInspection
def _barlineFromAttr(attr):
    '''
    Use :func:`_attrTranslator` to convert the value of a "left" or "right" attribute to a
    :class:`Barline` or :class:`Repeat` or occasionally a list of :class:`Repeat`. The only time
    a list is returned is when "attr" is ``'rptboth'``, in which case the end and start barlines
    are both returned.

    :param str attr: The MEI @left or @right attribute to convert to a barline.
    :returns: The barline.
    :rtype: :class:`music21.bar.Barline` or :class:`~music21.bar.Repeat` or list of them
    '''
    # NB: the MEI Specification says @left is used only for legacy-format conversions, so we'll
    # just assume it's a @right attribute. Not a huge deal if we get this wrong (I hope).
    if attr.startswith('rpt'):
        if 'rptboth' == attr:
            return _barlineFromAttr('rptend'), _barlineFromAttr('rptstart')
        elif 'rptend' == attr:
            return bar.Repeat('end', times=2)
        else:
            return bar.Repeat('start')
    else:
        return bar.Barline(_attrTranslator(attr, 'right', _BAR_ATTR_DICT))


def _tieFromAttr(attr):
    '''
    Convert a @tie attribute to the required :class:`Tie` object.

    :param str attr: The MEI @tie attribute to convert.
    :return: The relevant :class:`Tie` object.
    :rtype: :class:`music21.tie.Tie`
    '''
    # 'm' (medial) or both 't' and 'i' present means the note continues a tie.
    if 'm' in attr or ('t' in attr and 'i' in attr):
        return tie.Tie('continue')
    elif 'i' in attr:
        return tie.Tie('start')
    else:
        return tie.Tie('stop')


def addSlurs(elem, obj, slurBundle):
    '''
    If relevant, add a slur to an ``obj`` (object) that was created from an ``elem`` (element).

    :param elem: The :class:`Element` that caused creation of the ``obj``.
    :type elem: :class:`xml.etree.ElementTree.Element`
    :param obj: The musical object (:class:`Note`, :class:`Chord`, etc.) created from ``elem``,
        to which a slur might be attached.
    :type obj: :class:`music21.base.Music21Object`
    :param slurBundle: The :class:`Slur`-holding :class:`SpannerBundle` associated with the
        :class:`Stream` that holds ``obj``.
    :type slurBundle: :class:`music21.spanner.SpannerBundle`
    :returns: Whether at least one slur was added.
    :rtype: bool

    **A Note about Importing Slurs**

    Because of how the MEI format specifies slurs, the strategy required for proper import to
    music21 is not obvious. There are two ways to
# <filename>captum/attr/_core/occlusion.py
#!/usr/bin/env python3

import torch
import numpy as np

from .._utils.common import (
    _format_input,
    _format_and_verify_strides,
    _format_and_verify_sliding_window_shapes,
)
from .feature_ablation import FeatureAblation


class Occlusion(FeatureAblation):
    def __init__(self, forward_func):
        r"""
        Args:

            forward_func (callable): The forward function of the model or
                        any modification of it
        """
        FeatureAblation.__init__(self, forward_func)
        # Weight each feature's attribution by how many occlusion windows
        # covered it (enables the averaging described in attribute()).
        self.use_weights = True

    def attribute(
        self,
        inputs,
        sliding_window_shapes,
        strides=None,
        baselines=None,
        target=None,
        additional_forward_args=None,
        ablations_per_eval=1,
    ):
        r"""
        A perturbation based approach to computing attribution, involving
        replacing each contiguous rectangular region with a given baseline /
        reference, and computing the difference in output. For features located
        in multiple regions (hyperrectangles), the corresponding output
        differences are averaged to compute the attribution for that feature.

        The first patch is applied with the corner aligned with all indices 0,
        and strides are applied until the entire dimension range is covered.
        Note that this may cause the final patch applied in a direction to be
        cut-off and thus smaller than the target occlusion shape.

        More details regarding the occlusion (or grey-box / sliding window)
        method can be found in the original paper and in the DeepExplain
        implementation.
        https://arxiv.org/abs/1311.2901
        https://github.com/marcoancona/DeepExplain/blob/master/deepexplain\
        /tensorflow/methods.py#L401

        Args:

                inputs (tensor or tuple of tensors):  Input for which occlusion
                            attributions are computed. If forward_func takes a single
                            tensor as input, a single input tensor should be provided.
                            If forward_func takes multiple tensors as input, a tuple
                            of the input tensors should be provided. It is assumed
                            that for all given input tensors, dimension 0 corresponds
                            to the number of examples (aka batch size), and if
                            multiple input tensors are provided, the examples must
                            be aligned appropriately.
                sliding_window_shapes (tuple or tuple of tuples): Shape of patch
                            (hyperrectangle) to occlude each input. For a single
                            input tensor, this must be a tuple of length equal to the
                            number of dimensions of the input tensor - 1, defining
                            the dimensions of the patch. If the input tensor is 1-d,
                            this should be an empty tuple. For multiple input tensors,
                            this must be a tuple containing one tuple for each input
                            tensor defining the dimensions of the patch for that
                            input tensor, as described for the single tensor case.
                strides (int or tuple or tuple of ints or tuple of tuples, optional):
                            This defines the step by which the occlusion hyperrectangle
                            should be shifted by in each direction for each iteration.
                            For a single tensor input, this can be either a single
                            integer, which is used as the step size in each direction,
                            or a tuple of integers matching the number of dimensions
                            in the occlusion shape, defining the step size in the
                            corresponding dimension. For multiple tensor inputs, this
                            can be either a tuple of integers, one for each input
                            tensor (used for all dimensions of the corresponding
                            tensor), or a tuple of tuples, providing the stride per
                            dimension for each tensor. To ensure that all inputs are
                            covered by at least one sliding window, the stride for any
                            dimension must be <= the corresponding sliding window
                            dimension if the sliding window dimension is less than the
                            input dimension. If None is provided, a stride of 1 is
                            used for each dimension of each input tensor.
                            Default: None
                baselines (scalar, tensor, tuple of scalars or tensors, optional):
                            Baselines define reference value which replaces each
                            feature when occluded. Baselines can be provided as:

                            - a single tensor, if inputs is a single tensor, with
                              exactly the same dimensions as inputs or broadcastable
                              to match the dimensions of inputs

                            - a single scalar, if inputs is a single tensor, which will
                              be broadcasted for each input value in input tensor.

                            - a tuple of tensors or scalars, the baseline corresponding
                              to each tensor in the inputs' tuple can be:

                              - either a tensor with exactly the same dimensions as
                                inputs or broadcastable to match the dimensions of
                                inputs

                              - or a scalar, corresponding to a tensor in the inputs'
                                tuple. This scalar value is broadcasted for
                                corresponding input tensor.

                            In the cases when `baselines` is not provided, we
                            internally use zero scalar corresponding to each input
                            tensor.
                            Default: None
                target (int, tuple, tensor or list, optional):  Output indices for
                            which difference is computed (for classification cases,
                            this is usually the target class). If the network returns
                            a scalar value per example, no target index is necessary.
                            For general 2D outputs, targets can be either:

                            - a single integer or a tensor containing a single
                              integer, which is applied to all input examples

                            - a list of integers or a 1D tensor, with length matching
                              the number of examples in inputs (dim 0). Each integer
                              is applied as the target for the corresponding example.

                            For outputs with > 2 dimensions, targets can be either:

                            - A single tuple, which contains #output_dims - 1
                              elements. This target index is applied to all examples.

                            - A list of tuples with length equal to the number of
                              examples in inputs (dim 0), and each tuple containing
                              #output_dims - 1 elements. Each tuple is applied as the
                              target for the corresponding example.

                            Default: None
                additional_forward_args (any, optional): If the forward function
                            requires additional arguments other than the inputs for
                            which attributions should not be computed, this argument
                            can be provided. It must be either a single additional
                            argument of a Tensor or arbitrary (non-tuple) type or a
                            tuple containing multiple additional arguments including
                            tensors or any arbitrary python types. These arguments
                            are provided to forward_func in order following the
                            arguments in inputs. For a tensor, the first dimension
                            of the tensor must correspond to the number of examples.
                            For all other types, the given argument is used for all
                            forward evaluations. Note that attributions are not
                            computed with respect to these arguments.
                            Default: None
                ablations_per_eval (int, optional): Allows multiple occlusions
                            to be included in one batch (one call to forward_fn).
                            By default, ablations_per_eval is 1, so each occlusion
                            is processed individually. Each forward pass will contain
                            a maximum of ablations_per_eval * #examples samples.
                            For DataParallel models, each batch is split among the
                            available devices, so evaluations on each available
                            device contain at most
                            (ablations_per_eval * #examples) / num_devices samples.
                            Default: 1

        Returns:
                *tensor* or tuple of *tensors* of **attributions**:
                - **attributions** (*tensor* or tuple of *tensors*):
                            The attributions with respect to each input feature.
                            Attributions will always be the same size as the provided
                            inputs, with each value providing the attribution of the
                            corresponding input index. If a single tensor is provided
                            as inputs, a single tensor is returned. If a tuple is
                            provided for inputs, a tuple of corresponding sized
                            tensors is returned.

        Examples::

            >>> # SimpleClassifier takes a single input tensor of size Nx4x4,
            >>> # and returns an Nx3 tensor of class probabilities.
            >>> net = SimpleClassifier()
            >>> # Generating random input with size 2 x 4 x 4
            >>> input = torch.randn(2, 4, 4)
            >>> # Defining Occlusion interpreter
            >>> ablator = Occlusion(net)
            >>> # Computes occlusion attribution, ablating each 3x3 patch,
            >>> # shifting in each direction by the default of 1.
            >>> attr = ablator.attribute(input, target=1, sliding_window_shapes=(3,3))
        """
        formatted_inputs = _format_input(inputs)

        # Formatting strides
        strides = _format_and_verify_strides(strides, formatted_inputs)

        # Formatting sliding window shapes
        sliding_window_shapes = _format_and_verify_sliding_window_shapes(
            sliding_window_shapes, formatted_inputs
        )

        # Construct tensors from sliding window shapes
        sliding_window_tensors = tuple(
            torch.ones(window_shape, device=formatted_inputs[i].device)
            for i, window_shape in enumerate(sliding_window_shapes)
        )

        # Construct counts, defining number of steps to make of occlusion block in
        # each dimension.
        shift_counts = []
        for i, inp in enumerate(formatted_inputs):
            current_shape = np.subtract(inp.shape[1:], sliding_window_shapes[i])
            # Verify sliding window doesn't exceed input dimensions.
            assert (np.array(current_shape) >= 0).all(), (
                "Sliding window dimensions {} cannot exceed input dimensions"
                "{}."
            ).format(sliding_window_shapes[i], tuple(inp.shape[1:]))
            # Stride cannot be larger than sliding window for any dimension where
            # the sliding window doesn't cover the entire input.
            assert np.logical_or(
                np.array(current_shape) == 0,
                np.array(strides[i]) <= sliding_window_shapes[i],
            ).all(), (
                "Stride dimension {} cannot be larger than sliding window "
                "shape dimension
atomName1="NE2" atomName2="HE21"/> <Bond atomName1="NE2" atomName2="HE22"/> <Bond atomName1="C" atomName2="O"/> <ExternalBond atomName="N"/> <ExternalBond atomName="C"/> </Residue> <Residue name="GLU"> <Atom charge="-0.5163" name="N" type="N238"/> <Atom charge="0.2936" name="H" type="H241"/> <Atom charge="0.0397" name="CA" type="C224"/> <Atom charge="0.1105" name="HA" type="H140"/> <Atom charge="0.056" name="CB" type="C136"/> <Atom charge="-0.0173" name="HB2" type="H140"/> <Atom charge="-0.0173" name="HB3" type="H140"/> <Atom charge="0.0136" name="CG" type="C274"/> <Atom charge="-0.0425" name="HG2" type="H140"/> <Atom charge="-0.0425" name="HG3" type="H140"/> <Atom charge="0.8054" name="CD" type="C271"/> <Atom charge="-0.8188" name="OE1" type="O272"/> <Atom charge="-0.8188" name="OE2" type="O272"/> <Atom charge="0.5366" name="C" type="C235"/> <Atom charge="-0.5819" name="O" type="O236"/> <Bond atomName1="N" atomName2="H"/> <Bond atomName1="N" atomName2="CA"/> <Bond atomName1="CA" atomName2="HA"/> <Bond atomName1="CA" atomName2="CB"/> <Bond atomName1="CA" atomName2="C"/> <Bond atomName1="CB" atomName2="HB2"/> <Bond atomName1="CB" atomName2="HB3"/> <Bond atomName1="CB" atomName2="CG"/> <Bond atomName1="CG" atomName2="HG2"/> <Bond atomName1="CG" atomName2="HG3"/> <Bond atomName1="CG" atomName2="CD"/> <Bond atomName1="CD" atomName2="OE1"/> <Bond atomName1="CD" atomName2="OE2"/> <Bond atomName1="C" atomName2="O"/> <ExternalBond atomName="N"/> <ExternalBond atomName="C"/> </Residue> <Residue name="GLY"> <Atom charge="-0.4157" name="N" type="N238"/> <Atom charge="0.2719" name="H" type="H241"/> <Atom charge="-0.0252" name="CA" type="C223"/> <Atom charge="0.0698" name="HA2" type="H140"/> <Atom charge="0.0698" name="HA3" type="H140"/> <Atom charge="0.5973" name="C" type="C235"/> <Atom charge="-0.5679" name="O" type="O236"/> <Bond atomName1="N" atomName2="H"/> <Bond atomName1="N" atomName2="CA"/> <Bond atomName1="CA" atomName2="HA2"/> <Bond atomName1="CA" atomName2="HA3"/> 
<Bond atomName1="CA" atomName2="C"/> <Bond atomName1="C" atomName2="O"/> <ExternalBond atomName="N"/> <ExternalBond atomName="C"/> </Residue> <Residue name="HID"> <Atom charge="-0.4157" name="N" type="N238"/> <Atom charge="0.2719" name="H" type="H241"/> <Atom charge="0.0188" name="CA" type="C224"/> <Atom charge="0.0881" name="HA" type="H140"/> <Atom charge="-0.0462" name="CB" type="C505"/> <Atom charge="0.0402" name="HB2" type="H140"/> <Atom charge="0.0402" name="HB3" type="H140"/> <Atom charge="-0.0266" name="CG" type="C508"/> <Atom charge="-0.3811" name="ND1" type="N503"/> <Atom charge="0.3649" name="HD1" type="H504"/> <Atom charge="0.2057" name="CE1" type="C506"/> <Atom charge="0.1392" name="HE1" type="H146"/> <Atom charge="-0.5727" name="NE2" type="N511"/> <Atom charge="0.1292" name="CD2" type="C507"/> <Atom charge="0.1147" name="HD2" type="H146"/> <Atom charge="0.5973" name="C" type="C235"/> <Atom charge="-0.5679" name="O" type="O236"/> <Bond atomName1="N" atomName2="H"/> <Bond atomName1="N" atomName2="CA"/> <Bond atomName1="CA" atomName2="HA"/> <Bond atomName1="CA" atomName2="CB"/> <Bond atomName1="CA" atomName2="C"/> <Bond atomName1="CB" atomName2="HB2"/> <Bond atomName1="CB" atomName2="HB3"/> <Bond atomName1="CB" atomName2="CG"/> <Bond atomName1="CG" atomName2="ND1"/> <Bond atomName1="CG" atomName2="CD2"/> <Bond atomName1="ND1" atomName2="HD1"/> <Bond atomName1="ND1" atomName2="CE1"/> <Bond atomName1="CE1" atomName2="HE1"/> <Bond atomName1="CE1" atomName2="NE2"/> <Bond atomName1="NE2" atomName2="CD2"/> <Bond atomName1="CD2" atomName2="HD2"/> <Bond atomName1="C" atomName2="O"/> <ExternalBond atomName="N"/> <ExternalBond atomName="C"/> </Residue> <Residue name="HIE"> <Atom charge="-0.4157" name="N" type="N238"/> <Atom charge="0.2719" name="H" type="H241"/> <Atom charge="-0.0581" name="CA" type="C224"/> <Atom charge="0.136" name="HA" type="H140"/> <Atom charge="-0.0074" name="CB" type="C505"/> <Atom charge="0.0367" name="HB2" type="H140"/> <Atom 
charge="0.0367" name="HB3" type="H140"/> <Atom charge="0.1868" name="CG" type="C507"/> <Atom charge="-0.5432" name="ND1" type="N511"/> <Atom charge="0.1635" name="CE1" type="C506"/> <Atom charge="0.1435" name="HE1" type="H146"/> <Atom charge="-0.2795" name="NE2" type="N503"/> <Atom charge="0.3339" name="HE2" type="H504"/> <Atom charge="-0.2207" name="CD2" type="C508"/> <Atom charge="0.1862" name="HD2" type="H146"/> <Atom charge="0.5973" name="C" type="C235"/> <Atom charge="-0.5679" name="O" type="O236"/> <Bond atomName1="N" atomName2="H"/> <Bond atomName1="N" atomName2="CA"/> <Bond atomName1="CA" atomName2="HA"/> <Bond atomName1="CA" atomName2="CB"/> <Bond atomName1="CA" atomName2="C"/> <Bond atomName1="CB" atomName2="HB2"/> <Bond atomName1="CB" atomName2="HB3"/> <Bond atomName1="CB" atomName2="CG"/> <Bond atomName1="CG" atomName2="ND1"/> <Bond atomName1="CG" atomName2="CD2"/> <Bond atomName1="ND1" atomName2="CE1"/> <Bond atomName1="CE1" atomName2="HE1"/> <Bond atomName1="CE1" atomName2="NE2"/> <Bond atomName1="NE2" atomName2="HE2"/> <Bond atomName1="NE2" atomName2="CD2"/> <Bond atomName1="CD2" atomName2="HD2"/> <Bond atomName1="C" atomName2="O"/> <ExternalBond atomName="N"/> <ExternalBond atomName="C"/> </Residue> <Residue name="HIP"> <Atom charge="-0.3479" name="N" type="N238"/> <Atom charge="0.2747" name="H" type="H241"/> <Atom charge="-0.1354" name="CA" type="C224"/> <Atom charge="0.1212" name="HA" type="H140"/> <Atom charge="-0.0414" name="CB" type="C505"/> <Atom charge="0.081" name="HB2" type="H140"/> <Atom charge="0.081" name="HB3" type="H140"/> <Atom charge="-0.0012" name="CG" type="C510"/> <Atom charge="-0.1513" name="ND1" type="N512"/> <Atom charge="0.3866" name="HD1" type="H513"/> <Atom charge="-0.017" name="CE1" type="C509"/> <Atom charge="0.2681" name="HE1" type="H146"/> <Atom charge="-0.1718" name="NE2" type="N512"/> <Atom charge="0.3911" name="HE2" type="H513"/> <Atom charge="-0.1141" name="CD2" type="C510"/> <Atom charge="0.2317" name="HD2" 
type="H146"/> <Atom charge="0.7341" name="C" type="C235"/> <Atom charge="-0.5894" name="O" type="O236"/> <Bond atomName1="N" atomName2="H"/> <Bond atomName1="N" atomName2="CA"/> <Bond atomName1="CA" atomName2="HA"/> <Bond atomName1="CA" atomName2="CB"/> <Bond atomName1="CA" atomName2="C"/> <Bond atomName1="CB" atomName2="HB2"/> <Bond atomName1="CB" atomName2="HB3"/> <Bond atomName1="CB" atomName2="CG"/> <Bond atomName1="CG" atomName2="ND1"/> <Bond atomName1="CG" atomName2="CD2"/> <Bond atomName1="ND1" atomName2="HD1"/> <Bond atomName1="ND1" atomName2="CE1"/> <Bond atomName1="CE1" atomName2="HE1"/> <Bond atomName1="CE1" atomName2="NE2"/> <Bond atomName1="NE2" atomName2="HE2"/> <Bond atomName1="NE2" atomName2="CD2"/> <Bond atomName1="CD2" atomName2="HD2"/> <Bond atomName1="C" atomName2="O"/> <ExternalBond atomName="N"/> <ExternalBond atomName="C"/> </Residue> <Residue name="ILE"> <Atom charge="-0.4157" name="N" type="N238"/> <Atom charge="0.2719" name="H" type="H241"/> <Atom charge="-0.0597" name="CA" type="C224I"/> <Atom charge="0.0869" name="HA" type="H140"/> <Atom charge="0.1303" name="CB" type="C137"/> <Atom charge="0.0187" name="HB" type="H140"/> <Atom charge="-0.3204" name="CG2" type="C135"/> <Atom charge="0.0882" name="HG21" type="H140"/> <Atom charge="0.0882" name="HG22" type="H140"/> <Atom charge="0.0882" name="HG23" type="H140"/> <Atom charge="-0.043" name="CG1" type="C136"/> <Atom charge="0.0236" name="HG12" type="H140"/> <Atom charge="0.0236" name="HG13" type="H140"/> <Atom charge="-0.066" name="CD1" type="C135"/> <Atom charge="0.0186" name="HD11" type="H140"/> <Atom charge="0.0186" name="HD12" type="H140"/> <Atom charge="0.0186" name="HD13" type="H140"/> <Atom charge="0.5973" name="C" type="C235"/> <Atom charge="-0.5679" name="O" type="O236"/> <Bond atomName1="N" atomName2="H"/> <Bond atomName1="N" atomName2="CA"/> <Bond atomName1="CA" atomName2="HA"/> <Bond atomName1="CA" atomName2="CB"/> <Bond atomName1="CA" atomName2="C"/> <Bond atomName1="CB" 
atomName2="HB"/> <Bond atomName1="CB" atomName2="CG2"/> <Bond atomName1="CB" atomName2="CG1"/> <Bond atomName1="CG2" atomName2="HG21"/> <Bond atomName1="CG2" atomName2="HG22"/> <Bond atomName1="CG2" atomName2="HG23"/> <Bond atomName1="CG1" atomName2="HG12"/> <Bond atomName1="CG1" atomName2="HG13"/> <Bond atomName1="CG1" atomName2="CD1"/> <Bond atomName1="CD1" atomName2="HD11"/> <Bond atomName1="CD1" atomName2="HD12"/> <Bond atomName1="CD1" atomName2="HD13"/> <Bond atomName1="C" atomName2="O"/> <ExternalBond atomName="N"/> <ExternalBond atomName="C"/> </Residue> <Residue name="LEU"> <Atom charge="-0.6104" name="N" type="N238"/> <Atom charge="0.3534" name="H" type="H241"/> <Atom charge="0.1797" name="CA" type="C224"/> <Atom charge="0.0778" name="HA" type="H140"/> <Atom charge="-0.2488" name="CB" type="C136"/> <Atom charge="0.0848" name="HB2" type="H140"/> <Atom charge="0.0637" name="HB3" type="H140"/> <Atom charge="0.3273" name="CG" type="C137"/> <Atom charge="-0.0217" name="HG" type="H140"/> <Atom charge="-0.3691" name="CD1" type="C135"/> <Atom charge="0.0868" name="HD11" type="H140"/> <Atom charge="0.0868" name="HD12" type="H140"/> <Atom charge="0.0868" name="HD13" type="H140"/> <Atom charge="-0.3739" name="CD2" type="C135"/> <Atom charge="0.0939" name="HD21" type="H140"/> <Atom charge="0.0939" name="HD22" type="H140"/> <Atom charge="0.0939" name="HD23" type="H140"/> <Atom charge="0.4661" name="C" type="C235"/> <Atom charge="-0.6200" name="O" type="O236"/> <Bond atomName1="N" atomName2="H"/> <Bond atomName1="N" atomName2="CA"/> <Bond atomName1="CA" atomName2="HA"/> <Bond atomName1="CA" atomName2="CB"/> <Bond atomName1="CA" atomName2="C"/> <Bond atomName1="CB" atomName2="HB2"/> <Bond atomName1="CB" atomName2="HB3"/> <Bond atomName1="CB" atomName2="CG"/> <Bond atomName1="CG" atomName2="HG"/> <Bond atomName1="CG" atomName2="CD1"/> <Bond atomName1="CG" atomName2="CD2"/> <Bond atomName1="CD1" atomName2="HD11"/> <Bond atomName1="CD1" atomName2="HD12"/> <Bond 
atomName1="CD1" atomName2="HD13"/> <Bond atomName1="CD2" atomName2="HD21"/> <Bond atomName1="CD2" atomName2="HD22"/> <Bond atomName1="CD2" atomName2="HD23"/> <Bond atomName1="C" atomName2="O"/> <ExternalBond atomName="N"/> <ExternalBond atomName="C"/> </Residue> <Residue name="LYS"> <Atom charge="-0.3479" name="N" type="N238"/> <Atom charge="0.2747" name="H" type="H241"/> <Atom charge="-0.24" name="CA" type="C224K"/> <Atom charge="0.1426" name="HA" type="H140"/> <Atom charge="-0.0094" name="CB" type="C136"/> <Atom charge="0.0362" name="HB2" type="H140"/> <Atom charge="0.0362" name="HB3" type="H140"/> <Atom charge="0.0187" name="CG" type="C136"/> <Atom charge="0.0103" name="HG2" type="H140"/> <Atom charge="0.0103" name="HG3" type="H140"/> <Atom charge="-0.0479" name="CD" type="C136"/> <Atom charge="0.0621" name="HD2" type="H140"/> <Atom charge="0.0621" name="HD3" type="H140"/> <Atom charge="-0.0143" name="CE" type="C292"/> <Atom charge="0.1135" name="HE2" type="H140"/> <Atom charge="0.1135" name="HE3" type="H140"/> <Atom charge="-0.3854" name="NZ" type="N287"/> <Atom charge="0.34" name="HZ1" type="H290"/> <Atom charge="0.34" name="HZ2" type="H290"/> <Atom charge="0.34" name="HZ3" type="H290"/> <Atom charge="0.7341" name="C" type="C235"/> <Atom charge="-0.5894" name="O" type="O236"/> <Bond atomName1="N" atomName2="H"/> <Bond atomName1="N" atomName2="CA"/> <Bond atomName1="CA" atomName2="HA"/> <Bond atomName1="CA" atomName2="CB"/> <Bond atomName1="CA" atomName2="C"/> <Bond atomName1="CB" atomName2="HB2"/> <Bond atomName1="CB" atomName2="HB3"/> <Bond atomName1="CB" atomName2="CG"/> <Bond atomName1="CG" atomName2="HG2"/> <Bond atomName1="CG" atomName2="HG3"/> <Bond atomName1="CG" atomName2="CD"/> <Bond atomName1="CD" atomName2="HD2"/> <Bond atomName1="CD" atomName2="HD3"/> <Bond atomName1="CD" atomName2="CE"/> <Bond atomName1="CE" atomName2="HE2"/> <Bond atomName1="CE" atomName2="HE3"/> <Bond atomName1="CE" atomName2="NZ"/> <Bond atomName1="NZ" atomName2="HZ1"/> <Bond 
atomName1="NZ" atomName2="HZ2"/> <Bond atomName1="NZ" atomName2="HZ3"/> <Bond atomName1="C" atomName2="O"/> <ExternalBond atomName="N"/> <ExternalBond atomName="C"/> </Residue> <Residue name="MET"> <Atom charge="-0.4157" name="N" type="N238"/> <Atom charge="0.2719" name="H" type="H241"/> <Atom charge="-0.0237" name="CA" type="C224"/> <Atom charge="0.088" name="HA" type="H140"/> <Atom charge="0.0342" name="CB" type="C136"/> <Atom charge="0.0241" name="HB2" type="H140"/> <Atom charge="0.0241" name="HB3" type="H140"/> <Atom charge="0.0018" name="CG" type="C210"/> <Atom charge="0.044" name="HG2" type="H140"/> <Atom charge="0.044" name="HG3" type="H140"/> <Atom charge="-0.2737" name="SD" type="S202"/> <Atom charge="-0.0536" name="CE" type="C209"/> <Atom charge="0.0684" name="HE1" type="H140"/> <Atom charge="0.0684" name="HE2" type="H140"/> <Atom charge="0.0684" name="HE3" type="H140"/> <Atom charge="0.5973" name="C" type="C235"/> <Atom charge="-0.5679" name="O" type="O236"/> <Bond atomName1="N" atomName2="H"/> <Bond atomName1="N" atomName2="CA"/> <Bond atomName1="CA" atomName2="HA"/> <Bond atomName1="CA" atomName2="CB"/> <Bond atomName1="CA" atomName2="C"/> <Bond atomName1="CB" atomName2="HB2"/> <Bond atomName1="CB" atomName2="HB3"/> <Bond atomName1="CB" atomName2="CG"/> <Bond atomName1="CG" atomName2="HG2"/> <Bond atomName1="CG" atomName2="HG3"/> <Bond atomName1="CG" atomName2="SD"/> <Bond atomName1="SD" atomName2="CE"/> <Bond atomName1="CE" atomName2="HE1"/> <Bond atomName1="CE" atomName2="HE2"/> <Bond atomName1="CE" atomName2="HE3"/> <Bond atomName1="C" atomName2="O"/> <ExternalBond atomName="N"/> <ExternalBond atomName="C"/> </Residue> <Residue name="PHE"> <Atom charge="-0.4157" name="N" type="N238"/> <Atom charge="0.2719" name="H" type="H241"/> <Atom charge="-0.0024" name="CA" type="C224"/> <Atom charge="0.0978" name="HA" type="H140"/> <Atom charge="-0.0343" name="CB" type="C149"/> <Atom charge="0.0295" name="HB2" type="H140"/> <Atom charge="0.0295" name="HB3" 
type="H140"/> <Atom charge="0.0118" name="CG" type="C145"/> <Atom charge="-0.1256" name="CD1" type="C145"/> <Atom charge="0.133" name="HD1" type="H146"/> <Atom charge="-0.1704" name="CE1" type="C145"/> <Atom charge="0.143" name="HE1" type="H146"/> <Atom charge="-0.1072" name="CZ" type="C145"/> <Atom charge="0.1297" name="HZ" type="H146"/> <Atom charge="-0.1704" name="CE2" type="C145"/> <Atom charge="0.143" name="HE2" type="H146"/> <Atom charge="-0.1256" name="CD2" type="C145"/> <Atom charge="0.133" name="HD2" type="H146"/> <Atom charge="0.5973" name="C" type="C235"/> <Atom charge="-0.5679" name="O" type="O236"/> <Bond atomName1="N" atomName2="H"/> <Bond atomName1="N" atomName2="CA"/> <Bond atomName1="CA" atomName2="HA"/> <Bond atomName1="CA" atomName2="CB"/> <Bond atomName1="CA" atomName2="C"/> <Bond atomName1="CB" atomName2="HB2"/> <Bond atomName1="CB" atomName2="HB3"/> <Bond atomName1="CB" atomName2="CG"/> <Bond atomName1="CG" atomName2="CD1"/> <Bond atomName1="CG" atomName2="CD2"/> <Bond atomName1="CD1" atomName2="HD1"/> <Bond atomName1="CD1" atomName2="CE1"/> <Bond atomName1="CE1" atomName2="HE1"/> <Bond atomName1="CE1" atomName2="CZ"/> <Bond atomName1="CZ" atomName2="HZ"/> <Bond atomName1="CZ" atomName2="CE2"/> <Bond atomName1="CE2" atomName2="HE2"/> <Bond atomName1="CE2" atomName2="CD2"/> <Bond atomName1="CD2" atomName2="HD2"/> <Bond atomName1="C" atomName2="O"/> <ExternalBond atomName="N"/> <ExternalBond atomName="C"/> </Residue> <Residue name="PRO"> <Atom charge="-0.2548" name="N" type="N239"/> <Atom charge="0.0192" name="CD" type="C245"/> <Atom charge="0.0391" name="HD2" type="H140"/> <Atom charge="0.0391" name="HD3" type="H140"/>
# NOTE(review): this chunk opened mid-way through a truncated error-reporting
# helper (an if/else printing "A configuration error has occurred!" guidance
# and calling sys.exit(0)).  Its enclosing definition is not visible in this
# chunk and cannot be reconstructed without guessing; restore it from the
# complete original module.


# Establish function to request login to UCS FI Device Console
def _request_ucs_fi_device_console_login(
        ucs_fi_device_console_ip,
        ucs_fi_device_console_username,
        ucs_fi_device_console_password
        ):
    """Request an HTTP login to the Device Console of a UCS Fabric
    Interconnect under Intersight Managed Mode (IMM).

    Args:
        ucs_fi_device_console_ip (str): The IP address of a UCS Fabric
            Interconnect under Intersight Managed Mode (IMM).
        ucs_fi_device_console_username (str): The admin username of the FI.
        ucs_fi_device_console_password (str): The admin password of the FI.

    Returns:
        A requests.Response for the Device Console login request, or None
        if the request itself raised (the error is printed, not re-raised).
    """
    # Login to UCS FI Device Console
    ucs_fi_device_console_headers = {
        "Content-Type": "application/json"
        }
    ucs_fi_device_console_url = f"https://{ucs_fi_device_console_ip}/Login"
    ucs_fi_device_console_post_body = {
        "User": ucs_fi_device_console_username,
        # FIX: the source contained a redacted "<PASSWORD>" placeholder here
        # (invalid Python); send the password argument instead.
        "Password": ucs_fi_device_console_password
        }
    try:
        ucs_fi_device_console_login_request = requests.post(
            ucs_fi_device_console_url,
            headers=ucs_fi_device_console_headers,
            data=json.dumps(ucs_fi_device_console_post_body),
            # NOTE(review): TLS verification disabled as in the original
            # (FI device consoles typically use self-signed certificates);
            # confirm this is acceptable for the target environment.
            verify=False
            )
        return ucs_fi_device_console_login_request
    except Exception as exception_message:
        print("\nA configuration error has occurred!\n")
        print(f"Unable to login to the UCS FI Device Console for "
              f"{ucs_fi_device_console_ip}.\n")
        print("Exception Message: ")
        print(exception_message)


# Establish function to request the UCS IMM FI Device Connector Claim Code
def _request_ucs_fi_device_connector_claim_code(
        ucs_fi_device_console_ip,
        ucs_fi_device_console_login
        ):
    """Request the Device Connector Claim Code of a UCS Fabric Interconnect
    under Intersight Managed Mode (IMM).

    FIX: the original docstring opened with four quotes (\"\"\"\") — a typo.

    Args:
        ucs_fi_device_console_ip (str): The IP address of the FI.
        ucs_fi_device_console_login (Response): A Response instance from a
            Device Console login request (its cookies carry the session).

    Returns:
        A requests.Response for the Claim Code request, or None on error.
    """
    # Obtain Claim Code
    # NOTE(review): the original sends "Accept-Language: application/json";
    # this looks like it was meant to be "Accept" — preserved as-is, verify.
    ucs_fi_device_connector_headers = {
        "Accept-Language": "application/json"
        }
    ucs_fi_device_connector_claim_code_url = (
        f"https://{ucs_fi_device_console_ip}/connector/SecurityTokens")
    try:
        ucs_fi_device_connector_claim_code_request = requests.get(
            ucs_fi_device_connector_claim_code_url,
            cookies=ucs_fi_device_console_login.cookies,
            headers=ucs_fi_device_connector_headers,
            verify=False
            )
        return ucs_fi_device_connector_claim_code_request
    except Exception as exception_message:
        print("\nA configuration error has occurred!\n")
        print("Unable to request the Device Connector Claim Code "
              f"of {ucs_fi_device_console_ip}.\n")
        print("Exception Message: ")
        print(exception_message)


# Establish function to request a refresh of the UCS IMM FI Device Connector
def _request_ucs_fi_device_connector_refresh(
        ucs_fi_device_console_ip,
        ucs_fi_device_console_login
        ):
    """Request a refresh of the Device Connector of a UCS Fabric
    Interconnect under Intersight Managed Mode (IMM).

    Args:
        ucs_fi_device_console_ip (str): The IP address of the FI.
        ucs_fi_device_console_login (Response): A Response instance from a
            Device Console login request (its cookies carry the session).

    Returns:
        A requests.Response for the refresh request, or None on error.
    """
    # Refresh the Device Connector
    ucs_fi_device_connector_headers = {
        "Content-Type": "application/json"
        }
    ucs_fi_device_connector_refresh_url = (
        f"https://{ucs_fi_device_console_ip}/connector/Connect")
    ucs_fi_device_connector_post_body = {}
    try:
        ucs_fi_device_connector_refresh_request = requests.post(
            ucs_fi_device_connector_refresh_url,
            cookies=ucs_fi_device_console_login.cookies,
            headers=ucs_fi_device_connector_headers,
            data=json.dumps(ucs_fi_device_connector_post_body),
            verify=False
            )
        return ucs_fi_device_connector_refresh_request
    except Exception as exception_message:
        print("\nA configuration error has occurred!\n")
        print("Unable to request the Device Connector refresh "
              f"of {ucs_fi_device_console_ip}.\n")
        print("Exception Message: ")
        print(exception_message)


# Establish function to request UCS IMM FI Device Connector Device ID
def _request_ucs_fi_device_connector_device_id(
        ucs_fi_device_console_ip,
        ucs_fi_device_console_login
        ):
    """Request the Device ID of a UCS Fabric Interconnect under Intersight
    Managed Mode (IMM).

    Args:
        ucs_fi_device_console_ip (str): The IP address of the FI.
        ucs_fi_device_console_login (Response): A Response instance from a
            Device Console login request (its cookies carry the session).

    Returns:
        A requests.Response for the Device ID request, or None on error.
    """
    # Obtain Device ID
    ucs_fi_device_connector_headers = {
        "Accept-Language": "application/json"
        }
    ucs_fi_device_connector_device_id_url = (
        f"https://{ucs_fi_device_console_ip}/connector/DeviceIdentifiers")
    try:
        ucs_fi_device_connector_device_id_request = requests.get(
            ucs_fi_device_connector_device_id_url,
            cookies=ucs_fi_device_console_login.cookies,
            headers=ucs_fi_device_connector_headers,
            verify=False
            )
        return ucs_fi_device_connector_device_id_request
    except Exception as exception_message:
        print("\nA configuration error has occurred!\n")
        print("Unable to obtain the Device ID for the Device Connector "
              f"of {ucs_fi_device_console_ip}.\n")
        print("Exception Message: ")
        print(exception_message)


# Establish function to request UCS IMM FI Device Connector system information
def _request_ucs_fi_device_connector_system_info(
        ucs_fi_device_console_ip,
        ucs_fi_device_console_login
        ):
    """Request the system information of a UCS Fabric Interconnect under
    Intersight Managed Mode (IMM).

    Args:
        ucs_fi_device_console_ip (str): The IP address of the FI.
        ucs_fi_device_console_login (Response): A Response instance from a
            Device Console login request (its cookies carry the session).

    Returns:
        A requests.Response for the system information request, or None on
        error.
    """
    # Obtain system information
    ucs_fi_device_connector_headers = {
        "Accept-Language": "application/json"
        }
    ucs_fi_device_connector_system_info_url = (
        f"https://{ucs_fi_device_console_ip}/connector/Systems")
    try:
        ucs_fi_device_connector_system_info_request = requests.get(
            ucs_fi_device_connector_system_info_url,
            cookies=ucs_fi_device_console_login.cookies,
            headers=ucs_fi_device_connector_headers,
            verify=False
            )
        return ucs_fi_device_connector_system_info_request
    except Exception as exception_message:
        print("\nA configuration error has occurred!\n")
        print("Unable to obtain the system information for the Device "
              f"Connector of {ucs_fi_device_console_ip}.\n")
        print("Exception Message: ")
        print(exception_message)


# Establish function to login to UCS FI Device Console
def obtain_ucs_fi_device_console_login_cookie(
        ucs_fi_device_console_ip,
        ucs_fi_device_console_username,
        ucs_fi_device_console_password
        ):
    """Login to the Device Console of a UCS Fabric Interconnect under
    Intersight Managed Mode (IMM) and return the session cookies.

    Args:
        ucs_fi_device_console_ip (str): The IP address of the FI.
        ucs_fi_device_console_username (str): The admin username of the FI.
        ucs_fi_device_console_password (str): The admin password of the FI.

    Returns:
        A CookieJar containing cookies for the established login session.
        Per the original documentation, the login cookies are valid for 30
        minutes.  Returns None on failure (errors are printed).
    """
    try:
        # Login to UCS FI Device Console
        print("\nAttempting login to the UCS FI Device Console for "
              f"{ucs_fi_device_console_ip}...")
        ucs_fi_device_console_login = _request_ucs_fi_device_console_login(
            ucs_fi_device_console_ip,
            ucs_fi_device_console_username,
            ucs_fi_device_console_password
            )
        if ucs_fi_device_console_login.status_code == 200:
            print("Login to the UCS FI Device Console for "
                  f"{ucs_fi_device_console_ip} was successful.\n")
            print("The login cookie for the Device Console of "
                  f"{ucs_fi_device_console_ip} has been retrieved.")
            return ucs_fi_device_console_login.cookies
        else:
            print("\nA configuration error has occurred!\n")
            print("Unable to login to the UCS FI Device Console for "
                  f"{ucs_fi_device_console_ip}.\n")
            print("Exception Message: ")
            print(ucs_fi_device_console_login.json())
    except Exception:
        print("\nA configuration error has occurred!\n")
        print(f"Unable to login to the UCS FI Device Console for "
              f"{ucs_fi_device_console_ip}.\n")
        print("Exception Message: ")
        traceback.print_exc()


# NOTE(review): the source chunk was truncated part-way through
# obtain_ucs_fi_device_connector_device_id (after a successful-login print);
# the remainder of that function is not visible here and must be restored
# from the complete original module rather than reconstructed by guesswork.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""KvV2 methods module."""
from hvac import exceptions, utils
from hvac.api.vault_api_base import VaultApiBase

DEFAULT_MOUNT_POINT = 'secret'


class KvV2(VaultApiBase):
    """KV Secrets Engine - Version 2 (API).

    Reference: https://www.vaultproject.io/api/secret/kv/kv-v2.html
    """

    def configure(self, max_versions=10, cas_required=None, mount_point=DEFAULT_MOUNT_POINT):
        """Configure backend level settings that are applied to every key in the key-value store.

        Supported methods:
            POST: /{mount_point}/config. Produces: 204 (empty body)

        :param max_versions: The number of versions to keep per key. This value applies to all keys, but a key's
            metadata setting can overwrite this value. Once a key has more than the configured allowed versions the
            oldest version will be permanently deleted. Defaults to 10.
        :type max_versions: int
        :param cas_required: If true all keys will require the cas parameter to be set on all write requests.
        :type cas_required: bool
        :param mount_point: The "path" the secret engine was mounted on.
        :type mount_point: str | unicode
        :return: The response of the request.
        :rtype: requests.Response
        """
        params = {
            'max_versions': max_versions,
        }
        # cas_required is optional; only send it when the caller set it.
        if cas_required is not None:
            params['cas_required'] = cas_required
        api_path = utils.format_url('/v1/{mount_point}/config', mount_point=mount_point)
        return self._adapter.post(
            url=api_path,
            json=params,
        )

    def read_configuration(self, mount_point=DEFAULT_MOUNT_POINT):
        """Read the KV Version 2 configuration.

        Supported methods:
            GET: /{mount_point}/config. Produces: 200 application/json

        (FIX: the original docstring listed the path as /auth/{mount_point}/config,
        which does not match the request actually issued below.)

        :param mount_point: The "path" the secret engine was mounted on.
        :type mount_point: str | unicode
        :return: The JSON response of the request.
        :rtype: dict
        """
        api_path = utils.format_url(
            '/v1/{mount_point}/config',
            mount_point=mount_point,
        )
        response = self._adapter.get(url=api_path)
        return response.json()

    def read_secret_version(self, path, version=None, mount_point=DEFAULT_MOUNT_POINT):
        """Retrieve the secret at the specified location.

        Supported methods:
            GET: /{mount_point}/data/{path}. Produces: 200 application/json

        :param path: Specifies the path of the secret to read. This is specified as part of the URL.
        :type path: str | unicode
        :param version: Specifies the version to return. If not set the latest version is returned.
        :type version: int
        :param mount_point: The "path" the secret engine was mounted on.
        :type mount_point: str | unicode
        :return: The JSON response of the request.
        :rtype: dict
        """
        params = {}
        if version is not None:
            params['version'] = version
        api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path)
        response = self._adapter.get(
            url=api_path,
            params=params,
        )
        return response.json()

    def create_or_update_secret(self, path, secret, cas=None, mount_point=DEFAULT_MOUNT_POINT):
        """Create a new version of a secret at the specified location.

        If the value does not yet exist, the calling token must have an ACL policy granting the create capability.
        If the value already exists, the calling token must have an ACL policy granting the update capability.

        Supported methods:
            POST: /{mount_point}/data/{path}. Produces: 200 application/json

        :param path: Path
        :type path: str | unicode
        :param cas: Set the "cas" value to use a Check-And-Set operation. If not set the write will be allowed. If set
            to 0 a write will only be allowed if the key doesn't exist. If the index is non-zero the write will only be
            allowed if the key's current version matches the version specified in the cas parameter.
        :type cas: int
        :param secret: The contents of the "secret" dict will be stored and returned on read.
        :type secret: dict
        :param mount_point: The "path" the secret engine was mounted on.
        :type mount_point: str | unicode
        :return: The JSON response of the request.
        :rtype: dict
        """
        params = {
            'options': {},
            'data': secret,
        }
        if cas is not None:
            params['options']['cas'] = cas
        api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path)
        response = self._adapter.post(
            url=api_path,
            json=params,
        )
        return response.json()

    def patch(self, path, secret, mount_point=DEFAULT_MOUNT_POINT):
        """Set or update data in the KV store without overwriting.

        :param path: Path
        :type path: str | unicode
        :param secret: The contents of the "secret" dict will be stored and returned on read.
        :type secret: dict
        :param mount_point: The "path" the secret engine was mounted on.
        :type mount_point: str | unicode
        :return: The JSON response of the create_or_update_secret request.
        :rtype: dict
        """
        # First, do a read to obtain the current data and version.
        try:
            current_secret_version = self.read_secret_version(
                path=path,
                mount_point=mount_point,
            )
        except exceptions.InvalidPath:
            raise exceptions.InvalidPath(
                'No value found at "{path}"; patch only works on existing data.'.format(path=path)
            )

        # Update existing secret dict.
        patched_secret = current_secret_version['data']['data']
        patched_secret.update(secret)

        # Write back updated secret, using cas to guard against a concurrent
        # write between our read and this write.
        return self.create_or_update_secret(
            path=path,
            cas=current_secret_version['data']['metadata']['version'],
            secret=patched_secret,
            mount_point=mount_point,
        )

    def delete_latest_version_of_secret(self, path, mount_point=DEFAULT_MOUNT_POINT):
        """Issue a soft delete of the secret's latest version at the specified location.

        This marks the version as deleted and will stop it from being returned from reads, but the underlying data
        will not be removed. A delete can be undone using the undelete path.

        Supported methods:
            DELETE: /{mount_point}/data/{path}. Produces: 204 (empty body)

        :param path: Specifies the path of the secret to delete. This is specified as part of the URL.
        :type path: str | unicode
        :param mount_point: The "path" the secret engine was mounted on.
        :type mount_point: str | unicode
        :return: The response of the request.
        :rtype: requests.Response
        """
        api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path)
        return self._adapter.delete(
            url=api_path,
        )

    def delete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT):
        """Issue a soft delete of the specified versions of the secret.

        This marks the versions as deleted and will stop them from being returned from reads, but the underlying data
        will not be removed. A delete can be undone using the undelete path.

        Supported methods:
            POST: /{mount_point}/delete/{path}. Produces: 204 (empty body)

        :param path: Specifies the path of the secret to delete. This is specified as part of the URL.
        :type path: str | unicode
        :param versions: The versions to be deleted. The versioned data will not be deleted, but it will no longer be
            returned in normal get requests.
        :type versions: list of int
        :param mount_point: The "path" the secret engine was mounted on.
        :type mount_point: str | unicode
        :return: The response of the request.
        :rtype: requests.Response
        """
        # FIX (doc only): the original documented versions as ":type versions: int"
        # although the validation below requires a non-empty list.
        if not isinstance(versions, list) or len(versions) == 0:
            error_msg = 'argument to "versions" must be a list containing one or more integers, "{versions}" provided.'.format(
                versions=versions
            )
            raise exceptions.ParamValidationError(error_msg)
        params = {
            'versions': versions,
        }
        api_path = utils.format_url('/v1/{mount_point}/delete/{path}', mount_point=mount_point, path=path)
        return self._adapter.post(
            url=api_path,
            json=params,
        )

    def undelete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT):
        """Undelete the data for the provided version and path in the key-value store.

        This restores the data, allowing it to be returned on get requests.

        Supported methods:
            POST: /{mount_point}/undelete/{path}. Produces: 204 (empty body)

        :param path: Specifies the path of the secret to undelete. This is specified as part of the URL.
        :type path: str | unicode
        :param versions: The versions to undelete. The versions will be restored and their data will be returned on
            normal get requests.
        :type versions: list of int
        :param mount_point: The "path" the secret engine was mounted on.
        :type mount_point: str | unicode
        :return: The response of the request.
        :rtype: requests.Response
        """
        if not isinstance(versions, list) or len(versions) == 0:
            error_msg = 'argument to "versions" must be a list containing one or more integers, "{versions}" provided.'.format(
                versions=versions
            )
            raise exceptions.ParamValidationError(error_msg)
        params = {
            'versions': versions,
        }
        api_path = utils.format_url('/v1/{mount_point}/undelete/{path}', mount_point=mount_point, path=path)
        return self._adapter.post(
            url=api_path,
            json=params,
        )

    def destroy_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT):
        """Permanently remove the specified version data and numbers for the provided path from the key-value store.

        Supported methods:
            POST: /{mount_point}/destroy/{path}. Produces: 204 (empty body)

        :param path: Specifies the path of the secret to destroy. This is specified as part of the URL.
        :type path: str | unicode
        :param versions: The versions to destroy. Their data will be permanently deleted.
        :type versions: list of int
        :param mount_point: The "path" the secret engine was mounted on.
        :type mount_point: str | unicode
        :return: The response of the request.
        :rtype: requests.Response
        """
        if not isinstance(versions, list) or len(versions) == 0:
            error_msg = 'argument to "versions" must be a list containing one or more integers, "{versions}" provided.'.format(
                versions=versions
            )
            raise exceptions.ParamValidationError(error_msg)
        params = {
            'versions': versions,
        }
        api_path = utils.format_url('/v1/{mount_point}/destroy/{path}', mount_point=mount_point, path=path)
        return self._adapter.post(
            url=api_path,
            json=params,
        )

    # NOTE(review): the source chunk was truncated part-way through the
    # docstring of list_secrets; that method is not visible here and must be
    # restored from the complete original module rather than reconstructed.
import numpy as np
from typing import Any, Callable, List, Optional, Union
from alibi_detect.cd.base_online import BaseUniDriftOnline
from alibi_detect.utils.misc import quantile
import numba as nb
from tqdm import tqdm
import warnings


class CVMDriftOnline(BaseUniDriftOnline):
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            ert: float,
            window_sizes: List[int],
            preprocess_fn: Optional[Callable] = None,
            n_bootstraps: int = 10000,
            batch_size: int = 64,
            n_features: Optional[int] = None,
            verbose: bool = True,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Online Cramer-von Mises (CVM) data drift detector using preconfigured thresholds, which tests for
        any change in the distribution of continuous univariate data. This detector is an adaption of that
        proposed by :cite:t:`Ross2012a`.

        For multivariate data, the detector makes a correction similar to the Bonferroni correction used for
        the offline detector. Given :math:`d` features, the detector configures thresholds by targeting the
        :math:`1-\\beta` quantile of test statistics over the simulated streams, where
        :math:`\\beta = 1 - (1-(1/ERT))^{(1/d)}`. For the univariate case, this simplifies to
        :math:`\\beta = 1/ERT`. At prediction time, drift is flagged if the test statistic of any feature
        stream exceed the thresholds.

        Note
        ----
        In the multivariate case, for the ERT to be accurately targeted the feature streams must be independent.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        ert
            The expected run-time (ERT) in the absence of drift. For the univariate detectors, the ERT is defined
            as the expected run-time after the smallest window is full i.e. the run-time from t=min(windows_sizes).
        window_sizes
            window sizes for the sliding test-windows used to compute the test-statistic.
            Smaller windows focus on responding quickly to severe drift, larger windows focus on
            ability to detect slight drift.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        n_bootstraps
            The number of bootstrap simulations used to configure the thresholds. The larger this is the
            more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
            larger than the ERT.
        batch_size
            The maximum number of bootstrap simulations to compute in each batch when configuring thresholds.
            A smaller batch size reduces memory requirements, but can result in a longer configuration run time.
        n_features
            Number of features used in the statistical test. No need to pass it if no preprocessing takes place.
            In case of a preprocessing step, this can also be inferred automatically but could be more
            expensive to compute.
        verbose
            Whether or not to print progress during configuration.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(
            x_ref=x_ref,
            ert=ert,
            window_sizes=window_sizes,
            preprocess_fn=preprocess_fn,
            n_bootstraps=n_bootstraps,
            n_features=n_features,
            verbose=verbose,
            input_shape=input_shape,
            data_type=data_type
        )
        # A batch_size of None means "one batch for all bootstraps".
        self.batch_size = n_bootstraps if batch_size is None else batch_size

        # Configure thresholds and initialise detector
        self._initialise()
        self._configure_thresholds()

    def _configure_ref(self) -> None:
        # Empirical CDF of the reference set evaluated at each reference point,
        # per feature (an n x n_features array of rank fractions).
        ids_ref_ref = self.x_ref[None, :, :] >= self.x_ref[:, None, :]
        self.ref_cdf_ref = np.sum(ids_ref_ref, axis=0) / self.n

    def _configure_thresholds(self) -> None:
        """
        Private method to simulate trajectories of the Cramer-von Mises statistic for the desired reference set
        size and window sizes under the null distribution, where both the reference set and deployment stream
        follow the same distribution. It then uses these simulated trajectories to estimate thresholds.

        As the test statistics are rank based and independent of the underlying distribution, we may use any
        continuous distribution -- we use Gaussian.

        The thresholds should stop changing after t=(2*max-window-size - 1) and therefore we need only simulate
        trajectories and estimate thresholds up to this point.
        """
        if self.verbose:
            print("Using %d bootstrap simulations to configure thresholds..." % self.n_bootstraps)

        # Assuming independent features, calibrate to beta = 1 - (1-FPR)^(1/n_features)
        beta = 1 - (1 - self.fpr) ** (1 / self.n_features)

        # Compute test statistic at each t_max number of t's, for each of the n_bootstrap number of streams
        # Only need to simulate streams for a single feature here.
        t_max = 2 * self.max_ws - 1
        stats = self._simulate_streams(t_max)

        # At each t for each stream, find max stats. over window sizes
        with warnings.catch_warnings():
            warnings.filterwarnings(action='ignore', message='All-NaN slice encountered')
            max_stats = np.nanmax(stats, -1)

        # Now loop through each t and find threshold (at each t) that satisfies eqn. (2) in Ross et al.
        thresholds = np.full((t_max, 1), np.nan)
        for t in range(np.min(self.window_sizes) - 1, t_max):
            # Compute (1-beta) quantile of max_stats at a given t, over all streams
            threshold = quantile(max_stats[:, t], 1 - beta)
            # Remove streams for which a change point has already been detected
            max_stats = max_stats[max_stats[:, t] <= threshold]
            thresholds[t, 0] = threshold

        self.thresholds = thresholds

    def _simulate_streams(self, t_max: int) -> np.ndarray:
        """
        Private method to simulate streams. _ids_to_stats is a decorated function that is vectorised
        over the parallel streams. Not sufficient just to write a normal vectorised numpy implementation as this
        can lead to OOM errors (when trying to store (n+t_max) x (n+t_max) x n_bootstraps matrices of floats).
        However, we will store the boolean matrix of this size as it faster to compute this way (and 64x smaller).

        To further reduce memory requirements, _ids_to_stats can be called for batches of streams, so that
        the ids array is of shape batch_size x (n+t_max) x (n+t_max).
        """
        n_windows = len(self.window_sizes)
        stats = np.zeros((self.n_bootstraps, t_max, n_windows))

        n_batches = int(np.ceil(self.n_bootstraps / self.batch_size))
        idxs = np.array_split(np.arange(self.n_bootstraps), n_batches)
        batches = enumerate(tqdm(idxs, "Computing thresholds over %d batches" % n_batches)) if self.verbose \
            else enumerate(idxs)
        for b, idx in batches:
            xs = np.random.randn(len(idx), self.n + t_max)
            # Boolean comparison matrix is far cheaper than storing ranks as floats.
            ids = xs[:, None, :] >= xs[:, :, None]
            stats[idx, :, :] = _ids_to_stats(ids[:, :self.n, :], ids[:, self.n:, :], np.asarray(self.window_sizes))

        # Remove stats prior to windows being full
        for k, ws in enumerate(self.window_sizes):
            stats[:, :ws - 1, k] = np.nan
        return stats

    def _update_state(self, x_t: np.ndarray):
        self.t += 1
        if self.t == 1:
            # Initialise stream
            self.xs = x_t
            self.ids_ref_wins = (x_t >= self.x_ref)[:, None, :]
            self.ids_wins_ref = (x_t <= self.x_ref)[None, :, :]
            self.ids_wins_wins = np.full((1, 1, self.n_features), 1)
        else:
            # Update stream; only the trailing (max_ws - 1) comparisons are
            # needed for the largest window, so older entries are dropped.
            self.xs = np.concatenate([self.xs, x_t])
            self.ids_ref_wins = np.concatenate(
                [self.ids_ref_wins[:, -(self.max_ws - 1):, :], (x_t >= self.x_ref)[:, None, :]], 1
            )
            self.ids_wins_ref = np.concatenate(
                [self.ids_wins_ref[-(self.max_ws - 1):, :, :], (x_t <= self.x_ref)[None, :, :]], 0
            )
            self.ids_wins_wins = np.concatenate(
                [self.ids_wins_wins[-(self.max_ws - 1):, -(self.max_ws - 1):, :],
                 (x_t >= self.xs[-self.max_ws:-1, :])[:, None, :]], 1
            )
            self.ids_wins_wins = np.concatenate(
                [self.ids_wins_wins, (x_t <= self.xs[-self.max_ws:, :])[None, :, :]], 0
            )

    def score(self, x_t: Union[np.ndarray, Any]) -> np.ndarray:
        """
        Compute the test-statistic (CVM) between the reference window(s) and test window.
        If a given test-window is not yet full then a test-statistic of np.nan is returned for that window.

        Parameters
        ----------
        x_t
            A single instance.

        Returns
        -------
        Estimated CVM test statistics between reference window and test window(s).
        """
        x_t = super()._preprocess_xt(x_t)
        self._update_state(x_t)

        stats = np.zeros((len(self.window_sizes), self.n_features), dtype=np.float32)
        for k, ws in enumerate(self.window_sizes):
            if self.t >= ws:
                ref_cdf_win = np.sum(self.ids_ref_wins[:, -ws:], axis=0) / self.n
                win_cdf_ref = np.sum(self.ids_wins_ref[-ws:], axis=0) / ws
                win_cdf_win = np.sum(self.ids_wins_wins[-ws:, -ws:], axis=0) / ws
                ref_cdf_diffs = self.ref_cdf_ref - win_cdf_ref
                win_cdf_diffs = ref_cdf_win - win_cdf_win
                sum_diffs_2 = np.sum(ref_cdf_diffs * ref_cdf_diffs, axis=0) \
                    + np.sum(win_cdf_diffs * win_cdf_diffs, axis=0)
                stats[k, :] = _normalise_stats(sum_diffs_2, self.n, ws)
            else:
                stats[k, :] = np.nan
        return stats

    def _check_drift(self, test_stats: np.ndarray, thresholds: np.ndarray) -> int:
        """
        Private method to compare test stats to thresholds. The max stats over all windows are compute for each
        feature. Drift is flagged if `max_stats` for any feature exceeds the single `thresholds` set.

        Parameters
        ----------
        test_stats
            Array of test statistics with shape (n_windows, n_features)
        thresholds
            Array of thresholds with shape (t_max, 1).

        Returns
        -------
        An int equal to 1 if drift, 0 otherwise.
        """
        with warnings.catch_warnings():
            warnings.filterwarnings(action='ignore', message='All-NaN slice encountered')
            max_stats = np.nanmax(test_stats, axis=0)
        drift_pred = int((max_stats > thresholds).any())
        return drift_pred


# NOTE(review): the source chunk was truncated part-way through the
# module-level numba helper _normalise_stats (and _ids_to_stats, referenced
# above, is not visible at all).  Both must be restored from the complete
# original module; reconstructing the truncated variance expression by
# guesswork would risk silently wrong statistics.
#! /usr/bin/env python
# Mathematica nb from Alex & Laurent
# <EMAIL> major reorg as LG++ 2018 01
# python3 required (int( (len(coeffs) -1)/2 )) because of float int/int result change from python2

import numpy as np
import scipy.special
import numpy.linalg as linalg
import sys
# FIX: comb was removed from scipy.misc (SciPy >= 1.3); it lives in
# scipy.special, which this module already imports.
from scipy.special import comb
import os, pickle
from uncertainties import unumpy  # pip install if you need

m = 1.0
mm = 1.0e-3 * m
um = 1.0e-6 * m


def scaling(img, photons):  # RENAME this function
    """Return the factor that rescales a perfect PSF to a target flux.

    img gives a perfect psf to count its total flux;
    photons is the desired number of photons (total flux in data).
    """
    total = np.sum(img)
    print("total", total)
    return photons / total


def matrix_operations(img, model, flux=None, verbose=False):
    # least squares matrix operations to solve A x = b, where A is the model,
    # b is the data (image), and x is the coefficient vector we are solving for.
    # In 2-D data x = inv(At.A).(At.b)
    flatimg = img.reshape(np.shape(img)[0] * np.shape(img)[1])
    nanlist = np.where(np.isnan(flatimg))
    flatimg = np.delete(flatimg, nanlist)
    if flux is not None:
        flatimg = flux * flatimg / flatimg.sum()

    # A
    flatmodel_nan = model.reshape(np.shape(model)[0] * np.shape(model)[1], np.shape(model)[2])
    flatmodel = np.zeros((len(flatimg), np.shape(model)[2]))
    if verbose:
        print("flat model dimensions ", np.shape(flatmodel))
        print("flat image dimensions ", np.shape(flatimg))
    # Drop the NaN pixels from every fringe column so A matches b.
    for fringe in range(np.shape(model)[2]):
        flatmodel[:, fringe] = np.delete(flatmodel_nan[:, fringe], nanlist)

    # At (A transpose)
    flatmodeltransp = flatmodel.transpose()
    # At.A (makes square matrix)
    modelproduct = np.dot(flatmodeltransp, flatmodel)
    # At.b
    data_vector = np.dot(flatmodeltransp, flatimg)
    # inv(At.A)
    inverse = linalg.inv(modelproduct)
    cond = np.linalg.cond(inverse)

    x = np.dot(inverse, data_vector)
    res = np.dot(flatmodel, x) - flatimg
    # Re-insert NaNs at their original positions so the residual image
    # matches the input image's shape and masking.
    naninsert = nanlist[0] - np.arange(len(nanlist[0]))
    res = np.insert(res, naninsert, np.nan)
    res = res.reshape(img.shape[0], img.shape[1])

    if verbose:
        print('model flux', flux)
        print('data flux', flatimg.sum())
        print("flat model dimensions ", np.shape(flatmodel))
        print("model transpose dimensions ", np.shape(flatmodeltransp))
        print("flat image dimensions ", np.shape(flatimg))
        print("transpose * image data dimensions", np.shape(data_vector))
        print("flat img * transpose dimensions", np.shape(inverse))

    # Optional covariance estimate via the third-party linearfit package;
    # silently skipped when it is not installed.
    try:
        from linearfit import linearfit

        # dependent variables
        M = np.mat(flatimg)
        # photon noise
        noise = np.sqrt(np.abs(flatimg))
        # this sets the weights of pixels fulfilling condition to zero
        weights = np.where(np.abs(flatimg) <= 1.0, 0.0, 1.0 / (noise ** 2))
        # uniform weight
        wy = weights
        S = np.mat(np.diag(wy))
        # matrix of independent variables
        C = np.mat(flatmodeltransp)
        # initialize object
        result = linearfit.LinearFit(M, S, C)
        # do the fit
        result.fit()
        # delete inverse_covariance_matrix to reduce size of pickled file
        result.inverse_covariance_matrix = []
        linfit_result = result
        print("Returned linearfit result")
    except ImportError:
        linfit_result = None
        # if verbose:
        print("linearfit module not imported, no covariances saved.")
    return x, res, cond, linfit_result


def weighted_operations(img, model, weights, verbose=False):
    # least squares matrix operations to solve A x = b, where A is the model,
    # b is the data (image), and x is the coefficient vector we are solving for.
    # In 2-D data x = inv(At.A).(At.b)
    # (FIX: the "In 2-D data..." line appeared as a bare statement in the
    # mangled source, which is a syntax error; restored as a comment.)
    clist = weights.reshape(weights.shape[0] * weights.shape[1]) ** 2
    flatimg = img.reshape(np.shape(img)[0] * np.shape(img)[1])
    nanlist = np.where(np.isnan(flatimg))
    flatimg = np.delete(flatimg, nanlist)
    clist = np.delete(clist, nanlist)

    # A
    flatmodel_nan = model.reshape(np.shape(model)[0] * np.shape(model)[1], np.shape(model)[2])
    # flatmodel = model.reshape(np.shape(model)[0] * np.shape(model)[1], np.shape(model)[2])
    flatmodel = np.zeros((len(flatimg), np.shape(model)[2]))
    for fringe in range(np.shape(model)[2]):
        flatmodel[:, fringe] = np.delete(flatmodel_nan[:, fringe], nanlist)

    # At (A transpose)
    flatmodeltransp = flatmodel.transpose()
    # At.C.A (makes square matrix)
    CdotA = flatmodel.copy()
    for i in range(flatmodel.shape[1]):
        CdotA[:, i] = clist * flatmodel[:, i]
    modelproduct = np.dot(flatmodeltransp, CdotA)
    # At.C.b
    Cdotb = clist * flatimg
    data_vector = np.dot(flatmodeltransp, Cdotb)
    # inv(At.C.A)
    inverse = linalg.inv(modelproduct)
    cond = np.linalg.cond(inverse)

    x = np.dot(inverse, data_vector)
    res = np.dot(flatmodel, x) - flatimg
    naninsert = nanlist[0] - np.arange(len(nanlist[0]))
    res = np.insert(res, naninsert, np.nan)
    res = res.reshape(img.shape[0], img.shape[1])

    if verbose:
        print("flat model dimensions ", np.shape(flatmodel))
        print("model transpose dimensions ", np.shape(flatmodeltransp))
        print("flat image dimensions ", np.shape(flatimg))
        print("transpose * image data dimensions", np.shape(data_vector))
        print("flat img * transpose dimensions", np.shape(inverse))
    return x, res, cond


# NOTE(review): the source chunk was truncated part-way through deltapistons
# (cut at "delta[q] = ").  It is omitted here and must be restored from the
# complete original module.  When restoring, note that its
# "alist.reshape(len(alist)/2, 2)" uses float division, which raises a
# TypeError under Python 3 — it needs len(alist) // 2, consistent with the
# header comment about python3 int division.
pistons[r[0]] - pistons[r[1]] return delta def tan2visibilities(coeffs, verbose=False): """ Technically the fit measures phase AND amplitude, so to retrieve the phase we need to consider both sin and cos terms. Consider one fringe: A { cos(kx)cos(dphi) + sin(kx)sin(dphi) } = A(a cos(kx) + b sin(kx)), where a = cos(dphi) and b = sin(dphi) and A is the fringe amplitude, therefore coupling a and b In practice we measure A*a and A*b from the coefficients, so: Ab/Aa = b/a = tan(dphi) call a' = A*a and b' = A*b (we actually measure a', b') (A*sin(dphi))^2 + (A*cos(dphi)^2) = A^2 = a'^2 + b'^2 Edit 10/2014: pistons now returned in units of radians!! Edit 05/2017: <NAME> added support of uncertainty propagation """ if type(coeffs[0]).__module__ != 'uncertainties.core': # if uncertainties not present, proceed as usual # coefficients of sine terms mulitiplied by 2*pi delta = np.zeros(int( (len(coeffs) -1)/2 )) # py3 amp = np.zeros(int( (len(coeffs) -1)/2 )) # py3 for q in range(int( (len(coeffs) -1)/2 )): # py3 delta[q] = (np.arctan2(coeffs[2*q+2], coeffs[2*q+1])) amp[q] = np.sqrt(coeffs[2*q+2]**2 + coeffs[2*q+1]**2) if verbose: print("shape coeffs", np.shape(coeffs)) print("shape delta", np.shape(delta)) # returns fringe amplitude & phase return amp, delta else: # propagate uncertainties qrange = np.arange(int( (len(coeffs) -1)/2 )) # py3 fringephase = unumpy.arctan2(coeffs[2*qrange+2], coeffs[2*qrange+1]) fringeamp = unumpy.sqrt(coeffs[2*qrange+2]**2 + coeffs[2*qrange+1]**2) return fringeamp, fringephase def fixeddeltapistons(coeffs, verbose=False): delta = np.zeros(int( (len(coeffs) -1)/2 )) # py3 for q in range(int( (len(coeffs) -1)/2 )): # py3 delta[q] = np.arcsin((coeffs[2*q+1] + coeffs[2*q+2]) / 2) / (np.pi*2.0) if verbose: print("shape coeffs", np.shape(coeffs)) print("shape delta", np.shape(delta)) return delta def populate_antisymmphasearray(deltaps, N=7): if type(deltaps[0]).__module__ != 'uncertainties.core': fringephasearray = np.zeros((N,N)) else: 
fringephasearray = unumpy.uarray(np.zeros((N,N)),np.zeros((N,N))) step=0 n=N-1 for h in range(n): """ fringephasearray[0,q+1:] = coeffs[0:6] fringephasearray[1,q+2:] = coeffs[6:11] fringephasearray[2,q+3:] = coeffs[11:15] fringephasearray[3,q+4:] = coeffs[15:18] fringephasearray[4,q+5:] = coeffs[18:20] fringephasearray[5,q+6:] = coeffs[20:] """ fringephasearray[h, h+1:] = deltaps[step:step+n] step= step+n n=n-1 fringephasearray = fringephasearray - fringephasearray.T return fringephasearray def populate_symmamparray(amps, N=7): if type(amps[0]).__module__ != 'uncertainties.core': fringeamparray = np.zeros((N,N)) else: fringeamparray = unumpy.uarray(np.zeros((N,N)),np.zeros((N,N))) step=0 n=N-1 for h in range(n): fringeamparray[h,h+1:] = amps[step:step+n] step = step+n n=n-1 fringeamparray = fringeamparray + fringeamparray.T return fringeamparray def phases_and_amplitudes(solution_coefficients, N=7): # number of solution coefficients Nsoln = len(solution_coefficients) # normalise by intensity soln = np.array([solution_coefficients[i]/solution_coefficients[0] for i in range(Nsoln)]) # compute fringe quantitites fringeamp, fringephase = tan2visibilities( soln ) # import pdb # pdb.set_trace() # compute closure phases if type(solution_coefficients[0]).__module__ != 'uncertainties.core': redundant_closure_phases = redundant_cps(np.array(fringephase), N=N) else: redundant_closure_phases, fringephasearray = redundant_cps(np.array(fringephase), N=N) # compute closure amplitudes redundant_closure_amplitudes = return_CAs(np.array(fringephase), N=N) return fringephase, fringeamp, redundant_closure_phases, redundant_closure_amplitudes def redundant_cps(deltaps, N = 7): fringephasearray = populate_antisymmphasearray(deltaps, N=N) if type(deltaps[0]).__module__ != 'uncertainties.core': cps = np.zeros(int(comb(N,3))) else: cps = unumpy.uarray( np.zeros(np.int(comb(N,3))),np.zeros(np.int(comb(N,3))) ) nn=0 for kk in range(N-2): for ii in range(N-kk-2): for jj in range(N-kk-ii-2): 
cps[nn+jj] = fringephasearray[kk, ii+kk+1] \ + fringephasearray[ii+kk+1, jj+ii+kk+2] \ + fringephasearray[jj+ii+kk+2, kk] nn = nn+jj+1 if type(deltaps[0]).__module__ != 'uncertainties.core': return cps else: return cps, fringephasearray def closurephase(deltap, N=7): # N is number of holes in the mask # 7 and 10 holes available (JWST & GPI) # p is a triangular matrix set up to calculate closure phases if N == 7: p = np.array( [ deltap[:6], deltap[6:11], deltap[11:15], \ deltap[15:18], deltap[18:20], deltap[20:] ] ) elif N == 10: p = np.array( [ deltap[:9], deltap[9:17], deltap[17:24], \ deltap[24:30], deltap[30:35], deltap[35:39], \ deltap[39:42], deltap[42:44], deltap[44:] ] ) else: print("invalid hole number") # calculates closure phases for general N-hole mask (with p-array set up properly above) cps = np.zeros(int((N - 1)*(N - 2)/2)) #py3 for l1 in range(N - 2): for l2 in range(N - 2 - l1): cps[int(l1*((N + (N-3) -l1) / 2.0)) + l2] = \ p[l1][0] + p[l1+1][l2] - p[l1][l2+1] return cps def return_CAs(amps, N=7): fringeamparray = populate_symmamparray(amps, N=N) nn=0 if
<filename>interlib/example.py
from interlib.utility import print_line

'''
. This keyword file is an example keyword that will show you how you can implement your own keyword to Pseudo. What you will need to know is the interpret_state's data, how to add to it before executing the interpretation on the GUI/IDE, setting up the help_manual string, how to write out python code to the output display (right screen of the GUI/IDE), and successfully parse your keyword by the use of this file's handler() function

+-+-+-+- INTERPRET STATE -+-+-+-+:
. The interpret_state is a dictionary that contains all of the important variables that make the interpretation successful.
. Here is a list of all of the keys in the interpret_state:
  - "all_variables":
    . This holds all of the interpreter's known variables and the datatype of those variables.
    . The default datatypes that Pseudo uses are:
      - number (which can contain integers or floats)
      - string
      - list
      - table
      - function
    . How you would store a variable named "x" into "all_variables" is like so:
      - interpret_state["all_variables"]["x"] = {"data_type": "number"}
    . Note that storing the value of the number is not mandatory because we let python do the evaluation and storage of the data to that variable name.
    . However, this doesn't mean that you can't store your own values into the variables. You can create your own values to store with your variables if you need them for other keywords that might use them.
      - For example: Say a keyword requires a variable that had a specific data called "password" and you have a keyword that creates a variable that contains a "password" data value, then you could do
        . all_variables = interpret_state[all_variables]
        . all_variables["some_key"] = {"data_type": "key", "password": "<PASSWORD>"}
      - Note that the "data_type" value does not need to have the standard Pseudo datatype, it could be whatever you want it to be as long as it is compatible with your other keywords.
        It will not work if you decide to use this variable with Pseudo's keywords though so be mindful of that.
      - I mean, heck you can put whatever you want for that variable really. It's up to you how you want to use it. For example:
        - interpret_state["all_variables"][some_var] = {"Some_datatype": "Don't Care"}
        - The above code is valid, just not compatible with Pseudo's keywords is all
  - "line_numb":
    . This holds the current line number of the Pseudo line. Although, it's actually the current line number - 1 since we are parsing each line in a list and lists are indexed at 0 while the line number for Pseudo is indexed at 1.
    . You can set the line number to any number you want and the parsing will jump to that line of Pseudo code. Think of this as a program counter. This number will automatically be incremented if you do not change it, if you do however the interpreter will start at that line when the current keyword is done. Again, note that the line number in Pseudo is 1 more than line_numb.
    . For example, if you want to jump to line 20 in the Pseudo code, you can do this:
      - interpret_state[line_numb] = 19 # Pseudo line number - 1
  - "indent":
    . This holds the python indentation size, it is always 2. I recommend that you do not change its value as it is needed to successfully parse Pseudo's standard library keywords
  - "pseudo_indent":
    . This holds the indentation level of each Pseudo line (the number of spaces in front of the keyword). You'll need to combine this and the "indent" to make sure that the python code generated from your keyword parses successfully.
  - py_lines:
    . This holds a list of lines that will be printed directly to the output screen of the GUI. You can append strings that contain python code into here. There is no limit of how many lines you can append here. If your keyword requires 10 lines of python code to work, then you can append 10 lines of python to here.
      Just note that you will need to indent your lines so that python code can be executed successfully. This is why we have the "indent" and "pseudo_indent", the sum of the two is the actual indentation you will need to prepend your line first.
  - parse_success:
    . This holds the current evaluation state of the keyword being processed. For each keyword file you would need to return a True or False value. These values indicate whether a keyword has been successfully parsed. When returning either of those values, the interpreter will write them into interpret_state["parse_success"] and then it will check if the translation was completed. Inside your keyword file, it is RECOMMENDED that you do not modify this.
  - in_file_lines:
    . This holds a list of all of the lines of the Pseudo code. There's not much to say for this variable since we do not use it much in any of our basic keyword library. This is here for convenience if you happen to need some line in the Pseudo code to work with the keyword being parsed.
  - keyword_dict:
    . This holds a dictionary that contains python modules for all keywords in the "interlib" folder. We use this for executing each keyword's handler() function. It is the reason why we can easily swap out keywords. You won't need to touch this variable at all unless you intend to run another keyword's handler() function. The only keyword in the basic library that utilizes this is the Import keyword, but it calls a utility function in utility.py that requires this variable.
  - import_queue:
    . This contains a special list that holds all files that the main pseudo code wants to import. This is needed to check for circular imports and error out. Only the Import keyword utilizes this variable as of right now. I'm not sure when you will need to check for imports of other pseudo files, but if you do use this then modify at your own risk.
  - pseudo_filepath:
    . This contains a string that is the filepath directory of the main Pseudo code file that will be interpreted.
      This is also required because the Import keyword only has the ability to import files from the same directory as your main Pseudo code file. I would like to extend this functionality to import pseudo files from anywhere in the computer or even import Pseudo code files within a given directory or filepath. Like the parse_success variable, it is RECOMMENDED that you do not touch this or else your imports will likely error out.
  - pseudo_file:
    . This contains a string of the actual Pseudo code file's name. This here is also used for the Import keyword to check whether a function is from this file or from the import file. Like pseudo_filepath, it is RECOMMENDED that you do not modify this.
  - plain_import_files:
    . This contains a list of filepaths to import to the translated Python code of the main Pseudo code. When we are executing the Python code that the main Pseudo code has generated, the import keywords in the Python code will try to import those files. On an IDE this works fine, however when running this through a binary the file does not import for reasons that I can't explain thoroughly. Just note it is RECOMMENDED that you should not touch this at all.
  - is_debug_on:
    . This contains a boolean to indicate that "debug mode" is on/off when the GUI has toggled it on/off. This is useful for developing your keywords locally on the GUI and not have to bother running the gui.py file through an
<reponame>gerow/hostap
# wpa_supplicant control interface
# Copyright (c) 2014, Qualcomm Atheros, Inc.
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.

import logging
logger = logging.getLogger()
import subprocess
import time

import hostapd
import hwsim_utils
from wpasupplicant import WpaSupplicant
from utils import alloc_fail

def test_wpas_ctrl_network(dev):
    """wpa_supplicant ctrl_iface network set/get"""
    id = dev[0].add_network()

    # Malformed SET_NETWORK/GET_NETWORK and out-of-range ids must be rejected.
    if "FAIL" not in dev[0].request("SET_NETWORK " + str(id)):
        raise Exception("Unexpected success for invalid SET_NETWORK")
    if "FAIL" not in dev[0].request("SET_NETWORK " + str(id) + " name"):
        raise Exception("Unexpected success for invalid SET_NETWORK")
    if "FAIL" not in dev[0].request("SET_NETWORK " + str(id + 1) + " proto OPEN"):
        raise Exception("Unexpected success for invalid network id")
    if "FAIL" not in dev[0].request("GET_NETWORK " + str(id)):
        raise Exception("Unexpected success for invalid GET_NETWORK")
    if "FAIL" not in dev[0].request("GET_NETWORK " + str(id + 1) + " proto"):
        raise Exception("Unexpected success for invalid network id")

    # Unquoted fields must round-trip set -> get unchanged.
    tests = (("key_mgmt", "WPA-PSK WPA-EAP IEEE8021X NONE WPA-NONE FT-PSK FT-EAP WPA-PSK-SHA256 WPA-EAP-SHA256"),
             ("pairwise", "CCMP-256 GCMP-256 CCMP GCMP TKIP"),
             ("group", "CCMP-256 GCMP-256 CCMP GCMP TKIP WEP104 WEP40"),
             ("auth_alg", "OPEN SHARED LEAP"),
             ("scan_freq", "1 2 3 4 5 6 7 8 9 10 11 12 13 14 15"),
             ("freq_list", "2412 2417"),
             ("scan_ssid", "1"),
             ("bssid", "00:11:22:33:44:55"),
             ("proto", "WPA RSN OSEN"),
             ("eap", "TLS"),
             ("go_p2p_dev_addr", "22:33:44:55:66:aa"),
             ("p2p_client_list", "22:33:44:55:66:bb 02:11:22:33:44:55"))
    dev[0].set_network_quoted(id, "ssid", "test")
    for field, value in tests:
        dev[0].set_network(id, field, value)
        res = dev[0].get_network(id, field)
        if res != value:
            raise Exception("Unexpected response for '" + field + "': '" + res + "'")

    # Quoted fields come back wrapped in double quotes.
    q_tests = (("identity", "hello"),
               ("anonymous_identity", "<EMAIL>"))
    for field, value in q_tests:
        dev[0].set_network_quoted(id, field, value)
        res = dev[0].get_network(id, field)
        if res != '"' + value + '"':
            raise Exception("Unexpected quoted response for '" + field + "': '" + res + "'")

    get_tests = (("foo", None),
                 ("ssid", '"test"'))
    for field, value in get_tests:
        res = dev[0].get_network(id, field)
        if res != value:
            raise Exception("Unexpected response for '" + field + "': '" + res + "'")

    # Passwords are write-only: reads return '*' (or nothing when unset).
    if dev[0].get_network(id, "password"):
        raise Exception("Unexpected response for 'password'")
    dev[0].set_network_quoted(id, "password", "<PASSWORD>")
    if dev[0].get_network(id, "password") != '*':
        raise Exception("Unexpected response for 'password' (expected *)")
    dev[0].set_network(id, "password", "<PASSWORD>:<PASSWORD>")
    if dev[0].get_network(id, "password") != '*':
        raise Exception("Unexpected response for 'password' (expected *)")
    dev[0].set_network(id, "password", "<PASSWORD>")
    if dev[0].get_network(id, "password"):
        raise Exception("Unexpected response for 'password'")
    if "FAIL" not in dev[0].request("SET_NETWORK " + str(id) + " password hash:12"):
        raise Exception("Unexpected success for invalid password hash")
    if "FAIL" not in dev[0].request("SET_NETWORK " + str(id) + " password hash:<PASSWORD>"):
        raise Exception("Unexpected success for invalid password hash")

    # identity accepts hex and printf-escaped forms.
    dev[0].set_network(id, "identity", "414243")
    if dev[0].get_network(id, "identity") != '"ABC"':
        raise Exception("Unexpected identity hex->text response")
    dev[0].set_network(id, "identity", 'P"abc\ndef"')
    if dev[0].get_network(id, "identity") != "6162630a646566":
        raise Exception("Unexpected identity printf->hex response")
    if "FAIL" not in dev[0].request("SET_NETWORK " + str(id) + ' identity P"foo'):
        raise Exception("Unexpected success for invalid identity string")
    if "FAIL" not in dev[0].request("SET_NETWORK " + str(id) + ' identity 12x3'):
        raise Exception("Unexpected success for invalid identity string")

    # WEP keys are write-only as well ('*' on read-back).
    for i in range(0, 4):
        if "FAIL" in dev[0].request("SET_NETWORK " + str(id) + ' wep_key' + str(i) + ' aabbccddee'):
            raise Exception("Unexpected wep_key set failure")
        if dev[0].get_network(id, "wep_key" + str(i)) != '*':
            raise Exception("Unexpected wep_key get failure")

    # psk_list syntax validation.
    if "FAIL" in dev[0].request("SET_NETWORK " + str(id) + ' psk_list P2P-00:11:22:33:44:55-<KEY>'):
        raise Exception("Unexpected failure for psk_list string")
    if "FAIL" not in dev[0].request("SET_NETWORK " + str(id) + ' psk_list 00:11:x2:33:44:55-0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'):
        raise Exception("Unexpected success for invalid psk_list string")
    if "FAIL" not in dev[0].request("SET_NETWORK " + str(id) + ' psk_list P2P-00:11:x2:33:44:55-0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'):
        raise Exception("Unexpected success for invalid psk_list string")
    if "FAIL" not in dev[0].request("SET_NETWORK " + str(id) + ' psk_list P2P-00:11:22:33:44:55+0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'):
        raise Exception("Unexpected success for invalid psk_list string")
    if "FAIL" not in dev[0].request("SET_NETWORK " + str(id) + ' psk_list P2P-00:11:22:33:44:55-0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde'):
        raise Exception("Unexpected success for invalid psk_list string")
    if "FAIL" not in dev[0].request("SET_NETWORK " + str(id) + ' psk_list P2P-00:11:22:33:44:55-<KEY>'):
        raise Exception("Unexpected success for invalid psk_list string")
    if dev[0].get_network(id, "psk_list"):
        raise Exception("Unexpected psk_list get response")

    if dev[0].list_networks()[0]['ssid'] != "test":
        raise Exception("Unexpected ssid in LIST_NETWORKS")
    dev[0].set_network(id, "ssid", "NULL")
    if dev[0].list_networks()[0]['ssid'] != "":
        raise Exception("Unexpected ssid in LIST_NETWORKS after clearing it")

    # Bounds/format validation for ssid, scan_ssid, psk, eap, etc.
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' ssid "0123456789abcdef0123456789abcdef0"'):
        raise Exception("Too long SSID accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' scan_ssid qwerty'):
        raise Exception("Invalid integer accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' scan_ssid 2'):
        raise Exception("Too large integer accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' psk 12345678'):
        raise Exception("Invalid PSK accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' psk "1234567"'):
        raise Exception("Too short PSK accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' psk "1234567890123456789012345678901234567890123456789012345678901234"'):
        raise Exception("Too long PSK accepted")
    dev[0].set_network_quoted(id, "psk", "123456768");
    dev[0].set_network_quoted(id, "psk", "123456789012345678901234567890123456789012345678901234567890123");
    if dev[0].get_network(id, "psk") != '*':
        raise Exception("Unexpected psk read result");
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' eap UNKNOWN'):
        raise Exception("Unknown EAP method accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' password "foo'):
        raise Exception("Invalid password accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' wep_key0 "foo'):
        raise Exception("Invalid WEP key accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' wep_key0 "12345678901234567"'):
        raise Exception("Too long WEP key accepted")
    # too short WEP key is ignored
    dev[0].set_network_quoted(id, "wep_key0", "1234")
    dev[0].set_network_quoted(id, "wep_key1", "12345")
    dev[0].set_network_quoted(id, "wep_key2", "1234567890123")
    dev[0].set_network_quoted(id, "wep_key3", "1234567890123456")

    dev[0].set_network(id, "go_p2p_dev_addr", "any")
    if dev[0].get_network(id, "go_p2p_dev_addr") is not None:
        raise Exception("Unexpected go_p2p_dev_addr value")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' go_p2p_dev_addr 00:11:22:33:44'):
        raise Exception("Invalid go_p2p_dev_addr accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' p2p_client_list 00:11:22:33:44'):
        raise Exception("Invalid p2p_client_list accepted")
    if "FAIL" in dev[0].request('SET_NETWORK ' + str(id) + ' p2p_client_list 00:11:22:33:44:55 00:1'):
        raise Exception("p2p_client_list truncation workaround failed")
    if dev[0].get_network(id, "p2p_client_list") != "00:11:22:33:44:55":
        raise Exception("p2p_client_list truncation workaround did not work")

    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' auth_alg '):
        raise Exception("Empty auth_alg accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' auth_alg FOO'):
        raise Exception("Invalid auth_alg accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' proto '):
        raise Exception("Empty proto accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' proto FOO'):
        raise Exception("Invalid proto accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' pairwise '):
        raise Exception("Empty pairwise accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' pairwise FOO'):
        raise Exception("Invalid pairwise accepted")
    if "FAIL" not in dev[0].request('SET_NETWORK ' + str(id) + ' pairwise WEP40'):
        raise Exception("Invalid pairwise accepted")

    # BSSID command: set, clear (all-zeros), and invalid forms.
    if "OK" not in dev[0].request('BSSID ' + str(id) + ' 00:11:22:33:44:55'):
        raise Exception("Unexpected BSSID failure")
    if dev[0].request("GET_NETWORK 0 bssid") != '00:11:22:33:44:55':
        raise Exception("BSSID command did not set network bssid")
    if "OK" not in dev[0].request('BSSID ' + str(id) + ' 00:00:00:00:00:00'):
        raise Exception("Unexpected BSSID failure")
    if "FAIL" not in dev[0].request("GET_NETWORK 0 bssid"):
        raise Exception("bssid claimed configured after clearing")
    if "FAIL" not in dev[0].request('BSSID 123 00:11:22:33:44:55'):
        raise Exception("Unexpected BSSID success")
    if "FAIL" not in dev[0].request('BSSID ' + str(id) + ' 00:11:22:33:44'):
        raise Exception("Unexpected BSSID success")
    if "FAIL" not in dev[0].request('BSSID ' + str(id)):
        raise Exception("Unexpected BSSID success")

    # bssid_blacklist/bssid_whitelist accept address lists with optional masks.
    tests = [ "02:11:22:33:44:55",
              "02:11:22:33:44:55 02:ae:be:ce:53:77",
              "02:11:22:33:44:55/ff:00:ff:00:ff:00",
              "02:11:22:33:44:55/ff:00:ff:00:ff:00 f2:99:88:77:66:55",
              "f2:99:88:77:66:55 02:11:22:33:44:55/ff:00:ff:00:ff:00",
              "f2:99:88:77:66:55 02:11:22:33:44:55/ff:00:ff:00:ff:00 12:34:56:78:90:ab",
              "02:11:22:33:44:55/ff:ff:ff:00:00:00 02:ae:be:ce:53:77/00:00:00:00:00:ff" ]
    for val in tests:
        dev[0].set_network(id, "bssid_blacklist", val)
        res = dev[0].get_network(id, "bssid_blacklist")
        if res != val:
            raise Exception("Unexpected bssid_blacklist value: %s != %s" % (res, val))
        dev[0].set_network(id, "bssid_whitelist", val)
        res = dev[0].get_network(id, "bssid_whitelist")
        if res != val:
            raise Exception("Unexpected bssid_whitelist value: %s != %s" % (res, val))

    tests = [ "foo",
              "00:11:22:33:44:5",
              "00:11:22:33:44:55q",
              "00:11:22:33:44:55/",
              "00:11:22:33:44:55/66:77:88:99:aa:b" ]
    for val in tests:
        if "FAIL" not in dev[0].request("SET_NETWORK %d bssid_blacklist %s" % (id, val)):
            raise Exception("Invalid bssid_blacklist value accepted")

def test_wpas_ctrl_many_networks(dev, apdev):
    """wpa_supplicant ctrl_iface LIST_NETWORKS with huge number of networks"""
    for i in range(1000):
        id = dev[0].add_network()
    res = dev[0].request("LIST_NETWORKS")
    if str(id) in res:
        raise Exception("Last added network was unexpectedly included")
    res = dev[0].request("LIST_NETWORKS LAST_ID=%d" % (id - 2))
    if str(id) not in res:
        raise Exception("Last added network was not present when using LAST_ID")
    # This command can take a very long time under valgrind testing on
# Integration tests for qtile driven through a nested Xephyr X server.
# Each test runs a real window manager instance (via the Xephyr decorator)
# and asserts on state reported over the qtile command interface.
# NOTE(review): this suite targets Python 2 (cStringIO, nose); only the
# py2-only dict.keys() indexing below has been made version-neutral.
import os
import time
import cStringIO
import subprocess
import signal

import libqtile
import libqtile.layout
import libqtile.bar
import libqtile.command
import libqtile.widget
import libqtile.manager
import libqtile.config
import libqtile.hook
import libqtile.confreader

import utils
from utils import Xephyr
from nose.tools import assert_raises
from nose.plugins.attrib import attr


class TestConfig:
    # Full-featured config: four groups, three layouts, a bar, and an
    # xclock float rule. Used by most tests below.
    auto_fullscreen = True
    groups = [
        libqtile.config.Group("a"),
        libqtile.config.Group("b"),
        libqtile.config.Group("c"),
        libqtile.config.Group("d")
    ]
    layouts = [
        libqtile.layout.stack.Stack(num_stacks=1),
        libqtile.layout.stack.Stack(num_stacks=2),
        libqtile.layout.max.Max()
    ]
    floating_layout = libqtile.layout.floating.Floating(
        float_rules=[dict(wmclass="xclock")])
    keys = [
        libqtile.config.Key(
            ["control"], "k",
            libqtile.command._Call([("layout", None)], "up")
        ),
        libqtile.config.Key(
            ["control"], "j",
            libqtile.command._Call([("layout", None)], "down")
        ),
    ]
    mouse = []
    screens = [libqtile.config.Screen(
        bottom=libqtile.bar.Bar(
            [
                libqtile.widget.GroupBox(),
            ],
            20
        ),
    )]
    main = None
    follow_mouse_focus = True


class BareConfig:
    # Minimal config: no bar, no float rules, no mouse following.
    auto_fullscreen = True
    groups = [
        libqtile.config.Group("a"),
        libqtile.config.Group("b"),
        libqtile.config.Group("c"),
        libqtile.config.Group("d")
    ]
    layouts = [
        libqtile.layout.stack.Stack(num_stacks=1),
        libqtile.layout.stack.Stack(num_stacks=2)
    ]
    floating_layout = libqtile.layout.floating.Floating()
    keys = [
        libqtile.config.Key(
            ["control"], "k",
            libqtile.command._Call([("layout", None)], "up")
        ),
        libqtile.config.Key(
            ["control"], "j",
            libqtile.command._Call([("layout", None)], "down")
        ),
    ]
    mouse = []
    screens = [libqtile.config.Screen()]
    main = None
    follow_mouse_focus = False


@Xephyr(True, TestConfig())
def test_screen_dim(self):
    # Screen geometry and focus should track to_screen() moves.
    #self.c.restart()
    self.testXclock()
    assert self.c.screen.info()["index"] == 0
    assert self.c.screen.info()["x"] == 0
    assert self.c.screen.info()["width"] == 800
    assert self.c.group.info()["name"] == 'a'
    assert self.c.group.info()["focus"] == 'xclock'
    self.c.to_screen(1)
    self.testXeyes()
    assert self.c.screen.info()["index"] == 1
    assert self.c.screen.info()["x"] == 800
    assert self.c.screen.info()["width"] == 640
    assert self.c.group.info()["name"] == 'b'
    assert self.c.group.info()["focus"] == 'xeyes'
    self.c.to_screen(0)
    assert self.c.screen.info()["index"] == 0
    assert self.c.screen.info()["x"] == 0
    assert self.c.screen.info()["width"] == 800
    assert self.c.group.info()["name"] == 'a'
    assert self.c.group.info()["focus"] == 'xclock'


@Xephyr(True, TestConfig(), xoffset=0)
def test_clone_dim(self):
    # With xoffset=0 the two outputs clone each other: only one screen.
    self.testXclock()
    assert self.c.screen.info()["index"] == 0
    assert self.c.screen.info()["x"] == 0
    assert self.c.screen.info()["width"] == 800
    assert self.c.group.info()["name"] == 'a'
    assert self.c.group.info()["focus"] == 'xclock'
    assert len(self.c.screens()) == 1


@Xephyr(True, TestConfig())
def test_to_screen(self):
    # Windows land on the group of the currently focused screen;
    # to_next/prev_screen cycles focus between them.
    assert self.c.screen.info()["index"] == 0
    self.c.to_screen(1)
    assert self.c.screen.info()["index"] == 1
    self.testWindow("one")
    self.c.to_screen(0)
    self.testWindow("two")
    ga = self.c.groups()["a"]
    assert ga["windows"] == ["two"]
    gb = self.c.groups()["b"]
    assert gb["windows"] == ["one"]
    assert self.c.window.info()["name"] == "two"
    self.c.to_next_screen()
    assert self.c.window.info()["name"] == "one"
    self.c.to_next_screen()
    assert self.c.window.info()["name"] == "two"
    self.c.to_prev_screen()
    assert self.c.window.info()["name"] == "one"


@Xephyr(True, TestConfig())
def test_togroup(self):
    # Moving a window to an unknown group raises; to its own group is a no-op.
    self.testWindow("one")
    assert_raises(libqtile.command.CommandError,
                  self.c.window.togroup, "nonexistent")
    assert self.c.groups()["a"]["focus"] == "one"
    self.c.window.togroup("a")
    assert self.c.groups()["a"]["focus"] == "one"
    self.c.window.togroup("b")
    assert self.c.groups()["b"]["focus"] == "one"
    assert self.c.groups()["a"]["focus"] == None
    self.c.to_screen(1)
    self.c.window.togroup("c")
    assert self.c.groups()["c"]["focus"] == "one"


@Xephyr(True, TestConfig())
def test_resize(self):
    self.c.screen[0].resize(x=10, y=10, w=100, h=100)
    d = self.c.screen[0].info()
    assert d["width"] == d["height"] == 100
    assert d["x"] == d["y"] == 10


@Xephyr(False, BareConfig())
def test_minimal(self):
    assert self.c.status() == "OK"


@Xephyr(False, TestConfig())
def test_events(self):
    assert self.c.status() == "OK"


# FIXME: failing test disabled. For some reason we don't seem
# to have a keymap in Xnest or Xephyr 99% of the time.
@Xephyr(False, TestConfig())
def test_keypress(self):
    self.testWindow("one")
    self.testWindow("two")
    v = self.c.simulate_keypress(["unknown"], "j")
    assert v.startswith("Unknown modifier")
    assert self.c.groups()["a"]["focus"] == "two"
    self.c.simulate_keypress(["control"], "j")
    assert self.c.groups()["a"]["focus"] == "one"


@Xephyr(False, TestConfig())
def test_spawn(self):
    # Spawn something with a pid greater than init's
    assert int(self.c.spawn("true")) > 1


@Xephyr(False, TestConfig())
def test_kill(self):
    # kill() is asynchronous: poll up to ~2s for the window to disappear.
    self.testWindow("one")
    self.testwindows = []
    self.c.window[self.c.window.info()["id"]].kill()
    self.c.sync()
    for i in range(20):
        if len(self.c.windows()) == 0:
            break
        time.sleep(0.1)
    else:
        raise AssertionError("Window did not die...")


@Xephyr(False, TestConfig())
def test_regression_groupswitch(self):
    self.c.group["c"].toscreen()
    self.c.group["d"].toscreen()
    assert self.c.groups()["c"]["screen"] == None


@Xephyr(False, TestConfig())
def test_nextlayout(self):
    # Cycle through the three configured layouts back to the first.
    self.testWindow("one")
    self.testWindow("two")
    assert len(self.c.layout.info()["stacks"]) == 1
    self.c.nextlayout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.c.nextlayout()
    self.c.nextlayout()
    assert len(self.c.layout.info()["stacks"]) == 1


@Xephyr(False, TestConfig())
def test_setlayout(self):
    assert not self.c.layout.info()["name"] == "max"
    self.c.group.setlayout("max")
    assert self.c.layout.info()["name"] == "max"


@Xephyr(False, TestConfig())
def test_adddelgroup(self):
    self.testWindow("one")
    self.c.addgroup("dummygroup")
    self.c.addgroup("testgroup")
    assert "testgroup" in self.c.groups().keys()
    self.c.window.togroup("testgroup")
    self.c.delgroup("testgroup")
    assert "testgroup" not in self.c.groups().keys()
    # Assert that the test window is still a member of some group.
    assert sum([len(i["windows"]) for i in self.c.groups().values()])
    # list(...) keeps this valid on py3, where keys() is a view
    # (identical behavior on py2, where keys() is already a list).
    for i in list(self.c.groups().keys())[:len(self.c.groups()) - 1]:
        self.c.delgroup(i)
    # Deleting the last remaining group must fail.
    assert_raises(libqtile.command.CommandException, self.c.delgroup,
                  list(self.c.groups().keys())[0])


@Xephyr(False, TestConfig())
def test_nextprevgroup(self):
    start = self.c.group.info()["name"]
    ret = self.c.screen.nextgroup()
    assert self.c.group.info()["name"] != start
    assert self.c.group.info()["name"] == ret
    ret = self.c.screen.prevgroup()
    assert self.c.group.info()["name"] == start


@Xephyr(False, TestConfig())
def test_togglegroup(self):
    # togglegroup() with no argument bounces back to the previous group.
    self.c.group["a"].toscreen()
    self.c.group["b"].toscreen()
    self.c.screen.togglegroup("c")
    assert self.c.group.info()["name"] == "c"
    self.c.screen.togglegroup("c")
    assert self.c.group.info()["name"] == "b"
    self.c.screen.togglegroup()
    assert self.c.group.info()["name"] == "c"


@Xephyr(False, TestConfig())
def test_inspect_xeyes(self):
    self.testXeyes()
    assert self.c.window.inspect()


@Xephyr(False, TestConfig())
def test_inspect_xterm(self):
    self.testXterm()
    assert self.c.window.inspect()["wm_class"]


@Xephyr(False, TestConfig())
def test_static(self):
    self.testXeyes()
    self.testWindow("one")
    self.c.window[self.c.window.info()["id"]].static(0, 0, 0, 100, 100)


@Xephyr(False, TestConfig())
def test_match(self):
    self.testXeyes()
    assert self.c.window.match(wname="xeyes")
    assert not self.c.window.match(wname="nonexistent")


@Xephyr(False, TestConfig())
def test_default_float(self):
    # xclock floats automatically via TestConfig's float_rules.
    # change to 2 col stack
    self.c.nextlayout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXclock()
    assert self.c.group.info()['focus'] == 'xclock'
    assert self.c.window.info()['width'] == 164
    assert self.c.window.info()['height'] == 164
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    assert self.c.window.info()['floating'] == True
    self.c.window.move_floating(10, 20, 42, 42)
    assert self.c.window.info()['width'] == 164
    assert self.c.window.info()['height'] == 164
    assert self.c.window.info()['x'] == 10
    assert self.c.window.info()['y'] == 20
    assert self.c.window.info()['floating'] == True


@Xephyr(False, TestConfig())
def test_last_float_size(self):
    """
    When you re-float something it would be preferable to have it
    use the previous float size
    """
    self.testXeyes()
    assert self.c.window.info()['name'] == 'xeyes'
    assert self.c.window.info()['width'] == 798
    assert self.c.window.info()['height'] == 578
    self.c.window.toggle_floating()
    assert self.c.window.info()['width'] == 150
    assert self.c.window.info()['height'] == 100
    # resize
    self.c.window.set_size_floating(50, 90, 42, 42)
    assert self.c.window.info()['width'] == 50
    assert self.c.window.info()['height'] == 90
    self.c.window.toggle_floating()
    assert self.c.window.info()['width'] == 798
    assert self.c.window.info()['height'] == 578
    # float again, should use last float size
    self.c.window.toggle_floating()
    assert self.c.window.info()['width'] == 50
    assert self.c.window.info()['height'] == 90
    # make sure it works through min and max
    self.c.window.toggle_maximize()
    self.c.window.toggle_minimize()
    self.c.window.toggle_minimize()
    self.c.window.toggle_floating()
    assert self.c.window.info()['width'] == 50
    assert self.c.window.info()['height'] == 90


@Xephyr(False, TestConfig())
def test_float_max_min_combo(self):
    # Un-floating must clear both maximized and minimized state.
    # change to 2 col stack
    self.c.nextlayout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXterm()
    self.testXeyes()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    assert self.c.window.info()['floating'] == False
    self.c.window.toggle_maximize()
    assert self.c.window.info()['floating'] == True
    assert self.c.window.info()['maximized'] == True
    assert self.c.window.info()['width'] == 800
    assert self.c.window.info()['height'] == 580
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_minimize()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['floating'] == True
    assert self.c.window.info()['minimized'] == True
    assert self.c.window.info()['width'] == 800
    assert self.c.window.info()['height'] == 580
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_floating()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['floating'] == False
    assert self.c.window.info()['minimized'] == False
    assert self.c.window.info()['maximized'] == False
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0


@Xephyr(False, TestConfig())
def test_toggle_fullscreen(self):
    # change to 2 col stack
    self.c.nextlayout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXterm()
    self.testXeyes()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['float_info'] == {
        'y': 0, 'x': 400, 'w': 150, 'h': 100}
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_fullscreen()
    assert self.c.window.info()['floating'] == True
    assert self.c.window.info()['maximized'] == False
    assert self.c.window.info()['fullscreen'] == True
    assert self.c.window.info()['width'] == 800
    assert self.c.window.info()['height'] == 600
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_fullscreen()
    assert self.c.window.info()['floating'] == False
    assert self.c.window.info()['maximized'] == False
    assert self.c.window.info()['fullscreen'] == False
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0


@Xephyr(False, TestConfig())
def test_toggle_max(self):
    # change to 2 col stack
    self.c.nextlayout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXterm()
    self.testXeyes()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['float_info'] == {
        'y': 0, 'x': 400, 'w': 150, 'h': 100}
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_maximize()
    assert self.c.window.info()['floating'] == True
    assert self.c.window.info()['maximized'] == True
    assert self.c.window.info()['width'] == 800
    assert self.c.window.info()['height'] == 580
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_maximize()
    assert self.c.window.info()['floating'] == False
    assert self.c.window.info()['maximized'] == False
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0


@Xephyr(False, TestConfig())
def test_toggle_min(self):
    # Minimizing must not change the tiled geometry it restores to.
    # change to 2 col stack
    self.c.nextlayout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXterm()
    self.testXeyes()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['float_info'] == {
        'y': 0, 'x': 400, 'w': 150, 'h': 100}
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_minimize()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['floating'] == True
    assert self.c.window.info()['minimized'] == True
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_minimize()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['floating'] == False
    assert self.c.window.info()['minimized'] == False
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0


@Xephyr(False, TestConfig())
def test_toggle_floating(self):
    self.testXeyes()
    assert self.c.window.info()['floating'] == False
    self.c.window.toggle_floating()
    assert self.c.window.info()['floating'] == True
    self.c.window.toggle_floating()
    assert self.c.window.info()['floating'] == False
    self.c.window.toggle_floating()
    assert self.c.window.info()['floating'] == True
    #change layout (should still be floating)
    self.c.nextlayout()
    assert self.c.window.info()['floating'] == True


@Xephyr(False, TestConfig())
def test_floating_focus(self):
    # change to 2 col stack
    self.c.nextlayout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXterm()
    self.testXeyes()
    #self.testWindow("one")
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    self.c.window.toggle_floating()
    self.c.window.move_floating(10, 20, 42, 42)
    assert self.c.window.info()['name'] == 'xeyes'
    assert self.c.group.info()['focus'] == 'xeyes'
    # check what stack thinks is focus
    assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
    # change focus to xterm
    self.c.group.next_window()
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['name'] != 'xeyes'
    assert self.c.group.info()['focus'] != 'xeyes'
is None: r = ( radius + max(old_distance, self.distance) * (self.n - 1) * 0.5 + max(old_w, self.w) ) number_of_points = max( 6, 2 + 2 * int( 0.5 * abs(final_angle - initial_angle) / numpy.arccos(1 - tolerance / r) + 0.5 ), ) pieces = ( 1 if max_points == 0 else int(numpy.ceil(number_of_points / float(max_points))) ) number_of_points = number_of_points // pieces widths = numpy.linspace(old_w, self.w, pieces + 1) distances = numpy.linspace(old_distance, self.distance, pieces + 1) angles = numpy.linspace(initial_angle, final_angle, pieces + 1) if (self.w != 0) or (old_w != 0): for jj in range(pieces): for ii in range(self.n): self.polygons.append(numpy.zeros((number_of_points, 2))) r0 = ( radius + ii * distances[jj + 1] - (self.n - 1) * distances[jj + 1] * 0.5 ) old_r0 = ( radius + ii * distances[jj] - (self.n - 1) * distances[jj] * 0.5 ) pts2 = number_of_points // 2 pts1 = number_of_points - pts2 ang = numpy.linspace(angles[jj], angles[jj + 1], pts1) rad = numpy.linspace(old_r0 + widths[jj], r0 + widths[jj + 1], pts1) self.polygons[-1][:pts1, 0] = numpy.cos(ang) * rad + cx self.polygons[-1][:pts1, 1] = numpy.sin(ang) * rad + cy if widths[jj + 1] == 0: pts1 -= 1 pts2 += 1 if widths[jj] == 0: self.polygons[-1][: pts1 - 1] = numpy.array( self.polygons[-1][1:pts1] ) pts1 -= 1 pts2 += 1 ang = numpy.linspace(angles[jj + 1], angles[jj], pts2) rad = numpy.linspace(r0 - widths[jj + 1], old_r0 - widths[jj], pts2) if rad[0] <= 0 or rad[-1] <= 0: warnings.warn( "[GDSPY] Path arc with width larger than radius created: possible self-intersecting polygon.", stacklevel=2, ) self.polygons[-1][pts1:, 0] = numpy.cos(ang) * rad + cx self.polygons[-1][pts1:, 1] = numpy.sin(ang) * rad + cy self.length += abs((angles[jj + 1] - angles[jj]) * radius) if isinstance(layer, list): self.layers.extend((layer * (self.n // len(layer) + 1))[: self.n]) else: self.layers.extend(layer for _ in range(self.n)) if isinstance(datatype, list): self.datatypes.extend( (datatype * (self.n // len(datatype) + 
1))[: self.n] ) else: self.datatypes.extend(datatype for _ in range(self.n)) return self def turn( self, radius, angle, tolerance=0.01, number_of_points=None, max_points=199, final_width=None, final_distance=None, layer=0, datatype=0, ): """ Add a curved section to the path. Parameters ---------- radius : number Central radius of the section. angle : 'r', 'l', 'rr', 'll' or number Angle (in *radians*) of rotation of the path. The values 'r' and 'l' represent 90-degree turns cw and ccw, respectively; the values 'rr' and 'll' represent analogous 180-degree turns. tolerance : float Approximate curvature resolution. The number of points is automatically calculated. number_of_points : integer or None Manually define the number of vertices that form the object (polygonal approximation). Overrides `tolerance`. max_points : integer If the number of points in the element is greater than `max_points`, it will be fractured in smaller polygons with at most `max_points` each. If `max_points` is zero no fracture will occur. final_width : number If set, the paths of this segment will have their widths linearly changed from their current value to this one. final_distance : number If set, the distance between paths is linearly change from its current value to this one along this segment. layer : integer, list The GDSII layer numbers for the elements of each path. If the number of layers in the list is less than the number of paths, the list is repeated. datatype : integer, list The GDSII datatype for the elements of each path (between 0 and 255). If the number of datatypes in the list is less than the number of paths, the list is repeated. Returns ------- out : `Path` This object. Notes ----- The original GDSII specification supports only a maximum of 199 vertices per polygon. 
""" exact = True if angle == "r": delta_i = _halfpi delta_f = 0 elif angle == "rr": delta_i = _halfpi delta_f = -delta_i elif angle == "l": delta_i = -_halfpi delta_f = 0 elif angle == "ll": delta_i = -_halfpi delta_f = -delta_i elif angle < 0: exact = False delta_i = _halfpi delta_f = delta_i + angle else: exact = False delta_i = -_halfpi delta_f = delta_i + angle if self.direction == "+x": self.direction = 0 elif self.direction == "-x": self.direction = numpy.pi elif self.direction == "+y": self.direction = _halfpi elif self.direction == "-y": self.direction = -_halfpi elif exact: exact = False self.arc( radius, self.direction + delta_i, self.direction + delta_f, tolerance, number_of_points, max_points, final_width, final_distance, layer, datatype, ) if exact: self.direction = _directions_list[int(round(self.direction / _halfpi)) % 4] return self def parametric( self, curve_function, curve_derivative=None, tolerance=0.01, number_of_evaluations=5, max_points=199, final_width=None, final_distance=None, relative=True, layer=0, datatype=0, ): """ Add a parametric curve to the path. `curve_function` will be evaluated uniformly in the interval [0, 1] at least `number_of_points` times. More points will be added to the curve at the midpoint between evaluations if that points presents error larger than `tolerance`. Parameters ---------- curve_function : callable Function that defines the curve. Must be a function of one argument (that varies from 0 to 1) that returns a 2-element array with the coordinates of the curve. curve_derivative : callable If set, it should be the derivative of the curve function. Must be a function of one argument (that varies from 0 to 1) that returns a 2-element array. If None, the derivative will be calculated numerically. tolerance : number Acceptable tolerance for the approximation of the curve function by a finite number of evaluations. number_of_evaluations : integer Initial number of points where the curve function will be evaluated. 
According to `tolerance`, more evaluations will be performed. max_points : integer Elements will be fractured until each polygon has at most `max_points`. If `max_points` is less than 4, no fracture will occur. final_width : number or function If set to a number, the paths of this segment will have their widths linearly changed from their current value to this one. If set to a function, it must be a function of one argument (that varies from 0 to 1) and returns the width of the path. final_distance : number or function If set to a number, the distance between paths is linearly change from its current value to this one. If set to a function, it must be a function of one argument (that varies from 0 to 1) and returns the width of the path. relative : bool If True, the return values of `curve_function` are used as offsets from the current path position, i.e., to ensure a continuous path, ``curve_function(0)`` must be (0, 0). Otherwise, they are used as absolute coordinates. layer : integer, list The GDSII layer numbers for the elements of each path. If the number of layers in the list is less than the number of paths, the list is repeated. datatype : integer, list The GDSII datatype for the elements of each path (between 0 and 255). If the number of datatypes in the list is less than the number of paths, the list is repeated. Returns ------- out : `Path` This object. Notes ----- The norm of the vector returned by `curve_derivative` is not important. Only the direction is used. The original GDSII specification supports only a maximum of 199 vertices per polygon. Examples -------- >>> def my_parametric_curve(t): ... return (2**t, t**2) >>> def my_parametric_curve_derivative(t): ... return (0.69315 * 2**t, 2 * t) >>> my_path.parametric(my_parametric_curve, ... 
my_parametric_curve_derivative) """ err = tolerance ** 2 points = list(numpy.linspace(0, 1, number_of_evaluations)) values = [numpy.array(curve_function(u)) for u in points] delta = points[1] i = 1 while i < len(points): midpoint = 0.5 * (points[i] + points[i - 1]) midvalue = numpy.array(curve_function(midpoint)) test_err = (values[i] + values[i - 1]) / 2 - midvalue if test_err[0] ** 2 + test_err[1] ** 2 > err: delta = min(delta, points[i] - midpoint) points.insert(i, midpoint) values.insert(i, midvalue) else: i += 1 points = numpy.array(points) values = numpy.array(values) dvs = values[1:] - values[:-1] self.length += ((dvs[:, 0] ** 2 + dvs[:, 1]
</span><span style=\" font-style:italic; color:#ff0000;\">φ</span><span style=\" color:#ff0000;\"> are in rad; </span><span style=\" font-style:italic; color:#ff0000;\">t</span><span style=\" color:#ff0000;\"> is in seconds</span></p></body></html>")) self.ledt_phaseBMag.setToolTip(_translate("MainWindow", "The magnitude of Phase-B input")) self.lbl_phaseAOmega.setText(_translate("MainWindow", "<html><head/><body><p>ω<span style=\" vertical-align:sub;\">a</span><span style=\" font-style:normal;\"> :</span></p></body></html>")) self.ledt_phaseCDC.setToolTip(_translate("MainWindow", "The initial phase of Phase-C input")) self.lbl_phaseADC.setText(_translate("MainWindow", "<html><head/><body><p>DC<span style=\" vertical-align:sub;\">a</span><span style=\" font-style:normal;\"> :</span></p></body></html>")) self.ledt_phaseBDC.setToolTip(_translate("MainWindow", "The initial phase of Phase-B input")) self.ledt_phaseADC.setToolTip(_translate("MainWindow", "The initial phase of Phase-A input")) self.lbl_phaseBDC.setText(_translate("MainWindow", "<html><head/><body><p>DC<span style=\" vertical-align:sub;\">b</span><span style=\" font-style:normal;\"> :</span></p></body></html>")) self.lbl_pllOmega.setText(_translate("MainWindow", "<html><head/><body><p>ω<span style=\" vertical-align:sub;\">PLL</span><span style=\" font-style:normal;\"> :</span></p></body></html>")) self.ledt_pllOmega.setToolTip(_translate("MainWindow", "The angular frequency of the PLL")) self.ledt_phaseCOmega.setToolTip(_translate("MainWindow", "The angular frequency of Phase-C input")) self.lbl_phaseCOmega.setText(_translate("MainWindow", "<html><head/><body><p>ω<span style=\" vertical-align:sub;\">c</span><span style=\" font-style:normal;\"> :</span></p></body></html>")) self.ledt_phaseCPhi.setToolTip(_translate("MainWindow", "The initial phase of Phase-C input")) self.lbl_phaseCPhi.setText(_translate("MainWindow", "<html><head/><body><p>&phi;<span style=\" vertical-align:sub;\">c</span><span style=\" 
font-style:normal;\"> :</span></p></body></html>")) self.ledt_pllPhi.setToolTip(_translate("MainWindow", "The initial phase of the PLL")) self.ledt_time.setToolTip(_translate("MainWindow", "The total time")) self.ledt_phaseBOmega.setToolTip(_translate("MainWindow", "The angular frequency of Phase-B input")) self.lbl_phaseBPhi.setText(_translate("MainWindow", "<html><head/><body><p>&phi;<span style=\" vertical-align:sub;\">b</span><span style=\" font-style:normal;\"> :</span></p></body></html>")) self.lbl_phaseBOmega.setText(_translate("MainWindow", "<html><head/><body><p>ω<span style=\" vertical-align:sub;\">b</span><span style=\" font-style:normal;\"> :</span></p></body></html>")) self.ledt_phaseAOmega.setToolTip(_translate("MainWindow", "The angular frequency of Phase-A input")) self.lbl_phaseBMag.setText(_translate("MainWindow", "<html><head/><body><p>Mag<span style=\" vertical-align:sub;\">b</span><span style=\" font-style:normal;\"> : </span></p></body></html>")) self.lbl_phaseAPhi.setText(_translate("MainWindow", "<html><head/><body><p>&phi;<span style=\" vertical-align:sub;\">a</span><span style=\" font-style:normal;\"> :</span></p></body></html>")) self.lbl_phaseAMag.setText(_translate("MainWindow", "<html><head/><body><p>Mag<span style=\" vertical-align:sub;\">a</span><span style=\" font-style:normal;\"> : </span></p></body></html>")) self.lbl_pllPhase.setText(_translate("MainWindow", "<html><head/><body><p>&phi;<span style=\" vertical-align:sub;\">PLL</span><span style=\" font-style:normal;\"> :</span></p></body></html>")) self.lbl_time.setText(_translate("MainWindow", "<html><head/><body><p>t<span style=\" font-style:normal;\"> : </span></p></body></html>")) self.ledt_phaseAMag.setToolTip(_translate("MainWindow", "The magnitude of Phase-A input")) self.ledt_phaseCMag.setToolTip(_translate("MainWindow", "The magnitude of Phase-C input")) self.ledt_phaseAPhi.setToolTip(_translate("MainWindow", "The initial phase of Phase-A input")) 
self.lbl_phaseCMag.setText(_translate("MainWindow", "<html><head/><body><p>Mag<span style=\" vertical-align:sub;\">c</span><span style=\" font-style:normal;\"> : </span></p></body></html>")) self.ledt_phaseBPhi.setToolTip(_translate("MainWindow", "The initial phase of Phase-B input")) self.lbl_phaseCDC.setText(_translate("MainWindow", "<html><head/><body><p>DC<span style=\" vertical-align:sub;\">c</span><span style=\" font-style:normal;\"> :</span></p></body></html>")) self.btn_saveData.setText(_translate("MainWindow", "Save Data")) self.btn_saveFigures.setText(_translate("MainWindow", "Save Figures")) self.btn_update.setText(_translate("MainWindow", "Update")) self.lbl_copyright.setText(_translate("MainWindow", "© Dr.GAO, Siyu; 2017 - 2018\n" "<EMAIL>")) self.lbl_info.setText(_translate("MainWindow", "This is info")) self.menu_file.setTitle(_translate("MainWindow", "File")) self.menu_help.setTitle(_translate("MainWindow", "Help")) self.file_saveData.setText(_translate("MainWindow", "Save Data")) self.file_saveData.setShortcut(_translate("MainWindow", "Ctrl+Shift+S")) self.help_Documentation.setText(_translate("MainWindow", "Documentation")) self.help_About.setText(_translate("MainWindow", "About")) self.file_saveSetting.setText(_translate("MainWindow", "Save Setting")) self.file_Exit.setText(_translate("MainWindow", "Exit")) self.file_saveFigures.setText(_translate("MainWindow", "Save Figures")) self.file_saveFigures.setShortcut(_translate("MainWindow", "Ctrl+S")) self.file_Update.setText(_translate("MainWindow", "Update")) self.file_Update.setShortcut(_translate("MainWindow", "Ctrl+U")) def print_info(self, str_info): """ .. _print_info : This method displays the given string directly on the GUI. Parameters ---------- self str_info : str The message to be displayed. Returns ------- None Examples -------- .. code :: python self.print_info('All updated') """ self.lbl_info.setText(str_info) def updateAll(self): """ .. 
_updateAll : This method updates the calculated data first and then update the plots. User-defined inputs are then saved. Parameters ---------- self Returns ------- None Examples -------- .. code :: python self.btn_update.clicked.connect(self.updateAll) """ self.print_info('Updating all...') # if data update successful, then update the plots and save user inputs if self.update_data() == True: self.update_plots() self.save_setting() self.print_info('All updated') # if data update unsuccessful, prompt error message else: root = tk.Tk() root.withdraw() msgbox.showerror('Error', 'Failed to update data. Time cannot be zero.') root.destroy() self.print_info('Data update failed') def update_data(self): ''' .. _update_data : This method updates the calculated data according to the user inputs. Parameters ---------- self Returns ------- bool Examples -------- .. code :: python if self.update_data() == True: self.update_plots() self.save_setting() self.print_info('All updated') ''' print(gsyIO.date_time_now() + 'Updating') self.print_info('Updating data...') # temp list for printing to console-----------------------------------# list_temp = [] # convert user inputs to numerics-------------------------------------# self.phaseAMag = self.to_numeric(self.ledt_phaseAMag.text()) self.phaseAOmega = self.to_numeric(self.ledt_phaseAOmega.text()) self.phaseAPhi = self.to_numeric(self.ledt_phaseAPhi.text()) self.phaseADC = self.to_numeric(self.ledt_phaseADC.text()) list_temp.append(['Phase-A Mag = ', self.phaseAMag]) list_temp.append(['Phase-A Omega = ', self.phaseAOmega]) list_temp.append(['Phase-A Phi = ', self.phaseAPhi]) list_temp.append(['Phase-A DC = ', self.phaseADC]) self.phaseBMag = self.to_numeric(self.ledt_phaseBMag.text()) self.phaseBOmega = self.to_numeric(self.ledt_phaseBOmega.text()) self.phaseBPhi = self.to_numeric(self.ledt_phaseBPhi.text()) self.phaseBDC = self.to_numeric(self.ledt_phaseBDC.text()) list_temp.append(['Phase-B Mag = ', self.phaseBMag]) 
list_temp.append(['Phase-B Omega = ', self.phaseBOmega]) list_temp.append(['Phase-B Phi = ', self.phaseBPhi]) list_temp.append(['Phase-B DC = ', self.phaseBDC]) self.phaseCMag = self.to_numeric(self.ledt_phaseCMag.text()) self.phaseCOmega = self.to_numeric(self.ledt_phaseCOmega.text()) self.phaseCPhi = self.to_numeric(self.ledt_phaseCPhi.text()) self.phaseCDC = self.to_numeric(self.ledt_phaseCDC.text()) list_temp.append(['Phase-C Mag = ', self.phaseCMag]) list_temp.append(['Phase-C Omega = ', self.phaseCOmega]) list_temp.append(['Phase-C Phi = ', self.phaseCPhi]) list_temp.append(['Phase-C DC = ', self.phaseCDC]) self.pllOmega = self.to_numeric(self.ledt_pllOmega.text()) self.pllPhi = self.to_numeric(self.ledt_pllPhi.text()) list_temp.append(['PLL Omega = ', self.pllOmega]) list_temp.append(['PLL Phi = ', self.pllPhi]) self.timeEnd = self.to_numeric(self.ledt_time.text()) self.timeEnd = abs(self.timeEnd) # time cannot be negative list_temp.append(['Time = ', self.timeEnd]) # print to console----------------------------------------------------# for item in list_temp: print(gsyIO.date_time_now() + str(item[0]) +str(item[1])) # time cannot be zero-------------------------------------------------# if self.timeEnd == 0: root = tk.Tk() root.withdraw() msgbox.showerror('Error', 'Error when making phase data. 
Time cannot be zero.') root.destroy() return False # make three-phase data-----------------------------------------------# self.phaseAdata, self.time_samples = self.make_phase(self.phaseAMag, self.phaseAOmega, self.phaseAPhi, self.phaseADC) self.phaseBdata, _ = self.make_phase(self.phaseBMag, self.phaseBOmega, self.phaseBPhi, self.phaseBDC) # print(self.phaseBdata) self.phaseCdata, _ = self.make_phase(self.phaseCMag, self.phaseCOmega, self.phaseCPhi, self.phaseCDC) # calculations for Fortescue, Clarke and Park-------------------------# # Fortescue (self.phaseA_pos, self.phaseB_pos, self.phaseC_pos, self.phaseA_neg, self.phaseB_neg, self.phaseC_neg, self.phaseZero) = trf.cal_symm(self.phaseAdata, self.phaseBdata, self.phaseCdata) # Clarke self.alpha, self.beta, _ = trf.cal_clarke(self.phaseAdata, self.phaseBdata, self.phaseCdata) # Clarke symmetrical components (DSOGI) (self.alpha_pos, self.beta_pos, self.alpha_neg, self.beta_neg, _) = trf.cal_clarke_dsogi(self.phaseAdata, self.phaseBdata, self.phaseCdata) # Park self.thetaPLL = self.pllOmega * self.time_samples + self.pllPhi self.d, self.q, _ = trf.cal_park(self.thetaPLL, self.alpha, self.beta) # Park symmetrical components self.d_pos, self.q_pos, _ = trf.cal_park(self.thetaPLL, self.alpha_pos, self.beta_pos) self.d_neg, self.q_neg, _ = trf.cal_park(self.thetaPLL, self.alpha_neg, self.beta_neg) # set axes limits-----------------------------------------------------# self.xlim_max = self.timeEnd self.xlim_min = 0 # print('before limits') self.ylim_max = max(max(abs(self.phaseAdata)), max(abs(self.phaseBdata)), max(abs(self.phaseCdata)), max(abs(self.alpha)), max(abs(self.beta)), max(abs(self.d)), max(abs(self.q)), max(abs(self.phaseZero))) self.ylim_max *= 1.08 self.ylim_min = -1 * self.ylim_max self.print_info('Data updated') return True def update_plots(self): ''' .. _update_plots : This method updates the plots. Parameters ---------- self Returns ------- None Examples -------- .. 
code :: python if self.update_data() == True: self.update_plots() self.save_setting() self.print_info('All updated') ''' plt.close('all') self.print_info('Updating plots...') # function call, plot the ploar-domain self.fig_polar_plts = gsyPlt.pltPolarDom(self.ylim_max, self.phaseAdata, self.phaseBdata, self.phaseCdata, self.phaseA_pos, self.phaseB_pos, self.phaseC_pos, self.phaseA_neg, self.phaseB_neg, self.phaseC_neg, self.phaseZero, self.alpha, self.beta, self.alpha_pos, self.beta_pos, self.alpha_neg, self.beta_neg, self.d, self.q, self.d_pos, self.q_pos, self.d_neg, self.q_neg) # function call, plot the time-domain self.fig_time_plts = gsyPlt.pltTimeDom(self.time_samples, self.xlim_min, self.xlim_max, self.ylim_min, self.ylim_max, self.phaseAdata, self.phaseBdata, self.phaseCdata, self.phaseA_pos,self.phaseB_pos, self.phaseC_pos, self.phaseA_neg, self.phaseB_neg, self.phaseC_neg, self.phaseZero, self.alpha, self.beta, self.alpha_pos, self.beta_pos, self.alpha_neg, self.beta_neg, self.d, self.q, self.d_pos, self.q_pos, self.d_neg, self.q_neg) self.print_info('Plots updated') def to_numeric(self, str_input): ''' .. _to_numeric : This method uses the asteval package to evaluate the user inputs to numeric. Parameters ---------- self str_input : str The user input Returns ------- float The evaluated user input. Examples -------- .. code :: python self.phaseAMag = self.to_numeric(self.ledt_phaseAMag.text()) ''' aeval = Interpreter() # if empty string, return zero if len(str_input) == 0: return 0 # if not empty string and the evaluated var is both a number and also not boolean, # then return it # otherwise, return zero else: temp = aeval(str_input) if ( isinstance(temp, Number) == True ) and ( type(temp) != bool ): return temp else: return 0 def cal_samples(self): ''' .. _cal_samples : This method calculates the samples needed for plotting. The sampling rate is 6 times the maximum frequency. 
Parameters ---------- self Returns ------- samples : float The number of samples. Examples -------- .. code :: python samples = self.cal_samples() array_time = np.linspace(0, self.timeEnd, samples) ''' # find the maximum angular frequency max_omega = max(abs(self.phaseAOmega), abs(self.phaseBOmega), abs(self.phaseCOmega)) max_freq = max_omega / (2 * np.pi) if self.timeEnd == 0: raise ValueError('Time is zero. No data.') elif max_freq == 0: samples = 1e3 return samples else: # samples in one period of the maximum frequency = max_freq * 6 # period for the maximum frequecy = 1 / max_freq # how many periods of sampling = self.timeEnd / (1 /1 max_freq) # => # samples = max_freq * 6 * (self.timeEnd / (1 / max_freq)) samples = (max_freq ** 2) * 6 * self.timeEnd return samples def make_phase(self, mag, omega, phi, dc): ''' .. _make_phase : This method creates the phase signal in complex form. Parameters ---------- self mag : float The magnitude of the signal. omega : float The angular frequency of the signal. phi : float The initial phase of the signal. dc : float The DC offset of the signal. Returns ------- complex The signal in complex form. array_time : list The sampled time. Examples -------- .. code :: python self.phaseAdata, self.time_samples = self.make_phase(self.phaseAMag, self.phaseAOmega, self.phaseAPhi, self.phaseADC) ''' samples = self.cal_samples() array_time = np.linspace(0, self.timeEnd, samples) x = omega * array_time + phi return trf.to_complex(mag, x, dc), array_time def save_data(self):
<gh_stars>10-100 import random import cv2 import imgaug.augmenters as iaa import imgaug.parameters as iap import numpy as np import six.moves as sm from PIL import Image, ImageEnhance, ImageOps from scipy import ndimage from torchvision.models.resnet import resnet34 class RotateCropResize(iaa.meta.Augmenter): '''Class that allows performing rotations without introducing artifacts. This class is also extended to be used in conjunction with ImgAug. So far it only works with square images''' def __init__(self, rotate=(-15, 15), name=None, deterministic=False, random_state=None): super(RotateCropResize, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.rotate = iap.handle_continuous_param(rotate, "rotate", value_range=None, tuple_to_uniform=True, list_to_choice=True) def get_parameters(self): return [self.rotate] def _augment_images(self, images, random_state, parents, hooks): nb_images = len(images) samples = self.rotate.draw_samples((nb_images,), random_state=random_state) for i in sm.xrange(nb_images): images[i] = self.rotate_crop_resize(images[i], samples[i]) return images def _augment_heatmaps(self, heatmaps, random_state, parents, hooks): raise NotImplementedError def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks): raise NotImplementedError @staticmethod def rotate_crop_resize(im, angle): '''Rotate image and crop black artifacts''' H, W = im.shape[:2] # Expects square images assert H == W, 'Image is not square' x = ndimage.rotate(im, angle) edge = int(np.ceil(H * np.sin(abs(angle) * np.pi / 180))) x = x[edge:-(edge + 1), edge:-(edge + 1), ...] 
x = cv2.resize(x, (H, W)) return x class CLAHE(iaa.meta.Augmenter): def __init__(self, clip_limit=2.0, tile_grid_size=(8, 8), name=None, deterministic=False, random_state=None): super(CLAHE, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.clip_limit = clip_limit self.tile_grid_size = tile_grid_size def get_parameters(self): return [self.clip_limit, self.tile_grid_size] def _augment_images(self, images, random_state, parents, hooks): nb_images = len(images) for i in sm.xrange(nb_images): images[i] = self._clahe(images[i], self.clip_limit, self.tile_grid_size) return images def _augment_heatmaps(self, heatmaps, random_state, parents, hooks): raise NotImplementedError def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks): raise NotImplementedError @staticmethod def _clahe(img, clip_limit=2.0, tile_grid_size=(8, 8)): if img.dtype != np.uint8: raise TypeError('clahe supports only uint8 inputs') img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB) clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid_size) img[:, :, 0] = clahe.apply(img[:, :, 0]) img = cv2.cvtColor(img, cv2.COLOR_LAB2RGB) return img #################################################################################### ################################ AUG SCHEMES ####################################### #################################################################################### Aug1 = iaa.Sequential( [ # apply the following augmenters to most images iaa.Fliplr(0.5), # horizontally flip 50% of all images # iaa.Flipud(0.2), # vertically flip 20% of all images # iaa.Grayscale(alpha=(0.0, 1.0)), iaa.Sometimes(0.5, iaa.Affine( scale={"x": (0.95, 1.05), "y": (0.95, 1.05)}, shear=(-10, 10), # scale images to 80-120% of their size, individually per axis # translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}, # translate by -20 to +20 percent (per axis) rotate=(-10, 10), order=1, # use bilinear interpolation (fast) cval=0, # if mode is 
constant, use a cval between 0 and 255 mode='constant' )), iaa.OneOf([ iaa.GammaContrast((0.5, 1.5)), iaa.LinearContrast((0.5, 1.5)), iaa.ContrastNormalization((0.70, 1.30)), ]) ]) Aug2a = iaa.Sequential([ iaa.Fliplr(0.5), # iaa.Sometimes(0.5, iaa.OneOf([iaa.AverageBlur(k=3), iaa.MotionBlur(k=3)])), iaa.Add((-5, 5), per_channel=0.5), iaa.Multiply((0.9, 1.1), per_channel=0.5), iaa.Sometimes(0.5, iaa.Affine( scale={'x': (0.9, 1.1), 'y': (0.9, 1.1)}, translate_percent={'x': (-0.05, 0.05), 'y': (-0.05, 0.05)}, shear=(-5, 5), rotate=(-30, 30) )), ], random_order=True) Aug2b = iaa.Sequential([ iaa.Fliplr(0.5), # iaa.Sometimes(0.5, iaa.OneOf([iaa.AverageBlur(k=3), iaa.MotionBlur(k=3)])), # iaa.Add((-5, 5), per_channel=0.5), # iaa.Multiply((0.9, 1.1), per_channel=0.5), iaa.Sometimes(0.5, iaa.Affine( scale={'x': (0.9, 1.1), 'y': (0.9, 1.1)}, translate_percent={'x': (-0.05, 0.05), 'y': (-0.05, 0.05)}, shear=(-5, 5), rotate=(-30, 30) )), ], random_order=True) Aug2d = iaa.Sequential([ iaa.Fliplr(0.5), # iaa.Sometimes(0.5, iaa.OneOf([iaa.AverageBlur(k=3), iaa.MotionBlur(k=3)])), iaa.OneOf([ iaa.Add((-10, 10), per_channel=0.5), iaa.Multiply((0.9, 1.1), per_channel=0.5)]), iaa.Sometimes(0.5, iaa.Affine( scale={'x': (0.9, 1.1), 'y': (0.9, 1.1)}, translate_percent={'x': (-0.05, 0.05), 'y': (-0.05, 0.05)}, shear=(-5, 5), rotate=(-30, 30) )), ], random_order=True) Aug2e = iaa.Sequential([ iaa.Fliplr(0.5), iaa.Add((-5, 5), per_channel=0.5), iaa.Multiply((0.9, 1.1), per_channel=0.5), # iaa.Sometimes(0.5, iaa.OneOf([iaa.AverageBlur(k=3), iaa.MotionBlur(k=3)])), iaa.Sometimes(0.5, iaa.Affine( scale={'x': (0.8, 1.2), 'y': (0.8, 1.2)}, translate_percent={'x': (-0.1, 0.1), 'y': (-0.1, 0.1)}, shear=(-10, 10), rotate=(-30, 30) )), iaa.OneOf([ iaa.GammaContrast((0.5, 1.5)), iaa.LinearContrast((0.5, 1.5)), iaa.ContrastNormalization((0.70, 1.30)), ]) ], random_order=True) Aug3 = iaa.Sequential([ iaa.Fliplr(0.5), iaa.Sometimes(0.5, iaa.OneOf([iaa.AverageBlur(k=(3, 5)), iaa.MotionBlur(k=(3, 
5))])), iaa.Add((-15, 15), per_channel=0.5), iaa.Multiply((0.8, 1.2), per_channel=0.5), iaa.Sometimes(0.5, iaa.Affine( scale={'x': (0.8, 1.2), 'y': (0.8, 1.2)}, translate_percent={'x': (-0.15, 0.15), 'y': (-0.15, 0.15)}, shear=(-15, 15), rotate=(-30, 30) )), ] , random_order=True) class AutoAugment(object): def __init__(self): self.policies = [ ['Invert', 0.1, 7, 'Contrast', 0.2, 6], ['Rotate', 0.7, 2, 'TranslateX', 0.3, 9], ['Sharpness', 0.8, 1, 'Sharpness', 0.9, 3], ['ShearY', 0.5, 8, 'TranslateY', 0.7, 9], ['AutoContrast', 0.5, 8, 'Equalize', 0.9, 2], ['ShearY', 0.2, 7, 'Posterize', 0.3, 7], ['Color', 0.4, 3, 'Brightness', 0.6, 7], ['Sharpness', 0.3, 9, 'Brightness', 0.7, 9], ['Equalize', 0.6, 5, 'Equalize', 0.5, 1], ['Contrast', 0.6, 7, 'Sharpness', 0.6, 5], ['Color', 0.7, 7, 'TranslateX', 0.5, 8], ['Equalize', 0.3, 7, 'AutoContrast', 0.4, 8], ['TranslateY', 0.4, 3, 'Sharpness', 0.2, 6], ['Brightness', 0.9, 6, 'Color', 0.2, 8], ['Solarize', 0.5, 2, 'Invert', 0, 0.3], ['Equalize', 0.2, 0, 'AutoContrast', 0.6, 0], ['Equalize', 0.2, 8, 'Equalize', 0.6, 4], ['Color', 0.9, 9, 'Equalize', 0.6, 6], ['AutoContrast', 0.8, 4, 'Solarize', 0.2, 8], ['Brightness', 0.1, 3, 'Color', 0.7, 0], ['Solarize', 0.4, 5, 'AutoContrast', 0.9, 3], ['TranslateY', 0.9, 9, 'TranslateY', 0.7, 9], ['AutoContrast', 0.9, 2, 'Solarize', 0.8, 3], ['Equalize', 0.8, 8, 'Invert', 0.1, 3], ['TranslateY', 0.7, 9, 'AutoContrast', 0.9, 1], ] def __call__(self, img): img = Image.fromarray(img) img = apply_policy(img, self.policies[random.randrange(len(self.policies))]) return img operations = { 'ShearX': lambda img, magnitude: shear_x(img, magnitude), 'ShearY': lambda img, magnitude: shear_y(img, magnitude), 'TranslateX': lambda img, magnitude: translate_x(img, magnitude), 'TranslateY': lambda img, magnitude: translate_y(img, magnitude), 'Rotate': lambda img, magnitude: rotate(img, magnitude), 'AutoContrast': lambda img, magnitude: auto_contrast(img, magnitude), 'Invert': lambda img, magnitude: 
invert(img, magnitude), 'Equalize': lambda img, magnitude: equalize(img, magnitude), 'Solarize': lambda img, magnitude: solarize(img, magnitude), 'Posterize': lambda img, magnitude: posterize(img, magnitude), 'Contrast': lambda img, magnitude: contrast(img, magnitude), 'Color': lambda img, magnitude: color(img, magnitude), 'Brightness': lambda img, magnitude: brightness(img, magnitude), 'Sharpness': lambda img, magnitude: sharpness(img, magnitude), 'Cutout': lambda img, magnitude: cutout(img, magnitude), } def apply_policy(img, policy): if random.random() < policy[1]: img = operations[policy[0]](img, policy[2]) if random.random() < policy[4]: img = operations[policy[3]](img, policy[5]) return img def transform_matrix_offset_center(matrix, x, y): o_x = float(x) / 2 + 0.5 o_y = float(y) / 2 + 0.5 offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]]) reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]]) transform_matrix = offset_matrix @ matrix @ reset_matrix return transform_matrix def shear_x(img, magnitude): img = np.array(img) magnitudes = np.linspace(-0.3, 0.3, 11) transform_matrix = np.array([[1, random.uniform(magnitudes[magnitude], magnitudes[magnitude + 1]), 0], [0, 1, 0], [0, 0, 1]]) transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1]) affine_matrix = transform_matrix[:2, :2] offset = transform_matrix[:2, 2] img = np.stack([ndimage.interpolation.affine_transform( img[:, :, c], affine_matrix, offset) for c in range(img.shape[2])], axis=2) img = Image.fromarray(img) return img def shear_y(img, magnitude): img = np.array(img) magnitudes = np.linspace(-0.3, 0.3, 11) transform_matrix = np.array([[1, 0, 0], [random.uniform(magnitudes[magnitude], magnitudes[magnitude + 1]), 1, 0], [0, 0, 1]]) transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1]) affine_matrix = transform_matrix[:2, :2] offset = transform_matrix[:2, 2] img = 
np.stack([ndimage.interpolation.affine_transform( img[:, :, c], affine_matrix, offset) for c in range(img.shape[2])], axis=2) img = Image.fromarray(img) return img def translate_x(img, magnitude): img = np.array(img) magnitudes = np.linspace(-150 / 331, 150 / 331, 11) transform_matrix = np.array([[1, 0, 0], [0, 1, img.shape[1] * random.uniform(magnitudes[magnitude], magnitudes[magnitude + 1])], [0, 0, 1]]) transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1]) affine_matrix = transform_matrix[:2, :2] offset = transform_matrix[:2, 2] img = np.stack([ndimage.interpolation.affine_transform( img[:, :, c], affine_matrix, offset) for c in range(img.shape[2])], axis=2) img = Image.fromarray(img) return img def translate_y(img, magnitude): img = np.array(img) magnitudes = np.linspace(-150 / 331, 150 / 331, 11) transform_matrix = np.array( [[1, 0, img.shape[0] * random.uniform(magnitudes[magnitude], magnitudes[magnitude + 1])], [0, 1, 0], [0, 0, 1]]) transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1]) affine_matrix = transform_matrix[:2, :2] offset = transform_matrix[:2, 2] img = np.stack([ndimage.interpolation.affine_transform( img[:, :, c], affine_matrix, offset) for c in range(img.shape[2])], axis=2) img = Image.fromarray(img) return img def rotate(img, magnitude): img = np.array(img) magnitudes = np.linspace(-30, 30, 11) theta = np.deg2rad(random.uniform(magnitudes[magnitude], magnitudes[magnitude + 1])) transform_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]]) transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1]) affine_matrix = transform_matrix[:2, :2] offset = transform_matrix[:2, 2] img = np.stack([ndimage.interpolation.affine_transform( img[:, :, c], affine_matrix, offset) for c in range(img.shape[2])], axis=2) img = Image.fromarray(img) return img def auto_contrast(img, magnitude): img 
= ImageOps.autocontrast(img) return img def invert(img, magnitude): img = ImageOps.invert(img) return img def equalize(img, magnitude): img = ImageOps.equalize(img) return img def solarize(img, magnitude): magnitudes = np.linspace(0, 256, 11) img = ImageOps.solarize(img, random.uniform(magnitudes[magnitude], magnitudes[magnitude + 1])) return img def posterize(img, magnitude): magnitudes = np.linspace(4, 8, 11) img = ImageOps.posterize(img, int(round(random.uniform(magnitudes[magnitude], magnitudes[magnitude + 1])))) return img def contrast(img, magnitude): magnitudes = np.linspace(0.1, 1.9, 11) img = ImageEnhance.Contrast(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude + 1])) return img def color(img, magnitude): magnitudes = np.linspace(0.1, 1.9, 11) img = ImageEnhance.Color(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude + 1])) return img def brightness(img, magnitude): magnitudes = np.linspace(0.1, 1.9, 11) img = ImageEnhance.Brightness(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude + 1])) return img def sharpness(img, magnitude): magnitudes = np.linspace(0.1, 1.9, 11) img = ImageEnhance.Sharpness(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude + 1]))
<filename>cool/views/mixins.py
# encoding: utf-8
from django.core.exceptions import ValidationError
from django.db import transaction
from django.utils.translation import gettext as _, gettext_lazy
from rest_framework import fields
from rest_framework.exceptions import ValidationError as RestValidationError
from rest_framework.utils import model_meta

from cool.core.utils import get_search_results
from cool.views import CoolAPIException, ErrorCode
from cool.views.fields import JSONCheckField, SplitCharField
from cool.views.utils import (
    get_rest_field_from_model_field,
    parse_validation_error,
)


class PageMixin:
    """
    Mixin that returns paginated data.
    """
    PAGE_SIZE_MAX = 200
    DEFAULT_PAGE_SIZE = 100

    @classmethod
    def get_extend_param_fields(cls):
        # add `page` and `page_size` request parameters on top of the parent's
        assert 0 < cls.DEFAULT_PAGE_SIZE <= cls.PAGE_SIZE_MAX, (
            "DEFAULT_PAGE_SIZE mast between 0 and PAGE_SIZE_MAX in class %s" % cls.__name__
        )
        return super().get_extend_param_fields() + (
            (
                'page',
                fields.IntegerField(
                    label=gettext_lazy('Page number'),
                    default=1,
                    help_text=gettext_lazy('Start with %(start)s') % {'start': 1}
                )
            ),
            (
                'page_size',
                fields.IntegerField(
                    label=gettext_lazy('Page size'),
                    default=cls.DEFAULT_PAGE_SIZE,
                    min_value=1,
                    max_value=cls.PAGE_SIZE_MAX
                )
            ),
        )

    @classmethod
    def response_info_data(cls):
        # shape of the paginated response for API documentation
        return {
            'page_size': _('Page size'),
            'list': [super().response_info_data()],
            'page': _('Page number'),
            'total_page': _('Total page'),
            'total_data': _('Total data')
        }

    def get_page_context(self, request, queryset, serializer_cls):
        # slice `queryset` for the requested page and serialize it; pages
        # outside [1, total_page] yield an empty list
        page_size = request.params.page_size
        total_data = queryset.count()
        total_page = (total_data + page_size - 1) // page_size  # ceil division
        page = request.params.page
        data = []
        if total_data > 0 and 1 <= page <= total_page:
            start = (page - 1) * page_size
            data = serializer_cls(queryset[start:start + page_size], request=request, many=True).data
        return {'page_size': page_size, 'list': data, 'page': page, 'total_page': total_page, 'total_data': total_data}


class CRIDMixin:
    """
    Base mixin holding the target model, e.g.:

    class ObjectAddDMixin(Add, APIBase):
        model = models.Object
        add_fields = ['name', 'desc']
    """
    model = None

    @classmethod
    def get_model_field_info(cls):
        # cache DRF model_meta field info on the class
        if not hasattr(cls, '_model_field_info'):
            setattr(cls, '_model_field_info', model_meta.get_field_info(cls.model))
        return getattr(cls, '_model_field_info')


class SearchListMixin(PageMixin, CRIDMixin):
    # paginated list view with keyword search
    PAGE_SIZE_MAX = 1000
    model = None
    order_field = ('-pk', )

    @classmethod
    def get_extend_param_fields(cls):
        """
        Add the search parameter field.
        """
        ret = list()
        ret.extend(super().get_extend_param_fields())
        ret.append(('search_term', fields.CharField(label=_('Search key'), default='')))
        return tuple(ret)

    @property
    def name(self):
        """
        Name of the view shown in the API documentation.
        """
        return _("{model_name} List").format(model_name=self.model._meta.verbose_name)

    def get_search_fields(self):
        """
        Return the set of searchable fields for this model (the base class
        automatically builds the search-field set from indexed fields).
        """
        return self.model.get_search_fields()

    def get_queryset(self, request, queryset=None):
        if queryset is None:
            queryset = self.model.objects.order_by(*self.order_field)
        if request.params.search_term:
            # filter by the search keyword
            queryset, use_distinct = get_search_results(
                queryset, request.params.search_term, self.get_search_fields(), self.model
            )
            if use_distinct:
                queryset = queryset.distinct()
        return queryset

    def get_context(self, request, *args, **kwargs):
        return self.get_page_context(request, self.get_queryset(request), self.response_info_serializer_class)


class InfoMixin(CRIDMixin):
    # single-object detail view, looked up by pk and/or extra unique keys
    model = None
    pk_id = True
    ex_unique_ids = []

    @property
    def name(self):
        return _("{model_name} Info").format(model_name=self.model._meta.verbose_name)

    @classmethod
    def get_extend_param_fields(cls):
        # expose pk and/or each unique key as a request parameter; parameters
        # are required only when there is exactly one lookup key
        ret = list()
        ret.extend(super().get_extend_param_fields())
        if cls.model is not None:
            num = len(cls.ex_unique_ids)
            info = cls.get_model_field_info()
            if cls.pk_id:
                num += 1
                ret.append((info.pk.name, get_rest_field_from_model_field(
                    cls.model, info.pk.name, **{'default': None} if num > 1 else {'required': True}
                )))
            for ex_unique_id in cls.ex_unique_ids:
                assert ex_unique_id in info.fields_and_pk and info.fields_and_pk[ex_unique_id].unique, (
                    "Field %s not found in %s's unique fields" % (ex_unique_id, cls.model.__name__)
                )
                ret.append((ex_unique_id, get_rest_field_from_model_field(
                    cls.model, ex_unique_id, **{'default': None} if num > 1 else {'required': True}
                )))
            assert num > 0, 'Must set unique fields to ex_unique_ids or set True to pk_id'
        return tuple(ret)

    def get_obj(self, request, queryset=None):
        # filter by every supplied lookup parameter; at least one must be given
        if queryset is None:
            queryset = self.model.objects.all()
        blank = True
        param_fields = [self.get_model_field_info().pk.name]
        param_fields.extend(self.ex_unique_ids)
        for field_name in param_fields:
            field = getattr(request.params, field_name)
            if field is not None:
                blank = False
                queryset = queryset.filter(**{field_name: field})
        if blank:
            raise CoolAPIException(
                ErrorCode.ERROR_BAD_PARAMETER,
                data=_("{fields} cannot be empty at the same time").format(fields=",".join(param_fields))
            )
        return queryset.first()

    def get_context(self, request, *args, **kwargs):
        return self.response_info_serializer_class(self.get_obj(request), request=request).data


class AddMixin(CRIDMixin):
    # create view: builds a new model instance from `add_fields` parameters
    add_fields = []

    @property
    def name(self):
        return _("Add {model_name}").format(model_name=self.model._meta.verbose_name)

    @classmethod
    def get_extend_param_fields(cls):
        ret = list()
        ret.extend(super().get_extend_param_fields())
        if cls.model is not None:
            for add_field in cls.add_fields:
                field = get_rest_field_from_model_field(cls.model, add_field)
                ret.append((add_field, field))
        return tuple(ret)

    def init_fields(self, request, obj):
        # copy non-None request parameters onto the new instance
        for add_field in self.add_fields:
            value = getattr(request.params, add_field, None)
            if value is not None:
                setattr(obj, add_field, value)

    def save_obj(self, request, obj):
        obj.full_clean()
        obj.save(force_insert=True)

    def serializer_response(self, data, request):
        return self.response_info_serializer_class(data, request=request).data

    def get_context(self, request, *args, **kwargs):
        # create inside a transaction; model validation errors are re-raised
        # as DRF validation errors
        with transaction.atomic():
            try:
                obj = self.model()
                self.init_fields(request, obj)
                self.save_obj(request, obj)
            except ValidationError as e:
                raise RestValidationError(parse_validation_error(e))
        return self.serializer_response(obj, request=request)


class EditMixin(CRIDMixin):
    # update view: modifies `edit_fields` on an object found by `unique_key`
    unique_key = 'pk'
    edit_fields = []

    @property
    def name(self):
        return _("Edit {model_name}").format(model_name=self.model._meta.verbose_name)

    @classmethod
    def get_extend_param_fields(cls):
        ret = list()
        ret.extend(super().get_extend_param_fields())
        field = cls.get_model_field_info().fields_and_pk[cls.unique_key]
        assert field.unique, "Field %s is not unique" % cls.unique_key
        ret.append((field.name, get_rest_field_from_model_field(cls.model, field, required=True)))
        if cls.model is not None:
            for edit_field in cls.edit_fields:
                ret.append((edit_field, get_rest_field_from_model_field(cls.model, edit_field, default=None)))
        return tuple(ret)

    def get_obj(self, request):
        # NOTE(review): always reads request.params.id even though unique_key
        # is configurable — confirm behavior for unique_key != 'pk'
        return self.model.get_obj_by_pk_from_cache(request.params.id)

    def modify_obj(self, request, obj):
        # apply only the parameters that were actually supplied
        for edit_field in self.edit_fields:
            value = getattr(request.params, edit_field, None)
            if value is not None:
                setattr(obj, edit_field, value)

    def save_obj(self, request, obj):
        obj.full_clean()
        obj.save_changed()

    def serializer_response(self, data, request):
        return self.response_info_serializer_class(data, request=request).data

    def get_context(self, request, *args, **kwargs):
        with transaction.atomic():
            obj = self.get_obj(request)
            self.modify_obj(request, obj)
            self.save_obj(request, obj)
        return self.serializer_response(obj, request=request)


class DeleteMixin(CRIDMixin):
    # bulk-delete view: accepts a separated list of unique-key values
    unique_key = 'pk'
    unique_key_sep = ','

    @property
    def name(self):
        return _("Delete {model_name}").format(model_name=self.model._meta.verbose_name)

    @classmethod
    def get_extend_param_fields(cls):
        ret = list()
        ret.extend(super().get_extend_param_fields())
        field = cls.get_model_field_info().fields_and_pk[cls.unique_key]
        assert field.unique, "Field %s is not unique" % cls.unique_key
        # parameter name is the field name with an 's' suffix, e.g. 'ids'
        ret.append((field.name + 's', SplitCharField(
            label=_('Primary keys'), sep=cls.unique_key_sep,
            child=get_rest_field_from_model_field(cls.model, field, required=True)
        )))
        return tuple(ret)

    def get_queryset(self, request):
        # NOTE(review): filters on id__in / request.params.ids even though
        # unique_key is configurable — confirm behavior for unique_key != 'pk'
        return self.model.objects.filter(id__in=request.params.ids)

    def delete_object(self, request, obj):
        obj.delete()

    def delete_queryset(self, request, queryset):
        # delete one by one so per-object hooks (delete_object) apply
        for obj in queryset:
            self.delete_object(request, obj)

    def get_context(self, request, *args, **kwargs):
        with transaction.atomic():
            queryset = self.get_queryset(request)
            self.delete_queryset(request, queryset)
        return None


class ExtJSONCheckField(JSONCheckField):
    # JSON list field whose per-item validation depends on an ExtModelFieldKey:
    # items carrying a pk are edits, items without one are adds, and add items
    # must supply all required add fields
    def __init__(self, *args, **kwargs):
        self.ext_model_field_key = kwargs.pop('ext_model_field_key')
        assert isinstance(self.ext_model_field_key, ExtModelFieldKey)
        super().__init__(*args, **kwargs)

    def clean_dict_data(self, data):
        data = super().clean_dict_data(data)
        if self.ext_model_field_key.pk_field is None:
            return data
        errors = dict()
        if data.get(self.ext_model_field_key.pk_field, None) is not None:
            # item is an edit: required add-fields must still be present
            for field_name in self.ext_model_field_key.add_field_list:
                if field_name in self.ext_model_field_key.add_not_required_field_list:
                    continue
                if data.get(field_name, None) is None:
                    try:
                        self.children[field_name].fail('required')
                    except RestValidationError as e:
                        errors[field_name] = e.detail
        if errors:
            raise RestValidationError(errors)
        return data

    def run_children_validation(self, data):
        # no extra behavior; kept as an explicit extension point
        return super().run_children_validation(data)


class ExtModelFieldKey:
    def __init__(
        self, field_name, ext_model, ext_foreign_key,
        edit_field_list=(), add_field_list=(), add_not_required_field_list=(),
        add_default_fields=None, pk_field='id', delete_not_found=True,
    ):
        """
        Build an ext_model_fields entry for use by get_ext_model_fields.

        :param field_name: API parameter name
        :param ext_model: extension model class
        :param ext_foreign_key: foreign-key field on the extension model
        :param add_field_list: fields allowed when adding
        :param edit_field_list: fields allowed when editing
        :param add_not_required_field_list: add fields that are not required
        :param add_default_fields: default field values applied when adding
        :param pk_field: edit key (must be the primary key or a unique key;
            submitted items with this value are edits, without it adds);
            set to None to disallow edits
        :param delete_not_found: whether to delete rows not present in the
            submitted list
        """
        if add_default_fields is None:
            add_default_fields = dict()
        self.field_name = field_name
        self.ext_model = ext_model
        self.ext_foreign_key = ext_foreign_key
        self.add_field_list = add_field_list
        self.edit_field_list = edit_field_list
        self.add_not_required_field_list = add_not_required_field_list
        self.add_default_fields = add_default_fields
        self.pk_field = pk_field
        self.delete_not_found = delete_not_found

    def get_json_check_field(self, label):
        # build the ExtJSONCheckField (list of dicts) describing this extension
        children = dict()
        add_only = self.pk_field is None
        if not add_only:
            children[self.pk_field] = get_rest_field_from_model_field(
                self.ext_model, self.pk_field, default=None,
                help_text=_('If the parameter has a value, it is modified; if it has no value, it is added')
            )
        field_list = list()
        field_list.extend(self.add_field_list)
        if not add_only:
            field_list.extend(self.edit_field_list)
        for field in field_list:
            if field in children:
                continue
            add_not_required = field in self.add_not_required_field_list
            field_kwargs = dict()
            if not add_only or field in self.add_not_required_field_list:
                field_kwargs['default'] = None
            if not add_only:
                # per-field help text describing when it is required
                help_text = []
                if field in self.add_field_list:
                    help_text.append(_('not required when add') if add_not_required else _('required when add'))
                if field in self.edit_field_list:
                    help_text.append(_('not required when edit'))
                field_kwargs['help_text'] = ",".join(help_text)
            children[field] = get_rest_field_from_model_field(
                self.ext_model, field, **field_kwargs
            )
        return ExtJSONCheckField(
            label=label, children=children, is_list=True, default=None, ext_model_field_key=self
        )

    def gen_objs(self, data, obj, get_ext_obj):
        # partition the submitted items into objects to add, edit and delete,
        # validating each; raises RestValidationError keyed by item index
        params = dict()
        params[self.ext_foreign_key] = obj
        add_objs = []
        edit_objs = []
        edit_ids = []
        param_errors = dict()
        for idx, p in enumerate(data):
            if self.pk_field is not None and self.pk_field in p and p[self.pk_field] is not None:
                # item carries a pk: this is an edit
                if p[self.pk_field] in edit_ids:
                    param_errors[idx] = ValidationError(_('Primary key duplicate'))
                    continue
                edit_ids.append(p[self.pk_field])
                obj = get_ext_obj(self.ext_model, self.pk_field, p[self.pk_field])
                if obj is None:
                    param_errors[idx] = ValidationError(_('Modification item not found'))
                    continue
                # the found object must belong to the parent (foreign key match)
                for ext_key, ext_value in params.items():
                    if getattr(obj, ext_key) != ext_value:
                        param_errors[idx] = ValidationError(_('Modification item not found'))
                        break
                else:
                    for key, value in p.items():
                        if key in self.edit_field_list and value is not None:
                            setattr(obj, key, value)
                    try:
                        obj.full_clean()
                    except ValidationError as e:
                        param_errors[idx] = e
                    edit_objs.append(obj)
            else:
                # no pk: this is an add
                obj = self.ext_model(
                    **self.add_default_fields,
                    **params,
                    **dict(filter(lambda x: x[0] in self.add_field_list, p.items())),
                )
                try:
                    obj.full_clean()
                except ValidationError as e:
                    param_errors[idx] = e
                add_objs.append(obj)
        del_objs = []
        if self.delete_not_found:
            # everything attached to the parent but absent from the submission
            queryset = self.ext_model.objects.filter(**params)
            if edit_ids:
                queryset = queryset.exclude(**{"%s__in" % self.pk_field: edit_ids})
            del_objs = list(queryset)
        if param_errors:
            raise RestValidationError(parse_validation_error(param_errors))
        return add_objs, edit_objs, del_objs


class ExtManyToOneMixin:
    # mixin that maintains many-to-one extension rows alongside the main object
    @classmethod
    def get_ext_model_fields(cls):
        """
        Return the extension descriptors, e.g.:

        [
            ExtModelFieldKey()
        ]
        """
        return ()

    def get_ext_obj(self, ext_model, unique_field, unique_field_value):
        return ext_model.get_obj_by_unique_key_from_cache(**{unique_field: unique_field_value})

    def delete_ext_obj(self, obj):
        obj.delete()

    def delete_ext_objs(self, objs):
        for obj in objs:
            self.delete_ext_obj(obj)

    def save_ext_obj(self, obj):
        obj.save_changed()

    def edit_ext_objs(self, objs):
        for obj in objs:
            self.save_ext_obj(obj)

    def add_ext_objs(self, ext_model, objs):
        ext_model.objects.bulk_create(objs)

    def save_obj(self, request, obj):
        # save the main object first, then synchronize the extension rows
        super().save_obj(request, obj)
        self.do_ext(request, obj)

    def do_ext(self, request, obj):
        errors = dict()
        ex_objs = list()
        for model_fields in self.get_ext_model_fields():
            param = getattr(request.params, model_fields.field_name)
            if param is None:
                continue

            def _get_ext_obj(*args, **kwargs):
                return self.get_ext_obj(*args, **kwargs)
            try:
                add_objs, edit_objs, del_objs = model_fields.gen_objs(param, obj, _get_ext_obj)
                ex_objs.append((model_fields.ext_model, add_objs, edit_objs, del_objs))
            except RestValidationError as e:
                errors[model_fields.field_name] =
<filename>python/pygame-1.9.1release/test/sprite_test.py #################################### IMPORTS ################################### if __name__ == '__main__': import sys import os pkg_dir = os.path.split(os.path.abspath(__file__))[0] parent_dir, pkg_name = os.path.split(pkg_dir) is_pygame_pkg = (pkg_name == 'tests' and os.path.split(parent_dir)[1] == 'pygame') if not is_pygame_pkg: sys.path.insert(0, parent_dir) else: is_pygame_pkg = __name__.startswith('pygame.tests.') if is_pygame_pkg: from pygame.tests.test_utils \ import test_not_implemented, unordered_equality, unittest else: from test.test_utils \ import test_not_implemented, unordered_equality, unittest import pygame from pygame import sprite ################################# MODULE LEVEL ################################# class SpriteModuleTest( unittest.TestCase ): pass ######################### SPRITECOLLIDE FUNCTIONS TEST ######################### class SpriteCollideTest( unittest.TestCase ): def setUp(self): self.ag = sprite.AbstractGroup() self.ag2 = sprite.AbstractGroup() self.s1 = sprite.Sprite(self.ag) self.s2 = sprite.Sprite(self.ag2) self.s3 = sprite.Sprite(self.ag2) self.s1.image = pygame.Surface((50,10), pygame.SRCALPHA, 32) self.s2.image = pygame.Surface((10,10), pygame.SRCALPHA, 32) self.s3.image = pygame.Surface((10,10), pygame.SRCALPHA, 32) self.s1.rect = self.s1.image.get_rect() self.s2.rect = self.s2.image.get_rect() self.s3.rect = self.s3.image.get_rect() self.s2.rect.move_ip(40, 0) self.s3.rect.move_ip(100, 100) def test_spritecollide__works_if_collided_cb_is_None(self): # Test that sprites collide without collided function. self.assertEqual ( sprite.spritecollide ( self.s1, self.ag2, dokill = False, collided = None ), [self.s2] ) def test_spritecollide__works_if_collided_cb_not_passed(self): # Should also work when collided function isn't passed at all. 
self.assertEqual(sprite.spritecollide ( self.s1, self.ag2, dokill = False), [self.s2] ) def test_spritecollide__collided_must_be_a_callable(self): # Need to pass a callable. self.assertRaises ( TypeError, sprite.spritecollide, self.s1, self.ag2, dokill = False, collided = 1 ) def test_spritecollide__collided_defaults_to_collide_rect(self): # collide_rect should behave the same as default. self.assertEqual ( sprite.spritecollide ( self.s1, self.ag2, dokill = False, collided = sprite.collide_rect ), [self.s2] ) def test_collide_rect_ratio__ratio_of_one_like_default(self): # collide_rect_ratio should behave the same as default at a 1.0 ratio. self.assertEqual ( sprite.spritecollide ( self.s1, self.ag2, dokill = False, collided = sprite.collide_rect_ratio(1.0) ), [self.s2] ) def test_collide_rect_ratio__collides_all_at_ratio_of_twenty(self): # collide_rect_ratio should collide all at a 20.0 ratio. self.assert_ ( unordered_equality ( sprite.spritecollide ( self.s1, self.ag2, dokill = False, collided = sprite.collide_rect_ratio(20.0) ), [self.s2, self.s3] ) ) def test_collide_circle__no_radius_set(self): # collide_circle with no radius set. self.assertEqual ( sprite.spritecollide ( self.s1, self.ag2, dokill = False, collided = sprite.collide_circle ), [self.s2] ) def test_collide_circle_ratio__no_radius_and_ratio_of_one(self): # collide_circle_ratio with no radius set, at a 1.0 ratio. self.assertEqual ( sprite.spritecollide ( self.s1, self.ag2, dokill = False, collided = sprite.collide_circle_ratio(1.0) ), [self.s2] ) def test_collide_circle_ratio__no_radius_and_ratio_of_twenty(self): # collide_circle_ratio with no radius set, at a 20.0 ratio. self.assert_ ( unordered_equality ( sprite.spritecollide ( self.s1, self.ag2, dokill = False, collided = sprite.collide_circle_ratio(20.0) ), [self.s2, self.s3] ) ) def test_collide_circle__with_radii_set(self): # collide_circle with a radius set. 
self.s1.radius = 50 self.s2.radius = 10 self.s3.radius = 400 self.assert_ ( unordered_equality ( sprite.spritecollide ( self.s1, self.ag2, dokill = False, collided = sprite.collide_circle ), [self.s2, self.s3] ) ) def test_collide_circle_ratio__with_radii_set(self): self.s1.radius = 50 self.s2.radius = 10 self.s3.radius = 400 # collide_circle_ratio with a radius set. self.assert_ ( unordered_equality ( sprite.spritecollide ( self.s1, self.ag2, dokill = False, collided = sprite.collide_circle_ratio(0.5) ), [self.s2, self.s3] ) ) def test_collide_mask(self): # make some fully opaque sprites that will collide with masks. self.s1.image.fill((255,255,255,255)) self.s2.image.fill((255,255,255,255)) self.s3.image.fill((255,255,255,255)) # masks should be autogenerated from image if they don't exist. self.assertEqual ( sprite.spritecollide ( self.s1, self.ag2, dokill = False, collided = sprite.collide_mask ), [self.s2] ) self.s1.mask = pygame.mask.from_surface(self.s1.image) self.s2.mask = pygame.mask.from_surface(self.s2.image) self.s3.mask = pygame.mask.from_surface(self.s3.image) # with set masks. self.assertEqual ( sprite.spritecollide ( self.s1, self.ag2, dokill = False, collided = sprite.collide_mask ), [self.s2] ) def test_collide_mask(self): # make some sprites that are fully transparent, so they won't collide. 
self.s1.image.fill((255,255,255,0)) self.s2.image.fill((255,255,255,0)) self.s3.image.fill((255,255,255,0)) self.s1.mask = pygame.mask.from_surface(self.s1.image, 255) self.s2.mask = pygame.mask.from_surface(self.s2.image, 255) self.s3.mask = pygame.mask.from_surface(self.s3.image, 255) self.assertFalse ( sprite.spritecollide ( self.s1, self.ag2, dokill = False, collided = sprite.collide_mask ) ) def todo_test_spritecollideany(self): # __doc__ (as of 2008-08-02) for pygame.sprite.spritecollideany: # pygame.sprite.spritecollideany(sprite, group) -> sprite # finds any sprites that collide # # given a sprite and a group of sprites, this will # return return any single sprite that collides with # with the given sprite. If there are no collisions # this returns None. # # if you don't need all the features of the # spritecollide function, this function will be a # bit quicker. # # collided is a callback function used to calculate if # two sprites are colliding. it should take two sprites # as values, and return a bool value indicating if # they are colliding. if collided is not passed, all # sprites must have a "rect" value, which is a # rectangle of the sprite area, which will be used # to calculate the collision. # # Test if the given Sprite intersects with any Sprites in a Group. # Intersection is determined by comparing of the Sprite.rect attribute # of each Sprite. # # This collision test can be faster than pygame.sprite.spritecollide() # since it has less work to do. self.fail() def todo_test_groupcollide(self): # __doc__ (as of 2008-08-02) for pygame.sprite.groupcollide: # pygame.sprite.groupcollide(groupa, groupb, dokilla, dokillb) -> dict # collision detection between group and group # # given two groups, this will find the intersections # between all sprites in each group. it returns a # dictionary of all sprites in the first group that # collide. the value for each item in the dictionary # is a list of the sprites in the second group it # collides with. 
the two dokill arguments control if # the sprites from either group will be automatically # removed from all groups. # collided is a callback function used to calculate if # two sprites are colliding. it should take two sprites # as values, and return a bool value indicating if # they are colliding. if collided is not passed, all # sprites must have a "rect" value, which is a # rectangle of the sprite area, which will be used # to calculate the collision. # # This will find intersections between all the Sprites in two groups. # Intersection is determined by comparing the Sprite.rect attribute of # each Sprite. # # Every Sprite inside group1 is added to the return dictionary. The # value for each item is the list of Sprites in group2 that intersect. # # If either dokill argument is True, the intersecting Sprites will be # removed from their respective Group. self.fail() def todo_test_collide_rect(self): # __doc__ (as of 2008-08-02) for pygame.sprite.collide_rect: # collision detection between two sprites, using rects. # pygame.sprite.collide_rect(left, right): return bool # # Tests for collision between two sprites. Uses the # pygame rect colliderect function to calculate the # collision. Intended to be passed as a collided # callback function to the *collide functions. # Sprites must have a "rect" attributes. # # New in pygame 1.8.0 # # Tests for collision between two sprites. Uses the pygame rect # colliderect function to calculate the collision. Intended to be # passed as a collided callback function to the *collide functions. # Sprites must have a "rect" attributes. # # New in pygame 1.8.0 self.fail() ################################################################################ class AbstractGroupTypeTest( unittest.TestCase ): def test_has( self ): " See if AbstractGroup.has() works as expected. 
" ag = sprite.AbstractGroup() ag2 = sprite.AbstractGroup() s1 = sprite.Sprite(ag) s2 = sprite.Sprite(ag) s3 = sprite.Sprite(ag2) s4 = sprite.Sprite(ag2) self.assertEqual(True, s1 in ag ) self.assertEqual(True, ag.has(s1) ) self.assertEqual(True, ag.has([s1, s2]) ) # see if one of them not being in there. self.assertNotEqual(True, ag.has([s1, s2, s3]) ) # see if a second AbstractGroup works. self.assertEqual(True, ag2.has(s3) ) def todo_test_add(self): # __doc__ (as of 2008-08-02) for pygame.sprite.AbstractGroup.add: # add(sprite, list, or group, ...) # add sprite to group # # Add a sprite or sequence of sprites to a group. self.fail() def todo_test_add_internal(self): # __doc__ (as of 2008-08-02) for pygame.sprite.AbstractGroup.add_internal: # self.fail() def todo_test_clear(self): # __doc__ (as of 2008-08-02) for pygame.sprite.AbstractGroup.clear: # clear(surface, bgd) # erase the previous position of all sprites # # Clears the area of all drawn sprites. the bgd # argument should be Surface
<gh_stars>1-10 import unittest import os from operator import __mul__ import numpy as np import cv2 from matplotlib import pyplot as plt from pathlib import Path import numpy as np import json from pybx import anchor from pybx import vis from pybx.basics import * def test_all_nbs(): # !/usr/bin/env python # coding: utf-8 # <a href="https://colab.research.google.com/github/thatgeeman/pybx/blob/master/nbs/pybx_walkthrough.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # >⚠ Note: walkthrough for v0.2.1 ⚠ # > # >run `! pip freeze | grep pybx` to see the installed version. # # PyBx # # PyBx is a simple python package to generate anchor boxes (aka default/prior boxes) for object detection tasks. # # SSD for Object Detection # # This walkthrough is build around the [Single-Shot Detection (SSD)](https://arxiv.org/pdf/1512.02325.pdf) algorithm. The SSD can be imagined as an encoder-decoder model architecture, where the input image is fed into a `backbone` (encoder) to generate inital features, which then goes through a series of 2D convolution layers (decoders) to perform further feature extraction/prediction tasks at each layer. For a single image, each layer in the decoder produces a total of `N x (4 + C)` predictions. Here `C` is the number of classes (plus one for `background` class) in the detection task and 4 comes from the corners of the rectangular bounding box. # # ### Usage of the term Feature/Filter/Channel # # Channel: RGB dimensione, also called a Filter # # Feature: (W,H) of a single channel # ## Example case # For this example, we assume that our input image is a single channel image is of shape `[B, 3, 300, 300]` where `B` is the batch size. Assuming that a pretrained `VGG-16` is our model `backbone`, the output feature shape would be: `[B, 512, 37, 37]`. Meaning that, 512 channels of shape `[37, 37]` were extracted from each image in the batch. 
In the subsequent decoder layers, for simplicity we double the channels while halving the feature shape using `3x3` `stride=2` convolutions (except for first decoder layer where convolution is not applied). This results in the following shapes: # # ```python # torch.Size([-1, 512, 37, 37]) # inp from vgg-16 encoder # torch.Size([-1, 1024, 18, 18]) # first layer logits # torch.Size([-1, 2048, 8, 8]) # second layer logits # torch.Size([-1, 4096, 3, 3]) # third layer logits # ``` # # <img src="https://lilianweng.github.io/lil-log/assets/images/SSD-box-scales.png" width="500" /> # ## Sample image # Image obtained from USC-SIPI Image Database. # The USC-SIPI image database is a collection of digitized images. It is maintained primarily to support research in image processing, image analysis, and machine vision. The first edition of the USC-SIPI image database was distributed in 1977 and many new images have been added since then. # Set working directory. # In[ ]: os.environ["DATADIR"] = "./data" # Install package if not already present. # In[ ]: # ## About anchor Boxes # # We are expected to provide our models with "good" anchor (aka default/prior) boxes. Strong opinion: Our model is [only as good as the initial anchor boxes](https://towardsdatascience.com/anchor-boxes-the-key-to-quality-object-detection-ddf9d612d4f9) that we generate. Inorder to improve the coverage of our model, we tend to add additional anchor boxes of different aspect ratios. Now, for a single image, each layer in the decoder produces a total of `N x A x (4 + C)` predictions. Here `A` is the number of aspect ratios to generate additional anchor boxes. # # ### Task description # # Our aim is to find the maximum number of anchor boxes in varying sizes `feature_szs` and aspect ratios `asp_ratios` across the entire image. We apply no filtering to get rid of low (IOU) anchors. 
# # <img src="https://lilianweng.github.io/lil-log/assets/images/SSD-framework.png" width="600" /> # In[ ]: feature_szs = [(37, 37), (18, 18), (8, 8), (3, 3)] # In[ ]: asp_ratios = [1 / 2., 1., 2.] # In[ ]: n_boxes = sum([__mul__(*f) for f in feature_szs]) print(f'minimum anchor boxes with 1 aspect ratio: {n_boxes}') print(f'minimum anchor boxes with {len(asp_ratios)} aspect ratios: {n_boxes * len(asp_ratios)}') # # Loading an image # In[ ]: # In[ ]: datadir = Path(os.environ["DATADIR"]) datadir # In[ ]: im = cv2.cvtColor(cv2.imread((datadir / "image.jpg").as_posix()), cv2.COLOR_BGR2RGB) im = cv2.resize(im, (300, 300), interpolation=cv2.INTER_NEAREST) _ = plt.imshow(im) # In[ ]: im.size # We also make 2 truth bounding boxes `bbox` for this image around the clock and the photoframe in `pascal voc` format: # In[ ]: bbox = [dict(x_min=150, y_min=70, x_max=270, y_max=220, label='clock'), dict(x_min=10, y_min=180, x_max=115, y_max=260, label='frame'), ] bbox # Save annotations as a json file. # In[ ]: with open(datadir / 'annots.json', 'w') as f: f.write(json.dumps(bbox)) # In[ ]: type(bbox[0]) # # Using PyBx # In[ ]: image_sz = (300, 300, 3) # W, H, C feature_sz = (3, 3) # number of features along W, H asp_ratio = 1. # aspect ratio of the anchor box anchors, labels = anchor.bx(image_sz, feature_sz, asp_ratio) # There are several ways to visualize the anchors. First we import the `vis` method. # In[ ]: # In[ ]: bbox # In[ ]: image_sz # ### Visualizing the locally stored `image.png` with provided bounding boxes. # In[ ]: im.size # In[ ]: plt.imshow(im) # In[ ]: image_arr = np.array(im) # In[ ]: v = vis.VisBx(image_arr=image_arr, annots=bbox, color={'frame': 'red', 'clock': 'blue'}) # In[ ]: v.show() # without any arguments # Pass arguments to `show` method to overlay with calculated anchor boxes. 
# In[ ]: v.show(anchors, labels) # ### Using the `sample=True` parameter to load a file # By default it looks in the current path `pth="."` for an image file `img_fn="image.png"` and annotations file `ann_fn="annots.json"`. # In[ ]: datadir # In[ ]: v = vis.VisBx(image_sz=(300, 300, 3), color={'frame': 'red', 'clock': 'blue'}, img_fn=datadir / 'image.jpg', ann_fn=datadir / 'annots.json') # In[ ]: v.show(anchors, labels) # ### Using randomly generated noise as `image_arr` # In[ ]: v = vis.VisBx(image_sz=(300, 300, 3)) # In[ ]: v.show(anchors, labels) # The boxes in white are the anchor boxes. We can hightlight them with a different color by looking up specific box labels. # In[ ]: anchors.shape, labels # We see there are 9 labels and box coordinates reported by `anchor.bx()` for our `feature_sz=3x3` and single `asp_ratio`. Once instantiated as a `MultiBx`, we can use the `mbx()` method. # In[ ]: b = mbx(anchors, labels) # instantiates MultiBx for us # In[ ]: type(b) # We can iterate over a `MultiBx` object using list comprehension to understand the internal checks: # In[ ]: [(i, b_.valid()) for i, b_ in enumerate(b)] # only valid boxes shown # `b_.valid()` returned `True` meaning that the box is considered valid. # # We can also calculate the areas of these boxes. # Each box `b_` of the `MultiBx` b is of type `BaseBx` which has some additional methods. # In[ ]: [b_.area() for b_ in b] # Each `BaseBx` is also pseudo-iterable (calling an iterator returns `self` itself and not the coordinates or labels). # In[ ]: b_ = b[0] [x for x in b_] # We can also stack the `BxTypes`. Issues a `UserWarning` if we try to add `BaseBx`+`MultiBx` or `BaseBx`+`BaseBx`. This is to preserve the philosophy of a `BaseBx`, since adding something to a `BaseBx`, which should technically only hold a single coordinate and label, makes the result a `MultiBx`. # In[ ]: b_s = b_ + b_ b_s.coords, b_s.label # To safely add two boxes, use `basics.stack_bxs()` method. 
# In[ ]: stack_bxs(b_, b_).coords # From `v1.0.0` `BaseBx` can be iterated. What does it mean to iterate a single coordinate. Technically it should return each point of the coordinate. But `BaseBx` behaves differently on being iterated. It returns the `BaseBx` itself. # In[ ]: [x for x in b_] # To truly iterate over the coordinates and label, one must do: # In[ ]: [x for x in b_.values()] # In[ ]: # or # [x.label for x in b_] [x.coords for x in b_] #
# -*- coding: utf-8 -*- # Copyright CERN since 2018 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from json import dumps from flask import Flask, Blueprint, Response, request, jsonify from rucio.api.account_limit import get_rse_account_usage from rucio.api.rse import add_rse, update_rse, list_rses, del_rse, add_rse_attribute, list_rse_attributes, \ del_rse_attribute, add_protocol, get_rse_protocols, del_protocols, update_protocols, get_rse, set_rse_usage, \ get_rse_usage, list_rse_usage_history, set_rse_limits, get_rse_limits, delete_rse_limits, parse_rse_expression, \ add_distance, get_distance, update_distance, delete_distance, list_qos_policies, add_qos_policy, delete_qos_policy from rucio.common.exception import Duplicate, AccessDenied, RSENotFound, RSEOperationNotSupported, \ RSEProtocolNotSupported, InvalidObject, RSEProtocolDomainNotSupported, RSEProtocolPriorityError, \ InvalidRSEExpression, RSEAttributeNotFound, CounterNotFound, InvalidPath, ReplicaNotFound from rucio.common.utils import render_json, APIEncoder from rucio.rse import rsemanager from rucio.web.rest.flaskapi.v1.common import request_auth_env, response_headers, check_accept_header_wrapper_flask, \ try_stream, generate_http_error_flask, ErrorHandlingMethodView, json_parameters, param_get class RSEs(ErrorHandlingMethodView): """ List all RSEs in the database. """ @check_accept_header_wrapper_flask(['application/x-json-stream']) def get(self): """ --- summary: List RSEs description: Lists all RSEs. 
tags: - Rucio Storage Elements requestBody: content: application/json: schema: type: object properties: expression: description: An RSE expression. type: string responses: 200: description: OK content: application/json: schema: description: A list with the corresponding rses. type: array items: type: object properties: id: description: The rse id. type: string rse: description: The name of the rse. type: string rse_type: description: The type of the rse. type: string deterministic: description: If the rse is deterministic. type: boolean volatile: description: If the rse is volatile. type: boolean staging_area: description: Is this rse a staging area? type: boolean city: description: The city of the rse. type: string region_code: description: The region_code of the rse. type: string country_name: description: The country name of the rse. type: string continent: description: The continent of the rse. type: string time_zone: description: The time zone of the rse. type: string ISP: description: The isp of the rse. type: string ASN: description: The asn of the rse. type: string longitude: description: The longitude of the rse. type: number latitude: description: The latitude of the rse. type: number availability: description: The availability of the rse. type: integer usage: description: The usage of the rse. type: integer qos_class: description: The quality of service class. 
type: string 400: description: Invalid RSE expression 401: description: Invalid Auth Token 406: description: Not acceptable """ expression = request.args.get('expression', default=None) if expression: try: def generate(vo): for rse in parse_rse_expression(expression, vo=vo): yield render_json(rse=rse) + '\n' return try_stream(generate(vo=request.environ.get('vo'))) except (InvalidRSEExpression, InvalidObject) as error: return generate_http_error_flask(400, error) else: def generate(vo): for rse in list_rses(vo=vo): yield render_json(**rse) + '\n' return try_stream(generate(vo=request.environ.get('vo'))) class RSE(ErrorHandlingMethodView): """ Create, update, get and disable RSE. """ def post(self, rse): """ --- summary: Create RSE description: Creates a RSE with all the metadata. tags: - Rucio Storage Elements parameters: - name: rse in: path description: The name of the Rucio Storage Element name. schema: type: string style: simple requestBody: content: application/json: schema: type: object properties: deterministic: description: If the pfn is generated deterministicly. type: boolean volatile: description: RSE cache. type: boolean city: description: The city of the RSE. type: string staging_area: description: Staging area. type: string region_code: description: The region code of the RSE. type: string country_name: description: The country name of the RSE. type: string continent: description: The continent of the RSE. type: string time_zone: description: The time zone of the RSE. type: string ISP: description: The internet service provider of the RSE. type: string rse_type: description: The rse type. type: string enum: ["DISK", "TAPE"] latitute: description: The latitute of the RSE. type: float longitude: description: The longitude of the RSE. type: float ASN: description: The access service network of the RSE. type: string availability: description: The availability of the RSE. 
type: integer responses: 201: description: OK content: application/json: schema: type: string enum: ["Created"] 400: description: Cannot decode json parameter dictionary 401: description: Invalid Auth Token 404: description: RSE not found 409: description: RSE already exists. """ kwargs = { 'deterministic': True, 'volatile': False, 'city': None, 'staging_area': False, 'region_code': None, 'country_name': None, 'continent': None, 'time_zone': None, 'ISP': None, 'rse_type': None, 'latitude': None, 'longitude': None, 'ASN': None, 'availability': None, } if request.get_data(as_text=True): parameters = json_parameters() for keyword in kwargs.keys(): kwargs[keyword] = param_get(parameters, keyword, default=kwargs[keyword]) kwargs['issuer'] = request.environ.get('issuer') kwargs['vo'] = request.environ.get('vo') try: add_rse(rse, **kwargs) except InvalidObject as error: return generate_http_error_flask(400, error) except AccessDenied as error: return generate_http_error_flask(401, error) except RSENotFound as error: return generate_http_error_flask(404, error) except Duplicate as error: return generate_http_error_flask(409, error) return 'Created', 201 def put(self, rse): """ --- summary: Update RSE description: Update RSE properties. tags: - Rucio Storage Elements parameters: - name: rse in: path description: The name of the Rucio Storage Element name. schema: type: string style: simple requestBody: content: application/json: schema: type: object properties: availability_raed: description: The vailability of the RSE. type: boolean availability_write: description: The vailability of the RSE. type: boolean availability_delete: description: The vailability of the RSE. type: boolean deterministic: description: If the pfn is generated deterministicly. type: boolean volatile: description: RSE cache. type: boolean city: description: The city of the RSE. type: string staging_area: description: Staging area. type: string region_code: description: The region code of the RSE. 
type: string country_name: description: The country name of the RSE. type: string time_zone: description: The time zone of the RSE. type: string rse_type: description: The rse type. type: string enum: ["DISK", "TAPE"] latitute: description: The latitute of the RSE. type: float longitude: description: The longitude of the RSE. type: float responses: 201: description: OK content: application/json: schema: type: string enum: ["Created"] 400: description: Cannot decode json parameter dictionary 401: description: Invalid Auth Token 404: description: RSE not found """ kwargs = { 'parameters': json_parameters(optional=True), 'issuer': request.environ.get('issuer'), 'vo': request.environ.get('vo'), } try: update_rse(rse, **kwargs) except InvalidObject as error: return generate_http_error_flask(400, error) except AccessDenied as error: return generate_http_error_flask(401, error) except RSENotFound as error: return generate_http_error_flask(404, error) except Duplicate as error: return generate_http_error_flask(409, error) return 'Created', 201 @check_accept_header_wrapper_flask(['application/json']) def get(self, rse): """ --- summary: Get RSE description: Get details about a specific RSE. tags: - Rucio Storage Elements parameters: - name: rse in: path description: The name of the Rucio Storage Element name. schema: type: string style: simple responses: 200: description: OK content: application/json: schema: description: The RSE properties. type: object properties: deterministic: description: If the pfn is generated deterministicly. type: boolean volatile: description: RSE cache. type: boolean city: description: The city of the RSE. type: string staging_area: description: Staging area. type: string region_code: description: The region code of the RSE. type: string country_name: description: The country name of the RSE. type: string continent: description: The continent of the RSE. type: string time_zone: description: The time zone of the RSE. 
type: string ISP: description: The internet service provider of the RSE. type: string rse_type: description: The rse type. type: string enum: ["DISK", "TAPE"] latitute: description: The latitute of the RSE. type: float longitude: description: The longitude of the RSE. type: float ASN: description: The access service network of the RSE. type: string availability: description: The availability of the RSE. type: integer 401: description: Invalid Auth Token 404: description: RSE not found 406: description: Not acceptable """ try: rse_prop = get_rse(rse=rse, vo=request.environ.get('vo')) return Response(render_json(**rse_prop), content_type="application/json") except RSENotFound as error: return generate_http_error_flask(404, error) def delete(self, rse): """ --- summary: Disable RSE description: Disable a specific RSE. tags: - Rucio Storage Elements parameters: - name: rse in: path description: The name of the Rucio Storage Element name. schema: type: string style: simple responses: 200: description: OK 401: description: Invalid Auth Token 404: description: RSE not found """ try: del_rse(rse=rse, issuer=request.environ.get('issuer'), vo=request.environ.get('vo')) except (RSENotFound, RSEOperationNotSupported, CounterNotFound) as error: return generate_http_error_flask(404, error) except AccessDenied as error: return generate_http_error_flask(401, error) return '', 200 class Attributes(ErrorHandlingMethodView): """ Create, update, get and
dt): pass b = BetterTry() t = cls(1, 1, 1, tzinfo=b) self.assertIs(t.tzinfo, b) def test_utc_offset_out_of_bounds(self): class Edgy(tzinfo): def __init__(self, offset): self.offset = timedelta(minutes=offset) def utcoffset(self, dt): return self.offset cls = self.theclass for offset, legit in ((-1440, False), (-1439, True), (1439, True), (1440, False)): if cls is time: t = cls(1, 2, 3, tzinfo=Edgy(offset)) elif cls is datetime: t = cls(6, 6, 6, 1, 2, 3, tzinfo=Edgy(offset)) else: assert 0, "impossible" if legit: aofs = abs(offset) h, m = divmod(aofs, 60) tag = "%c%02d:%02d" % (offset < 0 and '-' or '+', h, m) if isinstance(t, datetime): t = t.timetz() self.assertEqual(str(t), "01:02:03" + tag) else: self.assertRaises(ValueError, str, t) def test_tzinfo_classes(self): cls = self.theclass class C1(tzinfo): def utcoffset(self, dt): return None def dst(self, dt): return None def tzname(self, dt): return None for t in (cls(1, 1, 1), cls(1, 1, 1, tzinfo=None), cls(1, 1, 1, tzinfo=C1())): self.assertIsNone(t.utcoffset()) self.assertIsNone(t.dst()) self.assertIsNone(t.tzname()) class C3(tzinfo): def utcoffset(self, dt): return timedelta(minutes=-1439) def dst(self, dt): return timedelta(minutes=1439) def tzname(self, dt): return "aname" t = cls(1, 1, 1, tzinfo=C3()) self.assertEqual(t.utcoffset(), timedelta(minutes=-1439)) self.assertEqual(t.dst(), timedelta(minutes=1439)) self.assertEqual(t.tzname(), "aname") # Wrong types. class C4(tzinfo): def utcoffset(self, dt): return "aname" def dst(self, dt): return 7 def tzname(self, dt): return 0 t = cls(1, 1, 1, tzinfo=C4()) self.assertRaises(TypeError, t.utcoffset) self.assertRaises(TypeError, t.dst) self.assertRaises(TypeError, t.tzname) # Offset out of range. class C6(tzinfo): def utcoffset(self, dt): return timedelta(hours=-24) def dst(self, dt): return timedelta(hours=24) t = cls(1, 1, 1, tzinfo=C6()) self.assertRaises(ValueError, t.utcoffset) self.assertRaises(ValueError, t.dst) # Not a whole number of seconds. 
class C7(tzinfo): def utcoffset(self, dt): return timedelta(microseconds=61) def dst(self, dt): return timedelta(microseconds=-81) t = cls(1, 1, 1, tzinfo=C7()) self.assertEqual(t.utcoffset(), timedelta(microseconds=61)) self.assertEqual(t.dst(), timedelta(microseconds=-81)) def test_aware_compare(self): cls = self.theclass # Ensure that utcoffset() gets ignored if the comparands have # the same tzinfo member. class OperandDependentOffset(tzinfo): def utcoffset(self, t): if t.minute < 10: # d0 and d1 equal after adjustment return timedelta(minutes=t.minute) else: # d2 off in the weeds return timedelta(minutes=59) base = cls(8, 9, 10, tzinfo=OperandDependentOffset()) d0 = base.replace(minute=3) d1 = base.replace(minute=9) d2 = base.replace(minute=11) for x in d0, d1, d2: for y in d0, d1, d2: for op in lt, le, gt, ge, eq, ne: got = op(x, y) expected = op(x.minute, y.minute) self.assertEqual(got, expected) # However, if they're different members, uctoffset is not ignored. # Note that a time can't actually have an operand-dependent offset, # though (and time.utcoffset() passes None to tzinfo.utcoffset()), # so skip this test for time. if cls is not time: d0 = base.replace(minute=3, tzinfo=OperandDependentOffset()) d1 = base.replace(minute=9, tzinfo=OperandDependentOffset()) d2 = base.replace(minute=11, tzinfo=OperandDependentOffset()) for x in d0, d1, d2: for y in d0, d1, d2: got = (x > y) - (x < y) if (x is d0 or x is d1) and (y is d0 or y is d1): expected = 0 elif x is y is d2: expected = 0 elif x is d2: expected = -1 else: assert y is d2 expected = 1 self.assertEqual(got, expected) # Testing time objects with a non-None tzinfo. 
class TestTimeTZ(TestTime, TZInfoBase, unittest.TestCase): theclass = time def test_empty(self): t = self.theclass() self.assertEqual(t.hour, 0) self.assertEqual(t.minute, 0) self.assertEqual(t.second, 0) self.assertEqual(t.microsecond, 0) self.assertIsNone(t.tzinfo) def test_zones(self): est = FixedOffset(-300, "EST", 1) utc = FixedOffset(0, "UTC", -2) met = FixedOffset(60, "MET", 3) t1 = time( 7, 47, tzinfo=est) t2 = time(12, 47, tzinfo=utc) t3 = time(13, 47, tzinfo=met) t4 = time(microsecond=40) t5 = time(microsecond=40, tzinfo=utc) self.assertEqual(t1.tzinfo, est) self.assertEqual(t2.tzinfo, utc) self.assertEqual(t3.tzinfo, met) self.assertIsNone(t4.tzinfo) self.assertEqual(t5.tzinfo, utc) self.assertEqual(t1.utcoffset(), timedelta(minutes=-300)) self.assertEqual(t2.utcoffset(), timedelta(minutes=0)) self.assertEqual(t3.utcoffset(), timedelta(minutes=60)) self.assertIsNone(t4.utcoffset()) self.assertRaises(TypeError, t1.utcoffset, "no args") self.assertEqual(t1.tzname(), "EST") self.assertEqual(t2.tzname(), "UTC") self.assertEqual(t3.tzname(), "MET") self.assertIsNone(t4.tzname()) self.assertRaises(TypeError, t1.tzname, "no args") self.assertEqual(t1.dst(), timedelta(minutes=1)) self.assertEqual(t2.dst(), timedelta(minutes=-2)) self.assertEqual(t3.dst(), timedelta(minutes=3)) self.assertIsNone(t4.dst()) self.assertRaises(TypeError, t1.dst, "no args") self.assertEqual(hash(t1), hash(t2)) self.assertEqual(hash(t1), hash(t3)) self.assertEqual(hash(t2), hash(t3)) self.assertEqual(t1, t2) self.assertEqual(t1, t3) self.assertEqual(t2, t3) self.assertNotEqual(t4, t5) # mixed tz-aware & naive self.assertRaises(TypeError, lambda: t4 < t5) # mixed tz-aware & naive self.assertRaises(TypeError, lambda: t5 < t4) # mixed tz-aware & naive self.assertEqual(str(t1), "07:47:00-05:00") self.assertEqual(str(t2), "12:47:00+00:00") self.assertEqual(str(t3), "13:47:00+01:00") self.assertEqual(str(t4), "00:00:00.000040") self.assertEqual(str(t5), "00:00:00.000040+00:00") 
self.assertEqual(t1.isoformat(), "07:47:00-05:00") self.assertEqual(t2.isoformat(), "12:47:00+00:00") self.assertEqual(t3.isoformat(), "13:47:00+01:00") self.assertEqual(t4.isoformat(), "00:00:00.000040") self.assertEqual(t5.isoformat(), "00:00:00.000040+00:00") d = 'datetime.time' self.assertEqual(repr(t1), d + "(7, 47, tzinfo=est)") self.assertEqual(repr(t2), d + "(12, 47, tzinfo=utc)") self.assertEqual(repr(t3), d + "(13, 47, tzinfo=met)") self.assertEqual(repr(t4), d + "(0, 0, 0, 40)") self.assertEqual(repr(t5), d + "(0, 0, 0, 40, tzinfo=utc)") self.assertEqual(t1.strftime("%H:%M:%S %%Z=%Z %%z=%z"), "07:47:00 %Z=EST %z=-0500") self.assertEqual(t2.strftime("%H:%M:%S %Z %z"), "12:47:00 UTC +0000") self.assertEqual(t3.strftime("%H:%M:%S %Z %z"), "13:47:00 MET +0100") yuck = FixedOffset(-1439, "%z %Z %%z%%Z") t1 = time(23, 59, tzinfo=yuck) self.assertEqual(t1.strftime("%H:%M %%Z='%Z' %%z='%z'"), "23:59 %Z='%z %Z %%z%%Z' %z='-2359'") # Check that an invalid tzname result raises an exception. class Badtzname(tzinfo): tz = 42 def tzname(self, dt): return self.tz t = time(2, 3, 4, tzinfo=Badtzname()) self.assertEqual(t.strftime("%H:%M:%S"), "02:03:04") self.assertRaises(TypeError, t.strftime, "%Z") # Issue #6697: if '_Fast' in self.__class__.__name__: Badtzname.tz = '\ud800' self.assertRaises(ValueError, t.strftime, "%Z") def test_hash_edge_cases(self): # Offsets that overflow a basic time. t1 = self.theclass(0, 1, 2, 3, tzinfo=FixedOffset(1439, "")) t2 = self.theclass(0, 0, 2, 3, tzinfo=FixedOffset(1438, "")) self.assertEqual(hash(t1), hash(t2)) t1 = self.theclass(23, 58, 6, 100, tzinfo=FixedOffset(-1000, "")) t2 = self.theclass(23, 48, 6, 100, tzinfo=FixedOffset(-1010, "")) self.assertEqual(hash(t1), hash(t2)) def test_pickling(self): # Try one without a tzinfo. 
args = 20, 59, 16, 64**2 orig = self.theclass(*args) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) self.assertEqual(orig.__reduce__(), orig.__reduce_ex__(2)) # Try one with a tzinfo. tinfo = PicklableFixedOffset(-300, 'cookie') orig = self.theclass(5, 6, 7, tzinfo=tinfo) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) self.assertIsInstance(derived.tzinfo, PicklableFixedOffset) self.assertEqual(derived.utcoffset(), timedelta(minutes=-300)) self.assertEqual(derived.tzname(), 'cookie') self.assertEqual(orig.__reduce__(), orig.__reduce_ex__(2)) def test_compat_unpickle(self): tests = [ b"cdatetime\ntime\n(S'\\x05\\x06\\x07\\x01\\xe2@'\n" b"ctest.datetimetester\nPicklableFixedOffset\n(tR" b"(dS'_FixedOffset__offset'\ncdatetime\ntimedelta\n" b"(I-1\nI68400\nI0\ntRs" b"S'_FixedOffset__dstoffset'\nNs" b"S'_FixedOffset__name'\nS'cookie'\nsbtR.", b'cdatetime\ntime\n(U\x06\x05\x06\x07\x01\xe2@' b'ctest.datetimetester\nPicklableFixedOffset\n)R' b'}(U\x14_FixedOffset__offsetcdatetime\ntimedelta\n' b'(J\xff\xff\xff\xffJ0\x0b\x01\x00K\x00tR' b'U\x17_FixedOffset__dstoffsetN' b'U\x12_FixedOffset__nameU\x06cookieubtR.', b'\x80\x02cdatetime\ntime\nU\x06\x05\x06\x07\x01\xe2@' b'ctest.datetimetester\nPicklableFixedOffset\n)R' b'}(U\x14_FixedOffset__offsetcdatetime\ntimedelta\n' b'J\xff\xff\xff\xffJ0\x0b\x01\x00K\x00\x87R' b'U\x17_FixedOffset__dstoffsetN' b'U\x12_FixedOffset__nameU\x06cookieub\x86R.', ] tinfo = PicklableFixedOffset(-300, 'cookie') expected = self.theclass(5, 6, 7, 123456, tzinfo=tinfo) for data in tests: for loads in pickle_loads: derived = loads(data, encoding='latin1') self.assertEqual(derived, expected, repr(data)) self.assertIsInstance(derived.tzinfo, PicklableFixedOffset) self.assertEqual(derived.utcoffset(), timedelta(minutes=-300)) 
self.assertEqual(derived.tzname(), 'cookie') def test_more_bool(self): # time is always True. cls = self.theclass t = cls(0, tzinfo=FixedOffset(-300, "")) self.assertTrue(t) t = cls(5, tzinfo=FixedOffset(-300, "")) self.assertTrue(t) t = cls(5, tzinfo=FixedOffset(300, "")) self.assertTrue(t) t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, "")) self.assertTrue(t) def test_replace(self): cls = self.theclass z100 = FixedOffset(100, "+100") zm200 = FixedOffset(timedelta(minutes=-200), "-200") args = [1, 2, 3, 4, z100] base = cls(*args) self.assertEqual(base, base.replace()) i = 0 for name, newval in (("hour", 5), ("minute", 6), ("second", 7), ("microsecond", 8), ("tzinfo", zm200)): newargs = args[:] newargs[i] = newval expected = cls(*newargs) got = base.replace(**{name: newval}) self.assertEqual(expected, got) i += 1 # Ensure we can get rid of a tzinfo. self.assertEqual(base.tzname(), "+100") base2 = base.replace(tzinfo=None) self.assertIsNone(base2.tzinfo) self.assertIsNone(base2.tzname()) # Ensure we can add one. base3 = base2.replace(tzinfo=z100) self.assertEqual(base, base3) self.assertIs(base.tzinfo, base3.tzinfo) # Out of bounds. base = cls(1) self.assertRaises(ValueError, base.replace, hour=24) self.assertRaises(ValueError, base.replace, minute=-1) self.assertRaises(ValueError, base.replace, second=100) self.assertRaises(ValueError, base.replace, microsecond=1000000) def test_mixed_compare(self): t1 = self.theclass(1, 2, 3) t2 = self.theclass(1, 2, 3) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=None) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=FixedOffset(None, "")) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=FixedOffset(0, "")) self.assertNotEqual(t1, t2) # In time w/ identical tzinfo objects, utcoffset is ignored. 
class Varies(tzinfo): def __init__(self): self.offset = timedelta(minutes=22) def utcoffset(self, t): self.offset += timedelta(minutes=1) return self.offset v = Varies() t1 = t2.replace(tzinfo=v) t2 = t2.replace(tzinfo=v) self.assertEqual(t1.utcoffset(), timedelta(minutes=23)) self.assertEqual(t2.utcoffset(), timedelta(minutes=24)) self.assertEqual(t1, t2) # But if they're not identical, it isn't ignored. t2 = t2.replace(tzinfo=Varies()) self.assertTrue(t1 < t2) # t1's offset counter still going up def test_fromisoformat(self): time_examples = [ (0, 0, 0, 0), (23, 59, 59, 999999), ] hh = (9, 12, 20) mm = (5, 30) ss = (4, 45) usec = (0, 245000, 678901) time_examples += list(itertools.product(hh, mm, ss, usec)) tzinfos = [None, timezone.utc, timezone(timedelta(hours=2)), timezone(timedelta(hours=6, minutes=27))] for ttup in time_examples: for tzi in tzinfos: t = self.theclass(*ttup, tzinfo=tzi) tstr = t.isoformat() with self.subTest(tstr=tstr): t_rt = self.theclass.fromisoformat(tstr) self.assertEqual(t, t_rt) def test_fromisoformat_timezone(self): base_time = self.theclass(12, 30, 45, 217456) tzoffsets = [ timedelta(hours=5), timedelta(hours=2), timedelta(hours=6, minutes=27), timedelta(hours=12, minutes=32, seconds=30), timedelta(hours=2, minutes=4, seconds=9, microseconds=123456) ] tzoffsets += [-1 * td for td in tzoffsets] tzinfos = [None, timezone.utc, timezone(timedelta(hours=0))] tzinfos += [timezone(td) for
#!/usr/bin/env python3
"""A Markov-chain chatbot for Matrix (matrix.org).

Joins rooms when invited, learns from messages it sees (optional), and
replies either when addressed by name or randomly at a per-room rate.
"""
import time
from matrix_client.client import MatrixClient
from matrix_client.api import MatrixRequestError
from requests.exceptions import ConnectionError, Timeout
import argparse
import random
from configparser import ConfigParser
import re
import traceback
import urllib.parse
import logging
import os
import sys
import signal
import queue
import codecs

from database import MarkovDatabaseBrain

# Chat commands the bot recognizes (matched case-insensitively).
COMMANDS = [
    '!rate'
]


def sigterm_handler(_signo, _stack_frame):
    """Raises SystemExit(0), causing everything to cleanly shut down."""
    sys.exit(0)


class ConfigParser(ConfigParser):
    # Allow case-sensitive option names: room ids are used as option keys in
    # the 'Response Rates' section and must not be lowercased on round-trip.
    optionxform = str


class Backend(object):
    """Interface for chat backends."""

    def __init__(self, brain_file):
        pass

    def train_file(self, filename):
        """Trains the chat backend on the given file."""
        with codecs.open(filename, encoding='utf8') as train_file:
            for line in train_file:
                self.learn(line)

    def learn(self, line):
        """Updates the chat backend based on the given line of input."""
        pass

    def save(self):
        """Saves the backend to disk, if needed."""
        pass

    def reply(self, message):
        """Generates a reply to the given message."""
        return "(dummy response)"


class MarkovBackend(Backend):
    """Chat backend using markov chains.

    State is a database of (word1, word2) -> follower-word counts,
    managed by MarkovDatabaseBrain.
    """

    def __init__(self, brain_file):
        self.brain = MarkovDatabaseBrain(brain_file)

    def sanitize(self, word):
        """Removes any awkward whitespace characters from the given word.

        Removes '\\n', '\\r', and '\\u2028' (unicode newline character)."""
        return word.replace('\n', '').replace('\r', '').replace('\u2028', '')

    def train_file(self, filename):
        """Trains on the given file, then persists the brain once at the end."""
        with codecs.open(filename, encoding='utf8') as train_file:
            for line in train_file:
                self.learn(line)
        self.save()

    def learn(self, line):
        """Records every (two-word prefix -> follower) transition in the line."""
        line = line.strip()
        words = line.split(' ')
        words = [self.sanitize(word) for word in words]
        for i in range(len(words) - 2):
            prefix = words[i], words[i + 1]
            follow = words[i + 2]
            self.brain.add(prefix, follow)

    def save(self):
        self.brain.save()

    def get_random_next_link(self, word1, word2):
        """Gives a word that could come after the two provided.

        Words that follow the two given words are weighted by how
        frequently they appear after them. Returns None if no word is
        known to follow this pair.
        """
        possibilities = self.brain.get_followers((word1, word2))
        if not possibilities:
            return None
        # Weighted choice: pick a point in the cumulative count range,
        # then walk the counts until we pass it.
        total = 0
        for p in possibilities:
            total += possibilities[p]
        num = random.randint(1, total)
        total = 0
        for p in possibilities:
            total += possibilities[p]
            if total >= num:
                break
        return p

    def reply(self, message):
        """Generates a Markov-chain reply, seeded from the input message
        when possible, otherwise from a random known word triple."""
        if self.brain.is_empty():
            return ''
        seed = None
        # try to seed reply from the message
        possible_seed_words = message.split()
        while seed is None and possible_seed_words:
            message_word = random.choice(possible_seed_words)
            seeds = list(self.brain.get_pairs_containing_word_ignoring_case(
                message_word))
            if seeds:
                seed = random.choice(seeds)
            else:
                possible_seed_words.remove(message_word)
        # we couldn't seed the reply from the input
        # fall back to random seed
        if seed is None:
            seed = self.brain.get_three_random_words()
        words = list(seed)
        while self.brain.contains_pair((words[-2], words[-1])) and \
                len(words) < 100:
            word = self.get_random_next_link(words[-2], words[-1])
            if word is None:
                # Defensive: get_random_next_link can return None; appending
                # it would crash ' '.join below.
                break
            words.append(word)
        return ' '.join(words)


class Config(object):
    """In-memory view of the bot's configuration file."""

    def __init__(self, cfgparser):
        self.backend = cfgparser.get('General', 'backend')
        self.display_name = cfgparser.get('General', 'display name')
        self.learning = cfgparser.getboolean('General', 'learning')
        self.username = cfgparser.get('Login', 'username')
        self.password = cfgparser.get('Login', 'password')
        self.server = cfgparser.get('Login', 'server')
        self.default_response_rate = cfgparser.getfloat(
            'General', 'default response rate')
        self.response_rates = {}
        for room_id, rate in cfgparser.items('Response Rates'):
            # colons are configparser special characters, so they are
            # stored censored as '-colon-' (see write())
            room_id = room_id.replace('-colon-', ':')
            self.response_rates[room_id] = float(rate)

    def get_response_rate(self, room_id):
        """Returns our response rate for the room with the given room id."""
        return self.response_rates.get(room_id, self.default_response_rate)

    def write(self):
        """Writes this config back to the file, with any changes reflected."""
        # NOTE(review): always writes to 'config.cfg' even if the bot was
        # started with a different --config path — TODO confirm and thread
        # the real path through.
        cfgparser = ConfigParser()
        cfgparser.add_section('General')
        cfgparser.set('General', 'default response rate',
                      str(self.default_response_rate))
        cfgparser.set('General', 'backend', self.backend)
        cfgparser.set('General', 'display name', self.display_name)
        cfgparser.set('General', 'learning', str(self.learning))
        cfgparser.add_section('Login')
        cfgparser.set('Login', 'username', self.username)
        cfgparser.set('Login', 'password', self.password)
        cfgparser.set('Login', 'server', self.server)
        cfgparser.add_section('Response Rates')
        for room_id, rate in list(self.response_rates.items()):
            # censor colons because they are a configparser special
            # character
            room_id = room_id.replace(':', '-colon-')
            cfgparser.set('Response Rates', room_id, str(rate))
        with open('config.cfg', 'wt') as configfile:
            cfgparser.write(configfile)


def get_default_configparser():
    """Returns a ConfigParser object for the default config file."""
    config = ConfigParser(allow_no_value=True)
    config.add_section('General')
    config.set('General', 'default response rate', "0.10")
    config.set('General', 'backend', 'markov')
    config.set('General', 'display name', 'Markov')
    config.set('General', 'learning', 'on')
    config.add_section('Login')
    config.set('Login', 'username', 'username')
    config.set('Login', 'password', 'password')
    config.set('Login', 'server', 'http://matrix.org')
    config.add_section('Response Rates')
    return config


class Bot(object):
    """Handles everything that the bot does."""

    def __init__(self, config, chat_backend):
        self.config = config
        self.client = None
        self.chat_backend = chat_backend
        # Events/invites are queued by listener-thread callbacks and
        # drained on the main thread in run().
        self.event_queue = queue.Queue()
        self.invite_queue = queue.Queue()

    def login(self):
        """Logs onto the server."""
        client = MatrixClient(self.config.server)
        client.login_with_password_no_sync(
            self.config.username, self.config.password)
        self.client = client

    def get_room(self, event):
        """Returns the room the given event took place in."""
        return self.client.rooms[event['room_id']]

    def handle_command(self, event, command, args):
        """Handles the given command, possibly sending a reply to it."""
        command = command.lower()
        if command == '!rate':
            if args:
                # accept e.g. "0.25", ".5", "25%"
                num = re.match(r'[0-9]*(\.[0-9]+)?(%|)', args[0]).group()
                if not num:
                    self.reply(event, "Error: Could not parse number.")
                    return
                if num[-1] == '%':
                    rate = float(num[:-1]) / 100
                else:
                    rate = float(num)
                self.config.response_rates[event['room_id']] = rate
                self.reply(event, "Response rate set to %f." % rate)
            else:
                rate = self.config.get_response_rate(event['room_id'])
                self.reply(
                    event, "Response rate set to %f in this room." % rate)

    def reply(self, event, message):
        """Replies to the given event with the provided message."""
        room = self.get_room(event)
        logging.info("Reply: %s" % message)
        room.send_notice(message)

    def is_name_in_message(self, message):
        """Returns whether the message contains the bot's name.

        Considers both display name and username.
        """
        # re.escape: names are user-configured and may contain regex
        # metacharacters (handle_event already escapes for the same reason)
        regex = "({}|{})".format(
            re.escape(self.config.display_name),
            re.escape(self.config.username))
        return re.search(regex, message, flags=re.IGNORECASE)

    def handle_invite(self, room_id, invite_state):
        # join rooms if invited
        try:
            self.client.join_room(room_id)
            logging.info('Joined room: %s' % room_id)
        except MatrixRequestError as e:
            if e.code == 404:
                # room was deleted after invite or something; ignore it
                logging.info('invited to nonexistent room {}'.format(room_id))
            elif e.code in range(500, 600):
                # synapse v0.99.1 500s if it cannot locate a room sometimes
                # (when there are federation issues)
                logging.warning('got 500 trying to join room we were invited to')
            else:
                # bare raise: re-raise the original exception with its
                # traceback intact
                raise

    def handle_event(self, event):
        """Handles the given event.

        Joins a room if invited, learns from messages, and possibly responds
        to messages.
        """
        if event['type'] == 'm.room.message':
            # only care about text messages by other people
            if event['sender'] != self.client.user_id and \
                    event['content']['msgtype'] == 'm.text':
                message = str(event['content']['body'])
                # commands and the bot's name are matched
                # case-insensitively below
                logging.info("Handling message: %s" % message)
                command_found = False
                for command in COMMANDS:
                    match = re.search(command, message, flags=re.IGNORECASE)
                    if match and (match.start() == 0 or
                                  self.is_name_in_message(message)):
                        command_found = True
                        args = message[match.start():].split(' ')
                        self.handle_command(event, args[0], args[1:])
                        break
                if not command_found:
                    room = self.get_room(event)
                    response_rate = self.config.get_response_rate(room.room_id)
                    if self.is_name_in_message(message) or \
                            random.random() < response_rate:
                        # remove name from message and respond to it
                        message_no_name = re.sub(
                            ' *' + re.escape(self.get_display_name()) + ' *',
                            ' ', message, flags=re.IGNORECASE)
                        response = self.chat_backend.reply(message_no_name)
                        self.reply(event, response)
                    if self.config.learning:
                        self.chat_backend.learn(message)
                self.send_read_receipt(event)

    def set_display_name(self, display_name):
        """Sets the bot's display name on the server."""
        self.client.api.set_display_name(self.client.user_id, display_name)

    def get_display_name(self):
        """Gets the bot's display name from the server."""
        return self.client.api.get_display_name(self.client.user_id)

    def run(self):
        """Indefinitely listens for messages and handles all that come."""
        current_display_name = self.get_display_name()
        if current_display_name != self.config.display_name:
            self.set_display_name(self.config.display_name)

        last_save = time.time()

        # listen for invites, including initial sync invites
        self.client.add_invite_listener(
            lambda room_id, state: self.invite_queue.put((room_id, state)))

        # get rid of initial event sync
        logging.info("initial event stream")
        self.client.listen_for_events()

        # listen to events and add them all to the event queue
        # for handling in this thread
        self.client.add_listener(self.event_queue.put)

        def exception_handler(e):
            if isinstance(e, Timeout):
                logging.warning("listener thread timed out.")
            logging.error("exception in listener thread:")
            traceback.print_exc()

        # start listen thread
        logging.info("starting listener thread")
        self.client.start_listener_thread(exception_handler=exception_handler)

        try:
            while True:
                time.sleep(1)

                # handle any queued events
                while not self.event_queue.empty():
                    event = self.event_queue.get_nowait()
                    self.handle_event(event)

                while not self.invite_queue.empty():
                    room_id, invite_state = self.invite_queue.get_nowait()
                    self.handle_invite(room_id, invite_state)

                # save every 10 minutes or so
                if time.time() - last_save > 60 * 10:
                    self.chat_backend.save()
                    last_save = time.time()
        finally:
            logging.info("stopping listener thread")
            self.client.stop_listener_thread()

    def send_read_receipt(self, event):
        """Sends a read receipt for the given event."""
        # NOTE(review): uses the SDK's private _send because matrix_client
        # exposes no public read-receipt helper — confirm on SDK upgrade.
        if "room_id" in event and "event_id" in event:
            room_id = urllib.parse.quote(event['room_id'])
            event_id = urllib.parse.quote(event['event_id'])
            self.client.api._send("POST", "/rooms/" + room_id +
                                  "/receipt/m.read/" + event_id,
                                  api_path="/_matrix/client/r0")
def train(backend, train_file): """Trains the given chat backend on the given train_file & saves it.""" print("Training...") backend.train_file(train_file) print("Training complete!") backend.save() def main(): argparser = argparse.ArgumentParser( description="A chatbot for Matrix (matrix.org)") argparser.add_argument("--debug", help="Print out way more things.", action="store_true") argparser.add_argument("--train", metavar="train.txt", type=str, help="Train the bot with a file of text.") argparser.add_argument("--config", metavar="config.cfg", type=str, help="Bot's config file (must be read-writable)") argparser.add_argument("--brain", metavar="brain.db", type=str, help="Bot's brain file (must be read-writable)") args = vars(argparser.parse_args()) debug = args['debug'] # suppress logs of libraries logging.getLogger("requests").setLevel(logging.WARNING) logging.getLogger("urllib3").setLevel(logging.WARNING) log_level = logging.DEBUG if debug else logging.INFO logging.basicConfig(level=log_level, format='%(asctime)s %(name)s ' '%(levelname)s %(message)s') train_path = args['train'] config_path = args['config'] if args['config'] \ else os.getenv('MATRIX_CHATBOT_CONFIG', 'config.cfg') brain_path = args['brain'] if args['brain'] \ else os.getenv('MATRIX_CHATBOT_BRAIN', 'brain.db') cfgparser = ConfigParser() success = cfgparser.read(config_path) if not success: cfgparser = get_default_configparser() with open(config_path, 'wt')
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 from abc import ABC, abstractmethod from typing import Any, Dict, List, Mapping, Optional, Sequence, Type, cast from uuid import uuid4 from intelliflow.core.application.context.traversal import ContextVisitor from intelliflow.core.platform.development import HostPlatform from intelliflow.core.serialization import Serializable, dumps, loads from intelliflow.core.signal_processing.definitions.metric_alarm_defs import ( ALARM_STATE_TRANSITION_DIMENSION_TYPE, ALARM_TIME_DIMENSION_TYPE, METRIC_NAME_DIMENSION_TYPE, METRIC_PERIOD_DIMENSION_TYPE, METRIC_STATISTIC_DIMENSION_TYPE, METRIC_TIME_DIMENSION_TYPE, AlarmDimension, MetricDimension, MetricSubDimensionMapType, ) from intelliflow.core.signal_processing.dimension_constructs import DateVariant, Dimension, DimensionFilter, DimensionSpec from intelliflow.core.signal_processing.signal import Signal, SignalDomainSpec, SignalType from intelliflow.core.signal_processing.signal_source import ( AlarmSignalSourceAccessSpec, CompositeAlarmSignalSourceAccessSpec, MetricSignalSourceAccessSpec, SignalSourceAccessSpec, TimerSignalSourceAccessSpec, ) from intelliflow.core.signal_processing.slot import Slot class Node(Serializable["Node"], ABC): """Abstract class representing an atomic signal/slot provider within Application flow. A Node generically represents a persistent action taken against an entity flowing through an Application. This entities are represented with Signal abstraction. So basically a Node is at least a transformation or a listener on a Signal. As a transformation, it should definitely provide a Signal, and as a listener it should be taking some persistent action and yielding new data in Platform::storage, which cannot be exposed as a Signal to the rest of the flow. So it should have at least one Slot and optionally expose a Signal. 
Because of this fact, a Node should register its connections (input Signals and at least one Slot) during Activation so that in runtime its Slot(s) can be executed, based on changes in its input Signal(s). Obeys Serializable protocol to guarantee serialization at Context level. """ class QueryVisitor(ContextVisitor): """Node impls can (are supposed to) provide QueryVisitor impls to abstract high-level modules from the details of how to build a query for a particular node type (and its attributes)""" def __init__(self, node_type: Type["Node"], *args, **kwargs) -> None: """Impls should provide a pretty interface for more convenient high-level instantiation a query visitor""" self._results: Dict[str, "Node"] = dict() self._args: Sequence[Any] = list(args) if args else [] self._kwargs: Dict[str, Any] = dict(kwargs) if kwargs else {} self._node_type = node_type if node_type else kwargs.get("node_type", None) @property def results(self) -> Mapping[str, "Node"]: return self._results # overrides def visit(self, node: "Node") -> None: if self.match(node): self._results[node.node_id] = node def match(self, node: "Node") -> bool: return type(node) == self._node_type or isinstance(node, self._node_type) # parent and child_nodes strong/direct references would normally mean memory leak here. # currently this issue is intentionally ignored based on the particular use-cases that # we are targeting for POC (as of 07/2020). # TODO Will move to 'weakref' or ignore it at all, depepending on our FUTURE evaluation. def __init__(self, parent_node: "Node", child_nodes: List["Node"] = None, node_id: str = None) -> None: """ Create a Node. Parameters ---------- parent_node: Node Container Node. Owner/creator of this Node. Not to be confused with an upstream relationship. That type of relationship will be up to sub-classes / Node implementations. node_id: str Unique ID for this Node. Within a Context this id is used to retrieve the handle for this Node. *child_node : Node Children. 
""" self._parent_node = parent_node self._child_nodes = [] if child_nodes is None else child_nodes # don't forget that our serializer should not call __init__ on new objects during deserialization. # so this assignment is safe for now since we are using pickle. Pickle does not call __init__. self._node_id = "{0}-{1}".format(self.__class__.__name__, uuid4()) if node_id is None else node_id self._check_cylic_dependency() def _check_cylic_dependency(self) -> None: if self._parent_node == self or self in self._child_nodes: raise TypeError("Node ({0}) has a cyclic relationship") # TODO check the whole hierarchy def __hash__(self) -> int: return hash(self._node_id) def __eq__(self, other: Any) -> bool: try: return self._node_id == cast("Node", other).node_id except AttributeError: return NotImplemented @property def parent_node(self) -> "Node": return self._parent_node @parent_node.setter def parent_node(self, value: "Node") -> None: self._parent_node = value @property def node_id(self) -> str: return self._node_id @abstractmethod def _id(self) -> str: pass @property def route_id(self) -> str: return self._node_id def is_root(self) -> bool: return self._parent_node is None @property def child_nodes(self) -> Sequence["Node"]: return self._child_nodes def add_child(self, child: "Node") -> None: child.parent_node = self self._child_nodes.append(child) @abstractmethod def signal(self) -> Optional[Signal]: """What does this Node represent? Emit it as a Signal during development and use the same during 'activation' This method is particularly important while Nodes are referring each other. While a Node binds to another one (like MarshalerNode to a DataNode) or when a Node is implicitly used as an input for another Node (i.e Application::createDataset), this method provides the necessary abstraction in terms of representing this particular Node impl (a subclass) as a Signal object. 
""" pass @abstractmethod def _slots(self) -> Optional[List[Slot]]: """A Node is supposed to provide at least one Slot. if a Slot is not going to be provided, then the existence of Node impl should be questioned as it can only be a development-time entity such as the views in "node.marshaling.filter_views" module. Currently only exception to this logic is <ExternalDataNode>. That is why the return type is still Optional. Otherwise this would not make sense. The reason this method is supposed to be 'private' is that currently we cannot envision a scenario where the slots would be accessed externally. So sole purpose of this abstract method is to high-light the above logic behind having Node impls. """ pass def activate(self, platform: HostPlatform) -> None: """Do activation while the high-level Context (and Application) is being interpreted/activated. Node activates itself and then scans its children. This basic logic is what determines the order of route entries (i.e Code Segment) within a DAG of Nodes (that represents an <Instruction>). High-level order of route entries (aligned with the flow of Application code) is determined by :class:`InstructionChain` . """ self.do_activate(platform) for child_node in self._child_nodes: child_node.activate(platform) @abstractmethod def do_activate(self, platform: HostPlatform) -> None: pass def accept(self, visitor: ContextVisitor) -> None: visitor.visit(self) for child_node in self._child_nodes: child_node.accept(visitor) class DataNode(Node, ABC): """Base class for all Data (new Signal) provider Nodes. It encapsulates bare-bones fields for the creation of a new Signal. 
""" class QueryVisitor(Node.QueryVisitor): def __init__(self, data_id: str, *args, **kwargs) -> None: super().__init__(None, args, kwargs) self._data_id: str = data_id if data_id else kwargs.get("data_id", None) self._exact_match: bool = kwargs.get("exact_match", False) # overrides def match(self, node: "Node") -> bool: if isinstance(node, DataNode): if self._exact_match: return self._data_id == node.data_id else: return self._data_id in node.data_id return False def __init__( self, data_id: str, source_access_spec: SignalSourceAccessSpec, domain_spec: SignalDomainSpec, parent_node: Node = None, child_nodes: List[Node] = None, node_id: str = None, ) -> None: """ Create a DataNode. Parameters ---------- data_id: str Identifier for the logical data object represented by this Node. resource_format: str See Signal::resource_format domain_spec: SignalDomainSpec See Signal::domain_spec parent_node: Node Container Node. Owner/creator of this Node. Not to be confused with an upstream relationship. That type of relationship will be up to sub-classes / Node implementations. node_id: str Unique ID for this Node. Within a Context this id is used to retrieve the handle for this Node. *child_node : Node Children. """ super().__init__(parent_node, child_nodes, node_id) self._data_id = data_id self._source_access_spec = source_access_spec self._domain_spec = domain_spec # overrides @property def _id(self) -> str: return self.data_id @property def data_id(self) -> str: return self._data_id # overrides @property def route_id(self) -> str: # It is by design that we allow the same route_id generation for the same data_id. # This allows the most recent node to overwrite / update the route (during activation). 
return "{0}-{1}".format(self.__class__.__name__, self._data_id) class TimerNode(Node): class QueryVisitor(Node.QueryVisitor): def __init__(self, timer_id: str, *args, **kwargs) -> None: super().__init__(None, args, kwargs) self._timer_id: str = timer_id if timer_id else kwargs.get("timer_id", None) self._exact_match: bool = kwargs.get("exact_match", False) # overrides def match(self, node: "Node") -> bool: if isinstance(node, TimerNode): if self._exact_match: return self._timer_id == node.timer_id else: return self._timer_id in node.timer_id return False def __init__( self, timer_id: str, schedule_expression: str, date_dimension: DateVariant, context_id: str, parent_node: Node = None, child_nodes: List[Node] = None, node_id: str = None, **kwargs, ) -> None: super().__init__(parent_node, child_nodes, node_id) self._timer_id = timer_id self._schedule_expresssion = schedule_expression spec: DimensionSpec = DimensionSpec() spec.add_dimension(Dimension(date_dimension.name, date_dimension.type, date_dimension.params), None) dim_filter: DimensionFilter = DimensionFilter() dim_filter.add_dimension(date_dimension, None) source_access_spec = TimerSignalSourceAccessSpec(timer_id, schedule_expression, context_id, **kwargs) domain_spec = SignalDomainSpec(spec, dim_filter, None) self._timer_signal = Signal(SignalType.TIMER_EVENT, source_access_spec,
<reponame>Standard-Cognition/recursive-bayesian-filtering ''' Statistics tools for tracking. MIT License Copyright (c) 2018 Standard Cognition Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' # Standard from sys import float_info # for float_info.epsilon from typing import Union, Tuple, List # Scientific import numpy as np from scipy.stats import chi2 from matplotlib import pyplot as plt from matplotlib import patches # ----- Misc. ----- def det(A: np.ndarray) -> float: ''' Evaluate determinant of a matrix A. Use direct formula for speedup in 2x2 special case. For speed, input validity is not checked. 
''' if A.shape[0] == 2: determinant = A[0, 0]*A[1, 1] - A[0, 1]*A[1, 0] else: determinant = np.linalg.det(A) return determinant # ----- Multivariate Normal Distributions ----- def assert_cov_validity( cov: Union[float, np.ndarray], eigenvalue_lbnd: float = 1000.0*float_info.epsilon, condition_number_ubnd: float = 1.0e6): ''' Assert that covariance `cov` is symmetric, real, positive-definite, has eigenvalues not too close to zero, and is well-conditioned. ::WARNING:: Applying `enforce_cov_validity` with the same parameters does not guarantee that these assertions will pass. Consider either (1) using the functions mutally exclusively, or (2) making the parameters of `enforce_cov_validity` slightly stricter in order to compensate for possible small numerical errors in eigenreconstruction. Args: cov: an alleged variance (as `float`) or covariance matrix (as `np.ndarray`). eigenvalue_lbnd: eigenvalues should be at least this much greater than zero. Must be strictly positive. condition_number_ubnd: inclusive upper bound on matrix condition number. Must be greater or equal to 1.0. Returns: Whether cov is positive definite and has all real elements. ''' assert eigenvalue_lbnd > 0.0, \ 'Covariance eigenvalue lower bound must be > 0.0!' assert condition_number_ubnd >= 1.0, \ 'Covariance condition number bound must be >= 1.0!' # Symmetry if not np.isscalar(cov): assert (cov.T == cov).all(), 'Covariance must be symmetric!' # Realness assert np.isrealobj(cov), 'Covariance must be a real object!' # Eigenvalue properties if np.isscalar(cov): assert cov > 0.0, \ 'Variance must be strictly positive!' assert cov >= eigenvalue_lbnd, \ 'Variance must be >= lower bound!' else: # Precompute eigenvalues for subsequent tests. ws = np.linalg.eigvalsh(cov) # The eigenvalues of cov w_min = min(ws) w_max = max(ws) # Strict positivity assert w_min > 0.0, 'Covariance must be strictly positive!' 
# Eigenvalue lower bound assert w_min >= eigenvalue_lbnd, \ 'Covariance eigenvalues must be >= lower bound!' # Condition number upper bound assert w_max/w_min <= condition_number_ubnd, \ 'Condition number must be <= upper bound!' def enforce_cov_validity( cov: Union[float, np.ndarray], eigenvalue_lbnd: float = 1000.0*float_info.epsilon, condition_number_ubnd: float = 1.0e6) -> Union[float, np.ndarray]: ''' Create and return a version of covariance `cov` which is modified to ensure it is symmetric, real, positive-definite, has eigenvalues not too close to zero, and is well-conditioned. ::WARNING:: Applying this function to a numpy array does not guarantee that calling `assert_cov_validity` with the same parameters will pass. Consider either (1) using the functions mutally exclusively, or (2) making the parameters of `assert_cov_validity` slightly more lenient in order to compensate for possible small numerical errors in eigenreconstruction. Args: cov: an alleged variance (as `float`) or covariance matrix (as `np.ndarray`). eigenvalue_lbnd: eigenvalues should be at least this much greater than zero. condition_number_ubnd: upper bound on matrix condition number. Should be greater or equal to 1.0. If it is necessary to modify `cov` to enforce this, the largest eigenvalue is held fixed and the smaller are increased. Returns: A version of cov modified to be valid. ''' assert eigenvalue_lbnd > 0.0, \ 'Covariance eigenvalue lower bound must be > 0.0!' assert condition_number_ubnd >= 1.0, \ 'Covariance condition number bound must be >= 1.0!' if np.isscalar(cov): # Realness cov = float(cov.real) # Eigenvalue lower bound if cov < eigenvalue_lbnd: cov = eigenvalue_lbnd else: # Symmetry cov = 0.5*(cov + cov.T) # Realness if not np.isrealobj(cov): cov = cov.real # Precompute eigendecomposition for subsequent enforcements. 
ws, vr = np.linalg.eigh(cov) # Eigenvalues and right eigenvectors # Eigenvalue lower bound for i, w in enumerate(ws): if w < eigenvalue_lbnd: ws[i] = eigenvalue_lbnd # Condition number upper bound # condition number := max_eigval/min_eigval <= condition_number_ubnd # <=> max_eigval/condition_number_ubnd <= min_eigval eigenvalue_lbnd_for_conditioning = max(ws)/condition_number_ubnd for i, w in enumerate(ws): if w < eigenvalue_lbnd_for_conditioning: ws[i] = eigenvalue_lbnd_for_conditioning # Eigenreconstruction cov = vr.dot(np.diag(ws).dot(vr.T)) return cov def evaluate_normal_pdf( x: Union[float, np.ndarray], cov: Union[float, np.ndarray], mean: Union[float, np.ndarray] = None) -> float: ''' Compute and return the value of a multivariate normal PDF (Probability Density Function) at a point x. Args: x: where to evaluate PDF. cov: covariance of distribution. mean: mean of distribution. None => assumed zeros. Returns: PDF value at x. ''' # Get dimension of distribution if np.isscalar(x): dimension = 1 else: dimension = len(x) if mean is None: delta = x # assume zero mean else: delta = x - mean if dimension > 1: k = (2.0*np.pi)**(-0.5*dimension)*det(cov)**(-0.5) quadratic = delta.dot(np.linalg.solve(cov, delta)) p = k*np.exp(-0.5*quadratic) else: k = (2.0*np.pi*cov)**(-0.5) quadratic = delta*(1.0/cov)*delta p = k*np.exp(-0.5*quadratic) return float(p) def sample_from_normal_distribution( cov: Union[float, np.ndarray], cov_cholesky: np.ndarray = None, mean: Union[float, np.ndarray] = None, num_samples: int = 1) -> np.ndarray: ''' Generate random sample(s) from a normal distribution having mean `mean` and covariance `cov`. This function is used instead of `np.random.multivariate_normal` because the latter issues incorrect warnings (as of 2018:05:24) and is less flexible in input. It may also be less efficient if you already have a Cholesky factorization. Args: cov: covariance of the distribution. 
cov_cholesky: optionally precomputed cholesky factorization, as output from `np.linalg.cholesky(cov)`. If `cov_cholesky` is None, then the covariance is allowed to be rank deficient. mean: mean of the distribution. None => assume zeros. num_samples: number of desired samples. Returns: Array of samples. Each column is a sample and the rows run over components of the vectors. ''' if np.isscalar(cov): sigma = np.sqrt(cov) samples = sigma*np.random.normal(size=(1, num_samples)) + mean else: d = cov.shape[0] if mean is None: mean = np.zeros(d) try: if cov_cholesky is None: cov_cholesky = np.linalg.cholesky(cov) samples = np.dot( cov_cholesky, np.random.normal(size=(d, num_samples))) for i in range(d): samples[i, :] += mean[i] except np.linalg.linalg.LinAlgError: # Fall back on `np.random.multivariate_normal` only for rank- # deficient covariances. samples = np.random.multivariate_normal( mean=mean, cov=cov, size=num_samples) samples = samples.T return samples # ----- Error Ellipse Visualization ----- def generate_error_ellipse_points( mean: np.ndarray, cov: np.ndarray, cov_cholesky: np.ndarray = None, acceptance: float = 0.99, num_points: int = 30, format: str = 'matplotlib') -> np.ndarray: ''' Generate points on a level set of a bivariate Gaussian PDF, usu. for plotting error ellipses. Args: mean: the distribution's mean. cov: 2x2 array, the distribution's covariance. cov_cholesky: optionally precomputed cholesky factorization, as output from `np.linalg.cholesky(cov)`. acceptance: probability mass that ellipse should contain around mean. num_points: number of points to sample on ellipse. This is a measure of plotting resolution. format: use 'matplotlib' for points output as a `float64` numpy array with rows running over x and y physical dimensions, columns over points. Use 'opencv' for points output as a `uint32` numpy array with rows running over points and columns running over x and y pixel dimensions. Returns: Shape (2, num_points) array of points for plotting. 
''' assert mean.shape == (2,), 'Incorrect mean shape!' assert cov.shape == (2, 2), 'Incorrect cov shape!' assert acceptance >= 0.0 and acceptance < 1.0, \ 'acceptance rate
    tar file. Access to container metadata.
    """

    def __init__(self, localrepo, container_id=None):
        # localrepo: LocalRepository instance used for all on-disk access.
        # container_id: optional preexisting id; generated lazily otherwise.
        self.localrepo = localrepo
        self.container_id = container_id
        self.tag = ""
        self.imagerepo = ""

    def get_container_attr(self):
        """Get container directory and JSON metadata by id or name"""
        if Config.location:
            # Running against an externally provided rootfs location:
            # there is no repository metadata to load.
            container_dir = ""
            container_json = []
        else:
            container_dir = self.localrepo.cd_container(self.container_id)
            if not container_dir:
                Msg().err("Error: container id or name not found")
                return(False, False)
            container_json = self.localrepo.load_json(
                container_dir + "/container.json")
            if not container_json:
                Msg().err("Error: invalid container json metadata")
                return(False, False)
        return(container_dir, container_json)

    def _chk_container_root(self, container_id=None):
        """Check container ROOT sanity"""
        # Returns the count of well-known top-level directories present in
        # the container ROOT; 0 signals an empty or invalid rootfs.
        if container_id:
            container_dir = self.localrepo.cd_container(container_id)
        else:
            container_dir = self.localrepo.cd_container(self.container_id)
        if not container_dir:
            return 0
        container_root = container_dir + "/ROOT"
        check_list = ["/lib", "/bin", "/etc", "/tmp", "/var", "/usr", "/sys",
                      "/dev", "/data", "/home", "/system", "/root", "/proc", ]
        found = 0
        for f_path in check_list:
            if os.path.exists(container_root + f_path):
                found += 1
        return found

    def create_fromimage(self, imagerepo, tag):
        """Create a container from an image in the repository.
        Since images are stored as layers in tar files, this step consists
        in extracting those layers into a ROOT directory in the
        appropriate sequence.
        first argument: imagerepo
        second argument: image tag in that repo
        """
        self.imagerepo = imagerepo
        self.tag = tag
        image_dir = self.localrepo.cd_imagerepo(self.imagerepo, self.tag)
        if not image_dir:
            Msg().err("Error: create container: imagerepo is invalid")
            return False
        (container_json, layer_files) = self.localrepo.get_image_attributes()
        if not container_json:
            Msg().err("Error: create container: getting layers or json")
            return False
        if not self.container_id:
            # Derive a fresh unique id seeded by the image repo basename.
            self.container_id = Unique().uuid(os.path.basename(self.imagerepo))
        container_dir = self.localrepo.setup_container(
            self.imagerepo, self.tag, self.container_id)
        if not container_dir:
            Msg().err("Error: create container: setting up container")
            return False
        self.localrepo.save_json(
            container_dir + "/container.json", container_json)
        # Extract all layers in sequence into the new ROOT.
        status = self._untar_layers(layer_files, container_dir + "/ROOT")
        if not status:
            Msg().err("Error: creating container:", self.container_id)
        elif not self._chk_container_root():
            Msg().err("Warning: check container content:", self.container_id,
                      l=Msg.WAR)
        return self.container_id

    def create_fromlayer(self, imagerepo, tag, layer_file, container_json):
        """Create a container from a layer file exported by Docker.
""" self.imagerepo = imagerepo self.tag = tag if not self.container_id: self.container_id = Unique().uuid(os.path.basename(self.imagerepo)) if not container_json: Msg().err("Error: create container: getting json") return False container_dir = self.localrepo.setup_container( self.imagerepo, self.tag, self.container_id) if not container_dir: Msg().err("Error: create container: setting up") return False self.localrepo.save_json( container_dir + "/container.json", container_json) status = self._untar_layers([layer_file, ], container_dir + "/ROOT") if not status: Msg().err("Error: creating container:", self.container_id) elif not self._chk_container_root(): Msg().err("Warning: check container content:", self.container_id, l=Msg.WAR) return self.container_id def clone_fromfile(self, clone_file): """Create a cloned container from a file containing a cloned container exported by udocker. """ if not self.container_id: self.container_id = Unique().uuid(os.path.basename(self.imagerepo)) container_dir = self.localrepo.setup_container( "CLONING", "inprogress", self.container_id) if not container_dir: Msg().err("Error: create container: setting up") return False status = self._untar_layers([clone_file, ], container_dir) if not status: Msg().err("Error: creating container clone:", self.container_id) elif not self._chk_container_root(): Msg().err("Warning: check container content:", self.container_id, l=Msg.WAR) return self.container_id def _apply_whiteouts(self, tarf, destdir): """The layered filesystem od docker uses whiteout files to identify files or directories to be removed. 
The format is .wh.<filename> """ cmd = r"tar tf %s '*\/\.wh\.*'" % (tarf) proc = subprocess.Popen(cmd, shell=True, stderr=Msg.chlderr, stdout=subprocess.PIPE, close_fds=True) while True: wh_filename = proc.stdout.readline().strip() if wh_filename: wh_basename = os.path.basename(wh_filename) if wh_basename.startswith(".wh."): rm_filename = destdir + "/" \ + os.path.dirname(wh_filename) + "/" \ + wh_basename.replace(".wh.", "", 1) FileUtil(rm_filename).remove() else: try: proc.stdout.close() proc.terminate() except(NameError, AttributeError): pass break return True def _untar_layers(self, tarfiles, destdir): """Untar all container layers. Each layer is extracted and permissions are changed to avoid file permission issues when extracting the next layer. """ status = True gid = str(os.getgid()) for tarf in tarfiles: if tarf != "-": self._apply_whiteouts(tarf, destdir) cmd = "umask 022 ; tar -C %s -x " % destdir if Msg.level >= Msg.VER: cmd += " -v " cmd += r" --one-file-system --no-same-owner " cmd += r"--no-same-permissions --overwrite -f " + tarf cmd += r"; find " + destdir cmd += r" \( -type d ! -perm -u=x -exec /bin/chmod u+x {} \; \) , " cmd += r" \( ! -perm -u=w -exec /bin/chmod u+w {} \; \) , " cmd += r" \( ! -gid " + gid + r" -exec /bin/chgrp " + gid cmd += r" {} \; \) , " cmd += r" \( -name '.wh.*' -exec " cmd += r" /bin/rm -f --preserve-root {} \; \)" status = subprocess.call(cmd, shell=True, stderr=Msg.chlderr, close_fds=True) if status: Msg().err("Error: while extracting image layer") return not status def _tar(self, tarfile, sourcedir): """Create a tar file for a given sourcedir """ cmd = "tar -C %s -c " % sourcedir if Msg.level >= Msg.VER: cmd += " -v " cmd += r" --one-file-system " #cmd += r" --xform 's:^\./::' " cmd += r" -S --xattrs -f " + tarfile + " . 
" status = subprocess.call(cmd, shell=True, stderr=Msg.chlderr, close_fds=True) if status: Msg().err("Error: creating tar file:", tarfile) return not status def _copy(self, sourcedir, destdir): """Copy directories """ cmd = "tar -C %s -c " % sourcedir if Msg.level >= Msg.VER: cmd += " -v " cmd += r" --one-file-system -S --xattrs -f - . " cmd += r"|tar -C %s -x " % destdir cmd += r" -f - " status = subprocess.call(cmd, shell=True, stderr=Msg.chlderr, close_fds=True) if status: Msg().err("Error: copying:", sourcedir, " to ", destdir) return not status def get_container_meta(self, param, default, container_json): """Get the container metadata from the container""" if "config" in container_json: confidx = "config" elif "container_config" in container_json: confidx = "container_config" if container_json[confidx] and param in container_json[confidx]: if container_json[confidx][param] is None: pass elif (isinstance(container_json[confidx][param], str) and ( isinstance(default, (list, tuple)))): return container_json[confidx][param].strip().split() elif (isinstance(default, str) and ( isinstance(container_json[confidx][param], (list, tuple)))): return " ".join(container_json[confidx][param]) elif (isinstance(default, str) and ( isinstance(container_json[confidx][param], dict))): return self._dict_to_str(container_json[confidx][param]) elif (isinstance(default, list) and ( isinstance(container_json[confidx][param], dict))): return self._dict_to_list(container_json[confidx][param]) else: return container_json[confidx][param] return default def _dict_to_str(self, in_dict): """Convert dict to str""" out_str = "" for (key, val) in in_dict.iteritems(): out_str += "%s:%s " % (str(key), str(val)) return out_str def _dict_to_list(self, in_dict): """Convert dict to list""" out_list = [] for (key, val) in in_dict.iteritems(): out_list.append("%s:%s" % (str(key), str(val))) return out_list def export_tofile(self, clone_file): """Export a container creating a tar file of the rootfs """ 
        container_dir = self.localrepo.cd_container(self.container_id)
        if not container_dir:
            Msg().err("Error: container not found:", self.container_id)
            return False
        # Only the rootfs (ROOT subdir) goes into the export tarball.
        status = self._tar(clone_file, container_dir + "/ROOT")
        if not status:
            Msg().err("Error: exporting container file system:",
                      self.container_id)
        return self.container_id

    def clone_tofile(self, clone_file):
        """Create a cloned container tar file containing both the rootfs
        and all udocker control files. This is udocker specific.
        """
        container_dir = self.localrepo.cd_container(self.container_id)
        if not container_dir:
            Msg().err("Error: container not found:", self.container_id)
            return False
        # Unlike export_tofile(), the whole container dir is archived so
        # udocker metadata travels with the rootfs.
        status = self._tar(clone_file, container_dir)
        if not status:
            Msg().err("Error: exporting container as clone:",
                      self.container_id)
        return self.container_id

    def clone(self):
        """Clone a container by creating a complete copy
        """
        source_container_dir = self.localrepo.cd_container(self.container_id)
        if not source_container_dir:
            Msg().err("Error: source container not found:", self.container_id)
            return False
        # NOTE(review): `self.imagerepo` may still be "" here; the id seed
        # would then be empty — confirm upstream callers set it first.
        dest_container_id = Unique().uuid(os.path.basename(self.imagerepo))
        dest_container_dir = self.localrepo.setup_container(
            "CLONING", "inprogress", dest_container_id)
        if not dest_container_dir:
            Msg().err("Error: create destination container: setting up")
            return False
        status = self._copy(source_container_dir, dest_container_dir)
        if not status:
            Msg().err("Error: creating container:", dest_container_id)
        elif not self._chk_container_root(dest_container_id):
            Msg().err("Warning: check container content:", dest_container_id,
                      l=Msg.WAR)
        return dest_container_id


class LocalRepository(object):
    """Implements a basic repository for images and containers.
    The repository will be usually in the user home directory.
    The repository has a simple directory structure:
    1. layers : one dir containing all image layers so that
       layers shared among images are not duplicated
    2. containers: has inside one directory per container,
       each dir has a ROOT with the extracted image
    3. repos: has inside a directory tree of repos the
       leaf repo dirs are called tags and contain the
       image data (these are links both to layer tarballs
       and json metadata files.
    4. bin: contains executables (PRoot)
    5. lib: contains python libraries
    """

    def __init__(self, topdir=None):
        # Every path falls back to Config defaults, then to a subdir of
        # topdir when the Config value is unset.
        if topdir:
            self.topdir = topdir
        else:
            self.topdir = Config.topdir
        self.bindir = Config.bindir
        self.libdir = Config.libdir
        self.reposdir = Config.reposdir
        self.layersdir = Config.layersdir
        self.containersdir = Config.containersdir
        self.homedir = Config.homedir
        if not self.bindir:
            self.bindir = self.topdir + "/bin"
        if not self.libdir:
            self.libdir = self.topdir + "/lib"
        if not self.reposdir:
            self.reposdir = self.topdir + "/repos"
        if not self.layersdir:
            self.layersdir = self.topdir + "/layers"
        if not self.containersdir:
            self.containersdir = self.topdir + "/containers"
        # Currently selected repo/tag directories (empty until cd_* called).
        self.cur_repodir = ""
        self.cur_tagdir = ""
<reponame>godlovesdavid/Impulcifer
# -*- coding: utf-8 -*-

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.mlab import specgram
from matplotlib.ticker import LinearLocator, FormatStrFormatter, FuncFormatter
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy import signal, stats, ndimage, interpolate
import nnresample
from copy import deepcopy
from autoeq.frequency_response import FrequencyResponse
from utils import magnitude_response, get_ylim, running_mean
from constants import COLORS


class ImpulseResponse:
    # Single-channel impulse response plus the sampling rate it was
    # captured at; optionally the raw recording it was derived from.

    def __init__(self, data, fs, recording=None):
        # fs: sampling rate in Hz; data: impulse response samples.
        self.fs = fs
        self.data = data
        self.recording = recording

    def copy(self):
        # Deep copy so the clone's data can be mutated independently.
        return deepcopy(self)

    def __len__(self):
        """Impulse response length in samples."""
        return len(self.data)

    def duration(self):
        """Impulse response duration in seconds."""
        return len(self) / self.fs

    def peak_index(self, start=0, end=None, peak_height=0.12589):
        """Finds the first high (negative or positive) peak in the impulse
        response wave form.

        Args:
            start: Index for start of search range
            end: Index for end of search range
            peak_height: Minimum peak height. Default is -18 dBFS

        Returns:
            Peak index to impulse response data
        """
        if end is None:
            end = len(self.data)
        # Peak height threshold, relative to the data maximum value
        # Copy to avoid manipulating the original data here
        data = self.data.copy()
        # Limit search to given range
        data = data[start:end]
        # Normalize to 1.0
        data /= np.max(np.abs(data))
        # Find positive peaks
        peaks_pos, properties = signal.find_peaks(data, height=peak_height)
        # Find negative peaks that are at least
        peaks_neg, _ = signal.find_peaks(data * -1.0, height=peak_height)
        # Combine positive and negative peaks
        peaks = np.concatenate([peaks_pos, peaks_neg])
        # Add start delta to peak indices
        peaks += start
        # Return the first one
        # NOTE(review): raises ValueError when no peak exceeds peak_height
        # (np.min of an empty array) — confirm callers guarantee a peak.
        return np.min(peaks)

    def decay_params(self):
        """Determines decay parameters with Lundeby method
        https://www.ingentaconnect.com/content/dav/aaua/1995/00000081/00000004/art00009
        http://users.spa.aalto.fi/mak/PUB/AES_Modal9992.pdf

        Returns:
            - peak_ind: Fundamental starting index
            - knee_point_ind: Index where decay reaches noise floor
            - noise_floor: Noise floor in dBFS, also peak to noise ratio
            - window_size: Averaging window size as determined by Lundeby
              method
        """
        peak_index = self.peak_index()
        # 1. The squared impulse response is averaged into localtime intervals in the range of 10–50 ms,
        # to yield a smooth curve without losing short decays.
        data = self.data.copy()
        # From peak to 2 seconds after the peak
        data = data[peak_index:min(peak_index + 2 * self.fs, len(self))]
        data /= np.max(np.abs(data))  # Normalize
        squared = data ** 2  # Squared impulse response starting from the peak
        t_squared = np.linspace(0, len(squared) / self.fs, len(squared))  # Time stamps starting from peak
        wd = 0.03  # Window duration, let's start with 30 ms
        n = int(len(squared) / self.fs / wd)  # Number of time windows
        w = int(len(squared) / n)  # Width of a single time window
        t_windows = np.arange(n) * wd + wd / 2  # Timestamps for the window centers
        windows = squared.copy()  # Copy to avoid modifying the original
        windows = np.reshape(windows[:n * w], (n, w))  # Split into time windows, one window per row
        windows = np.mean(windows, axis=1)  # Average each time window
        windows = 10 * np.log10(windows)  # dB
        # 2. A first estimate for the background noise level is determined from a time segment containing the last
        # 10 % of the impulse response. This gives a reasonable statistical selection without a large systematic error,
        # if the decay continues to the end of the response.
        tail = squared[int(-0.1 * len(squared)):]  # Last 10 %
        noise_floor = 10 * np.log10(np.mean(tail))  # Mean as dBs, not mean of dB values
        # 3. The decay slope is estimated using linear regression between the time interval containing the response
        # 0 dB peak, and the first interval 5–10 dB above the background noise level.
        slope_end = np.argwhere(windows <= noise_floor + 10)[0, 0] - 1  # Index previous to the first below 10 dB
        slope, intercept, _, _, _ = stats.linregress(t_windows[:slope_end], windows[:slope_end])
        # 4. A preliminary knee point is determined at the intersection of the decay slope and the background noise
        # level.
        # Everything falls apart if this is not in the decay range but in the tail
        # This can happen when there is a long tail which has plateau first but then starts to decay again
        # in that case the noise floor estimated from the end of the impulse response is far below the knee point.
        # Should be preventable by truncating the impulse response to N seconds after the peak
        knee_point_time = (noise_floor - intercept) / slope
        # 5. A new time interval length is calculated according to the calculated slope, so that there are 3–10
        # intervals per 10 dB of decay.
        n_windows_per_10dB = 3
        wd = 10 / (abs(slope) * n_windows_per_10dB)
        n = int(len(squared) / self.fs / wd)  # Number of time windows
        w = int(len(squared) / n)  # Width of a single time window
        t_windows = np.arange(n) * wd + wd / 2  # Time window center time stamps
        # 6. The squared impulse is averaged into the new local time intervals.
        windows = squared.copy()
        windows = np.reshape(windows[:n * w], (n, w))  # Split into time windows
        windows = np.mean(windows, axis=1)  # Average each time window
        windows = 10 * np.log10(windows)  # dB
        try:
            knee_point_index = np.argwhere(t_windows >= knee_point_time)[0, 0]
            knee_point_value = windows[knee_point_index]
        except IndexError as err:
            # Probably tail has already been cropped
            return peak_index, len(self), noise_floor, w
        # print(f'  Knee point: {knee_point_value:.2f} dB @ {knee_point_time * 1000:.0f} ms')
        # Steps 7–9 are iterated until the knee_point is found to converge(max. 5 iterations).
        for i in range(5):
            # print(f'  iter {i}')
            # 7. The background noise level is determined again. The evaluated noise segment should start from a
            # point corresponding to 5–10 dB of decay after the knee_point, or a minimum of 10 % of the total
            # response length.
            try:
                noise_floor_start_index = np.argwhere(windows <= knee_point_value - 5)[0, 0]
            except IndexError:
                # No window 5 dB below the knee point: cannot refine further.
                break
            noise_floor_start_time = max(t_windows[noise_floor_start_index], 0.1 * self.duration())
            # Protection against over shooting the impulse response end, in case the IR has been truncated already
            # In that case the noise floor will be calculated from the last half of the last window
            noise_floor_start_time = min(noise_floor_start_time, t_windows[-1])
            # noise_floor_end_time = noise_floor_start_time + 0.1 * len(squared) / ir.fs  # TODO: Until the very end?
            # Noise floor estimation range ends one full decay time after the start, truncated to the IR length
            noise_floor_end_time = min(noise_floor_start_time + knee_point_time, self.duration())
            noise_floor = np.mean(squared[np.logical_and(
                t_squared >= noise_floor_start_time,
                t_squared <= noise_floor_end_time
            )])
            noise_floor = 10 * np.log10(noise_floor)  # dB
            # print(f'  Noise floor '
            #       f'({(noise_floor_start_time + peak_index / self.fs) * 1000:.0f} ms -> '
            #       f'{(noise_floor_end_time + peak_index / self.fs) * 1000:.0f} ms): '
            #       f'{noise_floor}')
            # 8. The late decay slope is estimated for a dynamic range of 10–20 dB, starting from a point 5–10 dB above
            # the noise level.
            slope_end_headroom = 8
            slope_dynamic_range = 20
            try:
                slope_end = np.argwhere(windows <= noise_floor + slope_end_headroom)[0, 0] - 1  # 8 dB above noise level
                slope_start = np.argwhere(windows <= noise_floor + (slope_end_headroom + slope_dynamic_range))[0, 0] - 1
                late_slope, late_intercept, _, _, _ = stats.linregress(
                    t_windows[slope_start:slope_end],
                    windows[slope_start:slope_end]
                )
            except (IndexError, ValueError):
                # Problems with already cropped IR tail
                break
            # print(f'  Late slope {t_windows[slope_start] * 1000:.0f} ms -> {t_windows[slope_end] * 1000:.0f} ms: {late_slope:.1f}t + {late_intercept:.2f}')
            # 9. A new knee_point is found.
            knee_point_time = (noise_floor - late_intercept) / late_slope
            if knee_point_time > t_windows[-1]:
                # Knee point beyond the averaged range: clamp and stop.
                knee_point_time = t_windows[-1]
                break
            knee_point_index = np.argwhere(t_windows >= knee_point_time)[0, 0]
            knee_point_value = windows[knee_point_index]
            # print(f'  Knee point: {knee_point_value:.2f} dB @ {knee_point_time * 1000:.0f} ms')
            # Index of first window which comes after slope end time
            new_knee_point_index = np.argwhere(t_windows >= knee_point_time)[0, 0]
            if new_knee_point_index == knee_point_index:
                # Converged
                knee_point_index = new_knee_point_index
                break
            else:
                knee_point_index = new_knee_point_index
        # Until this point knee_point_index has been an index to windows,
        # find the index to impulse response data
        knee_point_time = t_windows[knee_point_index]
        knee_point_index = np.argwhere(t_squared >= knee_point_time)[0, 0]
        return peak_index, peak_index + knee_point_index, noise_floor, w

    def decay_times(self, peak_ind=None, knee_point_ind=None, noise_floor=None, window_size=None):
        """Calculates decay times EDT, RT20, RT30, RT60

        Args:
            peak_ind: Peak index as returned by `decay_params()`. Optional.
            knee_point_ind: Knee point index
<filename>onlinejudge_command/subcommand/test.py<gh_stars>0 import argparse import concurrent.futures import contextlib import enum import json import os import pathlib import platform import subprocess import tempfile import threading import traceback from logging import getLogger from typing import * import onlinejudge_command.format_utils as fmtutils import onlinejudge_command.output_comparators as output_comparators import onlinejudge_command.pretty_printers as pretty_printers import onlinejudge_command.utils as utils from onlinejudge_command.output_comparators import CompareMode logger = getLogger(__name__) def add_subparser(subparsers: argparse.Action) -> None: subparsers_add_parser: Callable[..., argparse.ArgumentParser] = subparsers.add_parser # type: ignore subparser = subparsers_add_parser('test', aliases=['t'], help='test your code', formatter_class=argparse.RawTextHelpFormatter, epilog='''\ format string for --format: %s name %e extension: "in" or "out" (both %s and %e are required.) tips: There is a feature to use special judges. See https://github.com/online-judge-tools/oj/blob/master/docs/getting-started.md#test-for-problems-with-special-judge for details. You can do similar things with shell e.g. $ for f in test/*.in ; do echo $f ; ./a.out < $f | diff - ${f%.in}.out ; done ''') subparser.add_argument('-c', '--command', default=utils.get_default_command(), help='your solution to be tested. (default: "{}")'.format(utils.get_default_command())) subparser.add_argument('-f', '--format', default='%s.%e', help='a format string to recognize the relationship of test cases. (default: "%%s.%%e")') subparser.add_argument('-d', '--directory', type=pathlib.Path, default=pathlib.Path('test'), help='a directory name for test cases (default: test/)') subparser.add_argument('-m', '--compare-mode', choices=[mode.value for mode in CompareMode], default=CompareMode.CRLF_INSENSITIVE_EXACT_MATCH.value, help='mode to compare outputs. 
# Thresholds (in megabytes) for reporting the tested program's memory usage.
MEMORY_WARNING = 500  # warn at or above this
MEMORY_PRINT = 100  # always print at or above this


class DisplayMode(enum.Enum):
    """How to display actual/expected outputs for non-AC test cases."""
    SUMMARY = 'summary'
    ALL = 'all'
    DIFF = 'diff'
    DIFF_ALL = 'diff-all'


class SpecialJudge:
    """Runs a user-supplied judge command to decide whether an output is accepted."""

    def __init__(self, judge_command: str, *, is_silent: bool):
        self.judge_command = judge_command  # already quoted and joined command
        self.is_silent = is_silent

    def run(self, *, actual_output: bytes, input_path: pathlib.Path, expected_output_path: Optional[pathlib.Path]) -> bool:
        """Invoke the judge as `$ judge input.txt actual-output.txt expected-output.txt`.

        Returns True iff the judge process exits with return code 0.
        """
        with tempfile.TemporaryDirectory() as tempdir:
            actual_output_path = pathlib.Path(tempdir) / 'actual.out'
            with open(actual_output_path, 'wb') as fh:
                fh.write(actual_output)

            # if you use shlex.quote, it fails on Windows. why?
            command = ' '.join([
                self.judge_command,  # already quoted and joined command
                str(input_path.resolve()),
                str(actual_output_path.resolve()),
                str(expected_output_path.resolve() if expected_output_path is not None else ''),
            ])
            logger.info('$ %s', command)
            info, proc = utils.exec_command(command)
        if not self.is_silent:
            logger.info(utils.NO_HEADER + 'judge\'s output:\n%s', pretty_printers.make_pretty_large_file_content(info['answer'] or b'', limit=40, head=20, tail=10))
        return proc.returncode == 0


def build_match_function(*, compare_mode: CompareMode, error: Optional[float], judge_command: Optional[str], silent: bool, test_input_path: pathlib.Path, test_output_path: Optional[pathlib.Path]) -> Callable[[bytes, bytes], bool]:
    """build_match_function builds the function to compare actual outputs and expected outputs.

    This function doesn't do any I/O.
    """

    # A user-provided judge command takes precedence over the built-in comparators.
    if judge_command is not None:
        special_judge = SpecialJudge(judge_command=judge_command, is_silent=silent)

        def run_judge_command(actual: bytes, expected: bytes) -> bool:
            # the second argument is ignored
            return special_judge.run(
                actual_output=actual,
                input_path=test_input_path,
                expected_output_path=test_output_path,
            )

        return run_judge_command

    is_exact = False
    if compare_mode == CompareMode.EXACT_MATCH and error is None:
        is_exact = True
        file_comparator: output_comparators.OutputComparator = output_comparators.ExactComparator()
    elif compare_mode == CompareMode.CRLF_INSENSITIVE_EXACT_MATCH and error is None:
        is_exact = True
        file_comparator = output_comparators.CRLFInsensitiveComparator(output_comparators.ExactComparator())
    else:
        # Word-by-word comparison; with --error, words are compared as floats.
        if error is not None:
            word_comparator: output_comparators.OutputComparator = output_comparators.FloatingPointNumberComparator(rel_tol=error, abs_tol=error)
        else:
            word_comparator = output_comparators.ExactComparator()
        if compare_mode in (CompareMode.EXACT_MATCH, CompareMode.CRLF_INSENSITIVE_EXACT_MATCH, CompareMode.IGNORE_SPACES):
            file_comparator = output_comparators.SplitLinesComparator(output_comparators.SplitComparator(word_comparator))
        elif compare_mode == CompareMode.IGNORE_SPACES_AND_NEWLINES:
            file_comparator = output_comparators.SplitComparator(word_comparator)
        else:
            assert False
        file_comparator = output_comparators.CRLFInsensitiveComparator(file_comparator)

    def compare_outputs(actual: bytes, expected: bytes) -> bool:
        result = file_comparator(actual, expected)
        if not result and is_exact:
            # Hint the user when only whitespace differs.
            # NOTE: fixed the option name in the message below; the actual option
            # (defined in the argument parser) is --ignore-spaces-and-newlines.
            non_strict_comparator = output_comparators.CRLFInsensitiveComparator(output_comparators.SplitComparator(output_comparators.ExactComparator()))
            if non_strict_comparator(actual, expected):
                logger.warning('This was AC if spaces and newlines were ignored. Please use --ignore-spaces (-S) option or --ignore-spaces-and-newlines (-N) option.')
        return result

    return compare_outputs


def run_checking_output(*, answer: bytes, test_output_path: Optional[pathlib.Path], is_special_judge: bool, match_function: Callable[[bytes, bytes], bool]) -> Optional[bool]:
    """run_checking_output executes matching of the actual output and the expected output.

    This function has file I/O including the execution of the judge command.
    Returns None when there is nothing to check against (no expected file and no judge).
    """

    if test_output_path is None and not is_special_judge:
        return None
    if test_output_path is not None:
        with test_output_path.open('rb') as outf:
            expected = outf.read()
    else:
        # only if --judge option
        expected = b''
        logger.warning('expected output is not found')
    return match_function(answer, expected)


class JudgeStatus(enum.Enum):
    """Verdict of a single test case."""
    AC = 'AC'
    WA = 'WA'
    RE = 'RE'
    TLE = 'TLE'
    MLE = 'MLE'
""" # prepare the function to print the input is_input_printed = False def print_input() -> None: nonlocal is_input_printed if does_print_input and not is_input_printed: is_input_printed = True with test_input_path.open('rb') as inf: logger.info(utils.NO_HEADER + 'input:\n%s', pretty_printers.make_pretty_large_file_content(inf.read(), limit=40, head=20, tail=10)) # check TLE, RE or not status = JudgeStatus.AC if proc.returncode is None: logger.info(utils.FAILURE + '' + utils.red('TLE')) status = JudgeStatus.TLE if not silent: print_input() elif memory is not None and mle is not None and memory > mle: logger.info(utils.FAILURE + '' + utils.red('MLE')) status = JudgeStatus.MLE if not silent: print_input() elif proc.returncode != 0: logger.info(utils.FAILURE + '' + utils.red('RE') + ': return code %d', proc.returncode) status = JudgeStatus.RE if not silent: print_input() # check WA or not if match_result is not None and not match_result: if status == JudgeStatus.AC: logger.info(utils.FAILURE + '' + utils.red('WA')) status = JudgeStatus.WA if not silent: print_input() if test_output_path is not None: with test_output_path.open('rb') as outf: expected = outf.read().decode() else: expected = '' if display_mode == DisplayMode.SUMMARY: logger.info(utils.NO_HEADER + 'output:\n%s', pretty_printers.make_pretty_large_file_content(answer.encode(), limit=40, head=20, tail=10)) logger.info(utils.NO_HEADER + 'expected:\n%s', pretty_printers.make_pretty_large_file_content(expected.encode(), limit=40, head=20, tail=10)) elif display_mode == DisplayMode.ALL: logger.info(utils.NO_HEADER + 'output:\n%s', pretty_printers.make_pretty_all(answer.encode())) logger.info(utils.NO_HEADER + 'expected:\n%s', pretty_printers.make_pretty_all(expected.encode())) elif display_mode == DisplayMode.DIFF: logger.info(utils.NO_HEADER + pretty_printers.make_pretty_diff(answer.encode(), expected=expected, compare_mode=compare_mode, limit=40)) elif display_mode == DisplayMode.DIFF_ALL: 
logger.info(utils.NO_HEADER + pretty_printers.make_pretty_diff(answer.encode(), expected=expected, compare_mode=compare_mode, limit=-1)) else: assert False if match_result is None: if not silent: print_input() logger.info(utils.NO_HEADER + 'output:\n%s', pretty_printers.make_pretty_large_file_content(answer.encode(), limit=40, head=20, tail=10)) if status == JudgeStatus.AC: logger.info(utils.SUCCESS + '' + utils.green('AC')) return status def test_single_case(test_name: str, test_input_path: pathlib.Path, test_output_path: Optional[pathlib.Path], *, lock: Optional[threading.Lock] = None, args: argparse.Namespace) -> Dict[str, Any]: # print the header earlier if not in parallel if lock is None: logger.info('') logger.info('%s', test_name) # run the binary with test_input_path.open('rb') as inf: info, proc = utils.exec_command(args.command, stdin=inf, timeout=args.tle, gnu_time=args.gnu_time) # TODO: the `answer` should be bytes, not str answer: str = (info['answer'] or b'').decode(errors='replace') elapsed: float = info['elapsed'] memory: Optional[float] = info['memory'] # lock is require to avoid mixing logs if in parallel nullcontext = contextlib.ExitStack() # TODO: use contextlib.nullcontext() after updating Python to 3.7 with lock or nullcontext: if lock is not None: logger.info('') logger.info('%s', test_name) logger.info('time: %f sec', elapsed) if memory: if memory < MEMORY_PRINT: if args.print_memory: logger.info('memory: %f MB', memory) elif memory < MEMORY_WARNING: logger.info('memory: %f MB', memory) else: logger.warning('memory: %f MB', memory) match_function = build_match_function(compare_mode=CompareMode(args.compare_mode), error=args.error, judge_command=args.judge, silent=args.silent, test_input_path=test_input_path, test_output_path=test_output_path) match_result = run_checking_output(answer=answer.encode(), test_output_path=test_output_path, is_special_judge=args.judge is not None, match_function=match_function) status = display_result(proc, 
answer, memory, test_input_path, test_output_path, mle=args.mle, display_mode=DisplayMode(args.display_mode), compare_mode=CompareMode(args.compare_mode), does_print_input=args.print_input, silent=args.silent, match_result=match_result) # return the result testcase = { 'name': test_name, 'input': str(test_input_path.resolve()), } if
    def start_full_verification(self) -> None:
        """Save on storage that a full verification is in progress."""
        self.add_value(self._running_full_verification_attribute, '1')

    def finish_full_verification(self) -> None:
        """Remove from storage that the full node is initializing with a full verification."""
        self.remove_value(self._running_full_verification_attribute)

    def is_running_full_verification(self) -> bool:
        """Return if the full node is initializing with a full verification
        or was running a full verification and was stopped in the middle.
        """
        return self.get_value(self._running_full_verification_attribute) == '1'

    def start_running_manager(self) -> None:
        """Save on storage that manager is running."""
        self.add_value(self._manager_running_attribute, '1')

    def stop_running_manager(self) -> None:
        """Remove from storage that manager is running."""
        self.remove_value(self._manager_running_attribute)

    def is_running_manager(self) -> bool:
        """Return if the manager is running or was running and a sudden crash stopped the full node."""
        return self.get_value(self._manager_running_attribute) == '1'

    def set_db_clean(self) -> None:
        """Save on storage that the db has clean data (without voided blocks/txs)."""
        self.add_value(self._clean_db_attribute, '1')

    def is_db_clean(self) -> bool:
        """Return if the node has a clean db (without voided blocks/txs)."""
        return self.get_value(self._clean_db_attribute) == '1'

    def add_new_to_block_height_index(self, height: int, block_hash: bytes, timestamp: int) -> None:
        """Add a new block to the height index that must not result in a re-org."""
        self._block_height_index.add(height, block_hash, timestamp)

    def add_reorg_to_block_height_index(self, height: int, block_hash: bytes, timestamp: int) -> None:
        """Add a new block to the height index that can result in a re-org."""
        # XXX: in the future we can make this more strict so that it MUST result in a re-org
        self._block_height_index.add(height, block_hash, timestamp, can_reorg=True)

    def get_from_block_height_index(self, height: int) -> bytes:
        """Return the hash stored in the height index for the given height."""
        return self._block_height_index.get(height)


class BaseTransactionStorage(TransactionStorage):
    """Base storage implementation that keeps optional in-memory indexes and caches."""

    def __init__(self, with_index: bool = True, pubsub: Optional[Any] = None) -> None:
        super().__init__()

        # Pubsub is used to publish tx voided and winner but it's optional
        self.pubsub = pubsub

        # Initialize index if needed.
        self.with_index = with_index
        if with_index:
            self._reset_cache()

        # Either save or verify all genesis.
        self._save_or_verify_genesis()

    @property
    def latest_timestamp(self) -> int:
        # Timestamp of the newest known transaction/block (maintained by add_to_indexes).
        return self._latest_timestamp

    @property
    def first_timestamp(self) -> int:
        # Timestamp of the oldest known transaction/block.
        return self._first_timestamp

    @abstractmethod
    def _save_transaction(self, tx: BaseTransaction, *, only_metadata: bool = False) -> None:
        # Concrete subclasses persist the transaction here.
        raise NotImplementedError

    def _reset_cache(self) -> None:
        """Reset all caches. This function should not be called unless you know what you are doing."""
        assert self.with_index, 'Cannot reset cache because it has not been enabled.'
        self._cache_block_count = 0
        self._cache_tx_count = 0

        self.block_index = IndexesManager()
        self.tx_index = IndexesManager()
        self.all_index = IndexesManager()
        self.wallet_index = None
        self.tokens_index = None

        # Seed the timestamp range from the genesis set, if available.
        genesis = self.get_all_genesis()
        if genesis:
            self._latest_timestamp = max(x.timestamp for x in genesis)
            self._first_timestamp = min(x.timestamp for x in genesis)
        else:
            self._latest_timestamp = 0
            self._first_timestamp = 0

    def remove_cache(self) -> None:
        """Remove all caches in case we don't need it."""
        self.with_index = False
        self.block_index = None
        self.tx_index = None
        self.all_index = None

    def get_best_block_tips(self, timestamp: Optional[float] = None, *, skip_cache: bool = False) -> List[bytes]:
        return super().get_best_block_tips(timestamp, skip_cache=skip_cache)

    def get_weight_best_block(self) -> float:
        return super().get_weight_best_block()

    def get_block_tips(self, timestamp: Optional[float] = None) -> Set[Interval]:
        """Return the block tips at the given timestamp (default: latest)."""
        if not self.with_index:
            raise NotImplementedError
        assert self.block_index is not None
        assert self.block_index.tips_index is not None
        if timestamp is None:
            timestamp = self.latest_timestamp
        return self.block_index.tips_index[timestamp]

    def get_tx_tips(self, timestamp: Optional[float] = None) -> Set[Interval]:
        """Return the transaction tips at the given timestamp (default: latest)."""
        if not self.with_index:
            raise NotImplementedError
        assert self.tx_index is not None
        assert self.tx_index.tips_index is not None
        if timestamp is None:
            timestamp = self.latest_timestamp
        tips = self.tx_index.tips_index[timestamp]

        if __debug__:
            # XXX: this `for` is for assert only and thus is inside `if __debug__:`
            for interval in tips:
                meta = self.get_metadata(interval.data)
                assert meta is not None
                # assert not meta.voided_by

        return tips

    def get_all_tips(self, timestamp: Optional[float] = None) -> Set[Interval]:
        """Return tips over all transactions and blocks, with a cache for the latest timestamp."""
        if not self.with_index:
            raise NotImplementedError
        assert self.all_index is not None
        if timestamp is None:
            timestamp = self.latest_timestamp

        # Serve from the cache when it is still current.
        if self._all_tips_cache is not None and timestamp >= self._all_tips_cache.timestamp:
            assert self._all_tips_cache.timestamp == self.latest_timestamp
            return self._all_tips_cache.tips

        assert self.all_index.tips_index is not None
        tips = self.all_index.tips_index[timestamp]
        if timestamp >= self.latest_timestamp:
            # Refresh the cache (including the merkle tree over the tips).
            merkle_tree, hashes = self.calculate_merkle_tree(tips)
            self._all_tips_cache = AllTipsCache(self.latest_timestamp, tips, merkle_tree, hashes)

        return tips

    def get_newest_blocks(self, count: int) -> Tuple[List[Block], bool]:
        """Return up to `count` newest blocks and whether more are available."""
        if not self.with_index:
            raise NotImplementedError
        assert self.block_index is not None
        block_hashes, has_more = self.block_index.get_newest(count)
        blocks = [cast(Block, self.get_transaction(block_hash)) for block_hash in block_hashes]
        return blocks, has_more

    def get_newest_txs(self, count: int) -> Tuple[List[BaseTransaction], bool]:
        """Return up to `count` newest transactions and whether more are available."""
        if not self.with_index:
            raise NotImplementedError
        assert self.tx_index is not None
        tx_hashes, has_more = self.tx_index.get_newest(count)
        txs = [self.get_transaction(tx_hash) for tx_hash in tx_hashes]
        return txs, has_more
get_older_blocks_after(self, timestamp: int, hash_bytes: bytes, count: int) -> Tuple[List[Block], bool]: if not self.with_index: raise NotImplementedError assert self.block_index is not None block_hashes, has_more = self.block_index.get_older(timestamp, hash_bytes, count) blocks = [cast(Block, self.get_transaction(block_hash)) for block_hash in block_hashes] return blocks, has_more def get_newer_blocks_after(self, timestamp: int, hash_bytes: bytes, count: int) -> Tuple[List[BaseTransaction], bool]: if not self.with_index: raise NotImplementedError assert self.block_index is not None block_hashes, has_more = self.block_index.get_newer(timestamp, hash_bytes, count) blocks = [self.get_transaction(block_hash) for block_hash in block_hashes] return blocks, has_more def get_older_txs_after(self, timestamp: int, hash_bytes: bytes, count: int) -> Tuple[List[BaseTransaction], bool]: if not self.with_index: raise NotImplementedError assert self.tx_index is not None tx_hashes, has_more = self.tx_index.get_older(timestamp, hash_bytes, count) txs = [self.get_transaction(tx_hash) for tx_hash in tx_hashes] return txs, has_more def get_newer_txs_after(self, timestamp: int, hash_bytes: bytes, count: int) -> Tuple[List[BaseTransaction], bool]: if not self.with_index: raise NotImplementedError assert self.tx_index is not None tx_hashes, has_more = self.tx_index.get_newer(timestamp, hash_bytes, count) txs = [self.get_transaction(tx_hash) for tx_hash in tx_hashes] return txs, has_more def _manually_initialize(self) -> None: self._reset_cache() # We need to construct a topological sort, then iterate from # genesis to tips. for tx in self._topological_sort(): self.add_to_indexes(tx) def _topological_sort(self) -> Iterator[BaseTransaction]: # TODO We must optimize this algorithm to remove the `visited` set. # It will consume too much memory when the number of transactions is big. # A solution would be to store the ordering in disk, probably indexing by tx's height. 
# Sorting the vertices by the lengths of their longest incoming paths produces a topological # ordering (Dekel, Nassimi & Sahni 1981). See: https://epubs.siam.org/doi/10.1137/0210049 # See also: https://gitlab.com/HathorNetwork/hathor-python/merge_requests/31 visited: Dict[bytes, int] = dict() # Dict[bytes, int] for tx in self.get_all_transactions(): if not tx.is_block: continue yield from self._topological_sort_dfs(tx, visited) for tx in self.get_all_transactions(): yield from self._topological_sort_dfs(tx, visited) def _topological_sort_dfs(self, root: BaseTransaction, visited: Dict[bytes, int]) -> Iterator[BaseTransaction]: if root.hash in visited: return stack = [root] while stack: tx = stack[-1] assert tx.hash is not None if tx.hash in visited: if visited[tx.hash] == 0: visited[tx.hash] = 1 # 1 = Visited yield tx assert tx == stack.pop() continue visited[tx.hash] = 0 # 0 = Visit in progress # The parents are reversed to go first through the blocks and only then # go through the transactions. It works because blocks must have the # previous block as the first parent. For transactions, the order does not # matter. 
for parent_hash in tx.parents[::-1]: if parent_hash not in visited: try: parent = self.get_transaction(parent_hash) except TransactionDoesNotExist: # XXX: it's possible transactions won't exist because of missing dependencies pass else: stack.append(parent) for txin in tx.inputs: if txin.tx_id not in visited: try: txinput = self.get_transaction(txin.tx_id) except TransactionDoesNotExist: # XXX: it's possible transactions won't exist because of missing dependencies pass else: stack.append(txinput) def add_to_indexes(self, tx: BaseTransaction) -> None: if not self.with_index: raise NotImplementedError assert self.all_index is not None assert self.block_index is not None assert self.tx_index is not None self._latest_timestamp = max(self.latest_timestamp, tx.timestamp) if self._first_timestamp == 0: self._first_timestamp = tx.timestamp else: self._first_timestamp = min(self.first_timestamp, tx.timestamp) self._first_timestamp = min(self.first_timestamp, tx.timestamp) self._all_tips_cache = None self.all_index.add_tx(tx) if self.wallet_index: self.wallet_index.add_tx(tx) if self.tokens_index: self.tokens_index.add_tx(tx) if tx.is_block: if self.block_index.add_tx(tx): self._cache_block_count += 1 else: if self.tx_index.add_tx(tx): self._cache_tx_count += 1 def del_from_indexes(self, tx: BaseTransaction, *, relax_assert: bool = False) -> None: if not self.with_index: raise NotImplementedError assert self.block_index is not None assert self.tx_index is not None if self.tokens_index: self.tokens_index.del_tx(tx) if tx.is_block: self._cache_block_count -= 1 self.block_index.del_tx(tx, relax_assert=relax_assert) else: self._cache_tx_count -= 1 self.tx_index.del_tx(tx, relax_assert=relax_assert) def get_block_count(self) -> int: if not self.with_index: raise NotImplementedError return self._cache_block_count def get_tx_count(self) -> int: if not self.with_index: raise NotImplementedError return self._cache_tx_count def get_genesis(self, hash_bytes: bytes) -> 
    def get_genesis(self, hash_bytes: bytes) -> Optional[BaseTransaction]:
        """Return the genesis transaction with the given hash, or None if it is not one."""
        assert self._genesis_cache is not None
        return self._genesis_cache.get(hash_bytes, None)

    def get_all_genesis(self) -> Set[BaseTransaction]:
        """Return the set of all genesis transactions (served from the in-memory cache)."""
        assert self._genesis_cache is not None
        return set(self._genesis_cache.values())
# ================================================================
# Blue Gecko BLE API BGLib code generator: Python3 platform
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# ----------------------------------------------------------------
#
# CHANGELOG:
# 2020-08-03 - Ported to Blue Gecko (Kris Young)
# 2017-06-26 - Moved to python3
# 2013-05-04 - Fixed single-item struct.unpack returns (@zwasson on Github)
# 2013-04-28 - Fixed numerous uint8array/bd_addr command arg errors
#            - Added 'debug' support
# 2013-04-16 - Fixed 'bglib_on_idle' to be 'on_idle'
# 2013-04-15 - Added wifi BGAPI support in addition to BLE BGAPI
#            - Fixed references to 'this' instead of 'self'
# 2013-04-11 - Initial release
#
# ================================================================
# Refer to LICENSE.md in the project repo for license details.

# NOTE(review): the whitespace INSIDE the generated-code string literals below
# (e.g. ' return struct.pack(...)', ' elif packet_command == ...') appears to
# have been collapsed by a formatting pass; verify the emitted bglib.py is
# indented correctly before relying on this file.

from xml.dom.minidom import parseString
import string
from datetime import datetime

# open, read, and close the BLEAPI XML data
print("Reading gecko.xml...")
file = open('gecko.xml', 'r')
data = file.read()
file.close()

# parse XML into a DOM structure
print("Parsing BLE API definition...")
dom = parseString(data)

# read relevant dom nodes for highlighter generation
ble_datatypes = dom.getElementsByTagName('datatype')
ble_classes = dom.getElementsByTagName('class')

#for ble_datatype in ble_datatypes:
#    print(ble_datatype.toxml())

# Accumulators for the generated library: command builders, response/event
# callback declarations, parser dispatch code, and constant macros.
ble_command_method_definitions = []
ble_response_callback_definitions = []
ble_response_callback_parser_conditions = []
ble_event_callback_definitions = []
ble_event_callback_parser_conditions = []
ble_constant_macros = []

for ble_class in ble_classes:
    class_name = ble_class.attributes['name'].value
    print("Gathering command, event, and enum data from main class '" + class_name + "'...")

    # One dispatch branch per BGAPI class (first one uses 'if', rest 'elif').
    if len(ble_response_callback_parser_conditions) > 0:
        ble_response_callback_parser_conditions.append('elif packet_class == ' + ble_class.attributes['index'].value + ':')
    else:
        ble_response_callback_parser_conditions.append('if packet_class == ' + ble_class.attributes['index'].value + ':')

    num_responses = 0
    for ble_command in ble_class.getElementsByTagName('command'):
        #print(class_name + '_' + ble_command.attributes['name'].value)
        ble_command_name = class_name + '_' + ble_command.attributes['name'].value

        # gather parameter info, if present
        ble_params = ble_command.getElementsByTagName('params');
        parameters = ['self']  # python class methods require this
        payload_length = 0
        payload_additional = ''
        payload_parameters = []
        # BGAPI command header: 0x20, payload length, class index, command index.
        pack_pattern = '<4B'
        pack_args = ['0x20', '0', ble_class.attributes['index'].value, ble_command.attributes['index'].value]
        if len(ble_params) > 0:
            for ble_param in ble_params[0].getElementsByTagName('param'):
                parameters.append('' + ble_param.attributes['name'].value)
                # Map each BGAPI type to a struct format code and payload size.
                if ble_param.attributes['type'].value == 'uint8':
                    pack_args.append('' + ble_param.attributes['name'].value)
                    pack_pattern += 'B'
                    payload_length += 1
                elif ble_param.attributes['type'].value == 'int8':
                    pack_args.append('' + ble_param.attributes['name'].value)
                    pack_pattern += 'b'
                    payload_length += 1
                elif ble_param.attributes['type'].value == 'uint16':
                    pack_args.append('' + ble_param.attributes['name'].value)
                    pack_pattern += 'H'
                    payload_length += 2
                elif ble_param.attributes['type'].value == 'int16':
                    pack_args.append('' + ble_param.attributes['name'].value)
                    pack_pattern += 'h'
                    payload_length += 2
                elif ble_param.attributes['type'].value == 'uint32':
                    pack_args.append('' + ble_param.attributes['name'].value)
                    pack_pattern += 'I'
                    payload_length += 4
                elif ble_param.attributes['type'].value == 'bd_addr':
                    pack_args.append('' + 'bytes(i for i in ' + ble_param.attributes['name'].value + ')')
                    pack_pattern += '6s'
                    payload_length += 6
                elif ble_param.attributes['type'].value == 'uint8array':
                    # Variable-length: a one-byte length prefix plus the data itself;
                    # the payload length expression is completed at runtime.
                    pack_args.append('len(' + ble_param.attributes['name'].value + ')')
                    pack_args.append('' + 'bytes(i for i in ' + ble_param.attributes['name'].value + ')')
                    pack_pattern += 'B\' + str(len(' + ble_param.attributes['name'].value + ')) + \'s'
                    payload_length += 1
                    payload_additional += ' + len(' + ble_param.attributes['name'].value + ')'

        pack_args[1] = str(payload_length)
        if len(payload_additional) > 0:
            pack_args[1] += payload_additional
        ble_command_method_definitions.append('def gecko_cmd_' + ble_command_name + '(' + ', '.join(parameters) + '):')
        ble_command_method_definitions.append(' return struct.pack(\'' + pack_pattern + '\', ' + ', '.join(pack_args) + ')')

        # gather return value info, if present
        ble_returns = ble_command.getElementsByTagName('returns');
        returns = []
        if len(ble_returns) > 0:
            for ble_return in ble_returns[0].getElementsByTagName('param'):
                returns.append(ble_return.attributes['type'].value + ' ' + ble_return.attributes['name'].value)

        ble_response_args = []
        obj_args = []
        unpack_pattern = '<'
        unpack_args = []
        payload_length = 0
        additional_code = []
        if len(ble_returns) > 0:
            for ble_return in ble_returns[0].getElementsByTagName('param'):
                # Same type mapping as above, but for unpacking the response payload.
                if (ble_return.attributes['type'].value == 'uint8'):
                    unpack_pattern += 'B'
                    unpack_args.append(ble_return.attributes['name'].value)
                    obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value)
                    payload_length += 1
                elif (ble_return.attributes['type'].value == 'uint16'):
                    unpack_pattern += 'H'
                    unpack_args.append(ble_return.attributes['name'].value)
                    obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value)
                    payload_length += 2
                elif (ble_return.attributes['type'].value == 'uint32'):
                    unpack_pattern += 'I'
                    unpack_args.append(ble_return.attributes['name'].value)
                    obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value)
                    payload_length += 4
                elif (ble_return.attributes['type'].value == 'int8'):
                    unpack_pattern += 'b'
                    unpack_args.append(ble_return.attributes['name'].value)
                    obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value)
                    payload_length += 1
                elif (ble_return.attributes['type'].value == 'int16'):
                    unpack_pattern += 'h'
                    unpack_args.append(ble_return.attributes['name'].value)
                    obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value)
                    payload_length += 2
                elif (ble_return.attributes['type'].value == 'int32'):
                    unpack_pattern += 'i'
                    unpack_args.append(ble_return.attributes['name'].value)
                    obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value)
                    payload_length += 4
                elif (ble_return.attributes['type'].value == 'bd_addr'):
                    unpack_pattern += '6s'
                    unpack_args.append(ble_return.attributes['name'].value)
                    obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value)
                    payload_length += 6
                    # XXX: emits "name = name", a no-op line in the generated code
                    # (kept from the original bglib generator).
                    additional_code.append(ble_return.attributes['name'].value + ' = ' + ble_return.attributes['name'].value)
                elif (ble_return.attributes['type'].value == 'uint8array'):
                    unpack_pattern += 'B'
                    unpack_args.append(ble_return.attributes['name'].value + '_len')
                    obj_args.append("'" + ble_return.attributes['name'].value + "': " + ble_return.attributes['name'].value + '_data')
                    payload_length += 1
                    additional_code.append(ble_return.attributes['name'].value + '_data = self.bgapi_rx_payload[' + str(payload_length) + ':]')

        # One dispatch branch per command index within the class.
        if num_responses > 0:
            ble_response_callback_parser_conditions.append(' elif packet_command == %s: # gecko_rsp_%s' % (ble_command.attributes['index'].value, ble_command_name))
        else:
            ble_response_callback_parser_conditions.append(' if packet_command == %s: # gecko_rsp_%s' % (ble_command.attributes['index'].value, ble_command_name))
        ble_response_code = []
        if payload_length > 0:
            if len(unpack_args) > 1:
                ble_response_code.append(', '.join(unpack_args) + ' = struct.unpack(\'' + unpack_pattern + '\', self.bgapi_rx_payload[:' + str(payload_length) + '])')
            else:
                # "struct.unpack" returns a tuple no matter what
                # (thanks @zwasson: https://github.com/jrowberg/bglib/issues/5)
                ble_response_code.append(', '.join(unpack_args) + ' = struct.unpack(\'' + unpack_pattern + '\', self.bgapi_rx_payload[:' + str(payload_length) + '])[0]')
        [ble_response_code.append(x) for x in additional_code]
        ble_response_code.append('self.gecko_rsp_' + ble_command_name + '({ ' + ', '.join(obj_args) + ' })')
        ble_response_callback_parser_conditions.append(' ' + '\n '.join(ble_response_code))
        if ble_class.attributes['index'].value == '0' and ble_command.attributes['index'].value == '0':
            ble_response_callback_parser_conditions.append(' self.busy = False')
            ble_response_callback_parser_conditions.append(' self.on_idle()')
        ble_response_callback_definitions.append('gecko_rsp_' + ble_command_name + ' = BGAPIEvent()')
        num_responses += 1

    # Drop the class-level branch if the class produced no responses.
    if num_responses == 0:
        ble_response_callback_parser_conditions.pop()

    if len(ble_event_callback_parser_conditions) > 0:
        ble_event_callback_parser_conditions.append('elif packet_class == ' + ble_class.attributes['index'].value + ':')
    else:
        ble_event_callback_parser_conditions.append('if packet_class == ' + ble_class.attributes['index'].value + ':')

    num_events = 0
    for ble_event in ble_class.getElementsByTagName('event'):
        #print(class_name + '_' + ble_event.attributes['name'].value)
        ble_event_name = class_name + '_' + ble_event.attributes['name'].value

        # gather parameter info, if present
        ble_params = ble_event.getElementsByTagName('params');
        obj_args = []
        unpack_pattern = '<'
        unpack_args = []
        payload_length = 0
        additional_code = []
        if len(ble_params) > 0:
            for ble_param in ble_params[0].getElementsByTagName('param'):
                # Same type mapping as for command responses.
                if (ble_param.attributes['type'].value == 'uint8'):
                    unpack_pattern += 'B'
                    unpack_args.append(ble_param.attributes['name'].value)
                    obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value)
                    payload_length += 1
                elif (ble_param.attributes['type'].value == 'uint16'):
                    unpack_pattern += 'H'
                    unpack_args.append(ble_param.attributes['name'].value)
                    obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value)
                    payload_length += 2
                elif (ble_param.attributes['type'].value == 'uint32'):
                    unpack_pattern += 'I'
                    unpack_args.append(ble_param.attributes['name'].value)
                    obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value)
                    payload_length += 4
                elif (ble_param.attributes['type'].value == 'int8'):
                    unpack_pattern += 'b'
                    unpack_args.append(ble_param.attributes['name'].value)
                    obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value)
                    payload_length += 1
                elif (ble_param.attributes['type'].value == 'int16'):
                    unpack_pattern += 'h'
                    unpack_args.append(ble_param.attributes['name'].value)
                    obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value)
                    payload_length += 2
                elif (ble_param.attributes['type'].value == 'int32'):
                    unpack_pattern += 'i'
                    unpack_args.append(ble_param.attributes['name'].value)
                    obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value)
                    payload_length += 4
                elif (ble_param.attributes['type'].value == 'bd_addr'):
                    unpack_pattern += '6s'
                    unpack_args.append(ble_param.attributes['name'].value)
                    obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value)
                    payload_length += 6
                    # XXX: no-op "name = name" line, kept from the original generator.
                    additional_code.append(ble_param.attributes['name'].value + ' = ' + ble_param.attributes['name'].value)
                elif (ble_param.attributes['type'].value == 'uint8array'):
                    unpack_pattern += 'B'
                    unpack_args.append(ble_param.attributes['name'].value + '_len')
                    obj_args.append("'" + ble_param.attributes['name'].value + "': " + ble_param.attributes['name'].value + '_data')
                    payload_length += 1
                    additional_code.append(ble_param.attributes['name'].value + '_data = self.bgapi_rx_payload[' + str(payload_length) + ':]')

        if num_events > 0:
            ble_event_callback_parser_conditions.append(' elif packet_command == %s: # gecko_evt_%s' % (ble_event.attributes['index'].value, ble_event_name))
        else:
            ble_event_callback_parser_conditions.append(' if packet_command == %s: # gecko_evt_%s' % (ble_event.attributes['index'].value, ble_event_name))
        ble_event_code = []
        if payload_length > 0:
            if len(unpack_args) > 1:
                ble_event_code.append(', '.join(unpack_args) + ' = struct.unpack(\'' + unpack_pattern + '\', self.bgapi_rx_payload[:' + str(payload_length) + '])')
            else:
                # "struct.unpack" returns a tuple no matter what
                # (thanks @zwasson: https://github.com/jrowberg/bglib/issues/5)
                ble_event_code.append(', '.join(unpack_args) + ' = struct.unpack(\'' + unpack_pattern + '\', self.bgapi_rx_payload[:' + str(payload_length) + '])[0]')
        [ble_event_code.append(x) for x in additional_code]
        ble_event_code.append('self.gecko_evt_' + ble_event_name + '({ ' + ', '.join(obj_args) + ' })')
        ble_event_callback_parser_conditions.append(' ' + '\n '.join(ble_event_code))
        if ble_class.attributes['index'].value == '0' and ble_event.attributes['index'].value == '0':
            ble_event_callback_parser_conditions.append(' self.busy = False')
            ble_event_callback_parser_conditions.append(' self.on_idle()')
        ble_event_callback_definitions.append('gecko_evt_' + ble_event_name + ' = BGAPIEvent()')
        num_events += 1

    if num_events == 0:
        ble_event_callback_parser_conditions.pop()

    for ble_enum in ble_class.getElementsByTagName('enum'):
        #print(class_name + '_' + ble_enum.attributes['name'].value)
        enum_name = class_name + '_' + ble_enum.attributes['name'].value
        ble_constant_macros.append('#define BGLIB_' + (enum_name.upper() + ' ').ljust(54) + ble_enum.attributes['value'].value)

    # Blank separator after each class's macros (skip if already separated).
    # NOTE(review): placement inside the class loop inferred from context — confirm.
    if len(ble_constant_macros) > 0 and ble_constant_macros[len(ble_constant_macros) - 1] != '':
        ble_constant_macros.append('')

# create Python library file(s)
print("Writing Python source library files...")
source = open('bglib.py', 'w')
Github)\n\ 2013-04-28 - Fixed numerous uint8array/bd_addr command arg errors\n\ - Added \'debug\' support\n\ 2013-04-16 - Fixed \'bglib_on_idle\' to be \'on_idle\'\n\ 2013-04-15 - Added wifi BGAPI support in addition to BLE BGAPI\n\ - Fixed references to \'this\' instead of \'self\'\n\ 2013-04-11 - Initial release\n\ \n\ ============================================\n\ Blue Gecko BGLib Python interface library\n\ 2013-05-04 by <NAME> <<EMAIL>>\n\ Updates should (hopefully) always be
"k": "<KEY>"} E_A3_vector = \ "<KEY> \ "6KB707dM9YTIgHtLvtgWQ8mKwboJW3of9locizkDTHzBC2IlrT1oOQ." \ "AxY8DCtDaGlsbGljb3RoZQ." \ "KDlTtXchhZTGufMYmOYGS4HffxPSUrfmqCHXaI9wOGY." \ "U0m_YmjN04DJvceFICbCVQ" E_A3_ex = {'key': jwk.JWK(**E_A3_key), 'protected': base64url_decode(E_A3_protected).decode('utf-8'), 'plaintext': E_A3_plaintext, 'vector': E_A3_vector} E_A4_protected = "<KEY>" E_A4_unprotected = {"jku": "https://server.example.com/keys.jwks"} E_A4_vector = \ '{"protected":"eyJlbmMiOiJBMTI4Q0JDLUhTMjU2In0",' \ '"unprotected":{"jku":"https://server.example.com/keys.jwks"},' \ '"recipients":[' \ '{"header":{"alg":"RSA1_5","kid":"2011-04-29"},' \ '"encrypted_key":'\ '"<KEY> \ '<KEY>' \ '<KEY>' \ 'YvkkysZIFNPccxRU7qve1WYPxqbb2Yw8kZqa2rMWI5ng8OtvzlV7elprCbuPh' \ 'cCdZ6XDP0_F8rkXds2vE4X-ncOIM8hAYHHi29NX0mcKiRaD0-D-ljQTP-cFPg' \ 'wCp6X-nZZd9OHBv-B3oWh2TbqmScqXMR4gp_A"},' \ '{"header":{"alg":"A128KW","kid":"7"},' \ '"encrypted_key":' \ '"<KEY>"}],' \ '"iv":"AxY8DCtDaGlsbGljb3RoZQ",' \ '"ciphertext":"KDlTtXchhZTGufMYmOYGS4HffxPSUrfmqCHXaI9wOGY",' \ '"tag":"Mz-VPPyU4RlcuYv1IwIvzw"}' E_A4_ex = {'key1': jwk.JWK(**E_A2_key), 'header1': '{"alg":"RSA1_5","kid":"2011-04-29"}', 'key2': jwk.JWK(**E_A3_key), 'header2': '{"alg":"A128KW","kid":"7"}', 'protected': base64url_decode(E_A4_protected), 'unprotected': json_encode(E_A4_unprotected), 'plaintext': E_A3_plaintext, 'vector': E_A4_vector} E_A5_ex = \ '{"protected":"<KEY>",' \ '"unprotected":{"jku":"https://server.example.com/keys.jwks"},' \ '"header":{"alg":"A128KW","kid":"7"},' \ '"encrypted_key":' \ '"<KEY>",' \ '"iv":"AxY8DCtDaGlsbGljb3RoZQ",' \ '"ciphertext":"KDlTtXchhZTGufMYmOYGS4HffxPSUrfmqCHXaI9wOGY",' \ '"tag":"Mz-VPPyU4RlcuYv1IwIvzw"}' customhdr_jwe_ex = \ '{"protected":"<KEY>",' \ '"unprotected":{"jku":"https://server.example.com/keys.jwks"},' \ '"header":{"alg":"A128KW","kid":"7", "custom1":"custom_val"},' \ '"encrypted_key":' \ '"6KB707dM9YTIgHtLvtgWQ8mKwboJW3of9locizkDTHzBC2IlrT1oOQ",' \ '"iv":"AxY8DCtDaGlsbGljb3RoZQ",' 
\ '"ciphertext":"KDlTtXchhZTGufMYmOYGS4HffxPSUrfmqCHXaI9wOGY",' \ '"tag":"Mz-VPPyU4RlcuYv1IwIvzw"}' Issue_136_Protected_Header_no_epk = { "alg": "ECDH-ES+A256KW", "enc": "A256CBC-HS512"} Issue_136_Contributed_JWE = \ "<KEY>" \ "<KEY>" \ "cDNpU241cEFSNUpYUE5aVF9SSEw2MTJMUGliWEI2WDhvTE9EOXFrN2NhTSIsInki" \ "<KEY>" \ ".wG51hYE_Vma8tvFKVyeZs4lsHhXiarEw3-59eWHPmhRflDAKrMvnBw1urezo_Bz" \ "ZyPJ76m42ORQPbhEu5NvbJk3vgdgcp03j" \ ".lRttW8r6P6zM0uYDQt0EjQ.qnOnz7biCbqdLEdUH3acMamFm-cBRCSTFb83tNPrgDU" \ ".vZnwYpYjzrTaYritwMzaguaAMsq9rQOWe8NUHICv2hg" Issue_136_Contributed_Key = { "alg": "ECDH-ES+A128KW", "crv": "P-256", "d": "F2PnliYin65AoIUxL1CwwzBPNeL2TyZPAKtkXOP50l8", "kid": "key1", "kty": "EC", "x": "FPrb_xwxe8SBP3kO-e-WsofFp7n5-yc_tGgfAvqAP8g", "y": "lM3HuyKMYUVsYdGqiWlkwTZbGO3Fh-hyadq8lfkTgBc"} # RFC 8037 A.1 E_Ed25519 = { 'key_json': '{"kty": "OKP",' '"crv": "Ed25519", ' '"d": "<KEY>", ' '"x": "<KEY>"}', 'payload': 'Example of Ed25519 signing', 'protected_header': {"alg": "EdDSA"}, 'jws_serialization_compact': '<KEY>' 'ZDI1NTE5IHNpZ25pbmc.<KEY>' 'nLWG1PPOt7-09PGcvMg3AIbQR6dWbhijcNR4ki' '4iylGjg5BhVsPt9g7sVvpAr_MuM0KAg'} X25519_Protected_Header_no_epk = { "alg": "ECDH-ES+A128KW", "enc": "A128GCM"} class TestJWE(unittest.TestCase): def check_enc(self, plaintext, protected, key, vector): e = jwe.JWE(plaintext, protected, algs=jwe_algs_and_rsa1_5) e.add_recipient(key) # Encrypt and serialize using compact enc = e.serialize() # And test that we can decrypt our own e.deserialize(enc, key) # Now test the Spec Test Vector e.deserialize(vector, key) def test_A1(self): self.check_enc(E_A1_ex['plaintext'], E_A1_ex['protected'], E_A1_ex['key'], E_A1_ex['vector']) def test_A2(self): self.check_enc(E_A2_ex['plaintext'], E_A2_ex['protected'], E_A2_ex['key'], E_A2_ex['vector']) def test_A3(self): self.check_enc(E_A3_ex['plaintext'], E_A3_ex['protected'], E_A3_ex['key'], E_A3_ex['vector']) def test_A4(self): e = jwe.JWE(E_A4_ex['plaintext'], E_A4_ex['protected'], algs=jwe_algs_and_rsa1_5) 
e.add_recipient(E_A4_ex['key1'], E_A4_ex['header1']) e.add_recipient(E_A4_ex['key2'], E_A4_ex['header2']) enc = e.serialize() e.deserialize(enc, E_A4_ex['key1']) e.deserialize(enc, E_A4_ex['key2']) # Now test the Spec Test Vector e.deserialize(E_A4_ex['vector'], E_A4_ex['key1']) e.deserialize(E_A4_ex['vector'], E_A4_ex['key2']) def test_A5(self): e = jwe.JWE(algs=jwe_algs_and_rsa1_5) e.deserialize(E_A5_ex, E_A4_ex['key2']) with self.assertRaises(jwe.InvalidJWEData): e = jwe.JWE(algs=['A256KW']) e.deserialize(E_A5_ex, E_A4_ex['key2']) def test_compact_protected_header(self): """Compact representation requires a protected header""" e = jwe.JWE(E_A1_ex['plaintext']) e.add_recipient(E_A1_ex['key'], E_A1_ex['protected']) with self.assertRaises(jwe.InvalidJWEOperation): e.serialize(compact=True) def test_compact_invalid_header(self): with self.assertRaises(jwe.InvalidJWEOperation): e = jwe.JWE(E_A1_ex['plaintext'], E_A1_ex['protected'], aad='XYZ', recipient=E_A1_ex['key']) e.serialize(compact=True) with self.assertRaises(jwe.InvalidJWEOperation): e = jwe.JWE(E_A1_ex['plaintext'], E_A1_ex['protected'], unprotected='{"jku":"https://example.com/keys.jwks"}', recipient=E_A1_ex['key']) e.serialize(compact=True) def test_JWE_Issue_136(self): plaintext = "plain" protected = json_encode(Issue_136_Protected_Header_no_epk) key = jwk.JWK.generate(kty='EC', crv='P-521') e = jwe.JWE(plaintext, protected) e.add_recipient(key) enc = e.serialize() e.deserialize(enc, key) self.assertEqual(e.payload, plaintext.encode('utf-8')) e = jwe.JWE() e.deserialize(Issue_136_Contributed_JWE, jwk.JWK(**Issue_136_Contributed_Key)) def test_customhdr_jwe(self): def jwe_chk1(jwobj): return jwobj.jose_header['custom1'] == 'custom_val' newhdr = JWSEHeaderParameter('Custom header 1', False, True, jwe_chk1) newreg = {'custom1': newhdr} e = jwe.JWE(header_registry=newreg) e.deserialize(customhdr_jwe_ex, E_A4_ex['key2']) def jwe_chk2(jwobj): return jwobj.jose_header['custom1'] == 'custom_not' newhdr = 
JWSEHeaderParameter('Custom header 1', False, True, jwe_chk2) newreg = {'custom1': newhdr} e = jwe.JWE(header_registry=newreg) with self.assertRaises(jwe.InvalidJWEData): e.deserialize(customhdr_jwe_ex, E_A4_ex['key2']) def test_customhdr_jwe_exists(self): newhdr = JWSEHeaderParameter('Custom header 1', False, True, None) newreg = {'alg': newhdr} with self.assertRaises(InvalidJWSERegOperation): jwe.JWE(header_registry=newreg) def test_X25519_ECDH(self): plaintext = b"plain" protected = json_encode(X25519_Protected_Header_no_epk) if 'X25519' in jwk.ImplementedOkpCurves: x25519key = jwk.JWK.generate(kty='OKP', crv='X25519') e1 = jwe.JWE(plaintext, protected) e1.add_recipient(x25519key) enc = e1.serialize() e2 = jwe.JWE() e2.deserialize(enc, x25519key) self.assertEqual(e2.payload, plaintext) MMA_vector_key = jwk.JWK(**E_A2_key) MMA_vector_ok_cek = \ '{"protected":"<KEY>",' \ '"unprotected":{"jku":"https://server.example.com/keys.jwks"},' \ '"recipients":[' \ '{"header":{"alg":"RSA1_5","kid":"2011-04-29"},' \ '"encrypted_key":'\ '"<KEY> \ 'kFm1NJn8LE9XShH59_i8J0PH5ZZyNfGy2xGdULU7sHNF6Gp2vPLgNZ__deLKx' \ 'GHZ7PcHALUzoOegEI-8E66jX2E4zyJKx-YxzZIItRzC5hlRirb6Y5Cl_p-ko3' \ 'YvkkysZIFNPccxRU7qve1WYPxqbb2Yw8kZqa2rMWI5ng8OtvzlV7elprCbuPh' \ 'cCdZ6XDP0_F8rkXds2vE4X-ncOIM8hAYHHi29NX0mcKiRaD0-D-ljQTP-cFPg' \ 'wCp6X-nZZd9OHBv-B3oWh2TbqmScqXMR4gp_A"}],' \ '"iv":"AxY8DCtDaGlsbGljb3RoZQ",' \ '"ciphertext":"PURPOSEFULLYBROKENYGS4HffxPSUrfmqCHXaI9wOGY",' \ '"tag":"Mz-VPPyU4RlcuYv1IwIvzw"}' MMA_vector_ko_cek = \ '{"protected":"<KEY>",' \ '"unprotected":{"jku":"https://server.example.com/keys.jwks"},' \ '"recipients":[' \ '{"header":{"alg":"RSA1_5","kid":"2011-04-29"},' \ '"encrypted_key":'\ '"<KEY> \ 'kFm1NJn8LE9XShH59_i8J0PH5ZZyNfGy2xGdULU7sHNF6Gp2vPLgNZ__deLKx' \ 'GHZ7PcHALUzoOegEI-8E66jX2E4zyJKx-YxzZIItRzC5hlRirb6Y5Cl_p-ko3' \ 'YvkkysZIFNPccxRU7qve1WYPxqbb2Yw8kZqa2rMWI5ng8OtvzlV7elprCbuPh' \ 'cCdZ6XDP0_F8rkXds2vE4X-ncOIM8hAYHHi29NX0mcKiRaD0-D-ljQTP-cFPg' \ 
'wCp6X-nZZd9OHBv-B3oWh2TbqmScqXMR4gp_A"}],' \ '"iv":"AxY8DCtDaGlsbGljb3RoZQ",' \ '"ciphertext":"PURPOSEFULLYBROKENYGS4HffxPSUrfmqCHXaI9wOGY",' \ '"tag":"Mz-VPPyU4RlcuYv1IwIvzw"}' class TestMMA(unittest.TestCase): @classmethod def setUpClass(cls): import os cls.enableMMA = os.environ.get('JWCRYPTO_TESTS_ENABLE_MMA', False) cls.iterations = 500 cls.sub_iterations = 100 def test_MMA(self): if self.enableMMA: print('Testing MMA timing attacks') ok_cek = 0 ok_e = jwe.JWE(algs=jwe_algs_and_rsa1_5) ok_e.deserialize(MMA_vector_ok_cek) ko_cek = 0 ko_e = jwe.JWE(algs=jwe_algs_and_rsa1_5) ko_e.deserialize(MMA_vector_ko_cek) import time counter = getattr(time, 'perf_counter', time.time) for _ in range(self.iterations): start = counter() for _ in range(self.sub_iterations): with self.assertRaises(jwe.InvalidJWEData): ok_e.decrypt(MMA_vector_key) stop = counter() ok_cek += (stop - start) / self.sub_iterations start = counter() for _ in range(self.sub_iterations): with self.assertRaises(jwe.InvalidJWEData): ko_e.decrypt(MMA_vector_key) stop = counter() ko_cek += (stop - start) / self.sub_iterations ok_cek /= self.iterations ko_cek /= self.iterations deviation = ((ok_cek - ko_cek) / ok_cek) * 100 print('MMA ok cek: {}'.format(ok_cek)) print('MMA ko cek: {}'.format(ko_cek)) print('MMA deviation: {}% ({})'.format(int(deviation), deviation)) self.assertLess(deviation, 2) # RFC 7519 A1_header = { "alg": "RSA1_5", "enc": "A128CBC-HS256"} A1_claims = { "iss": "joe", "exp": 1300819380, "http://example.com/is_root": True} A1_token = \ "<KEY> + \ "<KEY>" + \ "oNfABIPJaZm0HaA415sv3aeuBWnD8J-Ui7Ah6cWafs3ZwwFKDFUUsWHSK-IPKxLG" + \ "TkND09XyjORj_CHAgOPJ-Sd8ONQRnJvWn_hXV1BNMHzUjPyYwEsRhDhzjAD26ima" + \ "sOTsgruobpYGoQcXUwFDn7moXPRfDE8-NoQX7N7ZYMmpUDkR-Cx9obNGwJQ3nM52" + \ "YCitxoQVPzjbl7WBuB7AohdBoZOdZ24WlN1lVIeh8v1K4krB8xgKvRU8kgFrEn_a" + \ "1rZgN5TiysnmzTROF869lQ." + \ "AxY8DCtDaGlsbGljb3RoZQ." 
+ \ "MKOle7UQrG6nSxTLX6Mqwt0orbHvAKeWnDYvpIAeZ72deHxz3roJDXQyhxx0wKaM" + \ "HDjUEOKIwrtkHthpqEanSBNYHZgmNOV7sln1Eu9g3J8." + \ "fiK51VwhsxJ-siBMR-YFiA" A2_token = \ "<KEY>" + \ "In0." + \ "g_hEwksO1Ax8Qn7HoN-BVeBoa8FXe0kpyk_XdcSmxvcM5_P296JXXtoHISr_DD_M" + \ "qewaQSH4dZOQHoUgKLeFly-9RI11TG-_Ge1bZFazBPwKC5lJ6OLANLMd0QSL4fYE" + \ "b9ERe-epKYE3xb2jfY1AltHqBO-PM6j23Guj2yDKnFv6WO72tteVzm_2n17SBFvh" + \ "DuR9a2nHTE67pe0XGBUS_TK7ecA-iVq5COeVdJR4U4VZGGlxRGPLRHvolVLEHx6D" + \ "YyLpw30Ay9R6d68YCLi9FYTq3hIXPK_-dmPlOUlKvPr1GgJzRoeC9G5qCvdcHWsq" + \ "JGTO_z3Wfo5zsqwkxruxwA." + \ "UmVkbW9uZCBXQSA5ODA1Mg." + \ "VwHERHPvCNcHHpTjkoigx3_ExK0Qc71RMEParpatm0X_qpg-w8kozSjfNIPPXiTB" + \ "BLXR65CIPkFqz4l1Ae9w_uowKiwyi9acgVztAi-pSL8GQSXnaamh9kX1mdh3M_TT" + \ "-FZGQFQsFhu0Z72gJKGdfGE-OE7hS1zuBD5oEUfk0Dmb0VzWEzpxxiSSBbBAzP10" + \ "l56pPfAtrjEYw-7ygeMkwBl6Z_mLS6w6xUgKlvW6ULmkV-uLC4FUiyKECK4e3WZY" + \ "Kw1bpgIqGYsw2v_grHjszJZ-_I5uM-9RA8ycX9KqPRp9gc6pXmoU_-27ATs9XCvr" + \ "ZXUtK2902AUzqpeEUJYjWWxSNsS-r1TJ1I-FMJ4XyAiGrfmo9hQPcNBYxPz3GQb2" + \ "8Y5CLSQfNgKSGt0A4isp1hBUXBHAndgtcslt7ZoQJaKe_nNJgNliWtWpJ_ebuOpE" + \ "l8jdhehdccnRMIwAmU1n7SPkmhIl1HlSOpvcvDfhUN5wuqU955vOBvfkBOh5A11U" + \ "zBuo2WlgZ6hYi9-e3w29bR0C2-pp3jbqxEDw3iWaf2dc5b-LnR0FEYXvI_tYk5rd" + \ "_J9N0mg0tQ6RbpxNEMNoA9QWk5lgdPvbh9BaO195abQ." 
+ \ "AVO9iT5AV4CzvDJCdhSFlQ" class TestJWT(unittest.TestCase): def test_A1(self): key = jwk.JWK(**E_A2_key) # first encode/decode ourselves t = jwt.JWT(A1_header, A1_claims, algs=jwe_algs_and_rsa1_5) t.make_encrypted_token(key) token = t.serialize() t.deserialize(token) # then try the test vector t = jwt.JWT(jwt=A1_token, key=key, check_claims=False, algs=jwe_algs_and_rsa1_5) # then try the test vector with explicit expiration date t = jwt.JWT(jwt=A1_token, key=key, check_claims={'exp': 1300819380}, algs=jwe_algs_and_rsa1_5) # Finally check it raises for expired tokens self.assertRaises(jwt.JWTExpired, jwt.JWT, jwt=A1_token, key=key, algs=jwe_algs_and_rsa1_5) def test_A2(self): sigkey = jwk.JWK(**A2_example['key']) touter = jwt.JWT(jwt=A2_token, key=E_A2_ex['key'], algs=jwe_algs_and_rsa1_5) tinner = jwt.JWT(jwt=touter.claims, key=sigkey, check_claims=False) self.assertEqual(A1_claims, json_decode(tinner.claims)) with self.assertRaises(jwe.InvalidJWEData): jwt.JWT(jwt=A2_token, key=E_A2_ex['key'], algs=jws_algs_and_rsa1_5) def test_decrypt_keyset(self): key = jwk.JWK(kid='testkey', **E_A2_key) keyset = jwk.JWKSet.from_json(json_encode(PrivateKeys)) # encrypt a new JWT with kid header = copy.copy(A1_header) header['kid'] = 'testkey' t = jwt.JWT(header, A1_claims, algs=jwe_algs_and_rsa1_5) t.make_encrypted_token(key) token = t.serialize() # try to decrypt without a matching key self.assertRaises(jwt.JWTMissingKey, jwt.JWT, jwt=token, key=keyset) # now decrypt with key keyset.add(key) jwt.JWT(jwt=token, key=keyset, algs=jwe_algs_and_rsa1_5, check_claims={'exp': 1300819380}) # encrypt a new JWT with wrong kid header = copy.copy(A1_header) header['kid'] = '1' t = jwt.JWT(header, A1_claims, algs=jwe_algs_and_rsa1_5) t.make_encrypted_token(key) token = t.serialize() self.assertRaises(jwe.InvalidJWEData, jwt.JWT, jwt=token, key=keyset) keyset = jwk.JWKSet.from_json(json_encode(PrivateKeys)) # encrypt a new JWT with no kid header = copy.copy(A1_header) t = jwt.JWT(header, 
A1_claims, algs=jwe_algs_and_rsa1_5) t.make_encrypted_token(key) token = t.serialize() # try to decrypt without a matching key self.assertRaises(jwt.JWTMissingKey, jwt.JWT, jwt=token, key=keyset) # now decrypt with key keyset.add(key) jwt.JWT(jwt=token, key=keyset, algs=jwe_algs_and_rsa1_5, check_claims={'exp': 1300819380}) def test_invalid_claim_type(self): key = jwk.JWK(**E_A2_key) claims = {"testclaim": "test"} claims.update(A1_claims) t = jwt.JWT(A1_header, claims, algs=jwe_algs_and_rsa1_5) t.make_encrypted_token(key) token = t.serialize() # Wrong string self.assertRaises(jwt.JWTInvalidClaimValue, jwt.JWT, jwt=token, key=key, algs=jwe_algs_and_rsa1_5, check_claims={"testclaim": "ijgi"}) # Wrong type self.assertRaises(jwt.JWTInvalidClaimValue, jwt.JWT, jwt=token, key=key, algs=jwe_algs_and_rsa1_5, check_claims={"testclaim": 123}) # Correct jwt.JWT(jwt=token, key=key, algs=jwe_algs_and_rsa1_5, check_claims={"testclaim": "test"}) def test_claim_params(self): key = jwk.JWK(**E_A2_key) default_claims = {"iss": "test", "exp": None} string_claims = '{"string_claim":"test"}' string_header = '{"alg":"RSA1_5","enc":"A128CBC-HS256"}' t = jwt.JWT(string_header, string_claims, default_claims=default_claims, algs=jwe_algs_and_rsa1_5) t.make_encrypted_token(key) token = t.serialize() # Check default_claims jwt.JWT(jwt=token, key=key, algs=jwe_algs_and_rsa1_5, check_claims={"iss": "test", "exp": None, "string_claim": "test"}) def test_claims_typ(self): key = jwk.JWK().generate(kty='oct') claims = '{"typ":"application/test"}' string_header = '{"alg":"HS256"}' t = jwt.JWT(string_header, claims) t.make_signed_token(key) token = t.serialize() # Same typ w/o application prefix jwt.JWT(jwt=token, key=key, check_claims={"typ": "test"}) self.assertRaises(jwt.JWTInvalidClaimValue, jwt.JWT, jwt=token, key=key, check_claims={"typ": "wrong"}) # Same typ w/ application prefix jwt.JWT(jwt=token, key=key, check_claims={"typ": "application/test"}) self.assertRaises(jwt.JWTInvalidClaimValue, 
jwt.JWT, jwt=token, key=key, check_claims={"typ": "application/wrong"}) # check that a '/' in the name makes it not be matched with # 'application/' prefix claims = '{"typ":"diffmime/test"}' t = jwt.JWT(string_header, claims) t.make_signed_token(key) token = t.serialize() self.assertRaises(jwt.JWTInvalidClaimValue, jwt.JWT, jwt=token, key=key, check_claims={"typ": "application/test"}) self.assertRaises(jwt.JWTInvalidClaimValue, jwt.JWT, jwt=token, key=key, check_claims={"typ": "test"}) # finally make sure it doesn't raise if not checked. jwt.JWT(jwt=token, key=key) def test_empty_claims(self): key = jwk.JWK().generate(kty='oct') # empty dict is valid t = jwt.JWT('{"alg":"HS256"}', {}) self.assertEqual('{}', t.claims) t.make_signed_token(key) token = t.serialize() c = jwt.JWT() c.deserialize(token, key) self.assertEqual('{}', c.claims) # empty string is also valid t = jwt.JWT('{"alg":"HS256"}', '') t.make_signed_token(key) token = t.serialize() # also a space is fine t = jwt.JWT('{"alg":"HS256"}', ' ') self.assertEqual(' ', t.claims) t.make_signed_token(key) token = t.serialize() c = jwt.JWT() c.deserialize(token, key) self.assertEqual(' ', c.claims) def test_Issue_209(self): key = jwk.JWK(**A3_key) t = jwt.JWT('{"alg":"ES256"}', {}) t.make_signed_token(key) token = t.serialize() ks = jwk.JWKSet() ks.add(jwk.JWK().generate(kty='oct')) ks.add(key) # Make sure this one does not assert when cycling through # the oct key before hitting the ES one jwt.JWT(jwt=token, key=ks) class ConformanceTests(unittest.TestCase): def test_unknown_key_params(self): key = jwk.JWK(kty='oct', k='secret', unknown='mystery') self.assertEqual('mystery', key.get('unknown')) def test_key_ops_values(self): self.assertRaises(jwk.InvalidJWKValue, jwk.JWK, kty='RSA', n=1, key_ops=['sign'], use='enc') self.assertRaises(jwk.InvalidJWKValue, jwk.JWK, kty='RSA', n=1, key_ops=['sign', 'sign']) def test_jwe_no_protected_header(self): enc = jwe.JWE(plaintext='plain') enc.add_recipient(jwk.JWK(kty='oct', 
k=base64url_encode(b'A' * 16)), '{"alg":"A128KW","enc":"A128GCM"}') def
<gh_stars>100-1000
#/usr/bin/python
# -*- coding: utf-8 -*-
"""
.. currentmodule:: pylayers.antprop.radionode

.. autosummary::
    :members:

"""
from __future__ import print_function
import doctest
import os
import glob
import os
import sys
import doctest
import numpy as np
# NOTE(review): os, doctest and numpy are each imported twice in this file;
# harmless, but worth de-duplicating in a dedicated cleanup.
if sys.version_info.major==2:
    import ConfigParser as cp
else:
    import configparser as cp
#import pylayers.util.easygui as eg
import pylayers.util.pyutil as pyu
import pylayers.util.geomutil as geo
from pylayers.mobility.trajectory import *
from pylayers.util.project import *
import numpy as np
import scipy as sp


class RadioNode(PyLayers):
    """ container for a Radio Node

    This class manages the spatial and temporal behavior of a radio node.

    Attributes
    ----------

    position
        position of the RadioNode np.array([],dtype=float), shape (3, N)
    time
        time tag of the RadioNode np.array([],dtype=float)
    orientation
        orientation 3x3xn (rotation matrix for each position)
    points
        dictionary of points (redundant information, rebuilt by pos2pt)
    type
        0 : undefined 1 : Tx 2 : Rx

    Methods
    -------

    info : display information about a RadioNode
    loadspa : load a spa file in PulsRay data format
    save : save a RadioNode file in .spa, .ini, .vect data format
    point : set a RadioNode point position
    points : set a RadioNode set of points position
    line : set a RadioNode route
    surface : set a RadioNode area
    volume : set a RadioNode volume
    gpoint : set a RadioNode point position (gui)
    gline : set a RadioNode route (gui)
    gsurface : set a RadioNode area (gui)
    gvolume : set a RadioNode volume (gui)
    show3 : display the RadioNode in the associated structure
    """

    def __init__(self, name = '',typ='undefined',
                 _fileini='radionode.ini',
                 _fileant='defant.vsh3'
                 ):
        """ the _fileini file must be placed in the ini directory

        Parameters
        ----------

        typ : string
            'undefined' | 'tx' | 'rx'
        _fileini : string
            file of RadioNode coordinates

        Notes
        -----

        The point [0,0,0] is defined as the first point (index 0)
        """
        # default position: a single point at the origin, shape (3, 1)
        self.position = np.array([], dtype=float)
        self.position = np.array([0, 0, 0]).reshape(3, 1)
        self.time = np.array([], dtype=float)
        # one 3x3 identity rotation matrix per position (3, 3, N)
        self.orientation = np.eye(3).reshape(3, 3, 1)
        self.typ = typ
        self.N = 1
        self.name=name
        #
        # clean existing .ini file
        #
        if _fileini == 'radionode.ini':
            # specialize the default file name per node type
            if typ == 'tx':
                _fileini = _fileini.replace('node', 'tx')
            if typ == 'rx':
                _fileini = _fileini.replace('node', 'rx')
            fileini = pyu.getlong(_fileini, 'ini')
            # delete radionode.ini if it exists (best-effort removal)
            try:
                os.remove(fileini)
            except:
                pass
        # derive the three companion file names from the common prefix
        prefix = _fileini.replace('.ini','')
        prefix = prefix.replace('.spa','')
        self.fileini = prefix + '.ini'
        self.filespa = prefix + '.spa'
        self.filegeom = prefix + '.vect'
        fileini = pyu.getlong(self.fileini,'ini')
        # if file _fileini exists it is loaded (best-effort; silently
        # starts from defaults when the file is missing or unreadable)
        try:
            fd = open(fileini,'r')
            fd.close()
            self.loadini(self.fileini, 'ini')
        except:
            pass
        self.save()

    def __repr__(self):
        """ representation of radio node

        Only position is shown, one 'index : x y z' line per point,
        coordinates rounded to 3 decimal places.
        """
        st = ''
        for k in range(self.N):
            st = st + str(k)+ ' : ' \
               + str(round(self.position[0,k]*1000)/1000.) + ' ' + \
                 str(round(self.position[1,k]*1000)/1000.) + ' ' + \
                 str(round(self.position[2,k]*1000)/1000.) + '\n'
        return(st)

    def pos2pt(self):
        """ convert position array to the points dict

        Keys are 1-based point indices; values are (3,) position columns.
        """
        npt = np.shape(self.position)[1]
        self.points = {}
        for k in range(npt):
            self.points[k + 1] = self.position[:, k]

    def transform(self,alpha,trans):
        """ transform position : rotation + translation (in the xy plane)

        Parameters
        ----------

        alpha : float
            angle in degrees (converted to radians internally via d2r)
        trans : np.array() (,2)
            xy translation applied to all points
        """
        d2r = np.pi/180
        # 2D rotation matrix acting on the x,y rows only
        Rot = np.array([[np.cos(d2r*alpha),-np.sin(d2r*alpha)],
                        [np.sin(d2r*alpha),np.cos(d2r*alpha)]])
        self.position[0:2,:] = np.dot(Rot,self.position[0:2,:])
        self.position[0:2,:] = self.position[0:2,:]+trans[:,np.newaxis]

    def info(self):
        """ display RadioNode information
        """
        print("npos : ", self.N)
        print("position : ", self.position)
        print("name : ", self.name)
        #print "orientation : ", self.orientation
        print("type : ", self.typ)
        print("fileini : ", self.fileini)
        print("filespa : ", self.filespa)
        print("filegeom : ", self.filegeom)
        # NOTE(review): self.fileant is not assigned anywhere in the visible
        # part of this class — confirm it is set elsewhere before calling info()
        print("fileant : ", self.fileant)

    def clear(self):
        """ clear positions

        The origin [0,0,0] is always defined as the first point
        """
        self.position = np.array([], dtype=float)
        self.position = np.array([0., 0., 0.]).reshape(3, 1)
        self.N = 1

    def points(self, pt=np.array([[0], [0], [0]])):
        """ add a set of points to RadioNode

        Parameters
        ----------

        pt : ndarray
            point position (3 x Npt)

        NOTE(review): this method shadows the `points` dict attribute set by
        pos2pt(); calling it replaces that attribute with a bound method name
        clash — confirm intended.
        """
        if type(pt) == list:
            pt = np.array(pt)
        self.position = pt
        self.N = np.shape(self.position)[1]
        self.save()

    def point(self, pt=[0, 0, 0], time=[1], orientation=[], mode='subst'):
        """ add a position to RadioNode

        The new RadioNode is saved in .spa

        Parameters
        ----------

        pt : ndarray
            point position (1 x 3)
        time : ndarray
            1x1
        orientation : ndarray
            3x3 matrix
        mode : string
            'subst' for replacement (default)
            'append' for appending

        Examples
        --------

        >>> from pylayers.simul.radionode import *
        >>> import numpy as np
        >>> tx = RadioNode()
        >>> tx.point([1,1,1],[1],np.eye(3),'subst')
        >>> tx.position
        array([[1],
               [1],
               [1]])
        """
        if isinstance(pt, list):
            pt = np.array(pt)
        if isinstance(time, list):
            time = np.array(time)
        # NOTE(review): the orientation argument is unconditionally
        # overwritten with the identity here — the passed value is never used;
        # confirm whether this is intentional.
        orientation = np.reshape(np.eye(3), (3, 3, 1))
        pt = np.array(pt)
        time = np.array(time)
        pt = np.reshape(pt, (3, 1))
        if mode == 'subst':
            self.time = time
            self.position = pt
            self.orientation = orientation
        else:
            # append; fall back to substitution when the existing arrays
            # are empty or shape-incompatible
            try:
                self.time = np.append(self.time, time, axis=0)
                self.position = np.append(self.position, pt, axis=1)
                self.orientation = np.append(self.orientation, orientation, axis=2)
            except:
                self.time = time
                self.position = pt
                self.orientation = orientation
        self.pos2pt()
        self.save()

    def linevect(self,npt=1, step=1.0 , ptt=[0, 0, 0], vec=[1, 0, 0], mode='subst'):
        """ create a line along a direction

        Parameters
        ----------

        npt : int
            number of points
        step : float
            incremental distance in meters
        ptt : list or array 1x3
            point tail (starting point)
        vec : list or array 1x3
            unitary vector
        mode : string
            'subst'
            'append'

        Examples
        --------

        >>> from pylayers.simul.radionode import *
        >>> r = RadioNode()
        >>> r.linevect(npt=3)
        >>> r
        0 : 0.0 0.0 0.0
        1 : 1.0 0.0 0.0
        2 : 2.0 0.0 0.0
        <BLANKLINE>

        NOTE(review): the default npt=1 always raises ValueError below —
        callers must pass npt > 1.
        """
        if isinstance(ptt, list):
            ptt = np.array(ptt)
        if isinstance(vec, list):
            vec = np.array(vec)
        if (npt <= 1):
            raise ValueError('npt should be greater than 1')
        ptt = np.reshape(ptt, (3, 1))
        vec = np.reshape(vec, (3, 1))
        # broadcast: one point every `step` meters along vec
        k = np.arange(npt)
        pt = ptt + k*step*vec
        if mode == 'subst':
            self.position = pt
        else:
            self.position = np.append(self.position, pt, axis=1)
        self.pos2pt()
        self.N = np.shape(self.position)[1]
        self.save()

    def line(self, npt, ptt=[0, 0, 0], pth=[1, 0, 0], mode='subst'):
        """ build a line trajectory for a RadioNode

        Parameters
        ----------

        npt : integer
            number of points
        ptt : list or ndarray
            starting point coordinates  (default [0,0,0])
        pth : list or ndarray
            ending point coordinates
        mode : string
            'subst' for replacement (default)
            'append' for appending

        Examples
        --------

        >>> from pylayers.simul.radionode import *
        >>> r = RadioNode()
        >>> r.line(3,[0,0,0],[1,0,0])
        >>> r.position
        array([[ 0. ,  0.5,  1. ],
               [ 0. ,  0. ,  0. ],
               [ 0. ,  0. ,  0. ]])
        """
        if isinstance(ptt, list):
            ptt = np.array(ptt)
        if isinstance(pth, list):
            pth = np.array(pth)
        if (npt <= 1):
            raise ValueError('npt should be greater than 1')
        ptt = np.reshape(ptt, (3, 1))
        pth = np.reshape(pth, (3, 1))
        # npt evenly spaced interpolation parameters in [0, 1]
        pas = 1.0 / (npt - 1)
        k = np.arange(0.0, 1.0 + pas, pas)
        pt = ptt + k * (pth-ptt)
        if mode == 'subst':
            self.position = pt
        else:
            self.position = np.append(self.position, pt, axis=1)
        self.pos2pt()
        self.N = np.shape(self.position)[1]
        self.save()

    def surface(self, N1=2, N2=2, p0=[0, 0, 0], p1=[1, 0, 0], p2=[0, 1, 0], mode='subst'):
        """ add a surface to RadioNode

        add a surface with basis (p0p1,p0p2)

        Parameters
        ----------

        N1 : int
            default 2
        N2 : int
            default 2
        p0 : array or list
            first point
        p1 : array or list
            second point
        p2 : array or list
            third point
        mode : string
            'subst'
            'append'

        Examples
        --------

        >>> from pylayers.simul.radionode import *
        >>> tx= RadioNode()
        >>> tx.surface(10,10,[0,0,1.5],[3.0,0,1.5],[0.0,3.0,1.5],'subst')
        """
        p0 = np.array(p0)
        p1 = np.array(p1)
        p2 = np.array(p2)
        p0 = np.reshape(p0, (3, 1))
        p1 = np.reshape(p1, (3, 1))
        p2 = np.reshape(p2, (3, 1))
        # N1 x N2 grid of interpolation parameters along each basis vector
        pas1 = 1.0 / (N1 - 1)
        k1 = np.arange(0.0, 1.0 + pas1, pas1)
        pas2 = 1.0 / (N2 - 1)
        k2 = np.arange(0.0, 1.0 + pas2, pas2)
        n1 = len(k1)
        n2 = len(k2)
        # kron builds the cartesian product of the two parameter ranges
        kk1 = np.kron(np.ones(n2), k1)
        kk2 = np.kron(k2, np.ones(n1))
        pt = p0 + kk1 * (p1 - p0) + kk2 * (p2 - p0)
        if mode == 'subst':
            self.position = pt
        else:
            self.position = np.append(self.position, pt, axis=1)
        self.pos2pt()
        self.N = np.shape(self.position)[1]
        self.save()

    def volume(self,N1=2,N2=2,N3=2,p0=[0, 0, 0],p1=[1, 0, 0], p2=[0, 1, 0], p3=[0, 0, 1], mode='subst'):
        """ add a volume to RadioNode

        build a volume with edges : p0p1, p0p2, p0p3

        Parameters
        ----------

        N1 : int
            number of points on axis 1
        N2 : int
            number of points on axis 2
        N3 : int
            number of
<gh_stars>0
"""
Class that applies the LIME framework to a collective classification task.
"""
import os
import random

import pandas as pd
from scipy.spatial.distance import pdist
from sklearn.linear_model import LinearRegression
import networkx as nx
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt


class Interpretability:
    """Class that handles all operations to apply LIME to graphical models."""

    def __init__(self, config_obj, connections_obj, generator_obj,
                 pred_builder_obj, util_obj):
        """Initializes all object dependencies for this object."""
        self.config_obj = config_obj
        """User settings."""
        self.generator_obj = generator_obj
        """Finds and generates relational ids."""
        self.connections_obj = connections_obj
        """Finds subnetworks for a specific data points."""
        self.pred_builder_obj = pred_builder_obj
        """Predicate Builder."""
        self.util_obj = util_obj
        """Utility methods."""
        self.relations = None
        """Relations present in the subnetwork, can be modified."""

    # public
    def explain(self, test_df):
        """Produce an explanation for a specific comment in the test set by
        applying the LIME framework.
        test_df: dataframe containing test comments.

        Interactive loop: keeps prompting for a com_id until the user
        enters -1.
        """
        print('\nEXPLANATION')
        test_df = test_df.copy()

        # merge independent and relational model predictions together.
        k = self.settings()
        ip_f, rp_f, r_out_f, psl_f, psl_data_f = self.define_file_folders()
        ind_df, rel_df = self.read_predictions(ip_f, rp_f)
        merged_df = self.merge_predictions(test_df, ind_df, rel_df)
        self.show_biggest_improvements(merged_df)
        com_id = self.user_input(merged_df)

        while com_id != -1:
            self.display_raw_instance_to_explain(merged_df, com_id)

            # identify subnetwork pertaining to comment needing explanation.
            expanded_df = self.gen_group_ids(merged_df)
            connections = self.retrieve_all_connections(com_id, expanded_df)
            filtered_df = self.filter_comments(merged_df, connections)

            # write predicate data pertaining to subnetwork.
            self.clear_old_data(psl_data_f)
            self.write_predicates(filtered_df, psl_data_f)

            # generate labels for perturbed instances, then fit a linear model.
            # NOTE(review): the perturbation / linear-model / display steps
            # below are currently disabled; only subnetwork inference runs.
            self.do_inference_over_subnetwork(com_id, psl_f)
            # labels_df, perturbed_df = self.read_perturbed_labels(psl_data_f,
            #         r_out_f)
            # x, y, wgts, features = self.preprocess(perturbed_df, labels_df,
            #         similarities)
            # g = self.fit_linear_model(x, y, wgts)

            # sort features by importance, indicated by their coefficients.
            # coef_indices, coef_values = self.extract_and_sort_coefficients(g)
            # top_features = self.rearrange_and_filter_features(features,
            #         coef_indices, coef_values, k=k)

            # obtain relation dataframes and display explanation.
            # relation_dict = self.read_subnetwork_relations(psl_data_f)
            # self.display_median_predictions(merged_df)
            # self.display_top_features(top_features, merged_df, relation_dict)
            # self.display_subnetwork(com_id, filtered_df)
            com_id = self.user_input(merged_df)

    # private
    def settings(self):
        """Settings for the interpretability procedure.
        Returns top k features."""
        k = 50
        return k

    def define_file_folders(self):
        """Returns absolute path directories, creating the interpretability
        output folders when missing."""
        ind_dir = self.config_obj.ind_dir
        rel_dir = self.config_obj.rel_dir
        domain = self.config_obj.domain

        ind_pred_f = ind_dir + 'output/' + domain + '/predictions/'
        rel_pred_f = rel_dir + 'output/' + domain + '/predictions/'
        rel_out_f = rel_dir + 'output/' + domain + '/interpretability/'
        psl_f = rel_dir + 'psl/'
        psl_data_f = psl_f + 'data/' + domain + '/interpretability/'
        if not os.path.exists(psl_data_f):
            os.makedirs(psl_data_f)
        if not os.path.exists(rel_out_f):
            os.makedirs(rel_out_f)
        return ind_pred_f, rel_pred_f, rel_out_f, psl_f, psl_data_f

    def read_predictions(self, ind_pred_f, rel_pred_f):
        """Reads in the predictions from the independent and relational models.
        ind_pred_f: folder to the independent predictions.
        rel_pred_f: folder to the relational predictions.
        Returns predictions for each model in their respective dataframes."""
        # fold is concatenated into file names; presumably a string — confirm.
        fold = self.config_obj.fold
        dset = 'test'

        ind_df = pd.read_csv(ind_pred_f + dset + '_' + fold + '_preds.csv')
        rel_df = pd.read_csv(rel_pred_f + 'predictions_' + fold + '.csv')
        return ind_df, rel_df

    def merge_predictions(self, test_df, ind_df, rel_df):
        """Merges independent and relational prediction dataframes.
        test_df: original testing dataframe.
        ind_df: dataframe with independent predictions.
        rel_df: dataframe with relational predictions.
        Returns merged dataframe."""
        # rel_df['rel_pred'] = rel_df['rel_pred'] / rel_df['rel_pred'].max()
        # inner-merge on shared columns (default pandas behavior)
        temp_df = test_df.merge(ind_df).merge(rel_df)
        return temp_df

    def show_biggest_improvements(self, df):
        """Presents the user with spam comments that were difficult to detect.
        df: comments dataframe.

        Ranks label==1 comments by how much the relational prediction
        improved over the independent one and prints the top 10."""
        temp_df = df[df['label'] == 1]
        # NOTE(review): assigning to a filtered slice may trigger pandas
        # SettingWithCopyWarning; a .copy() before this would silence it.
        temp_df['diff'] = temp_df['rel_pred'] - temp_df['ind_pred']
        temp_df = temp_df.sort_values('diff', ascending=False)
        print(temp_df.head(10))

    def gen_group_ids(self, df):
        """Generates any missing group_id columns.
        df: comments dataframe with predictions.
        Returns dataframe with filled in group_ids."""
        for relation, group, group_id in self.config_obj.relations:
            df = self.generator_obj.gen_group_id(df, group_id)
        return df

    def user_input(self, merged_df):
        """Takes input about which comment to show an explanation for.
        merged_df: testing dataframe with predictions.
        Returns comment id of the comment needing an explanation
        (-1 to quit)."""
        com_id = None
        # compare as strings so free-form console input can be validated
        com_ids = list(merged_df['com_id'].apply(str))
        while com_id != '-1' and com_id not in com_ids:
            s = '\nEnter com_id for an explanation (-1 to quit): '
            com_id = str(input(s))
        return int(com_id)

    def retrieve_all_connections(self, com_id, expanded_df):
        """Recursively obtain all relations to this comment and all of those
        comments' relations, and so on.
        expanded_df: comments dataframe with multiple same com_id rows.
        com_id: comment to be explained.
        Returns subnetwork of com_ids directly or indirectly connected to
        com_id."""
        debug = True
        possible_relations = self.config_obj.relations

        print('\nextracting subnetwork...')
        connections, relations = self.connections_obj.subnetwork(com_id,
                expanded_df, possible_relations, debug=debug)
        # keep only the relations actually present in the subnetwork;
        # write_predicates and do_inference_over_subnetwork read this later
        self.relations = [r for r in possible_relations if r[0] in relations]
        print('subnetwork size: ' + str(len(connections)))
        return connections

    def filter_comments(self, merged_df, connections):
        """Filters comments from test set to subnetwork.
        merged_df: test dataframe with predictions.
        connections: set of com_ids in the subnetwork.
        Returns: subnetwork dataframe with predictions."""
        filtered_df = merged_df[merged_df['com_id'].isin(connections)]
        return filtered_df

    # def perturb_input(self, df, samples=100, p=1.0):
    #     """Perturb the independent predictions to generate similar instances.
    #     df: dataframe containing comments of the subnetwork.
    #     samples: number of times to perturb the original instance.
    #     p: amount to perturb each prediction.
    #     Returns a dataframe with perturbed samples."""
    #     # perturb = lambda x: max(0, min(1, x + random.uniform(-p, p)))
    #     temp_df = df.copy()
    #     perturb = lambda x: random.uniform(0.0, p)
    #     for i in range(samples):
    #         temp_df[str(i)] = temp_df['ind_pred'].apply(perturb)
    #     return temp_df

    # def compute_similarity(self, df, samples=100):
    #     """Computes how similar each perturbed example is from the original.
    #     df: dataframe with original and perturbed instances.
    #     samples: number of perturbed samples.
    #     Returns a dict of similarity scores, a list of sample ids."""
    #     similarities = {}
    #     sample_ids = []
    #     for i in range(samples):
    #         sample_id = str(i)
    #         temp_df = df[['ind_pred', sample_id]]
    #         similarities[sample_id] = pdist(temp_df.values.T)[0]
    #         sample_ids.append(sample_id)
    #     return similarities, sample_ids

    def clear_old_data(self, rel_data_f):
        """Clears out old predicate data and database stores.
        rel_data_f: relational model data folder."""
        # NOTE(review): shells out with os.system and an interpolated path;
        # fine for a trusted local path but subprocess.run with a list would
        # be safer and would surface failures.
        os.system('rm ' + rel_data_f + '*.csv')
        os.system('rm ' + rel_data_f + '*.tsv')
        os.system('rm ' + rel_data_f + 'db/*.db')

    def write_predicates(self, df, rel_data_f):
        """Writes predicate data for relations to be used by relational model.
        df: subnetwork dataframe.
        rel_data_f: relational model data folder."""
        dset = 'test'
        self.pred_builder_obj.build_comments(df, dset, rel_data_f)
        # self.relations must have been populated by retrieve_all_connections
        for relation, group, group_id in self.relations:
            self.pred_builder_obj.build_relations(relation, group, group_id,
                    df, dset, rel_data_f)

    # def write_perturbations(self, df, sample_ids, rel_data_f):
    #     """Writes perturbed instances in a way to be easily loaded by the
    #     relational model.
    #     df: subnetwork dataframe.
    #     rel_data_f: relational model data folder."""
    #     temp_df = df.filter(items=['com_id'] + sample_ids)
    #     temp_df.to_csv(rel_data_f + 'perturbed.csv', index=None)

    def do_inference_over_subnetwork(self, com_id, psl_f):
        """Calls relational model to produce labels for the altered instances.
        com_id: id of the comment needing explaining."""
        fold = self.config_obj.fold
        domain = self.config_obj.domain
        relations = [r[0] for r in self.relations]

        arg_list = [str(com_id), fold, domain] + relations
        # NOTE(review): os.chdir changes the process-wide working directory
        # and is never restored — subsequent relative paths are affected.
        execute = 'java -Xmx60g -cp ./target/classes:`cat classpath.out` '
        execute += 'spam.Interpretability ' + ' '.join(arg_list)
        os.chdir(psl_f)
        os.system(execute)

    def read_perturbed_labels(self, rel_data_f, rel_out_f):
        """Read in the generated labels for the perturbed instances.
        rel_data_f: relational model data folder.
        rel_out_f: relational model output folder.
Returns dataframe with labels, dataframe with perturbed isntances.""" fold = self.config_obj.fold os.chdir('../scripts/') labels_df = pd.read_csv(rel_out_f + 'labels_' + fold + '.csv') perturbed_df = pd.read_csv(rel_data_f + 'perturbed.csv') return labels_df, perturbed_df def preprocess(self, perturbed_df, labels_df, similarities): """Processes the perturbed instances, generated labels, and perturbed similarities to be fitted by the interpretable model. perturbed_df: dataframe of perturbed instances. labels_df: dataframe of generated labels for perturbed instances. similarities: dict of similarity scores of each perturbed instance to the original: (key, value) = (sample_id, sim_score). Returns feature data, labels, weights, and feature ids.""" features = list(perturbed_df['com_id']) temp_df = perturbed_df.drop(['com_id'], axis=1) temp_df = temp_df.transpose() x = temp_df.values temp_df = labels_df.filter(items=['pred']) y = temp_df.values temp_list = list(similarities.items()) temp_list = sorted(temp_list, key=lambda x: int(x[0])) weights = [self.util_obj.div0(1.0, v) for k, v in temp_list] return x, y, weights, features def fit_linear_model(self, x, y, weights): """Fits a linear model using features of the perturbed instances to the labels generated for those perturbed instances. x: 2d array containing perturbed instance features. y: 2d array containing labels for the perturbed instances. wgts: 1d array containing weights for each perturbed instance. Returns fitted linear model.""" print('\nFitting linear model...') g = LinearRegression() g = g.fit(x, y, weights) return g def extract_and_sort_coefficients(self, g): """Sort coefficients by absolute value and then rearrange features to line up with their respective coefficient. g: fitted linear model. Returns list of coefficient indices, list of coefficient values."""
        general_questionnaires.exclude(visibility=Questionnaire.Visibility.HIDDEN)
        contributor_questionnaires = contributor_questionnaires.exclude(visibility=Questionnaire.Visibility.HIDDEN)

    # Partition the general questionnaires by their configured position
    # relative to the contributor questionnaires.
    general_questionnaires_top = [
        questionnaire for questionnaire in general_questionnaires if questionnaire.is_above_contributors
    ]
    general_questionnaires_bottom = [
        questionnaire for questionnaire in general_questionnaires if questionnaire.is_below_contributors
    ]

    template_data = dict(
        general_questionnaires_top=general_questionnaires_top,
        general_questionnaires_bottom=general_questionnaires_bottom,
        contributor_questionnaires=contributor_questionnaires,
        filter_questionnaires=filter_questionnaires,
    )
    return render(request, "staff_questionnaire_index.html", template_data)


@manager_required
def questionnaire_view(request, questionnaire_id):
    """Render a preview of the questionnaire's voting form."""
    questionnaire = get_object_or_404(Questionnaire, id=questionnaire_id)

    # build forms
    # A throwaway contribution for the current user is enough to render the
    # voting form; nothing is saved here.
    contribution = Contribution(contributor=request.user)
    form = QuestionnaireVotingForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)

    return render(request, "staff_questionnaire_view.html", dict(forms=[form], questionnaire=questionnaire))


@manager_required
def questionnaire_create(request):
    """Create a new questionnaire together with its inline questions."""
    questionnaire = Questionnaire()
    InlineQuestionFormset = inlineformset_factory(
        Questionnaire, Question, formset=AtLeastOneFormSet, form=QuestionForm, extra=1, exclude=("questionnaire",)
    )

    form = QuestionnaireForm(request.POST or None, instance=questionnaire)
    formset = InlineQuestionFormset(request.POST or None, instance=questionnaire)

    if form.is_valid() and formset.is_valid():
        # force_highest_order places the new questionnaire last in ordering.
        form.save(force_highest_order=True)
        formset.save()

        messages.success(request, _("Successfully created questionnaire."))
        return redirect("staff:questionnaire_index")

    return render(request, "staff_questionnaire_form.html", dict(form=form, formset=formset, editable=True))


def disable_all_except_named(fields: Dict[str, Any], names_of_editable: Container[str]):
    """Disable every form field whose name is not in names_of_editable."""
    for name, field in fields.items():
        if name not in names_of_editable:
            field.disabled = True


def make_questionnaire_edit_forms(request, questionnaire, editable):
    """Build the questionnaire form and question formset for editing.

    When editable is False, the formset is pinned to the current question
    count (nothing can be added or deleted) and all but a whitelist of
    fields are disabled.
    """
    if editable:
        formset_kwargs = {"extra": 1}
    else:
        # Freeze the number of question forms to exactly the current count.
        question_count = questionnaire.questions.count()
        formset_kwargs = {
            "extra": 0,
            "can_delete": False,
            "validate_min": True,
            "validate_max": True,
            "min_num": question_count,
            "max_num": question_count,
        }

    InlineQuestionFormset = inlineformset_factory(
        Questionnaire,
        Question,
        formset=AtLeastOneFormSet,
        form=QuestionForm,
        exclude=("questionnaire",),
        **formset_kwargs,
    )

    form = QuestionnaireForm(request.POST or None, instance=questionnaire)
    formset = InlineQuestionFormset(request.POST or None, instance=questionnaire)

    if not editable:
        disable_all_except_named(
            form.fields, ["visibility", "is_locked", "name_de", "name_en", "description_de", "description_en", "type"]
        )
        for question_form in formset.forms:
            disable_all_except_named(question_form.fields, ["id"])

    # disallow type changed from and to contributor
    form.fields["type"].choices = [
        choice
        for choice in Questionnaire.Type.choices
        if (choice[0] == Questionnaire.Type.CONTRIBUTOR) == (questionnaire.type == Questionnaire.Type.CONTRIBUTOR)
    ]

    return form, formset


@manager_required
def questionnaire_edit(request, questionnaire_id):
    """Edit a questionnaire; question changes are only saved when editable."""
    questionnaire = get_object_or_404(Questionnaire, id=questionnaire_id)
    editable = questionnaire.can_be_edited_by_manager

    form, formset = make_questionnaire_edit_forms(request, questionnaire, editable)

    if form.is_valid() and formset.is_valid():
        form.save()
        if editable:
            formset.save()

        messages.success(request, _("Successfully updated questionnaire."))
        return redirect("staff:questionnaire_index")

    template_data = dict(questionnaire=questionnaire, form=form, formset=formset, editable=editable)
    return render(request, "staff_questionnaire_form.html", template_data)


def get_identical_form_and_formset(questionnaire):
    """
    Generates a Questionnaire creation form and formset filled out like the
    already exisiting Questionnaire specified in questionnaire_id.
    Used for copying and creating of new versions.
    """
    inline_question_formset = inlineformset_factory(
        Questionnaire, Question, formset=AtLeastOneFormSet, form=QuestionForm, extra=1, exclude=("questionnaire",)
    )

    form = QuestionnaireForm(instance=questionnaire)
    return form, inline_question_formset(instance=questionnaire, queryset=questionnaire.questions.all())


@manager_required
def questionnaire_copy(request, questionnaire_id):
    """Create a new questionnaire pre-filled from an existing one."""
    copied_questionnaire = get_object_or_404(Questionnaire, id=questionnaire_id)

    if request.method == "POST":
        questionnaire = Questionnaire()
        InlineQuestionFormset = inlineformset_factory(
            Questionnaire, Question, formset=AtLeastOneFormSet, form=QuestionForm, extra=1, exclude=("questionnaire",)
        )

        form = QuestionnaireForm(request.POST, instance=questionnaire)
        # save_as_new detaches the submitted questions from the source
        # questionnaire so they are created as new rows.
        formset = InlineQuestionFormset(request.POST.copy(), instance=questionnaire, save_as_new=True)

        if form.is_valid() and formset.is_valid():
            form.save()
            formset.save()

            messages.success(request, _("Successfully created questionnaire."))
            return redirect("staff:questionnaire_index")

        return render(request, "staff_questionnaire_form.html", dict(form=form, formset=formset, editable=True))

    form, formset = get_identical_form_and_formset(copied_questionnaire)
    return render(request, "staff_questionnaire_form.html", dict(form=form, formset=formset, editable=True))


@manager_required
def questionnaire_new_version(request, questionnaire_id):
    """Create a new version of a questionnaire, hiding and renaming the old one."""
    old_questionnaire = get_object_or_404(Questionnaire, id=questionnaire_id)

    # Check if we can use the old name with the current time stamp.
    timestamp = date.today()
    new_name_de = "{} (until {})".format(old_questionnaire.name_de, str(timestamp))
    new_name_en = "{} (until {})".format(old_questionnaire.name_en, str(timestamp))

    # If not, redirect back and suggest to edit the already created version.
    if Questionnaire.objects.filter(Q(name_de=new_name_de) | Q(name_en=new_name_en)):
        messages.error(request, _("Questionnaire creation aborted. A new version was already created today."))
        return redirect("staff:questionnaire_index")

    if request.method == "POST":
        questionnaire = Questionnaire()
        InlineQuestionFormset = inlineformset_factory(
            Questionnaire, Question, formset=AtLeastOneFormSet, form=QuestionForm, extra=1, exclude=("questionnaire",)
        )

        form = QuestionnaireForm(request.POST, instance=questionnaire)
        formset = InlineQuestionFormset(request.POST.copy(), instance=questionnaire, save_as_new=True)

        try:
            with transaction.atomic():
                # Change old name before checking Form.
                old_questionnaire.name_de = new_name_de
                old_questionnaire.name_en = new_name_en
                old_questionnaire.visibility = Questionnaire.Visibility.HIDDEN
                old_questionnaire.save()

                # Raising IntegrityError is the mechanism used to roll back
                # the renaming of the old questionnaire on validation failure.
                if not form.is_valid() or not formset.is_valid():
                    raise IntegrityError

                form.save()
                formset.save()

                messages.success(request, _("Successfully created questionnaire."))
                return redirect("staff:questionnaire_index")
        except IntegrityError:
            return render(request, "staff_questionnaire_form.html", dict(form=form, formset=formset, editable=True))

    form, formset = get_identical_form_and_formset(old_questionnaire)
    return render(request, "staff_questionnaire_form.html", dict(form=form, formset=formset, editable=True))


@require_POST
@manager_required
def questionnaire_delete(request):
    questionnaire_id = request.POST.get("questionnaire_id")
    questionnaire = get_object_or_404(Questionnaire, id=questionnaire_id)

    if not questionnaire.can_be_deleted_by_manager:
        raise SuspiciousOperation("Deleting questionnaire not allowed")
    questionnaire.delete()
    return HttpResponse()  # 200 OK


@require_POST
@manager_required
def questionnaire_update_indices(request):
    # The POST body maps questionnaire ids to their new ordering values.
    updated_indices = request.POST
    for questionnaire_id, new_order in updated_indices.items():
        questionnaire = Questionnaire.objects.get(pk=questionnaire_id)
        questionnaire.order = new_order
        questionnaire.save()
    return HttpResponse()


@require_POST
@manager_required
def questionnaire_visibility(request):
    questionnaire_id = request.POST.get("questionnaire_id")
    visibility = int(request.POST.get("visibility"))

    if visibility not in Questionnaire.Visibility.values:
        raise SuspiciousOperation("Invalid visibility choice")

    questionnaire = get_object_or_404(Questionnaire, id=questionnaire_id)
    questionnaire.visibility = visibility
    questionnaire.save()
    return HttpResponse()


@require_POST
@manager_required
def questionnaire_set_locked(request):
    questionnaire_id = request.POST.get("questionnaire_id")
    # POST value is "0"/"1"; convert to bool via int.
    is_locked = bool(int(request.POST.get("is_locked")))
    questionnaire = get_object_or_404(Questionnaire, id=questionnaire_id)
    questionnaire.is_locked = is_locked
    questionnaire.save()
    return HttpResponse()


@manager_required
def degree_index(request):
    """Edit all degrees at once via a model formset."""
    degrees = Degree.objects.all()

    DegreeFormset = modelformset_factory(
        Degree, form=DegreeForm, formset=ModelWithImportNamesFormSet, can_delete=True, extra=1
    )
    formset = DegreeFormset(request.POST or None, queryset=degrees)

    if formset.is_valid():
        formset.save()

        messages.success(request, _("Successfully updated the degrees."))
        return redirect("staff:degree_index")

    return render(request, "staff_degree_index.html", dict(formset=formset))


@manager_required
def course_type_index(request):
    """Edit all course types at once via a model formset."""
    course_types = CourseType.objects.all()

    CourseTypeFormset = modelformset_factory(
        CourseType, form=CourseTypeForm, formset=ModelWithImportNamesFormSet, can_delete=True, extra=1
    )
    formset = CourseTypeFormset(request.POST or None, queryset=course_types)

    if formset.is_valid():
        formset.save()

        messages.success(request, _("Successfully updated the course types."))
        return redirect("staff:course_type_index")

    return render(request, "staff_course_type_index.html", dict(formset=formset))


@manager_required
def course_type_merge_selection(request):
    """Select two course types, then redirect to the merge confirmation."""
    form = CourseTypeMergeSelectionForm(request.POST or None)

    if form.is_valid():
        main_type = form.cleaned_data["main_type"]
        other_type = form.cleaned_data["other_type"]
        return redirect("staff:course_type_merge", main_type.id, other_type.id)

    return render(request, "staff_course_type_merge_selection.html", dict(form=form))


@manager_required
def course_type_merge(request, main_type_id, other_type_id):
    """Merge other_type into main_type, re-pointing all affected courses."""
    main_type = get_object_or_404(CourseType, id=main_type_id)
    other_type = get_object_or_404(CourseType, id=other_type_id)

    if request.method == "POST":
        main_type.import_names += other_type.import_names
        main_type.save()
        Course.objects.filter(type=other_type).update(type=main_type)
        other_type.delete()
        messages.success(request, _("Successfully merged course types."))
        return redirect("staff:course_type_index")

    courses_with_other_type = Course.objects.filter(type=other_type).order_by("semester__created_at", "name_de")
    return render(
        request,
        "staff_course_type_merge.html",
        dict(main_type=main_type, other_type=other_type, courses_with_other_type=courses_with_other_type),
    )


@manager_required
def text_answer_warnings_index(request):
    text_answer_warnings = TextAnswerWarning.objects.all()

    TextAnswerWarningFormSet = modelformset_factory(
        TextAnswerWarning, form=TextAnswerWarningForm, can_delete=True, extra=1
    )
    formset = TextAnswerWarningFormSet(request.POST or None, queryset=text_answer_warnings)

    if formset.is_valid():
        formset.save()

        messages.success(request, _("Successfully updated text warning answers."))
        return redirect("staff:text_answer_warnings")

    return render(
        request,
        "staff_text_answer_warnings.html",
        dict(
            formset=formset,
            text_answer_warnings=TextAnswerWarning.objects.all(),
        ),
    )


@manager_required
def user_index(request):
    filter_users = get_parameter_from_url_or_session(request, "filter_users")

    users = UserProfile.objects.all()
    if filter_users:
        users = users.exclude(is_active=False)

    users = (
        users
        # the following six annotations basically add three bools indicating
        # whether each user is part of a group or not.
        .annotate(manager_group_count=Sum(Case(When(groups__name="Manager", then=1), output_field=IntegerField())))
        .annotate(is_manager=ExpressionWrapper(Q(manager_group_count__exact=1), output_field=BooleanField()))
        .annotate(reviewer_group_count=Sum(Case(When(groups__name="Reviewer", then=1), output_field=IntegerField())))
        .annotate(is_reviewer=ExpressionWrapper(Q(reviewer_group_count__exact=1), output_field=BooleanField()))
        .annotate(
            grade_publisher_group_count=Sum(
                Case(When(groups__name="Grade publisher", then=1), output_field=IntegerField())
            )
        )
        .annotate(
            is_grade_publisher=ExpressionWrapper(Q(grade_publisher_group_count__exact=1), output_field=BooleanField())
        )
        .prefetch_related(
            "contributions",
            "evaluations_participating_in",
            "evaluations_participating_in__course__semester",
            "represented_users",
            "ccing_users",
            "courses_responsible_for",
        )
        .order_by("last_name", "first_name", "email")
    )

    return render(request, "staff_user_index.html", dict(users=users, filter_users=filter_users))


@manager_required
def user_create(request):
    form = UserForm(request.POST or None, instance=UserProfile())

    if form.is_valid():
        form.save()
        messages.success(request, _("Successfully created user."))
        return redirect("staff:user_index")

    return render(request, "staff_user_form.html", dict(form=form))


@manager_required
def user_import(request):
    """Two-phase user import: a dry-run "test" first, then "import" applies
    the previously uploaded, validated file."""
    excel_form = UserImportForm(request.POST or None, request.FILES or None)
    import_type = ImportType.USER

    errors = {}
    warnings = {}
    success_messages = []

    if request.method == "POST":
        operation = request.POST.get("operation")
        if operation not in ("test", "import"):
            raise SuspiciousOperation("Invalid POST operation")

        if operation == "test":
            delete_import_file(request.user.id, import_type)  # remove old files if still exist
            excel_form.fields["excel_file"].required = True
            if excel_form.is_valid():
                excel_file = excel_form.cleaned_data["excel_file"]
                file_content = excel_file.read()

                __, success_messages, warnings, errors = UserImporter.process(file_content, test_run=True)
                # only keep the file for the apply phase if the dry run was clean.
                if not errors:
                    save_import_file(excel_file, request.user.id, import_type)

        elif operation == "import":
            file_content = get_import_file_content_or_raise(request.user.id, import_type)
            __, success_messages, warnings, __ = UserImporter.process(file_content, test_run=False)
            forward_messages(request, success_messages, warnings)
            delete_import_file(request.user.id, import_type)
            return redirect("staff:user_index")

    test_passed = import_file_exists(request.user.id, import_type)

    # casting warnings to a normal dict is necessary for the template to iterate over it.
    return render(
        request,
        "staff_user_import.html",
        dict(
            excel_form=excel_form,
            success_messages=success_messages,
            errors=sorted_messages(errors),
            warnings=sorted_messages(warnings),
            test_passed=test_passed,
        ),
    )


@manager_required
def user_edit(request, user_id):
    # See comment in helper_evaluation_edit
    @receiver(RewardPointGranting.granted_by_removal, weak=True)
    def notify_reward_points(grantings, **_kwargs):
        assert len(grantings) == 1

        messages.info(
            request,
            ngettext(
                'The removal of evaluations has granted the user "{granting.user_profile.email}" {granting.value} reward point for the active semester.',
                'The removal of evaluations has granted the user "{granting.user_profile.email}" {granting.value} reward points for the active semester.',
                grantings[0].value,
            ).format(granting=grantings[0]),
        )

    user = get_object_or_404(UserProfile, id=user_id)
    form = UserForm(request.POST or None, request.FILES or None, instance=user)

    evaluations_contributing_to = (
        Evaluation.objects.filter(Q(contributions__contributor=user) | Q(course__responsibles__in=[user]))
        .distinct()
        .order_by("course__semester")
    )

    if form.is_valid():
        form.save()
        delete_navbar_cache_for_users([user])
        messages.success(request, _("Successfully updated user."))

        for message in form.remove_messages:
            messages.warning(request, message)

        return redirect("staff:user_index")

    return render(
        request, "staff_user_form.html", dict(form=form, evaluations_contributing_to=evaluations_contributing_to)
    )


@require_POST
@manager_required
def user_delete(request):
    user_id = request.POST.get("user_id")
    user = get_object_or_404(UserProfile, id=user_id)

    if not user.can_be_deleted_by_manager:
        raise SuspiciousOperation("Deleting user not allowed")
    user.delete()
    return HttpResponse()  # 200 OK


@manager_required
def user_bulk_update(request):
    """Two-phase bulk update, analogous to user_import: dry-run then apply."""
    form = UserBulkUpdateForm(request.POST or None, request.FILES or None)

    operation = request.POST.get("operation")
    test_run = operation == "test"
    import_type = ImportType.USER_BULK_UPDATE

    if request.POST:
        if operation not in ("test", "bulk_update"):
            raise SuspiciousOperation("Invalid POST operation")

        if test_run:
            delete_import_file(request.user.id, import_type)  # remove old files if still exist

            form.fields["user_file"].required = True
            if form.is_valid():
                user_file = form.cleaned_data["user_file"]
                file_content = user_file.read()
                success = False
                try:
                    success = bulk_update_users(request, file_content, test_run)
                except Exception:  # pylint: disable=broad-except
                    if settings.DEBUG:
                        raise
                    messages.error(
                        request,
                        _("An error happened when processing the file. Make sure the file meets the requirements."),
                    )

                if success:
                    save_import_file(user_file, request.user.id, import_type)
        else:
            file_content = get_import_file_content_or_raise(request.user.id, import_type)
            bulk_update_users(request, file_content, test_run)
            delete_import_file(request.user.id, import_type)
            return redirect("staff:user_index")

    test_passed = import_file_exists(request.user.id, import_type)
    return render(request, "staff_user_bulk_update.html", dict(form=form, test_passed=test_passed))


@manager_required
def user_merge_selection(request):
    form = UserMergeSelectionForm(request.POST or None)

    if form.is_valid():
        main_user = form.cleaned_data["main_user"]
        other_user = form.cleaned_data["other_user"]
        return redirect("staff:user_merge", main_user.id, other_user.id)

    return render(request, "staff_user_merge_selection.html", dict(form=form))


@manager_required
def user_merge(request, main_user_id, other_user_id):
    main_user = get_object_or_404(UserProfile, id=main_user_id)
    other_user = get_object_or_404(UserProfile, id=other_user_id)

    if request.method == "POST":
        merged_user, errors, warnings = merge_users(main_user, other_user)
        if errors:
            messages.error(request, _("Merging the users failed. No data was changed."))
        else:
            messages.success(request, _("Successfully merged users."))
<reponame>kushckwl/TWLight<gh_stars>0
# -*- coding: utf-8 -*-
import json
import os

from jsonschema import validate
from jsonschema.exceptions import ValidationError as JSONSchemaValidationError

from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
from django.core.validators import MaxValueValidator
from django.core.exceptions import ValidationError
from django.urls import reverse_lazy, reverse
from django.db import models

from django_countries.fields import CountryField
from django.utils.safestring import mark_safe

from TWLight.resources.helpers import (
    check_for_target_url_duplication_and_generate_error_message,
    get_tags_json_schema,
)

# Use language autonyms from Wikimedia.
# We periodically pull:
# https://raw.githubusercontent.com/wikimedia/language-data/master/data/language-data.json
# into locale/language-data.json
# NOTE(review): this file handle is never closed — consider a with-statement.
language_data_json = open(os.path.join(settings.LOCALE_PATHS[0], "language-data.json"))
languages = json.loads(language_data_json.read())["languages"]

# Build (code, autonym) choice tuples; the autonym is the last element of
# each language-data entry.
RESOURCE_LANGUAGES = []
for lang_code, lang_data in languages.items():
    autonym = lang_data[-1]
    RESOURCE_LANGUAGES += [(lang_code, autonym)]

RESOURCE_LANGUAGE_CODES = [lang[0] for lang in RESOURCE_LANGUAGES]


def validate_language_code(code):
    """
    Takes a language code and verifies that it is the first element of a
    tuple in RESOURCE_LANGUAGES.

    Raises ValidationError for unknown codes.
    """
    if code not in RESOURCE_LANGUAGE_CODES:
        raise ValidationError(
            "%(code)s is not a valid language code. You must enter an ISO "
            "language code, as in the LANGUAGES setting at "
            "https://github.com/WikipediaLibrary/TWLight/blob/master/TWLight/settings/base.py",
            params={"code": code},
        )


class Language(models.Model):
    """
    We want to be able to indicate the language(s) of resources offered by a
    Partner or in a Stream.

    While having a standalone model is kind of overkill, it offers the
    following advantages:
    * We need to be able to indicate multiple languages for a given Partner
      or Stream.
    * We will want to be able to filter Partners and Streams by language
      (e.g. in order to limit to the user's preferred language); we can't do
      that efficiently with something like django-multiselect or
      django-taggit.
    * In order to be able to filter by language, we also need to use a
      controlled vocabulary which we can validate; using a model makes this
      easy.
    * We default to Django's global LANGUAGES setting, which is extensive
      and already translated. We can always expand it if we find ourselves
      needing more languages, though.
    """

    class Meta:
        verbose_name = "Language"
        verbose_name_plural = "Languages"

    # The ISO code; choices and validator both come from the Wikimedia
    # language-data file loaded above.
    language = models.CharField(
        choices=RESOURCE_LANGUAGES,
        max_length=12,
        validators=[validate_language_code],
        unique=True,
    )

    def save(self, *args, **kwargs):
        """Cause validator to be run."""
        self.clean_fields()
        super(Language, self).save(*args, **kwargs)

    def __str__(self):
        return self.get_language_display()


class AvailablePartnerManager(models.Manager):
    # Restricts the default queryset to partners users may apply to
    # (AVAILABLE or WAITLIST).
    def get_queryset(self):
        return (
            super(AvailablePartnerManager, self)
            .get_queryset()
            .filter(status__in=[Partner.AVAILABLE, Partner.WAITLIST])
        )


class Partner(models.Model):
    """
    A partner organization which provides access grants to paywalled
    resources. This model tracks contact information for the partner as well
    as extra information they require on access grant applications.
    """

    class Meta:
        app_label = "resources"
        verbose_name = "partner"
        verbose_name_plural = "partners"
        ordering = ["company_name"]

    # --------------------------------------------------------------------------
    # Managers
    # --------------------------------------------------------------------------

    # Define managers. Note that the basic manager must be first to make
    # Django internals work as expected, but we define objects as our custom
    # manager so that we don't inadvertently expose unavailable Partners to
    # end users.
    even_not_available = models.Manager()
    objects = AvailablePartnerManager()

    # --------------------------------------------------------------------------
    # Attributes
    # --------------------------------------------------------------------------

    company_name = models.CharField(
        max_length=255,
        help_text="Partner's name (e.g. McFarland). Note: "
        "this will be user-visible and *not translated*.",
    )
    date_created = models.DateField(auto_now_add=True)
    coordinator = models.ForeignKey(
        User,
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        help_text="The coordinator for this Partner, if any.",
    )
    featured = models.BooleanField(
        default=False,
        help_text="Mark as true to feature this partner on the front page.",
    )
    company_location = CountryField(null=True, help_text="Partner's primary location.")

    # Status metadata
    # --------------------------------------------------------------------------
    # AVAILABLE partners are displayed to users.
    # NOT AVAILABLE partners are only accessible through the admin interface.
    # These may be, e.g., partners TWL used to work with but no longer does
    # (kept in the database for recordkeeping), or they may be partners TWL
    # is setting up a relationship with but isn't ready to expose to public
    # view.
    # We default to NOT_AVAILABLE to avoid inadvertently exposing Partners to
    # the application process when they're not ready yet, and to give staff
    # a chance to build their record incrementally and fix errors.
    AVAILABLE = 0
    NOT_AVAILABLE = 1
    WAITLIST = 2

    STATUS_CHOICES = (
        (AVAILABLE, "Available"),
        (NOT_AVAILABLE, "Not available"),
        (WAITLIST, "Waitlisted"),
    )

    # Authorization methods, used in both Partner and Stream
    EMAIL = 0
    CODES = 1
    PROXY = 2
    BUNDLE = 3
    LINK = 4

    AUTHORIZATION_METHODS = (
        (EMAIL, "Email"),
        (CODES, "Access codes"),
        (PROXY, "Proxy"),
        (BUNDLE, "Library Bundle"),
        (LINK, "Link"),
    )

    status = models.IntegerField(
        choices=STATUS_CHOICES,
        default=NOT_AVAILABLE,
        help_text="Should this Partner be displayed to users? Is it "
        "open for applications right now?",
    )

    renewals_available = models.BooleanField(
        default=False,
        help_text="Can access grants to this partner be renewed? If so, "
        "users will be able to request renewals at any time.",
    )

    accounts_available = models.PositiveSmallIntegerField(
        blank=True,
        null=True,
        help_text="Add the number of new accounts to the existing value, not by resetting it to zero. If 'specific stream' is true, change accounts availability at the collection level.",
    )

    # Optional resource metadata
    # --------------------------------------------------------------------------

    target_url = models.URLField(
        blank=True,
        null=True,
        help_text="Link to partner resources. Required for proxied resources; optional otherwise.",
    )

    terms_of_use = models.URLField(
        blank=True,
        null=True,
        help_text="Link to terms of use. Required if users must agree to "
        "terms of use to get access; optional otherwise.",
    )

    send_instructions = models.TextField(
        blank=True,
        null=True,
        help_text="Optional instructions for sending application data to "
        "this partner.",
    )

    user_instructions = models.TextField(
        blank=True,
        null=True,
        help_text="Optional instructions for editors to use access codes "
        "or free signup URLs for this partner. Sent via email upon "
        "application approval (for links) or access code assignment. "
        "If this partner has collections, fill out user instructions "
        "on each collection instead.",
    )

    excerpt_limit = models.PositiveSmallIntegerField(
        blank=True,
        null=True,
        help_text="Optional excerpt limit in terms of number of words per article. Leave empty if no limit.",
    )

    excerpt_limit_percentage = models.PositiveSmallIntegerField(
        blank=True,
        null=True,
        validators=[MaxValueValidator(100)],
        help_text="Optional excerpt limit in terms of percentage (%) of an article. Leave empty if no limit.",
    )

    authorization_method = models.IntegerField(
        choices=AUTHORIZATION_METHODS,
        default=EMAIL,
        help_text="Which authorization method does this partner use? "
        "'Email' means the accounts are set up via email, and is the default. "
        "Select 'Access Codes' if we send individual, or group, login details "
        "or access codes. 'Proxy' means access delivered directly via EZProxy, "
        "and Library Bundle is automated proxy-based access. 'Link' is if we "
        "send users a URL to use to create an account.",
    )

    mutually_exclusive = models.BooleanField(
        blank=True,
        null=True,
        default=None,
        help_text="If True, users can only apply for one Stream at a time "
        "from this Partner. If False, users can apply for multiple Streams at "
        "a time. This field must be filled in when Partners have multiple "
        "Streams, but may be left blank otherwise.",
    )

    languages = models.ManyToManyField(
        Language,
        blank=True,
        help_text="Select all languages in which this partner publishes "
        "content.",
    )

    account_length = models.DurationField(
        blank=True,
        null=True,
        help_text="The standard length of an access grant from this Partner. "
        "Entered as &ltdays hours:minutes:seconds&gt.",
    )

    # New tag model that uses JSONField instead of Taggit to make tags translatable
    new_tags = models.JSONField(
        null=True,
        default=None,
        blank=True,
        help_text="Tag must be a valid JSON schema. "
        "Tag should be in the form of tag-name_tag.",
    )

    # Non-universal form fields
    # --------------------------------------------------------------------------
    # Some fields are required by all resources for all access grants.
    # Some fields are only required by some resources. This is where we track
    # whether *this* resource requires those optional fields.

    registration_url = models.URLField(
        blank=True,
        null=True,
        help_text="Link to registration page. Required if users must sign up "
        "on the partner's website in advance; optional otherwise.",
    )
    real_name = models.BooleanField(
        default=False,
        help_text="Mark as true if this partner requires applicant names.",
    )
    country_of_residence = models.BooleanField(
        default=False,
        help_text="Mark as true if this partner requires applicant countries "
        "of residence.",
    )
    specific_title = models.BooleanField(
        default=False,
        help_text="Mark as true if this partner requires applicants to "
        "specify the title they want to access.",
    )
    specific_stream = models.BooleanField(
        default=False,
        help_text="Mark as true if this partner requires applicants to "
        "specify the database they want to access.",
    )
    occupation = models.BooleanField(
        default=False,
        help_text="Mark as true if this partner requires applicants to "
        "specify their occupation.",
    )
    affiliation = models.BooleanField(
        default=False,
        help_text="Mark as true if this partner requires applicants to "
        "specify their institutional affiliation.",
    )
    agreement_with_terms_of_use = models.BooleanField(
        default=False,
        help_text="Mark as true if this partner requires applicants to agree "
v.undef = self.undef v.tcount = t if self.dtype != 'station': v.ycount = y v.xcount = x # if v.storage in ['99', '0']: if v.storage in ['99', '0', '00', '000', '1', '11', '111']: v.strPos = vs.zcount * self.zRecLength + vs.strPos type1 = True elif v.storage == '-1,20': v.strPos = vs.zcount * self.zRecLength * t + vs.strPos type2 = True else: type3 = True if not type3 and type1 == type2: raise Exception('storage type should be the same') self.totalZCount += v.zcount self.vdef[i] = v self.tRecLength = self.zRecLength * self.totalZCount if fileContent[start + vnum].strip().lower() != 'endvars': raise Exception('endvars is expected') def _get_template_format(self, part): """ Get time format string. See the following URL for reference: http://cola.gmu.edu/grads/gadoc/templates.html %x1 1 digit decade %x3 3 digit decade %y2 2 digit year %y4 4 digit year %m1 1 or 2 digit month %m2 2 digit month (leading zero if needed) %mc 3 character month abbreviation %d1 1 or 2 digit day %d2 2 digit day (leading zero if needed) %h1 1 or 2 digit hour %h2 2 digit hour %h3 3 digit hour (e.g., 120 or 012) %n2 2 digit minute; leading zero if needed %f2 2 digit forecast hour; leading zero if needed; more digits added for hours >99; hour values increase indefinitely %f3 3 digit forecast hour; leading zeros if needed; more digits added for hours >999; hour values increase indefinitely %fn2 2 digit forecast minute; leading zero if needed; more digits added for minutes > 99; minute values increase indefinitely (2.0.a9+) %fhn forecast time expressed in hours and minutes (hhnn) where minute value (nn) is always <=59 and hour value (hh) increases indefinitely. If hh or nn are <=9, they are padded with a 0, so they are always at least 2 digits; more digits added for hours >99. (2.0.a9+) %fdhn forecast time expressed in days, hours, and minutes (ddhhnn) where minute value (nn) is always <=59, hour value (hh) is always <=23 and day value (dd) increases indefinitely. 
If dd, hh, or nn are <=9, they are padded with a 0 so they are always at least 2 digits; more digits added for days >99. (2.0.a9+) %j3 3 digit julian day (day of year) (2.0.a7+) %t1 1 or 2 digit time index (file names contain number sequences that begin with 1 or 01) (2.0.a7+) %t2 2 digit time index (file names contain number sequences that begin with 01) (2.0.a7+) %t3 3 digit time index (file names contain number sequences that begin with 001) (2.0.a7+) %t4 4 digit time index (file names contain number sequences that begin with 0001) (2.0.a8+) %t5 5 digit time index (file names contain number sequences that begin with 00001) (2.0.a8+) %t6 6 digit time index (file names contain number sequences that begin with 000001) (2.0.a8+) %tm1 1 or 2 digit time index (file names contain number sequences that begin with 0 or 00) (2.0.a7+) %tm2 2 digit time index (file names contain number sequences that begin with 00) (2.0.a7+) %tm3 3 digit time index (file names contain number sequences that begin with 000) (2.0.a7+) %tm4 4 digit time index (file names contain number sequences that begin with 0000) (2.0.a8+) %tm5 5 digit time index (file names contain number sequences that begin with 00000) (2.0.a8+) %tm6 6 digit time index (file names contain number sequences that begin with 000000) (2.0.a8+) Parameters ---------- part : str A string in the above format started with %. 
Returns ------- re : str A string represents the format in python datetime """ if part == '%y2': return '%y' elif part == '%y4': return '%Y' elif part == '%m1': return '%m' elif part == '%m2': return '%m' elif part == '%mc': return '%b' elif part == '%d1': return '%d' elif part == '%d2': return '%d' elif part == '%h1': return '%H' elif part == '%h2': return '%H' elif part == '%n2': return '%M' elif part in ['%f3', '%f2']: # this is not supported by strftime() return '_miniufo_' + part[1:] else: raise Exception('unsupported format: ' + part) def _replace_forecast_template(self, fname, l): """ Replace forecast str %f as a template in dset. Parameters ---------- fname: str A given string of binary file. l: int Index of file in a template. Returns ------- re : str A string after replacing the %f template. """ if fname.find('_miniufo_f3') != -1: dt_h = self.incre.astype('timedelta64[s]') / \ np.timedelta64(1, 'h') fname = fname.replace('_miniufo_f3', '{0:03d}'.format( int(dt_h * l))) if fname.find('_miniufo_f2') != -1: dt_h = self.incre.astype('timedelta64[s]') / \ np.timedelta64(1, 'h') fname = fname.replace('_miniufo_f2', '{0:02d}'.format( int(dt_h * l))) return fname def _split_by_len(self, s, size): """ Split a string by a given size. Parameters ---------- s : str A given string. size : int A given size. Returns ------- re : list A list contains the splitted strings. """ chunks = len(s) return [s[i:i + size] for i in range(0, chunks, size)] def _times_to_array(self, strTime, incre, tnum): """ Convert GrADS time string of strart time and increment to an array of numpy.datetime64. Parameters ---------- strTime : str Grads start time e.g., 00:00z01Jan2000. incre : str Grads time increment in str format e.g., 1dy. tnum : int Grads time increment in str format e.g., 1dy. 
Returns ---------- re : numpy array of datetime64 """ if 'mo' in incre: start = GrADStime_to_datetime(strTime) lst = [] for l in range(tnum): y, m = start.year, start.month y, m = y+int((m+l-1)/12), int((m+l-1)%12)+1 lst.append(start.replace(year=y, month=m)) return np.asarray(lst, dtype='datetime64[s]') elif 'yr' in incre: start = GrADStime_to_datetime(strTime) lst = [] for l in range(tnum): y = start.year + l lst.append(start.replace(year=y)) return np.asarray(lst, dtype='datetime64[s]') else: start = GrADStime_to_datetime64(strTime) intv = GrADS_increment_to_timedelta64(incre) return np.arange(start, start + intv * tnum, intv) def __repr__(self): """ Print this class as a string. """ vdef = np.array(self.vdef) pdef = self.pdef.proj if self.pdef is not None else '' return \ ' dsetPath: ' + str(self.dsetPath) + '\n'\ ' descPath: ' + str(self.descPath) + '\n'\ ' indxPath: ' + str(self.indxPath) + '\n'\ ' stnmPath: ' + str(self.stnmPath) + '\n'\ ' title: ' + str(self.title) + '\n'\ ' undef: ' + str(self.undef) + '\n'\ ' zrev: ' + str(self.zrev) + '\n'\ ' yrev: ' + str(self.yrev) + '\n'\ ' dtype: ' + str(self.dtype) + '\n'\ ' template: ' + str(self.template) + '\n'\ ' periodicX: ' + str(self.periodicX) + '\n'\ ' cal365Days: ' + str(self.cal365Days)+ '\n'\ ' sequential: ' + str(self.sequential)+ '\n'\ ' byteOrder: ' + str(self.byteOrder) + '\n'\ ' xdef: ' + str(self.xdef) + '\n'\ ' ydef: ' + str(self.ydef) + '\n'\ ' zdef: ' + str(self.zdef) + '\n'\ ' tdef: ' + str(self.tdef) + '\n'\ ' pdef: ' + str(pdef) + '\n'\ ' vdef: ' + str(vdef) class PDEF(object): """ PDEF class. Parse necessary info in PDEF. Reference: http://cola.gmu.edu/grads/gadoc/pdef.html """ def __init__(self, oneline): """ Constructor. Parameters ---------- oneline : str The ASCII line of PDEF in ctl file. 
""" lineLower = oneline.lower() if 'nps' in lineLower or 'sps' in lineLower: token = lineLower.split() if len(token) != 8: raise Exception('not enough tokens for PDEF, ' + 'expected 8 but found ' + str(len(token))) self.isize = int (token[1]) # size of native grid in x direction self.jsize = int (token[2]) # size of native grid in y direction self.proj = (token[3]) # type of projection self.ipole = int (token[4]) # i-coord of pole ref to ll corner self.jpole = int (token[5]) # j-coord of pole ref to ll corner self.lonref = float(token[6]) # reference longitude self.gridinc = float(token[7]) # distance between gripoints in km elif 'lccr' in lineLower or 'lcc' in lineLower: token = lineLower.split() if len(token) != 13: raise Exception('not enough tokens for PDEF, ' + 'expected 13 but found ' +
# Repo: hmarko75/netapp-dataops-toolkit
"""NetApp DataOps Toolkit for Traditional Environments import module.

This module provides the public functions available to be imported directly
by applications using the import method of utilizing the toolkit.
"""

import base64
import functools
import json
import os
import re
import subprocess
import sys
import time
import warnings
import datetime
from concurrent.futures import ThreadPoolExecutor

import boto3
from botocore.client import Config as BotoConfig
from netapp_ontap import config as netappConfig
from netapp_ontap.error import NetAppRestError
from netapp_ontap.host_connection import HostConnection as NetAppHostConnection
from netapp_ontap.resources import Flexcache as NetAppFlexCache
from netapp_ontap.resources import SnapmirrorRelationship as NetAppSnapmirrorRelationship
from netapp_ontap.resources import SnapmirrorTransfer as NetAppSnapmirrorTransfer
from netapp_ontap.resources import Snapshot as NetAppSnapshot
from netapp_ontap.resources import Volume as NetAppVolume
from netapp_ontap.resources import ExportPolicy as NetAppExportPolicy
from netapp_ontap.resources import SnapshotPolicy as NetAppSnapshotPolicy
from netapp_ontap.resources import CLI as NetAppCLI
import pandas as pd
import requests
from tabulate import tabulate
import yaml

__version__ = "2.1.0"


# Using this decorator in lieu of using a dependency to manage deprecation
def deprecated(func):
    """Mark *func* as deprecated.

    Wraps *func* so that every call emits a ``DeprecationWarning`` naming the
    function, then delegates to the original. ``stacklevel=2`` points the
    warning at the caller rather than at this wrapper.
    """
    @functools.wraps(func)
    def warned_func(*args, **kwargs):
        warnings.warn("Function {} is deprecated.".format(func.__name__),
                      category=DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    return warned_func


class CloudSyncSyncOperationError(Exception):
    """Error that will be raised when a Cloud Sync sync operation fails"""
    pass


class ConnectionTypeError(Exception):
    """Error that will be raised when an invalid connection type is given"""
    pass


class InvalidConfigError(Exception):
    """Error that will be raised when the config file is invalid or missing"""
    pass
class InvalidSnapMirrorParameterError(Exception) : """Error that will be raised when an invalid SnapMirror parameter is given""" pass class InvalidSnapshotParameterError(Exception): """Error that will be raised when an invalid snapshot parameter is given""" pass class InvalidVolumeParameterError(Exception): """Error that will be raised when an invalid volume parameter is given""" pass class MountOperationError(Exception): """Error that will be raised when a mount operation fails""" pass class SnapMirrorSyncOperationError(Exception) : """Error that will be raised when a SnapMirror sync operation fails""" pass class APIConnectionError(Exception) : '''Error that will be raised when an API connection cannot be established''' pass def _print_api_response(response: requests.Response): print("API Response:") print("Status Code: ", response.status_code) print("Header: ", response.headers) if response.text: print("Body: ", response.text) def _download_from_s3(s3Endpoint: str, s3AccessKeyId: str, s3SecretAccessKey: str, s3VerifySSLCert: bool, s3CACertBundle: str, s3Bucket: str, s3ObjectKey: str, localFile: str, print_output: bool = False): # Instantiate S3 session try: s3 = _instantiate_s3_session(s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, s3CACertBundle=s3CACertBundle, print_output=print_output) except Exception as err: if print_output: print("Error: S3 API error: ", err) raise APIConnectionError(err) if print_output: print( "Downloading object '" + s3ObjectKey + "' from bucket '" + s3Bucket + "' and saving as '" + localFile + "'.") # Create directories that don't exist if localFile.find(os.sep) != -1: dirs = localFile.split(os.sep) dirpath = os.sep.join(dirs[:len(dirs) - 1]) if not os.path.exists(dirpath): os.makedirs(dirpath) # Download the file try: s3.Object(s3Bucket, s3ObjectKey).download_file(localFile) except Exception as err: if print_output: print("Error: S3 API error: ", err) raise 
APIConnectionError(err) def _get_cloud_central_access_token(refreshToken: str, print_output: bool = False) -> str: # Define parameters for API call url = "https://netapp-cloud-account.auth0.com/oauth/token" headers = { "Content-Type": "application/json" } data = { "grant_type": "refresh_token", "refresh_token": refreshToken, "client_id": "Mu0V1ywgYteI6w1MbD15fKfVIUrNXGWC" } # Call API to optain access token response = requests.post(url=url, headers=headers, data=json.dumps(data)) # Parse response to retrieve access token try: responseBody = json.loads(response.text) accessToken = responseBody["access_token"] except: errorMessage = "Error obtaining access token from Cloud Sync API" if print_output: print("Error:", errorMessage) _print_api_response(response) raise APIConnectionError(errorMessage, response) return accessToken def _get_cloud_sync_access_parameters(refreshToken: str, print_output: bool = False) -> (str, str): try: accessToken = _get_cloud_central_access_token(refreshToken=refreshToken, print_output=print_output) except APIConnectionError: raise # Define parameters for API call url = "https://cloudsync.netapp.com/api/accounts" headers = { "Content-Type": "application/json", "Authorization": "Bearer " + accessToken } # Call API to obtain account ID response = requests.get(url=url, headers=headers) # Parse response to retrieve account ID try: responseBody = json.loads(response.text) accountId = responseBody[0]["accountId"] except: errorMessage = "Error obtaining account ID from Cloud Sync API" if print_output: print("Error:", errorMessage) _print_api_response(response) raise APIConnectionError(errorMessage, response) # Return access token and account ID return accessToken, accountId def _instantiate_connection(config: dict, connectionType: str = "ONTAP", print_output: bool = False): if connectionType == "ONTAP": ## Connection details for ONTAP cluster try: ontapClusterMgmtHostname = config["hostname"] ontapClusterAdminUsername = config["username"] 
ontapClusterAdminPasswordBase64 = config["password"] verifySSLCert = config["verifySSLCert"] except: if print_output: _print_invalid_config_error() raise InvalidConfigError() # Decode base64-encoded password ontapClusterAdminPasswordBase64Bytes = ontapClusterAdminPasswordBase64.encode("ascii") ontapClusterAdminPasswordBytes = base64.b64decode(ontapClusterAdminPasswordBase64Bytes) ontapClusterAdminPassword = <PASSWORD>.decode("ascii") # Instantiate connection to ONTAP cluster netappConfig.CONNECTION = NetAppHostConnection( host=ontapClusterMgmtHostname, username=ontapClusterAdminUsername, password=<PASSWORD>, verify=verifySSLCert ) else: raise ConnectionTypeError() def _instantiate_s3_session(s3Endpoint: str, s3AccessKeyId: str, s3SecretAccessKey: str, s3VerifySSLCert: bool, s3CACertBundle: str, print_output: bool = False): # Instantiate session session = boto3.session.Session(aws_access_key_id=s3AccessKeyId, aws_secret_access_key=s3SecretAccessKey) config = BotoConfig(signature_version='s3v4') if s3VerifySSLCert: if s3CACertBundle: s3 = session.resource(service_name='s3', endpoint_url=s3Endpoint, verify=s3CACertBundle, config=config) else: s3 = session.resource(service_name='s3', endpoint_url=s3Endpoint, config=config) else: s3 = session.resource(service_name='s3', endpoint_url=s3Endpoint, verify=False, config=config) return s3 def _print_invalid_config_error() : print("Error: Missing or invalid config file. 
Run `netapp_dataops_cli.py config` to create config file.") def _retrieve_config(configDirPath: str = "~/.netapp_dataops", configFilename: str = "config.json", print_output: bool = False) -> dict: configDirPath = os.path.expanduser(configDirPath) configFilePath = os.path.join(configDirPath, configFilename) try: with open(configFilePath, 'r') as configFile: # Read connection details from config file; read into dict config = json.load(configFile) except: if print_output: _print_invalid_config_error() raise InvalidConfigError() return config def _retrieve_cloud_central_refresh_token(print_output: bool = False) -> str: # Retrieve refresh token from config file try: config = _retrieve_config(print_output=print_output) except InvalidConfigError: raise try: refreshTokenBase64 = config["cloudCentralRefreshToken"] except: if print_output: _print_invalid_config_error() raise InvalidConfigError() # Decode base64-encoded refresh token refreshTokenBase64Bytes = refreshTokenBase64.encode("ascii") refreshTokenBytes = base64.b64decode(refreshTokenBase64Bytes) refreshToken = refreshTokenBytes.decode("ascii") return refreshToken def _retrieve_s3_access_details(print_output: bool = False) -> (str, str, str, bool, str): # Retrieve refresh token from config file try: config = _retrieve_config(print_output=print_output) except InvalidConfigError: raise try: s3Endpoint = config["s3Endpoint"] s3AccessKeyId = config["s3AccessKeyId"] s3SecretAccessKeyBase64 = config["s3SecretAccessKey"] s3VerifySSLCert = config["s3VerifySSLCert"] s3CACertBundle = config["s3CACertBundle"] except: if print_output: _print_invalid_config_error() raise InvalidConfigError() # Decode base64-encoded refresh token s3SecretAccessKeyBase64Bytes = s3SecretAccessKeyBase64.encode("ascii") s3SecretAccessKeyBytes = base64.b64decode(s3SecretAccessKeyBase64Bytes) s3SecretAccessKey = s3SecretAccessKeyBytes.decode("ascii") return s3Endpoint, s3AccessKeyId, s3SecretAccessKey, s3VerifySSLCert, s3CACertBundle def 
_upload_to_s3(s3Endpoint: str, s3AccessKeyId: str, s3SecretAccessKey: str, s3VerifySSLCert: bool, s3CACertBundle: str, s3Bucket: str, localFile: str, s3ObjectKey: str, s3ExtraArgs: str = None, print_output: bool = False): # Instantiate S3 session try: s3 = _instantiate_s3_session(s3Endpoint=s3Endpoint, s3AccessKeyId=s3AccessKeyId, s3SecretAccessKey=s3SecretAccessKey, s3VerifySSLCert=s3VerifySSLCert, s3CACertBundle=s3CACertBundle, print_output=print_output) except Exception as err: if print_output: print("Error: S3 API error: ", err) raise APIConnectionError(err) # Upload file if print_output: print("Uploading file '" + localFile + "' to bucket '" + s3Bucket + "' and applying key '" + s3ObjectKey + "'.") try: if s3ExtraArgs: s3.Object(s3Bucket, s3ObjectKey).upload_file(localFile, ExtraArgs=json.loads(s3ExtraArgs)) else: s3.Object(s3Bucket, s3ObjectKey).upload_file(localFile) except Exception as err: if print_output: print("Error: S3 API error: ", err) raise APIConnectionError(err) def _convert_bytes_to_pretty_size(size_in_bytes: str, num_decimal_points: int = 2) -> str : # Convert size in bytes to "pretty" size (size in KB, MB, GB, or TB) prettySize = float(size_in_bytes) / 1024 if prettySize >= 1024: prettySize = float(prettySize) / 1024 if prettySize >= 1024: prettySize = float(prettySize) / 1024 if prettySize >= 1024: prettySize = float(prettySize) / 1024 prettySize = round(prettySize, 2) prettySize = str(prettySize) + "TB" else: prettySize = round(prettySize, 2) prettySize = str(prettySize) + "GB" else: prettySize = round(prettySize, 2) prettySize = str(prettySize) + "MB" else: prettySize = round(prettySize, 2) prettySize = str(prettySize) + "KB" return prettySize # # Public importable functions specific to the traditional package # def clone_volume(new_volume_name: str, source_volume_name: str, cluster_name: str = None, source_snapshot_name: str = None, source_svm: str = None, target_svm: str = None, export_hosts: str = None, export_policy: str = None, split: 
bool = False, unix_uid: str = None, unix_gid: str = None, mountpoint: str = None, junction: str= None, readonly: bool = False, snapshot_policy: str = None, refresh: bool = False, svm_dr_unprotect: bool = False, print_output: bool = False): # Retrieve config details from config file try: config = _retrieve_config(print_output=print_output) except InvalidConfigError: raise try: connectionType = config["connectionType"] except: if print_output: _print_invalid_config_error() raise InvalidConfigError() if cluster_name: config["hostname"] = cluster_name if connectionType == "ONTAP": # Instantiate connection to ONTAP cluster try: _instantiate_connection(config=config, connectionType=connectionType, print_output=print_output) except InvalidConfigError: raise # Retrieve values from config file if not passed into function try: sourcesvm = config["svm"] if source_svm: sourcesvm = source_svm targetsvm = sourcesvm if target_svm: targetsvm = target_svm if not unix_uid: unix_uid = config["defaultUnixUID"] if not unix_gid: unix_gid = config["defaultUnixGID"] except Exception as e: if print_output: print(e) _print_invalid_config_error() raise InvalidConfigError() # Check unix uid for validity try: unix_uid = int(unix_uid) except: if print_output: print("Error: Invalid unix uid specified. Value be an integer. Example: '0' for root user.") raise InvalidVolumeParameterError("unixUID") # Check unix gid for validity try: unix_gid = int(unix_gid) except: if print_output: print("Error:
<gh_stars>1-10 #!/usr/bin/env python #coding: utf-8 # === Back-end server for Pline web application === # http://plineapp.org/pline # Compatible with Python 2.7+ and Python 3+ # <NAME> (andres.veidenberg[at]helsinki.fi), University of Helsinki, 2019 # Distributed under the MIT license [https://opensource.org/licenses/MIT] #import some standard libraries import argparse import cgi try: #if python 3 import configparser except ImportError: #rename for python 2 import ConfigParser as configparser from glob import glob import json import logging import logging.handlers import multiprocessing import os try: #python 3 import queue except ImportError: #python 2 import Queue as queue import re import resource import shlex import shutil import smtplib import socket from subprocess import Popen, PIPE import sys import tempfile import threading import time try: #python 3 from urllib.request import urlopen from urllib.parse import unquote from urllib.error import URLError except ImportError: #python 2 from urllib import unquote, urlopen from urllib2 import URLError import webbrowser try: #python 3 from http.server import BaseHTTPRequestHandler, HTTPServer from socketserver import ThreadingMixIn except ImportError: #python 2 from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer from SocketServer import ThreadingMixIn #Define globals serverpath = os.path.realpath(__file__) plinedir = os.path.dirname(serverpath) os.chdir(plinedir) #configuration file parser def getconf(opt='', vtype=''): section = 'server_settings' try: if(vtype == 'int'): return config.getint(section, opt) elif(vtype == 'bool'): return config.getboolean(section, opt) else: val = config.get(section, opt) return '' if(vtype == 'file' and '.' 
not in val) else val except configparser.Error: return 0 if(vtype == 'int') else '' #Set globals from config file config = configparser.ConfigParser() config.read('server_settings.cfg') datadir = os.path.join(plinedir, (getconf('datadir') or 'analyses')) if not os.path.exists(datadir): os.makedirs(datadir, 0o775) tempdir = os.path.join(plinedir, 'downloads') #dir for temporary zip files if not os.path.exists(tempdir): os.makedirs(tempdir, 0o775) plugindir = os.path.join(plinedir, (getconf('plugindir') or 'plugins')) if not os.path.exists(plugindir): os.makedirs(plugindir, 0o775) serverport = getconf('serverport', 'int') or 8000 num_workers = getconf('workerthreads', 'int') or multiprocessing.cpu_count() timelimit = getconf('timelimit', 'int') datalimit = getconf('datalimit', 'int') filelimit = getconf('filelimit', 'int') logtofile = getconf('logtofile','bool') debug = getconf('debug','bool') local = getconf('local','bool') gmail = getconf('gmail') openbrowser = getconf('openbrowser', 'bool') hostname = getconf('hostname') or '' dataids = getconf('dataids', 'bool') dataexpire = getconf('dataexpire', 'int') expiremsg = getconf('expiremsg', 'bool') prev_cleanup = '' #last datafiles cleanup date job_queue = None #queue for running programs #set up logging class TimedFileHandler(logging.handlers.TimedRotatingFileHandler): def _open(self): oldmask = os.umask(0o002) #make the logfile group-writable fstream = logging.handlers.TimedRotatingFileHandler._open(self) os.umask(oldmask) #restore default mask return fstream def start_logging(): if logtofile: loghandler = TimedFileHandler('server.log', when='d', interval=1, backupCount=1) else: loghandler = logging.StreamHandler() loglevel = logging.DEBUG if(debug) else logging.INFO loghandler.setLevel(loglevel) loghandler.setFormatter(logging.Formatter('%(asctime)s - %(message)s', '%d.%m.%y %H:%M:%S')) logging.getLogger().setLevel(loglevel) logging.getLogger().addHandler(loghandler) def info(msg): logging.info(msg) ### Utility 
functions ### #check if a filepath is confied to the served directory def apath(path, d=datadir): path = os.path.realpath(path) testdir = os.path.realpath(d) if(d is 'skip' or path.startswith(testdir)): return path else: raise IOError('Restricted path: '+path) #join paths with confinment check def joinp(*args, **kwargs): confinedir = kwargs.pop('d', datadir) return apath(os.path.join(*args), d=confinedir) #write data to file def write_file(filepath, filedata='', checkdata=False): if(checkdata and not filedata): return False else: f = open(filepath, 'wb') f.write(filedata) f.close() return os.path.basename(f.name) #send a notification email def sendmail(subj='Email from Pline', msg='', to=''): if not gmail: return 'Failed: sendmail(): no gmail user' if not msg or not to: return 'Failed: sendmail(): message or address missing' global hostname #add footer to the message msg += '\r\n\r\n-------------------------------------------------\r\n' msg += 'Message sent by Pline (http://wasabiapp.org/pline) from '+hostname+'\r\n\r\n' #send the message guser, gpass = gmail.split(':') if '@' not in guser: guser += '@gmail.com' if guser and gpass and '@' in to: try: gserver = smtplib.SMTP('smtp.gmail.com:587') gserver.ehlo() gserver.starttls() gserver.login(guser,gpass) mailstr = '\r\n'.join(['From: '+guser, 'To: '+to, 'Subject: '+subj, '', msg]) gserver.sendmail(guser, [to], mailstr) gserver.quit() return 'Sent' except: return 'Failed: sendmail()' else: return 'Failed: sendmail(): faulty user or address' #send email with job data download link def send_job_done(jobid): if not jobid: return md = Metadata(jobid).metadata if 'email' not in md: return msg = '''This is just a notification that your pipeline ({name}) has finished. 
You can download the results from http://{host}?data={jobid} '''.format(name=md.name, host=hostname, jobid=jobid) sendmail('Pline pipeline finished', msg, md.email) #remove obsolete data files def cleanup(): global prev_cleanup #previous cleanup date def oversized(): #check datadir size if datalimit and getsize(datadir) > datalimit*1000: return True return False osize = oversized() today = time.strftime("%d%m%y") if osize or prev_cleanup is not today: #throttle: 1 cleanup/day try: for filename in os.listdir(tempdir): #remove temp. download files filepath = os.path.join(tempdir, filename) if os.path.isfile(filename): os.remove(filename) if dataexpire or osize: #remove overflow/expired task files for dirname in os.listdir(datadir): dirpath = os.path.join(datadir, dirname) metafile = os.path.join(dirpath, Metadata.FILE) if(not os.path.isdir(dirpath) or not os.path.isfile(metafile)): continue md = Metadata(metafile) if(md['keepData']): continue #keep flagged data dirs edittime = os.path.getmtime(metafile) dirage = (time.time()-edittime)//86400 #file age in days if(dirage > dataexpire or oversized()): #remove obsolete datadir job_queue.terminate(dirname) #might include a queued/running task dircount = len(sum([trio[1] for trio in os.walk(dirpath)],[])) #nr of subdirs shutil.rmtree(apath(dirpath, datadir)) info('Cleanup: removed data dir %s (%s analyses from %s days ago)' % (dirname, dircount, int(dirage))) elif(dirage == dataexpire-1 and expiremsg and gmail and md['email']): #send a reminder email msg = 'The result files from your program run is about to exire in 24h.\r\n' msg += 'You can download the files before the expiry date from: %s/%s' % (hostname, dirname) sendmail('Data expiry reminder', msg, md['email']) except (OSError, IOError) as why: logging.error('Data cleanup failed: '+str(why)) prev_cleanup = today #init dir for new program run def create_job_dir(name='analysis', d=datadir): dirpath = os.path.join(d, name) if(d == datadir and dataids): #use randomized 
# NOTE(review): this chunk begins mid-function (the enclosing `def` and its `if`
# branch are outside this chunk) and ends mid-`try` inside do_GET. Indentation
# below is reconstructed; verify against the original file.
# The orphaned words "root dirname" were almost certainly the tail of a comment
# in the original source (e.g. "#random root dirname"): ...root dirname
        dirpath = tempfile.mkdtemp(prefix='', dir=d)
        os.chmod(dirpath, 0o775)
        dirpath = os.path.join(dirpath, name)
    else:
        i = 2
        inputpath = dirpath
        while(os.path.isdir(dirpath)): #unique dirname check
            dirpath = inputpath + str(i)
            i += 1
        os.mkdir(dirpath)
        os.chmod(dirpath, 0o775)
    # presumably registers a metadata record for the new dir — Metadata is defined outside this chunk
    md = Metadata.create(dirpath)
    return dirpath

#get filesize of a file/dirpath
def getsize(start_path = datadir):
    # Walks start_path recursively and sums the byte sizes of all regular files.
    # Files that vanish or are unreadable mid-walk are skipped (OSError ignored).
    total_size = 0
    for dirpath, dirnames, filenames in os.walk(start_path):
        for f in filenames:
            fp = os.path.join(dirpath, f)
            try:
                total_size += os.path.getsize(fp)
            except OSError:
                pass
    return total_size

#handle client browser => Pline server requests
class plineServer(BaseHTTPRequestHandler):
    #disable console printout of server events
    def log_message(self, format, *args):
        return

    #send error response (and log error details)
    def sendError(self, errno=404, msg='', action='', skiplog=False):
        # NOTE(review): msg[0] raises IndexError when msg == '' — assumes callers
        # always pass a non-empty message; confirm before relying on defaults.
        if(not skiplog):
            logging.error('Error: request "'+action+'" => '+str(errno)+': '+msg)
            if(debug and action!='GET'):
                logging.exception('Error details: ')
        if(msg[0]!='{'):
            msg = '{"error":"'+msg+'"}' #add json padding
        self.send_error(errno, msg)

    #send OK response (status 200)
    def sendOK(self, msg='', size=0):
        # size>0 => binary payload headers; otherwise plain text.
        self.send_response(200)
        if size:
            self.send_header("Content-Type", "text/octet-stream")
            self.send_header("Content-Length", str(size))
            self.send_header("Cache-Control", "no-cache")
        else:
            self.send_header("Content-Type", "text/plain")
        self.end_headers()
        if msg:
            try:
                self.wfile.write(msg)
            except TypeError: #Python3: unicode => bytestr
                self.wfile.write(msg.encode())

    #serve files (GET requests)
    def do_GET(self):
        path = unquote(self.path)
        params = {}
        filename = ''
        filecontent = ''
        rootdir = plinedir
        logfile = False
        if path.startswith('/'):
            path = path[1:]
        path = path.replace('../','')  # crude path-traversal guard
        #parse GET params
        if('?' in path):
            url = path.split('?')
            path = url[0]
            try:
                # "k=v&k2" style query string => dict; bare keys map to ''
                params = dict([ (x.split('=') if '=' in x else (x,'')) for x in url[1].split('&') ])
            except ValueError: #faulty params format
                pass
        logging.debug("GET: %s" % (str(params)))
        #POST request mirrors
        postreq = ['checkserver', 'status', 'plugins', 'terminate', 'restart']
        for req in postreq:
            if req in params:
                # dispatch to the matching post_* handler method
                getattr(self, "post_"+req)(params[req])
                return
        #split path to dirpath and filename
        def splitpath(p=path):
            # Heuristic: last path segment containing '.' is the filename.
            fname = ''
            if('/' in p):
                parr = p.split('/')
                fname = parr.pop()
                if('.' not in fname):
                    parr.append(fname)
                    fname = ''
                p = '/'.join(parr)
            elif('.' in p):
                fname = p
                p = ''
            return (p, fname)
        try:
            #send a file
            if 'data' in params and params['data']: #from the data dir
                rootdir = datadir
                (path, filename) = splitpath(params['data'])
                if not filename: #(job files) direcotry requested: send as a zip archive
                    jobdir = os.path.join(rootdir, path)
                    jobroot = os.path.dirname(jobdir)
                    ziproot = os.path.basename(jobdir)
                    zipfile = os.path.join(tempdir, ziproot)
                    if os.path.isdir(jobdir):
                        filename = os.path.basename( shutil.make_archive(zipfile, 'zip', jobroot, ziproot) )
                        rootdir = tempdir
                        path = ''
                    else:
                        raise IOError('Datadir not found: '+params['data'])
            elif 'plugin' in params and params['plugin']: #from the plugins dir
                rootdir = plugindir
                (path, filename) = splitpath(params['plugin'])
                if(not filename):
                    filename = 'plugin.json'
            else: #from the server dir
                (path, filename) = splitpath()
                if(not filename):
                    filename = 'index.html'
            #set file content-type
            ctype = 'application/octet-stream'
            def ftype(f=filename):
                # Maps a filename extension to its MIME type; octet-stream fallback.
                ctypes = { 'text/css': ['css'], 'text/javascript': ['js'], 'text/html': ['htm', 'html'],
                           'application/json': ['json'], 'image/jpg': ['jpg', 'jpeg'], 'image/gif': ['gif'],
                           'image/png': ['png'] }
                for t in ctypes:
                    for ext in ctypes[t]:
                        if filename.endswith('.'+ext):
                            return t
                return 'application/octet-stream'
            if 'type' in params and params['type']:
                ctype = params['type']
            elif filename: #use file extension
                ctype = ftype(filename)
            if 'text' in ctype:
                ctype += '; charset=utf-8'
            #resolve filepath (with confinment check)
            fpath = joinp(rootdir, path, filename, d=rootdir)
            #send headers
            self.send_response(200)
            self.send_header("Content-Type", ctype)
            self.send_header("Content-Length", os.path.getsize(fpath))
            if('image' in ctype):
                self.send_header("Cache-Control", "max-age=300000")
            if(rootdir is not plinedir): #send as file download
                self.send_header("Content-Disposition", "attachment; filename="+filename)
            self.end_headers()
            # NOTE(review): chunk is truncated here — the file-read/response body and
            # the `except` clause for this `try` are outside this chunk.
            #read &
""" A program to generate a set of mapping tiles from GMT. Usage: make_gmt_tiles [-t] [-s <size>] [-v] <tile_dir> <stop_level> [<start_level>] where <size> is the tile width/height in pixels (default 256) <tile_dir> is the tile directory to create <stop_level> is the maximum level number to create <start_level> is the (optional) level to start at The -t option forces the use of topo data in the tiles. The -v option makes the process verbose. This program attempts to use more than one core, if available. You *must* have installed the GMT package (and data files) [http://gmt.soest.hawaii.edu/] as well as the GEBCO data file if you want oceanfloor topo [http://www.gebco.net/]. Note: this requires python 3.x. """ import sys import os import getopt import tempfile import shutil import pickle import multiprocessing import queue import traceback import datetime from PIL import Image import numofcpus # number of worker processes NumberOfWorkers = numofcpus.determineNumberOfCPUs() # where the GEBCO elevation file lives GEBCOElevationFile = '/home/r-w/GEBCO/gridone.nc' # default tile size, pixels DefaultTileSize = 256 # name of info file for each tileset TileInfoFilename = 'tile.info' # name of the 'missing tile' picture file MissingTilePic = 'missing_tile.png' # various colours and widths (empty string means default) PoliticalBorderColour = '255/0/0' PoliticalBorderWidth = '' WaterColourTuple = (254,254,255) WaterColour = '%s/%s/%s' % WaterColourTuple LandColourTuple = (253,233,174) LandColour = '%s/%s/%s' % LandColourTuple # dictionary mapping level to detail Level2Detail = {0: 'l', # low 1: 'l', 2: 'l', 3: 'i', # intermediate 4: 'i', 5: 'i', 6: 'h', # high 7: 'h', 8: 'h', 9: 'f', # full } class Worker(multiprocessing.Process): def __init__(self, work_queue, w_num, tmp_dir): # base class initialization multiprocessing.Process.__init__(self) # job management stuff self.work_queue = work_queue self.w_num = w_num self.tmp_dir = os.path.join(tmp_dir, '%02d' % w_num) 
self.kill_received = False # set up logging self.logfile = 'worker_%02d.log' % w_num self.logf = open(self.logfile, 'w') # our own handler for uncaught exceptions def excepthook(type, value, tb): msg = '\n' + '=' * 80 msg += '\nUncaught exception:\n' msg += ''.join(traceback.format_exception(type, value, tb)) msg += '=' * 80 + '\n' self.log(msg) sys.exit(1) # plug our handler into the python system self.save_excepthook = sys.excepthook sys.excepthook = excepthook self.log('Started, UseTopo=%s' % str(UseTopo)) def log(self, msg): # get time to = datetime.datetime.now() hr = to.hour min = to.minute sec = to.second msec = to.microsecond msg = ('%02d:%02d:%02d.%06d|%s\n' % (hr, min, sec, msec, msg)) self.logf.write('%s\n' % msg) self.logf.flush() def run(self): self.log('%d starting\n' % self.w_num) while not self.kill_received: # get a task try: (tile_file, tile_size, d_opt, r_opt) = self.work_queue.get(timeout=1) except queue.Empty as e: self.log('Empty queue: %s' % str(e)) break # the actual processing - pathnames for temp files ps_file = os.path.join(self.tmp_dir, 'tile.ps') png_file = os.path.join(self.tmp_dir, 'tile.png') # draw the coastline tiles if UseTopo: cmd = ('gmt grdimage %s %s -JX17.5d -fig -P -C%s -I%s -K > %s' % (GEBCOElevationFile, r_opt, CptFile, GridFile, ps_file)) self.do_cmd(cmd) cmd = ('gmt pscoast -P %s -JX17.5d %s ' '-N1/%s,%s -N3/%s,%s -W0.5 -S%s -G%s -O >> %s' % (r_opt, d_opt, PoliticalBorderWidth, PoliticalBorderColour, PoliticalBorderWidth, PoliticalBorderColour, WaterColour, LandColour, ps_file)) self.do_cmd(cmd) else: cmd = ('gmt pscoast -P %s -JX17.5d %s ' '-N1/%s,%s -N3/%s,%s -W0.5 -S%s -G%s > %s' % (r_opt, d_opt, PoliticalBorderWidth, PoliticalBorderColour, PoliticalBorderWidth, PoliticalBorderColour, WaterColour, LandColour, ps_file)) self.do_cmd(cmd) cmd = 'gmt psconvert %s -A -Tg' % ps_file self.do_cmd(cmd) # cmd = ('gmt convert -quality 100 -resize %dx%d! 
%s %s' # % (tile_size, tile_size, png_file, tile_file)) cmd = ('gmt convert %s %s' % (png_file, tile_file)) self.do_cmd(cmd) self.log('stopping') def do_cmd(self, cmd): """Execute a command. cmd the command string to execute """ self.log(cmd) if Verbose: print(cmd) sys.stdout.flush() sys.stderr.flush() res = os.system(cmd) if res: self.log('Error doing above command: res=%d' % res) def do_cmd(cmd): if Verbose: print(cmd) sys.stdout.flush() sys.stderr.flush() res = os.system(cmd) def make_gmt_tiles(tile_dir, min_level, max_level, tile_size): """Make a set of mapping tiles. tile_dir the directory for output tilesets min_level minimum tileset level number to create max_level maximum tileset level number to create tile_size size of tiles (width & height) in pixels """ # generate the topo grid file, if required if UseTopo: global GridFile, CptFile CptFile = './bath.cpt' cmd = 'gmt makecpt -Cglobe > %s' % CptFile do_cmd(cmd) print(cmd) GridFile = './IO_int.grd' cmd = 'gmt grdgradient %s -A0 -Nt -G%s' % (GEBCOElevationFile, GridFile) do_cmd(cmd) print(cmd) # prepare queue for workers work_queue = multiprocessing.Queue() # create a temporary working directory tmp_dir = tempfile.mkdtemp(prefix='make_gmt_tiles_') for i in range(NumberOfWorkers): os.mkdir(os.path.join(tmp_dir, '%02d' % i)) # define the extent of the world we are mapping # this is the whole world, with the break through South America # so we have the South Sandwich Islands and points east in one piece # (W, E, S, N) extent = (-65.0, 295.0, -66.66, 66.66) # delete the output directory if it exists before recreating #shutil.rmtree(tile_dir, ignore_errors=True) try: os.mkdir(tile_dir) except OSError: pass # ignore error if directory already exists # create top-level info file - contains extent info_file = os.path.join(tile_dir, TileInfoFilename) fd = open(info_file, 'wb') obj = (extent, (DefaultTileSize, DefaultTileSize), WaterColourTuple, LandColourTuple) pickle.dump(obj, fd) fd.close() # generate each required 
tileset level for level in range(min_level, max_level+1): make_tileset(work_queue, tmp_dir, tile_dir, extent, level, tile_size) # start the workers and wait until all finished workers = [] for i in range(NumberOfWorkers): worker = Worker(work_queue, i, tmp_dir) worker.start() workers.append(worker) for worker in workers: worker.join() # destroy the temporary working directory shutil.rmtree(tmp_dir, ignore_errors=True) def make_tileset(q, tmp_dir, tile_dir, extent, level, tile_size): """Make one tileset directory. q work queue tmp_dir temporary scratch directory tile_dir path to the base of the tileset directories extent global map extent (w, e, s, n) level the level of the tileset to generate tile_size size (width & height) of each tile in set """ # unpack the extent (w, e, s, n) = extent # get deltas for lon and lat d_lon = (e - w) / pow(2, level) / 2 d_lat = (n - s) / pow(2, level) # figure out pixels/degree (for info file) ppd_x = tile_size / d_lon ppd_y = tile_size / d_lat # this should give us number of steps in X and Y directions num_tiles_x = int((e - w) / d_lon) num_tiles_y = int((n - s) / d_lat) # create the actual tileset directory tile_dir = os.path.join(tile_dir, '%02d' % level) try: os.mkdir(tile_dir) except OSError: pass # ignore error if directory already exists # calculate the detail appropriate for the level d_opt = '-D%s' % Level2Detail.get(level, 'f') w_num = 0 # step through each tile for x in range(num_tiles_x): for y in range(num_tiles_y): # get a worker number w_num += 1 if w_num > NumberOfWorkers: w_num = 0 # get output tile filename tile_file = os.path.join(tile_dir, 'tile_%d_%d.png' % (x, y)) # figure out -R bits r_w = w + x * d_lon r_e = r_w + d_lon r_n = n - y * d_lat r_s = r_n - d_lat r_opt = '-R%f/%f/%f/%f' % (r_w, r_e, r_s, r_n) # prepare data on queue q.put((tile_file, tile_size, d_opt, r_opt)) # now create a tileset info file info_file = os.path.join(tile_dir, TileInfoFilename) obj = (num_tiles_x, num_tiles_y, ppd_x, ppd_y) with 
open(info_file, 'wb') as fd: pickle.dump(obj, fd) ################################################################################ # Program start ################################################################################ def usage(msg=None): if msg: print(msg+'\n') print(__doc__) # module docstring used def main(argv=None): global Verbose global UseTopo Verbose = False UseTopo = False # parse the command line parameters try: opts, args = getopt.getopt(sys.argv[1:], 'hs:tv', ['help', 'size=', 'topo', 'verbose']) except getopt.error as msg: usage() return 1 # get all the options tile_size = DefaultTileSize Verbose = False for (opt, param) in opts: if opt in ['-s', '--size']: try: tile_size = int(param) except ValueError: usage('Tile size must be an integer > 0') return 1 if tile_size < 1: usage('Tile size must be an integer > 0') return 1 elif opt in ['-t', '--topo']: UseTopo = True elif opt in ['-v', '--verbose']: Verbose = True elif opt in ['-h', '--help']: usage() return 0 # check we have required params if len(args) != 2 and len(args) != 3: usage() return 1 tile_dir = args[0] min_level = 0 try: max_level = int(args[1]) except ValueError: usage('Stop level must be a positive integer') return 1 if max_level < 0: usage('Stop level must be a positive integer') return 1 if len(args) == 3: try: min_level = int(args[2]) except ValueError: usage('Start level must
# (extraction artifact preserved): <filename>client.py<gh_stars>0
#!/usr/bin/env python
from dotenv import load_dotenv
from requests.auth import HTTPBasicAuth
import argparse
import datetime
import errno
import requests
import signal
import sys, json, os
import time
import yaml
import re
#import pdb

load_dotenv()

# Remove proxy if set (as it might block or send unwanted requests to the proxy)
if "http_proxy" in os.environ:
    del os.environ['http_proxy']
if "https_proxy" in os.environ:
    del os.environ['https_proxy']

# number normalization
def number(x):
    # Collapse values that are within 1e-6 of an integer to an int;
    # otherwise return the float unchanged.
    rnd_x = round(x, 0)
    if abs(x - rnd_x) < 0.000001:
        return int(rnd_x)
    else:
        return x

# Client is a partial implementation of the Rancher API
class RancherClient:
    """Partial Rancher v2-beta API client (projects / stacks / services)."""
    # valid mem units: E, P, T, G, M, K, Ei, Pi, Ti, Gi, Mi, Ki
    # nb: 'm' suffix found after setting 0.7Gi
    MUMAP = {"E":-3, "P":-2, "T":-1, "G":0, "M":1, "K":2, "m":3}

    # Init can be passed in the API info
    # but if you have the same values in your config, that will override them
    def __init__(self, config=None):
        self.config = config
        self.headers = { 'Content-Type': 'application/json' }
        self.name_mappings = {} # Cache for human name to rancher id. eg. front = 1s5

    def g_to_unit(self, size, convert_to):
        '''
        Converts a size value in Gi to another size

        :param size: the size in Gi of the value to be converted
        :param convert_to: the units to convert to
        :returns: the converted size or the original value if unsupported.
        '''
        for units , power in self.MUMAP.items():
            if convert_to.startswith(units):
                size = ( float(size) * 1024 ** power )
                break
        return number(size)

    def unit_to_g(self, val, convert_from):
        '''
        Converts a size value from arbitrary unit unto Gi

        :param val: the value to be converted to Gi
        :param convert_from: the units to convert from
        :returns: the converted value in Gi units, or the original value if unsupported.
        '''
        for units, power in self.MUMAP.items():
            if convert_from.startswith(units):
                val = ( float(val) / 1024 ** power )
                break
        return number(val)

    def names_to_ids(self, response):
        """
        Pulls all names and their related ids into a returned hash

        :param response: an API list response containing a 'data' array
        :returns: hash of name to id mappings
        """
        hash = {}
        for datum in response['data']:
            hash[datum['name']] = datum['id']
        return hash

    def name_to_id(self, name, type, function):
        """
        Given a name looks up the id of that name

        :param name: the name to look up
        :param type: the type of object we are looking up (project/stack/service)
        :param function: an api function to call which will return the list of objects
        :returns: the id requested or the originally requested name (so we support ids as well)
        """
        if name == None:
            return None
        if self.name_mappings.get(type) == None:
            # lazily populate the per-type cache with one API list call
            self.name_mappings[type] = self.names_to_ids(function())
        return self.name_mappings[type].get(name, name)

    def project_id(self, name):
        """
        Converts a project name to its id

        :param name: the name of the project
        :returns: the id of the project
        """
        return self.name_to_id(name, 'project', lambda: self.projects())

    def service_id(self, name):
        """
        Converts a service name to its id

        :param name: the name of the service
        :returns: the id of the service
        """
        # cache key is scoped per stack so identical service names don't collide
        return self.name_to_id(name, 'service' + self.config.stack, lambda: self.services())

    def stack_id(self, name):
        """
        Converts a stack name to its id

        :param name: the name of the stack
        :returns: the id of the stack
        """
        return self.name_to_id(name, 'stack', lambda: self.stacks())

    def capabilities(self, service_name=None):
        """
        Returns a service's adjustable parameters

        :param service_name: (Default value = None)
        :returns:: the adjustable parametesrs for the provided service
        """
        service = self.config.services_config.get(service_name, {})
        if service is None:
            service = {}
        merged = {
            'settings': self.config.services_defaults,
            'environment': service.get('environment', {})
        }
        # NOTE(review): 'exclude' is never a key of the dict built above, so this
        # guard can never trigger here — confirm intent against the full file.
        if merged.get('exclude', None) != None:
            return None
        return merged

    def merge(self, source = {}, destination = {}):
        """
        Python's default dict merge is shallow. This is a recursive deep dictionary merge

        :param source: The dictionary to merge from
        :param destination: The dictionary to merge into
        :returns: a merged hash
        """
        destination = {} if destination is None else destination
        for key, value in source.items():
            if isinstance(value, dict):
                # get node or create one
                node = destination.setdefault(key, {})
                self.merge(value, node)
            else:
                destination[key] = value
        return destination

    def scope_uri(self, base, option=None):
        """
        Constructs a URI if given some options. Most URIs will take /foo and /foo/{id}
        This helper does the right thing if you have an {id} (in the above case) or not

        :param base: The base uri ('/projects')
        :param option: an optional object param (a project id) (Default value = None)
        :returns: a constrcted URI like /projects/1s5
        """
        if option:
            base = base + '/' + option
        return base

    def projects_uri(self, name=None):
        """
        https://rancher.com/docs/rancher/v1.6/en/api/v2-beta/api-resources/project/

        :param name: the requested project name (Default value = None)
        :param default: if True, will use the configured default project (Default value = False)
        :returns: the uri to a project
        """
        return self.scope_uri('/projects', self.project_id(name))

    # https://rancher.com/docs/rancher/v1.6/en/api/v2-beta/api-resources/project/
    def projects(self, name=None):
        """
        :param name: (Default value = None)
        :returns: the project or all projects
        """
        return self.render(self.projects_uri(name))

    def services_uri(self, project_name=None, stack_name=None, name=None):
        """
        :param project_name: (Default value = None)
        :param name: (Default value = None)
        :returns: the service or all services
        """
        if project_name == None:
            project_name = self.config.project
        if stack_name == None:
            stack_name = self.config.stack
        # named service => project-scoped URI; listing => stack-scoped URI
        prefix = self.projects_uri(project_name) if name else self.stacks_uri(project_name, stack_name)
        return self.scope_uri(prefix + '/services', self.service_id(name))

    def services(self, project_name=None, stack_name=None, name=None, action=None, body=None):
        """
        Allows for querying and upgrading of services.
        https://rancher.com/docs/rancher/v1.6/en/api/v2-beta/api-resources/service/

        :param project_name: Project the service is in (Default value = None)
        :param name: The name of the service (Default value = None)
        :param action: An action to perform on the service (Default value = None)
        :param body: A new launchConfig hash (Default value = None)
        :returns: A dict of the API response.
        """
        if self.excluded(name):
            raise PermissionError('{} is not allowed to be modified due to exclusion rules'.format(name))
        uri = self.services_uri(project_name, stack_name, name)
        if body and action == 'upgrade':
            strategy = self.prepare_service_upgrade(name, body)
            service = self.services(name=name)
            # only try to upgrade if the service is active
            if service.get('state') == 'active':
                self.render(uri, action=action, body=strategy)
                self.wait_for_upgrade(name)
                # this commits
                response = self.services(name=name, action='finishupgrade')
                #print("finished", file=sys.stderr)
                # now we can scale the service if needed
                scale_target = self.dig(body, ['settings', 'replicas', 'value'])
                if service.get('scale') != scale_target:
                    return self.services(project_name=project_name, stack_name=stack_name,
                                         name=name, action=None,
                                         body={'id': service.get('id'), 'scale': scale_target})
                else:
                    return response
        else:
            return self.render(uri, action, body=body)

    def stacks_uri(self, project_name=None, name=None):
        """
        :param project_name: (Default value = None)
        :param name: (Default value = None)
        """
        if project_name == None:
            project_name = self.config.project
        # NOTE(review): `prefix` is computed but unused below — likely vestigial.
        prefix = '' if name else self.projects_uri(project_name)
        return self.scope_uri(self.projects_uri(project_name) + '/stacks', self.stack_id(name))

    # https://rancher.com/docs/rancher/v1.6/en/api/v2-beta/api-resources/stack/
    def stacks(self, project_name=None, name=None, action=None, body=None):
        """
        :param project_name: (Default value = None)
        :param name: (Default value = None)
        :param action: (Default value = None)
        :param body: (Default value = None)
        """
        uri = self.stacks_uri(project_name, name)
        return self.render(uri, action, body)

    def filter_environment(self, service_name, environment = {}):
        """
        Filters out any environment variables which are not configured in our config.yaml.
        If a config variable has a 'units' option, then it will convert an integer value
        from Gb (servo's base unit) into the requested units.

        :param service_name: The service on which we are working
        :param environment: The launchConfig environment changes (Defaule value = {})
        :returns: An dictionary environment filtred based on our config rules
        """
        allowed_env = self.config.services_config.get(service_name, {}).get('environment', {})
        # iterate over a snapshot of keys because we delete while iterating
        for key in list(environment.keys()):
            value = allowed_env.get(key, {})
            units = value.get('units') if isinstance(value, dict) else None
            if key not in allowed_env.keys():
                del environment[key]
            elif units:
                size = self.g_to_unit(environment[key], units)
                environment[key] = str(size) + units
        return environment

    def map_servo_to_rancher(self, service):
        """
        Maps servo keys to keys which rancher understands
        { "settings": { "vcpu": 1 }, "environment": { "KEY": "VALUE" } }
        """
        rancher = { 'environment': {} }
        settings = self.dig(service, ['settings'])
        for setting in settings.keys():
            if setting == 'cpu':
                # Rancher expects quota/period in microseconds
                rancher['cpuQuota'] = self.dig(settings, [setting, 'value']) * 1000 * 100
                rancher['cpuPeriod'] = 1000 * 100
            elif setting == 'replicas':
                rancher['scale'] = self.dig(settings, [setting, 'value'])
            elif setting == 'mem':
                value = self.dig(settings, [setting, 'value'])
                rancher['memory'] = value * 1024**3 # convert mem GiB into bytes
            else:
                rancher['environment'][setting] = self.dig(settings, [setting, 'value'])
        return rancher

    def prepare_service_upgrade(self, service_name, body):
        """
        Builds a request for the service upgrade call.
        https://rancher.com/docs/rancher/v1.6/en/api/v2-beta/api-resources/service/#upgrade

        (chunk truncated mid-docstring in the source)
        :param service_name: The name of the
    # NOTE(review): this chunk begins mid class docstring; the enclosing
    # `class Graph(...)` header (early Keras 0.x) is outside this chunk.
    optimized by a Graph model is the sum of all loss functions
    over the different outputs.

    Inherits from `containers.Graph`.
    '''
    def compile(self, optimizer, loss, sample_weight_modes={}):
        '''Configure the learning process.

        # Arguments
            optimizer: str (name of optimizer) or optimizer object.
                See [optimizers](optimizers.md).
            loss: dictionary mapping the name(s) of the output(s) to
                a loss function (string name of objective function or
                objective function. See [objectives](objectives.md)).
            sample_weight_modes: optional dictionary mapping certain
                output names to a sample weight mode ("temporal" and None
                are the only supported modes).
                If you need to do timestep-wise loss weighting on one
                of your graph outputs, you will need to set the sample
                weight mode for this output to "temporal".
        '''
        self.sample_weight_modes = sample_weight_modes
        ys = []
        ys_train = []
        ys_test = []
        weights = []
        train_loss = 0.
        test_loss = 0.
        # build one placeholder/weight/loss term per named output
        for output_name in self.output_order:
            loss_fn = loss[output_name]
            output = self.outputs[output_name]
            y_train = output.get_output(True)
            y_test = output.get_output(False)
            y = K.placeholder(ndim=K.ndim(y_train))
            ys.append(y)
            ys_train.append(y_train)
            ys_test.append(y_test)

            if hasattr(output, "get_output_mask"):
                mask = output.get_output_mask()
            else:
                mask = None

            # temporal mode => 2D (sample, timestep) weights; otherwise 1D
            if sample_weight_modes.get(output_name) == 'temporal':
                weight = K.placeholder(ndim=2)
            else:
                weight = K.placeholder(ndim=1)
            weights.append(weight)
            weighted_loss = weighted_objective(objectives.get(loss_fn))
            # total loss is the sum over all outputs
            train_loss += weighted_loss(y, y_train, weight, mask)
            test_loss += weighted_loss(y, y_test, weight, mask)

        ins = [self.inputs[name].input for name in self.input_order]
        train_ins = ins + ys + weights
        test_ins = ins + ys + weights

        for r in self.regularizers:
            train_loss = r(train_loss)
        self.optimizer = optimizers.get(optimizer)
        updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)
        updates += self.updates
        self.loss = loss

        self._train = K.function(train_ins, [train_loss], updates=updates)
        self._test = K.function(test_ins, [test_loss], updates=self.state_updates)
        self._predict = K.function(inputs=ins, outputs=ys_test, updates=self.state_updates)

    def fit(self, data, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
            validation_split=0., validation_data=None, shuffle=True,
            class_weight={}, sample_weight={}):
        '''Train the model for a fixed number of epochs.

        Returns a history object. Its `history` attribute is a record of
        training loss values at successive epochs,
        as well as validation loss values (if applicable).

        # Arguments
            data: dictionary mapping input names and outputs names to
                appropriate numpy arrays. All arrays should contain
                the same number of samples.
            batch_size: int. Number of samples per gradient update.
            nb_epoch: int.
            verbose: 0 for no logging to stdout,
                1 for progress bar logging, 2 for one log line per epoch.
            callbacks: `keras.callbacks.Callback` list. List of callbacks
                to apply during training. See [callbacks](callbacks.md).
            validation_split: float (0. < x < 1). Fraction of the data to
                use as held-out validation data.
            validation_data: dictionary mapping input names and outputs names
                to appropriate numpy arrays to be used as
                held-out validation data. All arrays should contain the same
                number of samples. Will override validation_split.
            shuffle: boolean. Whether to shuffle the samples at each epoch.
            class_weight: dictionary mapping output names to
                class weight dictionaries.
            sample_weight: dictionary mapping output names to
                numpy arrays of sample weights.
        '''
        X = [data[name] for name in self.input_order]
        y = [standardize_y(data[name]) for name in self.output_order]
        if len(set([len(a) for a in X] + [len(a) for a in y])) != 1:
            raise Exception('All input arrays and target arrays must have '
                            'the same number of samples.')
        sample_weight_list = [standardize_weights(y[i],
                                                  sample_weight=sample_weight.get(self.output_order[i]),
                                                  sample_weight_mode=self.sample_weight_modes.get(self.output_order[i])) for i in range(len(self.output_order))]
        class_weight_list = [class_weight.get(name) for name in self.output_order]

        val_f = None
        val_ins = None
        if validation_data or validation_split:
            val_f = self._test
        if validation_data:
            # can't use sample weights with validation data at this point
            y_val = [standardize_y(validation_data[name]) for name in self.output_order]
            sample_weight = [standardize_weights(y_val[i]) for i in range(len(y_val))]
            val_ins = [validation_data[name] for name in self.input_order] + [standardize_y(validation_data[name]) for name in self.output_order] + sample_weight
        elif 0 < validation_split < 1:
            # split off the tail fraction of the training arrays for validation
            split_at = int(len(X[0]) * (1 - validation_split))
            X, X_val = (slice_X(X, 0, split_at), slice_X(X, split_at))
            y, y_val = (slice_X(y, 0, split_at), slice_X(y, split_at))
            sample_weight_list, sample_weight_list_val = (slice_X(sample_weight_list, 0, split_at), slice_X(sample_weight_list, split_at))
            val_ins = X_val + y_val + sample_weight_list_val

        f = self._train
        out_labels = ['loss']
        metrics = ['loss', 'val_loss']

        # re-standardize, now folding in class weights as well
        sample_weight_list = [standardize_weights(y[i],
                                                  sample_weight=sample_weight_list[i],
                                                  class_weight=class_weight_list[i],
                                                  sample_weight_mode=self.sample_weight_modes.get(self.output_order[i])) for i in range(len(self.output_order))]
        ins = X + y + sample_weight_list
        history = self._fit(f, ins, out_labels=out_labels,
                            batch_size=batch_size, nb_epoch=nb_epoch,
                            verbose=verbose, callbacks=callbacks,
                            val_f=val_f, val_ins=val_ins,
                            shuffle=shuffle, metrics=metrics)
        return history

    def evaluate(self, data, batch_size=128, verbose=0, sample_weight={}):
        '''Compute the loss on some input data, batch by batch.

        Arguments: see `fit` method.
        '''
        sample_weight = [standardize_weights(data[name],
                                             sample_weight=sample_weight.get(name),
                                             sample_weight_mode=self.sample_weight_modes.get(name)) for name in self.output_order]
        ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight
        if len(set([len(a) for a in ins])) != 1:
            raise Exception('All input arrays and target arrays must have '
                            'the same number of samples.')
        outs = self._test_loop(self._test, ins, batch_size, verbose)
        return outs[0]

    def predict(self, data, batch_size=128, verbose=0):
        '''Generate output predictions for the input samples batch by batch.

        Arguments: see `fit` method.
        '''
        ins = [data[name] for name in self.input_order]
        if len(set([len(a) for a in ins])) != 1:
            raise Exception('All input arrays and target arrays must have '
                            'the same number of samples.')
        outs = self._predict_loop(self._predict, ins, batch_size, verbose)
        # results keyed by output name, in output_order
        return dict(zip(self.output_order, outs))

    def train_on_batch(self, data, class_weight={}, sample_weight={}):
        '''Single gradient update on a batch of samples.

        Arguments: see `fit` method.
        '''
        sample_weight = [standardize_weights(data[name],
                                             sample_weight=sample_weight.get(name),
                                             class_weight=class_weight.get(name),
                                             sample_weight_mode=self.sample_weight_modes.get(name)) for name in self.output_order]
        ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight
        if len(set([len(a) for a in ins])) != 1:
            raise Exception('All input arrays and target arrays must have '
                            'the same number of samples.')
        return self._train(ins)

    def test_on_batch(self, data, sample_weight={}):
        '''Compute the loss on a single batch of samples.

        Arguments: see `fit` method.
        '''
        sample_weight = [standardize_weights(data[name],
                                             sample_weight=sample_weight.get(name),
                                             sample_weight_mode=self.sample_weight_modes.get(name)) for name in self.output_order]
        ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight
        if len(set([len(a) for a in ins])) != 1:
            raise Exception('All input arrays and target arrays must have '
                            'the same number of samples.')
        return self._test(ins)

    def predict_on_batch(self, data):
        '''Generate predictions for a single batch of samples.
        '''
        ins = [data[name] for name in self.input_order]
        if len(set([len(a) for a in ins])) != 1:
            raise Exception('All input arrays and target arrays must have '
                            'the same number of samples.')
        outs = self._predict(ins)
        return dict(zip(self.output_order, outs))

    def save_weights(self, filepath, overwrite=False):
        '''Save weights from all layers to a HDF5 files.
        '''
        import h5py
        import os.path
        # if file exists and should not be overwritten
        if not overwrite and os.path.isfile(filepath):
            import sys
            get_input = input
            if sys.version_info[:2] <= (2, 7):
                get_input = raw_input
            overwrite = get_input('[WARNING] %s already exists - overwrite? '
                                  '[y/n]' % (filepath))
            while overwrite not in ['y', 'n']:
                overwrite = get_input('Enter "y" (overwrite) or "n" (cancel).')
            if overwrite == 'n':
                return
            print('[TIP] Next time specify overwrite=True in save_weights!')
        f = h5py.File(filepath, 'w')
        g = f.create_group('graph')
        weights = self.get_weights()
        g.attrs['nb_params'] = len(weights)
        for n, param in enumerate(weights):
            # one dataset per parameter tensor: param_0, param_1, ...
            param_name = 'param_{}'.format(n)
            param_dset = g.create_dataset(param_name, param.shape, dtype=param.dtype)
            param_dset[:] = param
        f.flush()
        f.close()

    def load_weights(self, filepath):
        '''Load weights from a HDF5 file.
        '''
        import h5py
        f = h5py.File(filepath)
        g = f['graph']
        weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
        self.set_weights(weights)
        f.close()

    def fit_generator(self, generator, samples_per_epoch, nb_epoch,
                      verbose=1, callbacks=[], validation_data=None,
                      class_weight={}, nb_worker=1):
        '''Fit a model on data generated batch-by-batch by a Python generator.
        The generator is run in parallel to the model, for efficiency,
        and can be run by multiple workers at the same time.
        For instance, this allows you to do real-time data augmentation
        on images on CPU in parallel to training your model on GPU.

        # Arguments
            generator: a generator.
                The output of the generator must be either a dictionary
                mapping inputs and outputs names to numpy arrays, or
                a tuple of dictionaries (input_data, sample_weight).
                All arrays should contain the same number of samples.
                The generator is expected to loop over its data
                indefinitely. An epoch finishes when `samples_per_epoch`
                samples have been seen by the model.
            samples_per_epoch: integer, number of samples to process before
                going to the next epoch.
            nb_epoch: integer, total number of iterations on the data.
            verbose: verbosity mode, 0, 1, or 2.
            callbacks: list of callbacks to be called during training.
            validation_data: dictionary mapping input names and outputs names
                to appropriate numpy arrays to be used as
                held-out validation data.
@property def center(self): return Vector.centroid(self.dots) def concat(self): return Vector(x_val=[xx for xx in self.x], y_val=[yy for yy in self.y]) def __str__(self): st = '<Vector>' for ss, vv in [ ['x_val', self.x_val], ['y_val', self.y_val], ['dots', self.dots], ]: st += f'\n\t.{ss}: {vv}' st += '\n' return st def __len__(self): return len(self.x_val) @property def x(self): return [xx for xx in self.x_val] @property def y(self): return [xx for xx in self.y_val] @property def dt(self): return [[xx, yy] for xx, yy in self.dots] # return Vector that sum xs eq sum ys eq 100 def percent(self): return Vector( x_val=[xe / sum(self.x_val) * 100 for xe in self.x_val], y_val=[ye / sum(self.y_val) * 100 for ye in self.y_val], ) def sample(self, n): if n > len(self.x_val): n = len(self.x_val) each = len(self.x_val) / n rdt = [[self.dots[0][0], self.dots[0][1]]] for ii, dd in enumerate(self.dots): if round(each) == ii: rdt.append([dd[0], dd[1]]) each += each return Vector(dots=rdt) # get rid of duplicated dots, return a new Vector def single(self): dots = [] for dt in self.dots: if dt not in dots: dots.append(dt) return Vector(dots=dots) def mono(self, operate='<', x=True, y=True): if x: m1 = Mono(array=self.x, operate=operate, along=self.y).mono self.x_val = m1['array'] self.y_val = m1['along'] if y: m1 = Mono(array=self.y, operate=operate, along=self.x).mono self.y_val = m1['array'] self.x_val = m1['along'] return self def __iter__(self): for dt in self.dots: yield dt def min(self, x=False, y=False): if x: return min(self.x_val) elif y: return min(self.y_val) else: raise ValueError('<Vector> min or max must have either x or y set to True') def max(self, x=False, y=False): if x: return max(self.x_val) elif y: return max(self.y_val) else: raise ValueError('<Vector> min or max must have either x or y set to True') def average(self, x=False, y=False): if x: return sum(self.x_val) / len(self.x_val) elif y: return sum(self.y_val) / len(self.y_val) else: raise 
ValueError('<Vector> average must have either x or y set to True') class Preset: """ Monotone both x and y values """ # return either > or < @staticmethod def linear_trend_operate(arr): le = 0 ge = 0 for ii in range(len(arr) - 1): if arr[ii] < arr[ii + 1]: le += 1 elif arr[ii] > arr[ii + 1]: ge += 1 if le > ge: return '<' else: return '>' # take vector as arg, return vector # x|y_mono -> bool: decide whether to regulate x|y monotonicity def __init__(self, x_mono, y_mono, x_val=None, y_val=None, dots=None): vector = Vector(x_val=x_val, y_val=y_val, dots=dots) if not x_mono: self.vector = vector else: x_op = Preset.linear_trend_operate(vector.x) mo = Mono(array=x_val, operate=x_op, along=y_val).mono x_nv = mo['array'] y_nv = mo['along'] if y_mono: y_op = Preset.linear_trend_operate(y_nv) mon = Mono(array=y_nv, operate=y_op, along=x_nv).mono y_res = mon['array'] x_res = mon['along'] self.vector = Vector(x_val=x_res, y_val=y_res) else: self.vector = Vector(x_val=x_nv, y_val=y_nv) class Regression(Preset): """ Similarities of all regression: * the line is assumed to always passes the center """ @staticmethod def store(x_val, y_val, dots): vt = Vector(x_val=x_val, y_val=y_val, dots=dots) xs = vt.x ys = vt.y if Expression.DATA is None: if isinstance(Expression.SIZE, int): if Expression.SIZE >= len(vt): Expression.DATA = vt.concat() else: Expression.DATA = Vector( x_val=Brief.extract(array=xs, row=Expression.SIZE, avg=False), y_val=Brief.extract(array=ys, row=Expression.SIZE, avg=False), ) elif isinstance(Expression.DATA, float): if Expression.SIZE < 0 or Expression.SIZE > 1: raise ValueError( '\n'.join([ "Sample Size as a float representing what's the percentage of data take into account", f"It can't be out of domain [0, 1], invalid value {Expression.SIZE},", "call set_sample_size() before regression to change it" ]) ) siz = int(Expression.SIZE * len(vt)) Expression.DATA = Vector( x_val=Brief.extract(array=xs, row=siz, avg=False), y_val=Brief.extract(array=ys, row=siz, 
avg=False), ) else: raise TypeError( '\n'.join([ 'Sample Size can either be a float which is percentage or an int which is exact number', 'it is used in result matching calculation, smaller number makes quicker calculation', 'Default to 20', ]) ) @staticmethod def initial(vt): if vt: Expression.INITIAL = vt.concat() # rev<True>: from center to edge # rev<False>: from edge to center # x_mono -> false: no regulation # y_mono -> true: y_reg(x_reg(val)) # y_mono -> false: x_reg(val) # avg -> true: take average of data # avg -> false: select from raw data def __init__( self, required, many, centered, avg, x_mono, y_mono, x_val=None, y_val=None, dots=None, rev=False, initial_vector=None, # help inverse exponential ): # store data in Expression class variable for finish comparison usage Regression.initial(initial_vector) Regression.store(x_val=x_val, y_val=y_val, dots=dots) super().__init__( x_mono=x_mono, y_mono=y_mono, x_val=x_val, y_val=y_val, dots=dots, ) self.required = required if centered: req = self.required - 1 else: req = self.required self.slice = Brief( x_val=self.vector.x, y_val=self.vector.y, required=req, many=many, avg=avg, ).slicer(rev) self.centered = centered # params # xy -> [[x1, x2..], [y1, y2..]] # cen -> [x, y] # return # vector -> [[x1, x2, x..], [y1, y2, y..]] | [[x, y], [x1, y1], [x2, y2]..] 
@staticmethod def join(xy, cen): x_vars = [xx for xx in xy[0]] x_vars.append(cen[0]) y_vars = [yy for yy in xy[1]] y_vars.append(cen[1]) return Vector(x_val=x_vars, y_val=y_vars) # generate vector object of required length # notice dots always contains center def eject(self): if self.centered: for sl in self.slice: yield Regression.join(sl, self.vector.center) else: for sl in self.slice: yield Vector( x_val=[xx for xx in sl[0]], y_val=[yy for yy in sl[1]], ) class Comparison: """ static class as variable collector """ EXPRESSIONS = [] def __init__(self, expression): Comparison.EXPRESSIONS.append(expression) # return the most accurate regressed calculations obj # should be called after regression are done and all expressions are evaluated @staticmethod def compare(): return min(Comparison.EXPRESSIONS) @staticmethod def forget(): Comparison.EXPRESSIONS = [] class Expression: """ formula representing regressed data """ DATA = None INITIAL = None SIZE = 20 SAMPLE_RATE = 10 # evaluating percentage of similarity @staticmethod def discrete(arr): length = float(len(arr)) mean = sum(arr) / length dif = 0 for ar in arr: dif += abs(ar - mean) return dif / length # Expression is inited when we got kwargs as, for example, # {hook:<func>, a:[1, 2], b: [3, 4], pro:1, y_exp:1} # coefficients are solitary alphabet # after separation, use each set of dots to work out y value, # compare it to what it should be from sample data # get an efficiency, the lower, the more matching def __init__(self, **kwargs): self.category = None # e.g. 
Linear effect = None su = None self.coe = {} # avg: {k: 2, b: 3} self.recorders = {} # all data: {k: [1, 2, 3], b: [2, 3, 4]} self.param_error = {} self.length = 0 self.variables = [] self.hook = kwargs['hook'] del kwargs['hook'] del_keys = [] if Expression.INITIAL: initial_vector = Expression.INITIAL.sample(Expression.SAMPLE_RATE) else: initial_vector = Expression.DATA.sample(Expression.SAMPLE_RATE) for kk in kwargs.keys(): if len(kk) != 1: del_keys.append(kk) self.variables.append(kwargs[kk]) for kk in del_keys: del kwargs[kk] for uk in range(len(list(kwargs.values())[0])): sub = {} for lk in kwargs.keys(): sub[lk] = kwargs[lk][uk] fo = self.inspect(list(sub.values()) + self.variables) eum = 0 for ii, xv in enumerate(initial_vector.x): try: eum += abs(initial_vector.y[ii] - fo(xv)) except ZeroDivisionError: pass evg = eum / Expression.SAMPLE_RATE if effect is None or evg < effect: effect = evg su = sub self.coe = su self.sum_param_error = sum(self.param_error.values()) self.efficiency = effect def set_category(self, category): self.category = category return self def __str__(self): st = '<Expression>' for ss, vv in [ ['category', self.category], ['coe', self.coe], ['recorders', self.recorders], ['param_error', self.param_error], ['length', self.length], ['sum_param_error', self.sum_param_error], ['efficiency', self.efficiency], ]: st += f'\n\t.{ss}: {vv}' st += '\n' return st """ support <sort> <min> <max> """ # == equal to def __eq__(self, other): if isinstance(other, Expression): return self.efficiency == other.efficiency return self.efficiency == other # != not equal to def __ne__(self, other): if isinstance(other, Expression): return self.efficiency != other.efficiency return self.efficiency != other.efficiency # < less than def __lt__(self, other): if isinstance(other, Expression): return self.efficiency < other.efficiency return self.efficiency < other # > greater than def __gt__(self, other): if isinstance(other, Expression): return self.efficiency > 
other.efficiency return self.efficiency > other # <= less than or equal to def __le__(self, other): if isinstance(other, Expression): return self.efficiency <= other.efficiency return self.efficiency <= other # >= greater than or equal to def __ge__(self, other): if isinstance(other, Expression): return self.efficiency >= other.efficiency return self.efficiency >= other def inspect(self, li): return self.hook(*li) # param sequence of regression hook must correspond
# python/paddle_fl/mpc/data_utils/data_utils.py
#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provide data utilities for PrivC protocol, including
data encryption, decryption, share save and loading.
"""
import abc
import six
import os
import numpy as np
import six  # NOTE(review): duplicate import of six — redundant but harmless
import paddle
import paddle.fluid as fluid
import mpc_data_utils as mdu

from ..layers import __all__ as all_ops
from .op_extra_desc import add_extra_desc

# operators that should be skipped when encrypt and decrypt
op_to_skip = ['feed', 'fetch', 'scale', 'mpc_init']
# operators that are supported currently for model encryption and decryption
supported_mpc_ops = all_ops + ['fill_constant', 'sgd'] + op_to_skip
# variables that used as plain variables and need no encryption
plain_vars = ['learning_rate_0']

MPC_MODEL_NAME = "__model__.mpc"
MODEL_NAME = "__model__"
MODEL_SHARE_DIR = "model_share"
MPC_OP_PREFIX = "mpc_"


@six.add_metaclass(abc.ABCMeta)
class DataUtils(object):
    """
    abstract class for data utils.
    """

    def __init__(self):
        # number of shares produced per plaintext (protocol specific)
        self.SHARE_NUM = None
        # extra leading dims prepended to encrypted tensor shapes
        self.PRE_SHAPE = None
        # protocol-specific share representation of the constant one
        self.MPC_ONE_SHARE = None

    def encrypt(self, number):
        """
        Encrypts the plaintext number into secret shares
        Args:
            number: float, the number to share
        Returns:
            shares of input number
        """
        pass

    def decrypt(self, shares):
        """
        Reveal plaintext value from raw secret shares
        Args:
            shares: shares to reveal from (list)
        Return:
            the plaintext number (float)
        """
        pass

    def make_shares(self, num_array):
        """
        Create raw shares for an array.

        Args:
            num_array: the input data array
        Returns:
            shares of the num_array in type of ndarray
        """
        old_size = num_array.size
        # flatten, share element-wise, then restore the original shape with
        # the share dimension leading
        flat_num_array = num_array.reshape(old_size,)
        new_shape = (self.SHARE_NUM, ) + num_array.shape
        result = np.empty((old_size, self.SHARE_NUM), dtype=np.int64)
        for idx in six.moves.range(0, old_size):
            result[idx] = self.encrypt(flat_num_array[idx])
        result = result.transpose(1, 0)
        result = result.reshape(new_shape)
        return result

    def get_shares(self, shares, index):
        """
        Build mpc shares from raw shares according to index

        Args:
            shares: the input raw share array
            index: index of the mpc share
        Returns:
            mpc shares array corresponding to index
        """
        pass

    def save_shares(self, share_reader, part_name):
        """
        Combine raw shares to mpc shares, and persists to files. Each mpc share will be
        put into the corresponding file, e.g., ${part_name}.part[0/1/2].

        Args:
            share_reader: iteratable function object returning a single array of raw shares
                in shape of [2/3, ...] each time
            part_name: file name
        Returns:
            files with names of ${part_name}.part[0/1/2]
        """
        pass

    def load_shares(self, part_name, id, shape, append_share_dim=True):
        """
        Load mpc shares from file with name ${part_name}.part{id} in shape of ${shape}.
Args: part_name and id: use to build the file name of ${part_name}.part{id} shape: the shape of output array Returns: iteratable function object returing a share array with give shape each time """ if append_share_dim == True: shape = self.PRE_SHAPE + shape ext = ".part{}".format(id) share_size = np.prod(shape) * 8 # size of int64 in bytes def reader(): """ internal reader """ with open(part_name + ext, 'rb') as part_file: share = part_file.read(share_size) while share: yield np.frombuffer(share, dtype=np.int64).reshape(shape) share = part_file.read(share_size) return reader def reconstruct(self, shares, type=np.float): """ Reconstruct plaintext from mpc shares Args: shares: all the mpc share arrays, where the share slices are stored rowwise type: expected type of final result Returns: plaintext array reconstructed from the mpc shares, with shape of (dims) """ pass def batch(self, reader, batch_size, drop_last=False): """ A batch reader return a batch data meeting the shared data's shape. E.g., a batch arrays with shape (3, 4) of batch_size will be transform to (batch_size, 3, 4). Args: see paddle.batch method Returns: the batched reader """ pass def transpile(self, program=None): """ Transpile Paddle program into MPC program. Args: program: The plain Paddle model program, default to default_main_program. Returns: The MPC program. """ if program is None: program = fluid.default_main_program() place = fluid.CPUPlace() if program.num_blocks > 1: raise NotImplementedError( "The number of blocks in current main program" "is {}, which is not supported in this version." 
.format(program.num_blocks())) global_block = program.global_block() g_scope = fluid.global_scope() mpc_vars_names = _transpile_type_and_shape(block=global_block) # encrypt tensor values for each variable in mpc_var_names for mpc_var_name in mpc_vars_names: if g_scope.find_var(mpc_var_name) is not None: param = g_scope.find_var(mpc_var_name) param_tensor = np.array(param.get_tensor()) mpc_var = global_block.var(mpc_var_name) if mpc_var_name not in plain_vars: param.get_tensor()._set_dims(mpc_var.shape) # process initialized params that should be 0 set_tensor_value = np.array([param_tensor, param_tensor]).astype(np.int64) param.get_tensor().set(set_tensor_value, place) #else: # param.get_tensor().set(np.array(param.get_tensor()).astype('float64'), place) # trigger sync to replace old ops. op_num = global_block.desc.op_size() _ = global_block.desc.append_op() global_block.desc._remove_op(op_num, op_num + 1) return program def _transpile_type_and_shape(self, block): """ Transpile dtype and shape of plain variables into MPC dtype and shape. And transpile op type into MPC type. Args: block: The block in Paddle program. Returns: A set of variable names to encrypt. 
""" mpc_vars_names = set() # store variable name in mpc_vars_names, and encrypt dtype and shape for var_name in block.vars: var = block.var(var_name) if var.name != "feed" and var.name != "fetch": mpc_vars_names.add(var.name) if var_name in plain_vars: # var.desc.set_dtype(fluid.framework.convert_np_dtype_to_dtype_(np.float64)) continue encrypted_var_shape = self.PRE_SHAPE + var.shape var.desc.set_dtype(fluid.framework.convert_np_dtype_to_dtype_(np.int64)) var.desc.set_shape(encrypted_var_shape) # encrypt op type, or other attrs if needed for op in block.ops: if _is_supported_op(op.type): if op.type == 'fill_constant': op._set_attr(name='shape', val=MPC_ONE_SHARE.shape) # set default MPC value for fill_constant OP op._set_attr(name='value', val=MPC_ONE_SHARE) op._set_attr(name='dtype', val=3) elif op.type in self.op_to_skip: pass else: add_extra_desc(op, block) op.desc.set_type(MPC_OP_PREFIX + op.type) else: raise NotImplementedError('Operator {} is unsupported.' .format(op.type)) return mpc_vars_names def encrypt_model(self, program, mpc_model_dir=None, model_filename=None): """ Encrypt model, and save encrypted model (i.e., MPC model shares) into files for MPC training, updating, or inference. Args: program: The loaded program of paddle model. mpc_model_dir: The directory that save MPC model shares. model_filename: The name of MPC model file, default is __model__.mpc. """ place = fluid.CPUPlace() exe = fluid.Executor(place) # TODO(xukun): support more blocks. Tips: may just adding "for loop" for all blocks. if program.num_blocks > 1: raise NotImplementedError( "The number of blocks in current main program" "is {}, which is not supported in this version." 
.format(program.num_blocks())) global_block = program.global_block() g_scope = fluid.global_scope() mpc_vars_names = _transpile_type_and_shape(global_block) # encrypt tensor values for each variable in mpc_var_names for mpc_var_name in mpc_vars_names: if g_scope.find_var(mpc_var_name) is not None: param = g_scope.find_var(mpc_var_name) param_tensor = np.array(param.get_tensor()) param_tensor_shares = self.make_shares(param_tensor) mpc_var = global_block.var(mpc_var_name) for idx in six.moves.range(self.SHARE_NUM): if mpc_var_name not in plain_vars: param.get_tensor()._set_dims(mpc_var.shape) set_tensor_value = self.get_shares(param_tensor_shares, idx) param.get_tensor().set(set_tensor_value, place) #else: # param.get_tensor().set(np.array(param.get_tensor()).astype('float64'), place) param_share_dir = os.path.join( mpc_model_dir, MODEL_SHARE_DIR + "_" + str(idx)) fluid.io.save_vars( executor=exe, dirname=param_share_dir, vars=[mpc_var], filename=mpc_var_name) # trigger sync to replace old ops. op_num = global_block.desc.op_size() _ = global_block.desc.append_op() global_block.desc._remove_op(op_num, op_num + 1) # save mpc model file model_basename = os.path.basename( model_filename) if model_filename is not None else MPC_MODEL_NAME for idx in six.moves.range(self.SHARE_NUM): model_share_dir = os.path.join(mpc_model_dir, MODEL_SHARE_DIR + "_" + str(idx)) if not os.path.exists(model_share_dir): os.makedirs(model_share_dir) model_name = os.path.join(model_share_dir, model_basename) with open(model_name, "wb") as f: f.write(program.desc.serialize_to_string()) def decrypt_model(self, mpc_model_dir, plain_model_path, mpc_model_filename=None, plain_model_filename=None): """ Reveal a paddle model. Load encrypted model (i.e., MPC model shares) from files and decrypt it into paddle model. Args: mpc_model_dir: The directory of all model shares. plain_model_path: The directory to save revealed paddle model. mpc_model_filename: The name of encrypted model file. 
plain_model_filename: The name of decrypted model file. """ share_dirs = [] for sub_dir in os.listdir(mpc_model_dir): if not sub_dir.startswith("."): share_dirs.append(os.path.join(mpc_model_dir, sub_dir)) place = fluid.CPUPlace() exe = fluid.Executor(place=place) mpc_model_basename = os.path.basename( mpc_model_filename) if mpc_model_filename is not None else MPC_MODEL_NAME [main_prog, _, _] = fluid.io.load_inference_model( dirname=share_dirs[0], executor=exe, model_filename=mpc_model_basename) if main_prog.num_blocks > 1: raise NotImplementedError( "The number of blocks in current main program" "is {}, which is not supported in this version" .format(main_prog.num_blocks())) global_block = main_prog.global_block() g_scope = fluid.global_scope() # a set storing unique variables to decrypt vars_set = set() # store variable name in vars_set, and decrypt dtype and shape for mpc_var_name in global_block.vars: mpc_var = global_block.var(mpc_var_name) if mpc_var.name != "feed" and mpc_var.name != "fetch": vars_set.add(mpc_var.name) if mpc_var_name in plain_vars: # var.desc.set_dtype(fluid.framework.convert_np_dtype_to_dtype_(np.float64)) continue else: plain_var_shape = mpc_var.shape mpc_var.desc.set_shape(plain_var_shape) mpc_var.desc.set_dtype(fluid.framework.convert_np_dtype_to_dtype_(np.float32)) # remove init op first_mpc_op = global_block.ops[0] if first_mpc_op.type == 'mpc_init': global_block._remove_op(0) # decrypt op
# array_problems/Solutions.py
import collections
import heapq
from typing import List


def merge(intervals: List[List[int]]) -> List[List[int]]:
    """Merge overlapping intervals; result is sorted by start (LeetCode 56)."""
    if not intervals:
        return intervals
    # BUG FIX: the original seeded the queue with intervals[0] *before*
    # sorting, which produced wrong results for unsorted input.
    intervals.sort(key=lambda x: x[0])
    queue = [intervals[0]]
    index = 1
    while index != len(intervals):
        if intervals[index][0] <= queue[-1][1]:
            # overlap: extend the last merged interval
            queue[-1][1] = max(intervals[index][1], queue[-1][1])
        else:
            queue.append(intervals[index])
        index += 1
    return queue


def largest_parameter(input):
    """For each connected region of 1s, count its cells that border the grid
    edge or a 0 cell; return the largest such count.

    NOTE(review): "parameter"/"parimeter" spellings are kept from the
    original API; "perimeter" appears to be the intent.
    """
    def check_directions(x, y):
        # Return (cell borders the perimeter?, unvisited 1-neighbours).
        valid_directions = []
        on_parimeter = False
        directions = [[-1, 0], [0, -1], [1, 0], [0, 1]]
        for x_direction, y_direction in directions:
            new_x = x + x_direction
            new_y = y + y_direction
            if 0 <= new_x < len(input) and 0 <= new_y < len(input[0]):
                if input[new_x][new_y] == 1:
                    valid_directions.append([new_x, new_y])
                elif input[new_x][new_y] == 0:
                    on_parimeter = True
            else:
                # off the grid counts as perimeter
                on_parimeter = True
        return on_parimeter, valid_directions

    def gather_perimeter_size(x, y):
        if input[x][y] == 1:
            input[x][y] = -1  # mark visited
            on_parimeter = 0
            parameter, new_directions = check_directions(x, y)
            if parameter:
                on_parimeter += 1
            if new_directions:
                for new_x, new_y in new_directions:
                    size = gather_perimeter_size(new_x, new_y)
                    on_parimeter += size
            return on_parimeter
        return 0

    max_parameter = 0
    for x, row in enumerate(input):
        for y, value in enumerate(row):
            if input[x][y] == 1:
                max_parameter = max(max_parameter, gather_perimeter_size(x, y))
    return max_parameter


def findNumbers(nums: List[int]) -> int:
    """Count the numbers in ``nums`` with an even number of digits (LeetCode 1295)."""
    number_even = 0
    # BUG FIX: the original iterated over the *characters* of str(nums)
    # (including brackets, commas and spaces) instead of the numbers.
    for num in nums:
        if len(str(num)) % 2 == 0:
            number_even += 1
    return number_even


class CustomStack:
    """Bounded stack with push, pop, and bulk increment (LeetCode 1381)."""

    def __init__(self, maxSize: int):
        self.stack = []
        self.max_size = maxSize

    def push(self, x: int) -> None:
        # pushes beyond capacity are silently dropped
        if len(self.stack) != self.max_size:
            self.stack.append(x)

    def pop(self) -> int:
        if not self.stack:
            return -1
        else:
            return self.stack.pop()

    def increment(self, k: int, val: int) -> None:
        # add val to the bottom min(k, len) elements
        for i in range(k):
            if i < len(self.stack):
                self.stack[i] += val


def sortArrayByParity(nums: List[int]) -> List[int]:
    """Reorder ``nums`` in place so even numbers precede odd ones (LeetCode 905)."""
    odd_pointer = 0
    even_pointer = 0
    while even_pointer < len(nums) and odd_pointer < len(nums):
        if nums[odd_pointer] % 2 == 0:
            nums[odd_pointer], nums[even_pointer] = nums[even_pointer], nums[odd_pointer]
            even_pointer += 1
        else:
            odd_pointer += 1
    # FIX: removed leftover debug print(nums)
    return nums


def replaceElements(arr: List[int]) -> List[int]:
    """Replace each element with the greatest element to its right, last with -1."""
    max_so_far = -1
    for i in range(len(arr) - 1, -1, -1):
        max_so_far, arr[i] = max(max_so_far, arr[i]), max_so_far
    return arr


def countSquares(matrix: List[List[int]]) -> int:
    """Count square submatrices of all 1s via in-place DP (LeetCode 1277)."""
    result = 0
    for x, row in enumerate(matrix):
        for y, value in enumerate(row):
            # BUG FIX: only extend squares from cells that contain a 1; the
            # original also added the neighbour minimum to 0-cells, turning
            # them into phantom squares and overcounting.
            if matrix[x][y] and x and y:
                matrix[x][y] += min(matrix[x - 1][y], matrix[x][y - 1], matrix[x - 1][y - 1])
            result += matrix[x][y]
    return result


def countBattleships(board: List[List[str]]) -> int:
    def yield_directions(x, y):
        directions = [[-1, 0], [1, 0], [0, -1], [0, 1]]
        for x_direction, y_direction in directions:
            new_x = x_direction + x
            new_y = y_direction + y
            if 0 <= new_x < len(board) and 0 <= new_y < len(board[0]):
                if board[new_x][new_y] == 'X':
                    yield new_x, new_y

    def traverse(x, y):
        board[x][y] = "."
for new_x, new_y in yield_directions(x, y): traverse(new_x, new_y) battleship_count = 0 for x, row in enumerate(board): for y, value in enumerate(row): if value == "X": battleship_count += 1 traverse(x, y) return battleship_count def intervalIntersection(firstList: List[List[int]], secondList: List[List[int]]) -> List[ List[int]]: result_set = [] pointer_a = 0 pointer_b = 0 while pointer_a < len(firstList) and pointer_b < len(secondList): if secondList[pointer_b][0] <= firstList[pointer_a][0] <= secondList[pointer_b][1] or \ firstList[pointer_a][0] <= secondList[pointer_b][0] <= firstList[pointer_a][1]: result_set.append( [ max(firstList[pointer_a][0], secondList[pointer_b][0]), min(firstList[pointer_a][1], secondList[pointer_b][1]) ] ) if firstList[pointer_a][0] <= secondList[pointer_b][0]: pointer_a += 1 else: pointer_b += 1 return result_set def permute(nums: List[int]) -> List[List[int]]: def permutation_function(numbers, temp_result): result = [] if not numbers: result.append(temp_result) else: for index, number in enumerate(numbers): result.extend( permutation_function(numbers[:index] + numbers[index + 1:], temp_result + [number])) return result return permutation_function(nums, []) def minFallingPathSum(matrix: List[List[int]]) -> int: result = [[float('inf') for _ in range(len(matrix[0]))] for _ in range(len(matrix))] for x, row in enumerate(matrix): for y, value in enumerate(row): if x == 0: result[x][y] = value elif y == 0: result[x][y] = min(result[x - 1][y], result[x - 1][y + 1]) + matrix[x][y] elif y == len(matrix[0]) - 1: result[x][y] = min(result[x - 1][y - 1], result[x - 1][y]) + matrix[x][y] else: result[x][y] = min(result[x - 1][y - 1], result[x - 1][y], result[x - 1][y + 1]) + \ matrix[x][y] return min(result[-1]) def getMaximumGold(grid: List[List[int]]) -> int: def yield_valid_directions(x, y): directions = [[-1, 0], [1, 0], [0, 1], [0, -1]] for x_direction, y_direction in directions: x_position, y_position = x + x_direction, y + y_direction if 
0 <= x_position < len(grid) and 0 <= y_position < len(grid[0]) and grid[x_position][ y_position] != 0: yield x_position, y_position def traverse(x, y): gold_at_position, grid[x][y] = grid[x][y], 0 max_path_gold = 0 for x_position, y_position in yield_valid_directions(x, y): max_path_gold = max(max_path_gold, traverse(x_position, y_position)) grid[x][y] = gold_at_position return max_path_gold + gold_at_position max_gold = 0 for x, row in enumerate(grid): for y, value in enumerate(row): if value != 0: max_gold = max(max_gold, traverse(x, y)) return max_gold def validateStackSequences(pushed: List[int], popped: List[int]) -> bool: stack = [] pushed = collections.deque(pushed) popped = collections.deque(popped) while True: if not popped and not pushed and not stack: return True if not pushed and popped and popped[0] != stack[-1]: return False if not stack and pushed or popped[0] != stack[-1]: stack.append(pushed.popleft()) else: popped.popleft() stack.pop() def minesweeper(board, click): def get_valid_directions(x, y): directions = [[-1, 0], [1, 0], [1, 1], [1, -1], [0, 1], [0, -1], [-1, -1], [-1, 1]] new_directions = [] number_of_mines = 0 for x_direction, y_direction in directions: x_position, y_position = x + x_direction, y + y_direction if 0 <= x_position < len(board) and 0 <= y_position < len(board[0]): if board[x_position][y_position] == 'M': number_of_mines += 1 elif board[x_position][y_position] == 'E': new_directions.append([x_position, y_position]) return number_of_mines, new_directions def traverse(x, y): if board[x][y] == 'M': board[x][y] = 'X' else: number_of_mines, new_directions = get_valid_directions(x, y) if number_of_mines: board[x][y] = str(number_of_mines) else: board[x][y] = 'B' for x_direction, y_direction in new_directions: traverse(x_direction, y_direction) traverse(click[0], click[1]) return board def lastStoneWeight(stones: List[int]) -> int: stones = [-x for x in stones] heapq.heapify([-x for x in stones]) while len(stones) != 1: heaviest = 
heapq.heappop(stones) second_heaviest = heapq.heappop(stones) if heaviest != second_heaviest: heapq.heappush(stones, -abs(heaviest - second_heaviest)) return abs(stones.pop()) def canReach(arr: List[int], start: int) -> bool: queue = collections.deque([start]) while queue: index = queue.popleft() if arr[index] == 0: return True if arr[index] != -1: position_magnitude, arr[index] = arr[index], -1 if 0 <= index + position_magnitude < len(arr) and arr[index + position_magnitude] != -1: queue.append(index + position_magnitude) if 0 <= index - position_magnitude < len(arr) and arr[index - position_magnitude] != -1: queue.append(index - position_magnitude) return False def longestOnes(nums: List[int], k: int) -> int: changed_zeros = 0 pointer_start = 0 pointer_end = 0 longest = 0 while pointer_end != len(nums): while changed_zeros == k and nums[pointer_end] == 0: if nums[pointer_start] == 0: changed_zeros -= 1 pointer_start += 1 if nums[pointer_end] == 0: changed_zeros += 1 longest = max(longest, pointer_end - pointer_start + 1) pointer_end += 1 return longest def numEnclaves(grid: List[List[int]]) -> int: def get_valid_directions(x, y): on_boundary = False new_positions = [] directions = [[-1, 0], [1, 0], [0, -1], [0, 1]] for x_direction, y_direction in directions: x_position = x + x_direction y_position = y + y_direction if 0 <= x_position < len(grid) and 0 <= y_position < len(grid[0]): if grid[x_position][y_position] == 1: new_positions.append([x_position, y_position]) else: on_boundary = True return on_boundary, new_positions def traverse(x, y): area = 1 grid[x][y] = 0 boundary, new_positions = get_valid_directions(x, y) for x_position, y_position in new_positions: position_boundary, position_area = traverse(x_position, y_position) boundary = boundary or position_boundary area += position_area return boundary, area full_area = 0 for x, row in enumerate(grid): for y, value in enumerate(row): if value == 1: boundary, area = traverse(x, y) if not boundary: full_area += 
area return full_area def minPathSum(grid: List[List[int]]) -> int: directions = [[-1, 0], [1, 0], [0, -1], [0, 1]] priority_queue = [(0, [0, 0])] heapq.heapify(priority_queue) while priority_queue: sum_so_far, position = heapq.heappop(priority_queue) x = position[0] y = position[1] if grid[x][y] != 0: if x == len(grid) - 1 and y == len(grid[0]) - 1: return sum_so_far + grid[-1][-1] grid[x][y], sum_so_far = 0, sum_so_far + grid[x][y] for x_direction, y_direction in directions: x_position = x + x_direction y_position = y + y_direction if 0 <= x_position < len(grid) and 0 <= y_position < len(grid[0]) and grid[x_position][ y_position] != 0: heapq.heappush(priority_queue, (sum_so_far, [x_position, y_position])) def isMonotonic(nums: List[int]) -> bool: increasing = None for i, num
# bulk_data_extraction/unbounce_connection.py #************************************************************************************* # Programmer: <NAME> # Class Name: UnbounceConnection # Super Class: None # # Revision Date Release Comment # -------- ---------- ------------------------------------------------------------ # 1.0 08/09/2019 Initial Release # # File Description # ------------------------------------------------------------------------------------ # This Python class is a wrapper for the unbounceapi package and, contains methods for # simple querying of bulk data from the Unbounce server. The only 2 extractable # objects are Pages and Leads via the bulk_extract() method. The return value for # both objects are lists of JSON objects. Please investigate each method for any # further details. # # Class Methods # ------------------------------------------------------------------------------------ # Name Description # -------------------- ------------------------------------------------------- # __init__() The constructor used to instantiate this class. # # bulk_get_pages() The method that interacts with the unbounceapi wrapper # to retrive and return Page objects as lists of JSON # objects. # # bulk_get_leads() The method that interacts with the unbounceapi wrapper # to retrive and return Lead objects as lists of JSON # objects. # # process_date_range() The method that checks and processes any date filters. # # process_bulk_pages() The method that checks and processes all Page filters # applied to the bulk_extract() method. This method also # initiates the call to the bulk_get_pages() method. # # process_bulk_leads() The method that checks and processes all Lead filters # applied to the bulk_extract() method. This method also # initiates the call to the bulk_get_leads() method. # # bulk_extract() The method used to initiate any bulk extract for # Page and/or Lead objects from the Unbounce server. 
#************************************************************************************* # Imported Packages: # Name Description # ----------- ---------------------------------------------------------------- # Unbounce An API wrapper for the Unbounce server. Installation of this # library is required. Enter the following command from the # command line: 'pip install unbounce-python-api' # # pandas A package containing data structures and data analysis tools. # # datetime A package imported for manipulating date data type variables. # # time A package used for stalling methods that have the potential for # reaching Unbounce API limitations. #************************************************************************************* from unbounceapi.client import Unbounce import pandas as pd from datetime import datetime, timedelta import time class UnbounceConnection(): #************************************************************************************** # Constructor: __init__(self) # # Description # ------------------------------------------------------------------------------------ # This constructor instantiates the Unbounce API wrapper. This wrapper contains API # routes for easy retrieval and manipulation of data within the Unbounce server. # # For further detail please explore the following webpages: # Unbounce API guide: https://developer.unbounce.com/getting_started/ # Unbounce API wrapper (PyPi): https://pypi.org/project/unbounce-python-api/ # Unbounce API wrapper (GitHub): https://github.com/YoshioHasegawa/unbounce-python-api # # ------------------------------- Arguments ------------------------------------------ # Type Name Description # ------------- --------------------- -------------------------------------------- # string api_key The API key to your Ubounce account. # int get_timeout The timeout limit for the get method in the # underlying API wrapper (unbounceapi.client). # int bulk_extract_timeout The runtime limit for this class' bulk get # methods. 
#************************************************************************************* def __init__(self, api_key, get_timeout=600, bulk_extract_timeout=3600): # Initialize the timeout limit for the underlying API wrapper's get method. self.get_timeout_time = get_timeout # Initialize the bulk extract timeout limit. This time limit is in seconds and, # will timeout the bulk get methods (Pages/Leads) if the time limit has been # reached. self.extract_timeout_time = bulk_extract_timeout # Establish connection with Unbounce, via Unbounce API wrapper. # The timeout limit is the limit for a given get method call. self.client = Unbounce(api_key, timeout_limit=self.get_timeout_time) #************************************************************************************* # Method: bulk_get_pages(self, string=None, string=None, list=None, list=None, # string=None) # # Description # ------------------------------------------------------------------------------------ # This method makes the calls to the Unbounce API wrapper in order to retrieve Page # objects from the Unbounce server. Specifically, this method makes an initial call # to retrieve Page objects. Then, continues to make further calls to retrive potential # Page objects that were not extracted initially due to a 1000 object/request limit. # All request calls are done within a while-loop using the last Page's 'createdAt' # data point from the previous call, to know where the previous call left off. # DataFrame comparisons are done to remove any duplicate rows. An iteration variable # is also used to record number of calls to handle a 500 requests/min limit. Finally, # filters passed to this method are handled within the get requests and, at the end of # the method with DataFrame slicing and indexing techniques. # # RETurn # Type Description # ---------- ----------------------------------------------------------------------- # list The final list consisting of Pages objects requested by the user. 
# Page objects are formatted as JSON objects. # # ------------------------------- Arguments ------------------------------------------ # Type Name Description # ------------- -------------- --------------------------------------------------- # string date_start To filter Page objects by Pages created later than # or equal to a given date (inclusive). # string date_end To filter Page objects by Pages created earlier # than a given date (exclusive). # list domain_list To filter Page objects by Page domain. # list page_id_list To filter Page objects by Page ID(s). # string state To filter Page objects by Page publishing status. #************************************************************************************* def bulk_get_pages(self, date_start=None, date_end=None, domain_list=None, page_id_list=None, state=None): # Initialize method starting time to set a max runtime limit. START_TIME = datetime.now() print('- Extracting Pages -') # Initialize empty DataFrame for Initial Page objects to be appended, # as well as additional Page objects requested. pages_df = pd.DataFrame() # Initialize boolean variable that enters/continues the while-loop below. cont = True # Initialize iteration variable to handle the 1000 object/request limitation. itr = 0 # Initialize iteration variable to keep count of requests. call_counter = 1 # Enter the while loop and run request calls for Page objects until all desired objects are extracted. # Due to the 1000 object/request limitation, we will run an initial request call, # re-initialize the starting date as the last Page objects created date (line 150) and, # continue to run the request call iteratively until we have extracted all desired objects. while cont: print('\nPage Request Call #: {0}'.format(call_counter)) # If the current time less of the method start time is greater than the given extract runtime limit # (default is 1 hour), raise an error with an explanation. 
This error is included to limit the runtime # of this method, in case we encounter a Recursion Error. if (datetime.now() - START_TIME).seconds > self.extract_timeout_time: raise RecursionError('The max run-time limit of {0} seconds has been reached.'.format(self.extract_timeout_time)) print('Page Extract Starting Date: {0}'.format(date_start)) print('Page Extract Ending Date: {0}'.format(date_end)) # Run request call for Page objects, with the given date range. pages_meta_data = self.client.pages.get_pages(_from=date_start, to=date_end, limit='1000', with_stats='true') # Initialize new Page specific data as DataFrame. new_pages_df = pd.DataFrame(pages_meta_data['pages']) # Iterate requests counter. call_counter += 1 print(' > New Pages Extracted: {0}'.format(len(new_pages_df))) # If the Pages extracted is empty, no Pages were returned. Thus, break from this while-loop. # This will stop us from appending an empty list to the final DataFrame. if pages_meta_data['pages'] == []: print('\n>> No new Pages extracted. Exiting request loop...') break # Append the new Page objects to the Pages DataFrame. pages_df = pages_df.append(new_pages_df) # Initialize duplicate Page objects as DataFrame and drop duplicates from Pages DataFrame. pages_dropped = pages_df[pages_df.duplicated(subset=['id'], keep='first')] pages_df.drop_duplicates(subset=['id'], keep='first', inplace=True) print(' > Duplicated Pages Dropped: {0}'.format(len(pages_dropped))) print(' > Total Pages: {0}'.format(len(pages_df))) # Re-initialize the starting date variable as the Last Page's created date. # This tells us where we left off in case we hit the 1000 object/request limitation. # If needed, this while-loop will continue until we pull zero net new Page objects. # Note that the starting date is inclusive. Thus, we should expect at least one duplicated Page object. 
date_start = pages_meta_data['pages'][-1]['createdAt'] # If the new Page objects pulled equals the Pages dropped, # we know that no net new Pages were pulled and all desired Pages were extracted. # Thus, exit the while-loop (cont = False). if new_pages_df.equals(pages_dropped): print('\n>> No net new Pages extracted. Exiting request loop...') cont = False # Increment the iteration variable to count requests made. # If this variable reached 495, the program will pause for 1 minute. # This is implemented to account for the 500 requests/minute limitation. itr += 1 if itr == 495: time.sleep(60) itr = 0 # Rename the Page ID column from 'id' to 'page_id'. # This
<gh_stars>1-10 """ Models the proteome (non-pride) object. """ import json import logging import os import string from cutlass.iHMPSession import iHMPSession from cutlass.Base import Base from cutlass.aspera import aspera from cutlass.Util import * # pylint: disable=W0703, C1801 # Create a module logger named after the module module_logger = logging.getLogger(__name__) # Add a NullHandler for the case if no logging is configured by the application module_logger.addHandler(logging.NullHandler()) class ProteomeNonPride(Base): """ The class encapsulates iHMP proteome non-pride data. It contains all the fields required to save a such an object in OSDF. Attributes: namespace (str): The namespace this class will use in OSDF. """ namespace = "ihmp" aspera_server = "aspera.ihmpdcc.org" def __init__(self, *args, **kwargs): """ Constructor for the ProteomeNonPride class. This initializes the fields specific to the class, and inherits from the Base class. Args: None """ self.logger = logging.getLogger(self.__module__ + '.' 
+ self.__class__.__name__) self.logger.addHandler(logging.NullHandler()) self._id = None self._version = None self._links = {} self._tags = [] # Required properties self._comment = None self._data_processing_protocol = None self._processing_method = None self._study = None self._subtype = None self._local_other_file = None self._local_peak_file = None self._local_protmod_file = None self._local_raw_file = None # Optional properties self._analyzer = None self._date = None self._detector = None self._exp_description = None self._instrument_name = None self._other_url = [''] self._peak_url = [''] self._private_files = None self._protmod_format = None self._protmod_url = [''] self._protocol_name = None self._protocol_steps = None self._raw_url = [''] self._reference = None self._search_engine = None self._short_label = None self._software = None self._source = None self._title = None super(ProteomeNonPride, self).__init__(*args, **kwargs) @property def analyzer(self): """ str: Returns the single or multiple component setting of the mass analyzer. """ self.logger.debug("In 'analyzer' getter.") return self._analyzer @analyzer.setter @enforce_string def analyzer(self, analyzer): """ The setter for single or multiple components of the mass analyzer. Args: analyzer (str): The single/multiple components of the mass analyzer. Returns: None """ self.logger.debug("In 'analyzer' setter.") self._analyzer = analyzer @property def comment(self): """ str: A descriptive comment for the proteome. """ self.logger.debug("In 'comment' getter.") return self._comment @comment.setter @enforce_string def comment(self, comment): """ The setter for a descriptive comment for the proteome. Args: comment (str): The comment text. Returns: None """ self.logger.debug("In 'comment' setter.") self._comment = comment @property def data_processing_protocol(self): """ str: A short description of the data processing protocol followed to generate associated data sets. 
""" self.logger.debug("In 'data_processing_protocol' getter.") return self._data_processing_protocol @data_processing_protocol.setter @enforce_string def data_processing_protocol(self, data_processing_protocol): """ The setter for the data processing protocol. Args: data_processing_protocol (str): a short description of the protocol. Returns: None """ self.logger.debug("In 'data_processing_protocol' setter.") self._data_processing_protocol = data_processing_protocol @property def date(self): """ str: The date on which the data were generated. """ self.logger.debug("In 'date' getter.") return self._date @date.setter @enforce_past_date def date(self, date): """ The setter for the date. Args: date (str): The date on which the data were generated. Returns: None """ self.logger.debug("In 'date' setter.") self._date = date @property def detector(self): """ str: The detector type used. """ self.logger.debug("In 'detector' getter.") return self._detector @detector.setter @enforce_string def detector(self, detector): """ The setter for the detector type used. Args: detector (str): The detector type used. Returns: None """ self.logger.debug("In 'detector' setter.") self._detector = detector @property def exp_description(self): """ str: The goals and description of the study. """ self.logger.debug("In 'exp_description' getter.") return self._exp_description @exp_description.setter @enforce_string def exp_description(self, exp_description): """ The setter for the goals and description of the study. Args: exp_description (str): The goals and description of the study. Returns: None """ self.logger.debug("In 'exp_description' setter.") self._exp_description = exp_description @property def instrument_name(self): """ str: The instrument make, model, significant customizations. 
""" self.logger.debug("In 'instrument_name' getter.") return self._instrument_name @instrument_name.setter @enforce_string def instrument_name(self, instrument_name): """ The descriptive name of the instrument make, model, significant customizations. Args: instrument_name (str): Instrument make, model, etc. Returns: None """ self.logger.debug("In 'instrument_name' setter.") self._instrument_name = instrument_name @property def local_other_file(self): """ str: Path to the local proteome 'other' file to upload. """ self.logger.debug("In 'local_other_file' getter.") return self._local_other_file @local_other_file.setter @enforce_string def local_other_file(self, local_other_file): """ The setter for the local proteome 'other' file. Args: local_other_file (str): The path to the local 'other' file. Returns: None """ self.logger.debug("In 'local_other_file' setter.") self._local_other_file = local_other_file @property def local_peak_file(self): """ str: Path to the local proteome 'peak' file to upload. """ self.logger.debug("In 'local_peak_file' getter.") return self._local_peak_file @local_peak_file.setter @enforce_string def local_peak_file(self, local_peak_file): """ The setter for the local proteome 'peak' file. Args: local_peak_file (str): The path to the local 'peak' file. Returns: None """ self.logger.debug("In 'local_peak_file' setter.") self._local_peak_file = local_peak_file @property def local_protmod_file(self): """ str: Path to the local proteome 'protmod' file to upload. """ self.logger.debug("In 'local_protmod_file' getter.") return self._local_protmod_file @local_protmod_file.setter @enforce_string def local_protmod_file(self, local_protmod_file): """ The setter for the local proteome 'protmod' file. Args: local_protmod_file (str): The path to the local 'protmod' file. 
Returns: None """ self.logger.debug("In 'local_protmod_file' setter.") self._local_protmod_file = local_protmod_file @property def local_raw_file(self): """ str: Path to the local proteome 'raw' file to upload. """ self.logger.debug("In 'local_raw_file' getter.") return self._local_raw_file @local_raw_file.setter @enforce_string def local_raw_file(self, local_raw_file): """ The setter for the local proteome 'raw' file. Args: local_raw_file (str): The path to the local 'raw' file. Returns: None """ self.logger.debug("In 'local_raw_file' setter.") self._local_raw_file = local_raw_file @property def private_files(self): """ bool: Whether this object describes private data that should not be uploaded to the DCC. Defaults to false. """ self.logger.debug("In 'private_files' getter.") return self._private_files @private_files.setter @enforce_bool def private_files(self, private_files): """ The setter for the private files flag to denote this object describes data that should not be uploaded to the DCC. Args: private_files (bool): Returns: None """ self.logger.debug("In 'private_files' setter.") self._private_files = private_files @property def processing_method(self): """ str: The description of the default peak processing method. """ self.logger.debug("In 'processing_method' getter.") return self._processing_method @processing_method.setter @enforce_string def processing_method(self, processing_method): """ The description of the default peak processing method. Args: processing_method (str): Default peak processing method. Returns: None """ self.logger.debug("In 'processing_method' setter.") self._processing_method = processing_method @property def protmod_format(self): """ str: The file format of the protein modifications file. """ self.logger.debug("In 'protmod_format' getter.") return self._protmod_format @protmod_format.setter @enforce_string def protmod_format(self, protmod_format): """ The file format of the protein modifications file. 
Args: protmod_format (str): File format of the protein modifications file. Returns: None """ self.logger.debug("In 'protmod_format' setter.") self._protmod_format = protmod_format @property def protocol_name(self): """ str: The protocol name with versioning. """ self.logger.debug("In 'protocol_name' getter.") return self._protocol_name @protocol_name.setter @enforce_string def protocol_name(self, protocol_name): """ The protocol name with versioning, ideally pointing to a URL. Args: protocol_name (str): Protocol title with versioning. Returns: None """ self.logger.debug("In 'protocol_name' setter.") self._protocol_name = protocol_name @property def protocol_steps(self): """ str: Description of the sample processing steps that have been performed. """ self.logger.debug("In 'protocol_steps' getter.") return self._protocol_steps @protocol_steps.setter @enforce_string def protocol_steps(self, protocol_steps): """ Description of the sample processing steps that have been performed. Args: protocol_steps (str): sample processing steps. Returns: None """ self.logger.debug("In 'protocol_steps' setter.") self._protocol_steps = protocol_steps @property def reference(self): """ str: Link to literature citation. """ self.logger.debug("In 'reference' getter.") return self._reference @reference.setter @enforce_string def reference(self, reference): """ Link to literature citation for which this experiment provides supporting evidence. Args: reference (str): links to literature citations. Returns: None """ self.logger.debug("In 'reference' setter.") self._reference = reference @property def search_engine(self): """ str: Name of the protein search engine used, e.g. Mascot 2.2.1. """ self.logger.debug("In 'search_engine' getter.") return self._search_engine @search_engine.setter @enforce_string def search_engine(self, search_engine): """ Name of the protein search engine used, e.g. Mascot 2.2.1. Args: search_engine (str): search engine name. 
Returns: None """ self.logger.debug("In 'search_engine' setter.") self._search_engine = search_engine @property def short_label(self): """ str: Nomenclature used to organize experiments. """ self.logger.debug("In 'short_label' getter.") return self._short_label @short_label.setter @enforce_string def short_label(self, short_label): """ Nomenclature used to group/organize experiments in a meaningful way, e.g. Control Exp II. Args: short_label (str): the nomenclature. Returns: None """ self.logger.debug("In 'short_label' setter.") self._short_label = short_label @property def software(self): """ str: All software used during data acquisition and data processing, including the software that produced the peak list, with versions. """ self.logger.debug("In 'software' getter.") return self._software @software.setter @enforce_string def software(self, software): """ All software used during data acquisition and data processing, including the software that produced the peak list, with versions. Args: software (str): software used. Returns: None """ self.logger.debug("In 'software'
""" Base settings to build other settings files upon. """ from datetime import timedelta from pathlib import Path import environ from config.huey_app import huey ROOT_DIR = Path(__file__).parents[2] # boundlexx/) APPS_DIR = ROOT_DIR / "boundlexx" env = environ.Env() READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False) if READ_DOT_ENV_FILE: # OS environment variables take precedence over variables from .env env.read_env(str(ROOT_DIR / ".env")) # GENERAL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = env.bool("DJANGO_DEBUG", False) ENABLE_PROMETHEUS = env.bool("ENABLE_PROMETHEUS", default=False) # Local time zone. Choices are # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # though not all of them may be available with every OS. # In Windows, this must be set to your system time zone. TIME_ZONE = env("TZ", default="UTC") # https://docs.djangoproject.com/en/dev/ref/settings/#language-code LANGUAGE_CODE = "en-us" # https://docs.djangoproject.com/en/dev/ref/settings/#site-id SITE_ID = 1 # https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n USE_I18N = True # https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n USE_L10N = True # https://docs.djangoproject.com/en/dev/ref/settings/#use-tz USE_TZ = True # https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths LOCALE_PATHS = [str(ROOT_DIR / "locale")] CACHES = { "default": { "BACKEND": "redis_lock.django_cache.RedisCache", "LOCATION": env("CACHE_URL"), "OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient"}, } } SESSION_ENGINE = "django.contrib.sessions.backends.cached_db" SESSION_CACHE_ALIAS = "default" # DATABASES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = {"default": env.db("DATABASE_URL")} DATABASES["default"]["ATOMIC_REQUESTS"] = True if 
ENABLE_PROMETHEUS: DATABASES["default"]["ENGINE"] = "django_prometheus.db.backends.postgresql" CACHES["default"]["BACKEND"] = "boundlexx.utils.backends.RedisCache" AZURE_ACCOUNT_NAME = env("AZURE_ACCOUNT_NAME", default=None) AZURE_ACCOUNT_KEY = env("AZURE_ACCOUNT_KEY", default=None) AZURE_CONTAINER = env("AZURE_CONTAINER", default=None) AZURE_CONTAINER_PREFIX = env("AZURE_CONTAINER_PREFIX", default="local-") AZURE_CUSTOM_DOMAIN = env("AZURE_CUSTOM_DOMAIN", default=None) AZURE_FILENAME_PREFIX = env("AZURE_FILENAME_PREFIX", default=None) AZURE_CDN_RESOURCE_GROUP = env("AZURE_CDN_RESOURCE_GROUP", default=None) AZURE_CDN_PROFILE_NAME = env("AZURE_CDN_PROFILE_NAME", default=None) AZURE_CDN_ENDPOINT_NAME = env("AZURE_CDN_ENDPOINT_NAME", default=None) AZURE_CDN_DYNAMIC_PURGE = env.bool("AZURE_CDN_DYNAMIC_PURGE", False) AZURE_STATIC_CDN_RESOURCE_GROUP = env( "AZURE_STATIC_CDN_RESOURCE_GROUP", default=AZURE_CDN_RESOURCE_GROUP ) AZURE_STATIC_CDN_PROFILE_NAME = env( "AZURE_STATIC_CDN_PROFILE_NAME", default=AZURE_CDN_PROFILE_NAME ) AZURE_STATIC_CDN_ENDPOINT_NAME = env( "AZURE_STATIC_CDN_ENDPOINT_NAME", default=AZURE_CDN_ENDPOINT_NAME ) AZURE_CLIENT_ID = env("AZURE_CLIENT_ID", default=None) AZURE_CLIENT_SECRET = env("AZURE_CLIENT_SECRET", default=None) AZURE_TENANT_ID = env("AZURE_TENANT_ID", default=None) AZURE_SUBSCRIPTION_ID = env("AZURE_SUBSCRIPTION_ID", default=None) if AZURE_ACCOUNT_NAME and AZURE_ACCOUNT_KEY and AZURE_CONTAINER: DEFAULT_FILE_STORAGE = "storages.backends.azure_storage.AzureStorage" # URLS # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf ROOT_URLCONF = "config.urls" # https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application WSGI_APPLICATION = "config.wsgi.application" # APPS # ------------------------------------------------------------------------------ ADMIN_APPS = [ # "boundlexx.admin.theme", # "admin_tools", # "admin_tools.theming", # 
"admin_tools.menu", # "admin_tools.dashboard", "boundlexx.admin.apps.BoundlexxAdminConfig", # "django.contrib.admin", ] DJANGO_APPS = [ "django.contrib.auth", "polymorphic", "django.contrib.contenttypes", "django.contrib.sessions", "django.contrib.sites", "django.contrib.messages", "django.contrib.staticfiles", "django.contrib.humanize", "django.forms", ] THIRD_PARTY_APPS = [ "huey.contrib.djhuey", "django_celery_beat", "django_celery_results", "rest_framework", "rest_framework.authtoken", "django_json_widget", "corsheaders", "crispy_forms", "robots", ] LOCAL_APPS = [ "boundlexx.users", "boundlexx.celery", "boundlexx.boundless", "boundlexx.api", "boundlexx.notifications", "boundlexx.ingest", ] # https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps INSTALLED_APPS = ADMIN_APPS + DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS if ENABLE_PROMETHEUS: INSTALLED_APPS = ["django_prometheus"] + INSTALLED_APPS # MIGRATIONS # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules MIGRATION_MODULES = {"sites": "boundlexx.contrib.sites.migrations"} # AUTHENTICATION # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends AUTHENTICATION_BACKENDS = [ "django.contrib.auth.backends.ModelBackend", "boundlexx.boundless.backends.BoundlessAuthenticationBackend", ] DEFAULT_AUTO_FIELD = "django.db.models.AutoField" # https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model AUTH_USER_MODEL = "users.User" # https://docs.djangoproject.com/en/dev/ref/settings/#login-url LOGIN_URL = "account_login" # PASSWORDS # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers PASSWORD_HASHERS = [ # https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django 
"django.contrib.auth.hashers.Argon2PasswordHasher", "django.contrib.auth.hashers.PBKDF2PasswordHasher", "django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher", "django.contrib.auth.hashers.BCryptSHA256PasswordHasher", ] # https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator" # noqa: E501 }, {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"}, { "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator" # noqa: E501 }, { "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator" # noqa: E501 }, ] # MIDDLEWARE # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#middleware MIDDLEWARE = [ "django.middleware.security.SecurityMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "corsheaders.middleware.CorsMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware", # "django.middleware.common.BrokenLinkEmailsMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", ] if ENABLE_PROMETHEUS: MIDDLEWARE = ( ["django_prometheus.middleware.PrometheusBeforeMiddleware"] + MIDDLEWARE + ["django_prometheus.middleware.PrometheusAfterMiddleware"] ) # STATIC # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#static-root STATIC_ROOT = str(ROOT_DIR / "staticfiles") # https://docs.djangoproject.com/en/dev/ref/settings/#static-url STATIC_URL = env("STATIC_URL", default="/static/") # https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS STATICFILES_DIRS = [str(APPS_DIR / "static")] # 
https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders STATICFILES_FINDERS = [ "django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder", ] # MEDIA # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#media-root MEDIA_ROOT = str(APPS_DIR / "media") # https://docs.djangoproject.com/en/dev/ref/settings/#media-url MEDIA_URL = "/media/" # TEMPLATES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#templates TEMPLATES = [ { # https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND "BACKEND": "django.template.backends.django.DjangoTemplates", # https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs "DIRS": [str(APPS_DIR / "templates")], "OPTIONS": { # https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types "loaders": [ "django.template.loaders.filesystem.Loader", "django.template.loaders.app_directories.Loader", # "admin_tools.template_loaders.Loader", ], # https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors "context_processors": [ "django.template.context_processors.debug", "django.template.context_processors.request", "django.contrib.auth.context_processors.auth", "django.template.context_processors.i18n", "django.template.context_processors.media", "django.template.context_processors.static", "django.template.context_processors.tz", "django.contrib.messages.context_processors.messages", "boundlexx.utils.context_processors.settings_context", ], }, } ] # https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer FORM_RENDERER = "django.forms.renderers.TemplatesSetting" # FIXTURES # ------------------------------------------------------------------------------ # 
https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),) # SECURITY # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly SESSION_COOKIE_HTTPONLY = True # https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly CSRF_COOKIE_HTTPONLY = True # https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter SECURE_BROWSER_XSS_FILTER = True # https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options X_FRAME_OPTIONS = "DENY" # EMAIL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#email-backend EMAIL_BACKEND = env( "DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend", ) # https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout EMAIL_TIMEOUT = 5 # ADMIN # ------------------------------------------------------------------------------ # Django Admin URL. ADMIN_URL = "admin/" # https://docs.djangoproject.com/en/dev/ref/settings/#admins ADMINS = [("Angellus", "<EMAIL>")] # https://docs.djangoproject.com/en/dev/ref/settings/#managers MANAGERS = ADMINS CORS_ALLOW_ALL_ORIGINS = True # LOGGING # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#logging # See https://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING = { "version": 1, "disable_existing_loggers": True, "filters": { "require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}, "require_debug_true": {"()": "django.utils.log.RequireDebugTrue"}, }, "formatters": { "colored": { "()": "coloredlogs.ColoredFormatter", "format": "%(levelname)s %(asctime)s %(name)s %(module)s " "%(process)d %(thread)d %(message)s", # noqa E501 }, "huey_colored": { "()": "coloredlogs.ColoredFormatter", "format": "%(levelname)s %(asctime)s %(name)s %(threadName)s %(message)s", }, }, "handlers": { "console": { "level": "INFO", "class": "logging.StreamHandler", "formatter": "colored", }, "huey": { "level": "INFO", "class": "logging.StreamHandler", "formatter": "huey_colored", }, "django.server": { "level": "INFO", "class": "logging.StreamHandler", "formatter": "colored", }, }, "loggers": { "root": {"level": "INFO", "handlers": ["console"]}, "ingest": {"level": "INFO", "handlers": ["console"]}, "azure": {"level": "WARNING"}, "huey": {"handlers": ["huey"], "propagate": False}, "django": {"handlers": ["console"], "level": "INFO"}, "django.server": { "handlers": ["django.server"], "level": "INFO", "propagate": False, }, }, } # Celery # ------------------------------------------------------------------------------ CELERY_ENABLE_UTC = True CELERY_BROKER_URL = env("CELERY_BROKER_URL") CELERY_RESULT_BACKEND = "django-db" CELERY_RESULT_EXTENDED = True CELERY_RESULT_EXPIRES = timedelta(days=7) CELERY_CACHE_BACKEND = "default" CELERY_ACCEPT_CONTENT = ["json"] CELERY_TASK_SERIALIZER = "json" CELERY_TASK_TRACK_STARTED = True CELERY_RESULT_SERIALIZER = "json" # http://docs.celeryproject.org/en/latest/userguide/configuration.html#beat-scheduler CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler" # Huey # ------------------------------------------------------------------------------ HUEY = huey # Your stuff... 
# ------------------------------------------------------------------------------ REST_FRAMEWORK = { # Use Django's standard `django.contrib.auth` permissions, # or allow read-only access for unauthenticated users. "DEFAULT_PERMISSION_CLASSES": [ "rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly" ], "DEFAULT_AUTHENTICATION_CLASSES": [ "rest_framework.authentication.SessionAuthentication", "rest_framework.authentication.TokenAuthentication", ], "DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.NamespaceVersioning", # noqa "ALLOWED_VERSIONS": ["v1", "v2"], "DEFAULT_PAGINATION_CLASS": "boundlexx.api.pagination.MaxLimitOffsetPagination", # noqa "DEFAULT_FILTER_BACKENDS": [ "django_filters.rest_framework.DjangoFilterBackend" ], # noqa "PAGE_SIZE": 100, "DEFAULT_RENDERER_CLASSES": [ "rest_framework.renderers.JSONRenderer", "rest_framework_msgpack.renderers.MessagePackRenderer", "rest_framework.renderers.BrowsableAPIRenderer", ], "DEFAULT_THROTTLE_RATES": { "user": "1/minute", "anon": f"{int(env('API_RATE_LIMIT', default=10))}/second", }, "DEFAULT_THROTTLE_CLASSES": [ "rest_framework.throttling.AnonRateThrottle", ], "DEFAULT_CONTENT_NEGOTIATION_CLASS": "boundlexx.api.negotiation.IgnoreClientContentNegotiation", # noqa: E501 } REST_FRAMEWORK_EXTENSIONS = { "DEFAULT_PARENT_LOOKUP_KWARG_NAME_PREFIX": "", } CRISPY_TEMPLATE_PACK = "bootstrap4" SERVE_STATIC_FILES_DEV = env.bool("SERVE_STATIC_FILES_DEV", False) SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_SSL", "on") CACHE_DURATION = 60 * 60 * 24 ROBOTS_USE_SITEMAP = False EMOJI_API_KEY = env("EMOJI_API_KEY", default=None) # base URL for discovery server BOUNDLESS_API_URL_BASE = env( "BOUNDLESS_API_URL_BASE", default="http://host.docker.internal:8950" ) BOUNDLESS_ACCOUNTS_BASE_URL = "https://account.playboundless.com" BOUNDLESS_USERNAMES = env.list("BOUNDLESS_USERNAMES") BOUNDLESS_PASSWORDS = env.list("BOUNDLESS_PASSWORDS", default=[]) BOUNDLESS_DS_REQUIRES_AUTH = env.bool("BOUNDLESS_DS_REQUIRES_AUTH", 
default=False) # number of seconds between calls to each world BOUNDLESS_API_WORLD_DELAY = float(env("BOUNDLESS_API_WORLD_DELAY", default=1.0)) BOUNDLESS_API_DS_DELAY = float(env("BOUNDLESS_API_DS_DELAY", default=1.0)) BOUNDLESS_LOCATION = "/boundless/" BOUNDLESS_WORLDS_LOCATIONS = "/boundless-worlds/" BOUNDLESS_ICONS_LOCATION = "/boundless-icons/" BOUNDLESS_ICONS_MAPPING = { 11632: "ITEM_CROPDROP_RICE", 11633: "ITEM_CROPDROP_WHEAT", 11634: "ITEM_CROPDROP_OATS", 11641: "ITEM_CROPDROP_NUTS", 11646: "ITEM_CROPDROP_FUEL_ENRICHER", 11645: "ITEM_CROPDROP_FUEL", 11647: "ITEM_CROPDROP_DRIPSTONE", 10703: "DECORATION_HALLOWEEN_BAT", 13718: "DECORATION_HALLOWEEN_SPIDER", 13812: "TOOL_SEASONAL_TOTEM_CANDY", 11635: "FOOD_EARTHYAM_BASE_RAW", 11637: "FOOD_EARTHYAM_EXOTIC_RAW", 11636: "FOOD_EARTHYAM_WAXY_RAW", 11642: "FOOD_STARBERRY_RAW", 11643: "FOOD_STARBERRY_GLOSSY_RAW", 11644: "FOOD_STARBERRY_JUICY_RAW", } BOUNDLESS_DEFAULT_COLORS = { # misc. 33566: 228, # oortmas 13812: 24, 33570: 24, # goo 11647: 228, 32983: 228, 32993: 228, # halloween 10702: 1, 10703: 1, 13717: 1, 13718: 1, # game defined 10850: 28, 9555: 87, 9838: 25, 9839: 82, 9840: 60, 9841: 93, 10775: 93, 10774: 25, 10788: 25, 10789: 93, 10790: 93, 10792: 60, 10793: 93, 10791: 25, 3085: 82, 6157: 87, 13: 108, 10814: 84, 10870: 28, 10842: 208, 10806: 204, 10866: 28, 10846: 82, 10779: 25, 10778: 93, 10781: 93, 10776: 60, 10780: 93, 10777: 82, 10798: 63, 10794: 111, 10802: 102, 10810: 84, 11588: 28, 11592: 82, 11584: 84, 10854: 28, 10858: 28, 10862: 28, 10830: 82, 10822: 222, 10818: 87, 10838: 28, 10834: 82, 10826: 55, } # timeout for making an API request BOUNDLESS_API_TIMEOUT = 5 BOUNDLESS_AUTH_AUTO_CREATE = True # minutes BOUNDLESS_API_KEY = env("BOUNDLESS_API_KEY", default=None) BOUNDLESS_MAX_WORLDS_PER_POLL = int(env("BOUNDLESS_MAX_WORLDS_PER_POLL", default=100)) BOUNDLESS_MAX_PERM_WORLDS_PER_PRICE_POLL = int( env("BOUNDLESS_MAX_PERM_WORLDS_PER_PRICE_POLL", default=10) ) BOUNDLESS_MAX_SOV_WORLDS_PER_PRICE_POLL = 
int( env("BOUNDLESS_MAX_SOV_WORLDS_PER_PRICE_POLL", default=100) ) BOUNDLESS_MIN_ITEM_DELAY = int(env("BOUNDLESS_MIN_ITEM_DELAY", default=20)) BOUNDLESS_BASE_ITEM_DELAY = int(env("BOUNDLESS_BASE_ITEM_DELAY", default=60)) BOUNDLESS_POPULAR_ITEM_DELAY_OFFSET = int( env("BOUNDLESS_POPULAR_ITEM_DELAY_OFFSET", default=5) ) BOUNDLESS_INACTIVE_ITEM_DELAY_OFFSET = int( env("BOUNDLESS_INACTIVE_ITEM_DELAY_OFFSET", default=30) ) BOUNDLESS_MAX_ITEM_DELAY = int(env("BOUNDLESS_MAX_ITEM_DELAY", default=720)) BOUNDLESS_DEAD_ITEM_MULTIPLIER = int(env("BOUNDLESS_DEAD_ITEM_MULTIPLIER", default=1)) BOUNDLESS_EXO_SEARCH_RADIUS = 10 BOUNDLEXX_WORLD_SEARCH_OFFSET = timedelta(days=60) BOUNDLESS_MAX_WORLD_ID = 5000 BOUNDLESS_MAX_SCAN_CHUNK = 50 BOUNDLESS_EXO_EXPIRED_BASE_ID = 2000000000 BOUNDLESS_FORUM_BAD_TOPICS = [ 28861, 28592, 28593, 38617, 50102, 42678, 50801, 51538, 50754, 51356, 51374, 51239, 51064, ] BOUNDLESS_BLACKLISTED_ITEMS = [ 10649, # dormant meteorite 33102, # coin 11077, # LED1 11081, # SPECIAL_BLINK_LED2 11085, # SPECIAL_BLINK_LED3 11089, # SPECIAL_BLINK_LED4 11093, # SPECIAL_BLINK_LED5 11097, # SPECIAL_BLINK_LED6 11101, # SPECIAL_BLINK_LED7 11105, # SPECIAL_BLINK_LED8 11109, # SPECIAL_BLINK_LED9 11113, # SPECIAL_BLINK_LED10 11117, # SPECIAL_BLINK_LED11 11121, # SPECIAL_BLINK_LED12 11125, # SPECIAL_BLINK_LED13 11129, # SPECIAL_BLINK_LED14 11133, # SPECIAL_BLINK_LED15 11620, # clay tilled 11624, # peaty tilled 11628, # silty tilled 13560, # marble frieze1 13580, # marble frieze2 13600, # marble frieze3 ] BOUNDLESS_NO_SELL = [ 13, # verdant grass block item 3085, # barbed grass block item 6157, # gnarled grass block item ] BOUNDLESS_TESTING_FEATURES = env.bool("BOUNDLESS_TESTING_FEATURES", default=False) BOUNDLESS_FORUM_BASE_URL = env("BOUNDLESS_FORUM_BASE_URL") BOUNDLESS_FORUM_POST_USER = env("BOUNDLESS_FORUM_POST_USER") BOUNDLESS_FORUM_POST_KEY = env("BOUNDLESS_FORUM_POST_KEY") BOUNDLESS_FORUM_NAME_MAPPINGS = { "Ancient": "Ancient Wood Trunk", "Barbed": "Barbed Grass", 
"Branch Funnel": "Branch Funnel Fungus", "Clay": "Clay Soil", "Clustered Tongue": "Clustered Tongue Fungus", "Exotic": "Exotic Foliage", "Glow Cap": "Glow Cap Fungus", "Gnarled": "Gnarled Grass", "Igneous": "Igneous Rock", "Lush": "Lush Foliage", "Lustrous": "Lustrous Wood Trunk", "Metamorphic": "Metamorphic Rock", "Tar Spot": "Mottled Tar Spot Fungus", "Oortians Staff": "Oortian's Staff", "Peaty": "Peaty Soil", "Silty": "Silty Soil", "Sedimentary": "Sedimentary Rock", "Thorn": "Thorns", "Tinted-Burst": "Tinted-Burst Fungus", "Travellers Perch": "Traveller's Perch", "Verdant": "Verdant Grass", "Weeping Waxcap": "Weeping Waxcap Fungus", } BOUNDLESS_LANGUAGES = [ ("english", "English"), ("french", "French"), ("german", "German"), ("italian", "Italian"), ("spanish", "Spanish"), ] BOUNDLESS_WORLD_LIQUIDS = { "DEFAULT": ("Water", "Lava"), "LUSH": ("Water", "Water"), "METAL": ("Lava", "Lava"), "CHILL": ("Water", "Water"), "BURN": ("Lava", "Lava"), "DARKMATTER": ("Lava", "Water"), } BOUNDLESS_WORLD_TYPE_MAPPING = { 1: "LUSH", 2: "COAL", 3: "METAL", 4: "CORROSIVE", 5: "BURN", 6: "CHILL", 7: "TOXIC", 8: "SHOCK", 9: "BLAST", 10: "RIFT", 11: "BLINK", 12: "DARKMATTER", } BOUNDLESS_TRANSFORMATION_GROUPS = { 10842: [10806], # Ice 10806: [10842], # Glacier 10798: [10794, 10802], # Igneous Rock 10794: [10798, 10802], # Metamorphic Rock 10802: [10798, 10794], # Sedimentary Rock 11588: [11592, 11584], # Clay Soil 11592: [11588, 11584], # Peaty Soil 11584: [11588, 11592],
assert_is_type(positive_response, bool) assert_is_type(seed, int, None) assert_is_type(seed_for_column_types, int, None) check_frame_id(frame_id) if randomize and value: raise H2OValueError("Cannot set data to a `value` if `randomize` is true") if (categorical_fraction or integer_fraction) and not randomize: raise H2OValueError("`randomize` should be True when either categorical or integer columns are used.") # The total column fraction that the user has specified explicitly. This sum should not exceed 1. We will respect # all explicitly set fractions, and will auto-select the remaining fractions. frcs = [real_fraction, categorical_fraction, integer_fraction, binary_fraction, time_fraction, string_fraction] wgts = [0.5, 0.2, 0.2, 0.1, 0.0, 0.0] sum_explicit_fractions = sum(0 if f is None else f for f in frcs) count_explicit_fractions = sum(0 if f is None else 1 for f in frcs) remainder = 1 - sum_explicit_fractions if sum_explicit_fractions >= 1 + 1e-10: raise H2OValueError("Fractions of binary, integer, categorical, time and string columns should add up " "to a number less than 1.") elif sum_explicit_fractions >= 1 - 1e-10: # The fractions already add up to almost 1. No need to do anything (the server will absorb the tiny # remainder into the real_fraction column). pass else: # sum_explicit_fractions < 1 => distribute the remainder among the columns that were not set explicitly if count_explicit_fractions == 6: raise H2OValueError("Fraction of binary, integer, categorical, time and string columns add up to a " "number less than 1.") # Each column type receives a certain part (proportional to column's "weight") of the remaining fraction. 
sum_implicit_weights = sum(wgts[i] if frcs[i] is None else 0 for i in range(6)) for i, f in enumerate(frcs): if frcs[i] is not None: continue if sum_implicit_weights == 0: frcs[i] = remainder else: frcs[i] = remainder * wgts[i] / sum_implicit_weights remainder -= frcs[i] sum_implicit_weights -= wgts[i] for i, f in enumerate(frcs): if f is None: frcs[i] = 0 real_fraction, categorical_fraction, integer_fraction, binary_fraction, time_fraction, string_fraction = frcs parms = {"dest": frame_id if frame_id else py_tmp_key(append=h2oconn.session_id), "rows": rows, "cols": cols, "randomize": randomize, "categorical_fraction": categorical_fraction, "integer_fraction": integer_fraction, "binary_fraction": binary_fraction, "time_fraction": time_fraction, "string_fraction": string_fraction, # "real_fraction" is not provided, the backend computes it as 1 - sum(5 other fractions) "value": value, "real_range": real_range, "factors": factors, "integer_range": integer_range, "binary_ones_fraction": binary_ones_fraction, "missing_fraction": missing_fraction, "has_response": has_response, "response_factors": response_factors, "positive_response": positive_response, "seed": -1 if seed is None else seed, "seed_for_column_types": -1 if seed_for_column_types is None else seed_for_column_types, } H2OJob(api("POST /3/CreateFrame", data=parms), "Create Frame").poll() return get_frame(parms["dest"]) def interaction(data, factors, pairwise, max_factors, min_occurrence, destination_frame=None): """ Categorical Interaction Feature Creation in H2O. Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by the user. :param data: the H2OFrame that holds the target categorical columns. :param factors: factor columns (either indices or column names). :param pairwise: If True, create pairwise interactions between factors (otherwise create one higher-order interaction). Only applicable if there are 3 or more factors. :param max_factors: Max. 
number of factor levels in pair-wise interaction terms (if enforced, one extra catch-all factor will be made). :param min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms :param destination_frame: a string indicating the destination key. If empty, this will be auto-generated by H2O. :returns: :class:`H2OFrame` :examples: >>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv") >>> iris = iris.cbind(iris[4] == "Iris-setosa") >>> iris[5] = iris[5].asfactor() >>> iris.set_name(5,"C6") >>> iris = iris.cbind(iris[4] == "Iris-virginica") >>> iris[6] = iris[6].asfactor() >>> iris.set_name(6, name="C7") >>> two_way_interactions = h2o.interaction(iris, ... factors=[4,5,6], ... pairwise=True, ... max_factors=10000, ... min_occurrence=1) >>> from h2o.utils.typechecks import assert_is_type >>> assert_is_type(two_way_interactions, H2OFrame) >>> levels1 = two_way_interactions.levels()[0] >>> levels2 = two_way_interactions.levels()[1] >>> levels3 = two_way_interactions.levels()[2] >>> two_way_interactions """ assert_is_type(data, H2OFrame) assert_is_type(factors, [str, int]) assert_is_type(pairwise, bool) assert_is_type(max_factors, int) assert_is_type(min_occurrence, int) assert_is_type(destination_frame, str, None) factors = [data.names[n] if is_type(n, int) else n for n in factors] parms = {"dest": py_tmp_key(append=h2oconn.session_id) if destination_frame is None else destination_frame, "source_frame": data.frame_id, "factor_columns": [quoted(f) for f in factors], "pairwise": pairwise, "max_factors": max_factors, "min_occurrence": min_occurrence, } H2OJob(api("POST /3/Interaction", data=parms), "Interactions").poll() return get_frame(parms["dest"]) def as_list(data, use_pandas=True, header=True): """ Convert an H2O data object into a python-specific object. WARNING! This will pull all data local! 
If Pandas is available (and use_pandas is True), then pandas will be used to parse the data frame. Otherwise, a list-of-lists populated by character data will be returned (so the types of data will all be str). :param data: an H2O data object. :param use_pandas: If True, try to use pandas for reading in the data. :param header: If True, return column names as first element in list :returns: List of lists (Rows x Columns). :examples: >>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv") >>> from h2o.utils.typechecks import assert_is_type >>> res1 = h2o.as_list(iris, use_pandas=False) >>> assert_is_type(res1, list) >>> res1 = list(zip(*res1)) >>> assert abs(float(res1[0][9]) - 4.4) < 1e-10 and abs(float(res1[1][9]) - 2.9) < 1e-10 and \ ... abs(float(res1[2][9]) - 1.4) < 1e-10, "incorrect values" >>> res1 """ assert_is_type(data, H2OFrame) assert_is_type(use_pandas, bool) assert_is_type(header, bool) return H2OFrame.as_data_frame(data, use_pandas=use_pandas, header=header) def demo(funcname, interactive=True, echo=True, test=False): """ H2O built-in demo facility. :param funcname: A string that identifies the h2o python function to demonstrate. :param interactive: If True, the user will be prompted to continue the demonstration after every segment. :param echo: If True, the python commands that are executed will be displayed. :param test: If True, `h2o.init()` will not be called (used for pyunit testing). :example: >>> import h2o >>> h2o.demo("gbm") """ import h2o.demos as h2odemo assert_is_type(funcname, str) assert_is_type(interactive, bool) assert_is_type(echo, bool) assert_is_type(test, bool) demo_function = getattr(h2odemo, funcname, None) if demo_function and type(demo_function) is type(demo): demo_function(interactive, echo, test) else: print("Demo for %s is not available." % funcname) def load_dataset(relative_path): """Imports a data file within the 'h2o_data' folder. 
:examples: >>> fr = h2o.load_dataset("iris") """ assert_is_type(relative_path, str) h2o_dir = os.path.split(__file__)[0] for possible_file in [os.path.join(h2o_dir, relative_path), os.path.join(h2o_dir, "h2o_data", relative_path), os.path.join(h2o_dir, "h2o_data", relative_path + ".csv")]: if os.path.exists(possible_file): return upload_file(possible_file) # File not found -- raise an error! raise H2OValueError("Data file %s cannot be found" % relative_path) def make_metrics(predicted, actual, domain=None, distribution=None, weights=None, auc_type="NONE"): """ Create Model Metrics from predicted and actual values in H2O. :param H2OFrame predicted: an H2OFrame containing predictions. :param H2OFrame actuals: an H2OFrame containing actual values. :param domain: list of response factors for classification. :param distribution: distribution for regression. :param H2OFrame weights: an H2OFrame containing observation weights (optional). :param auc_type: auc For multinomial classification you have to specify which type of agregated AUC/AUCPR will be used to calculate this metric. Possibilities are MACRO_OVO, MACRO_OVR, WEIGHTED_OVO, WEIGHTED_OVR, NONE and AUTO (OVO = One vs. One, OVR = One vs. Rest). Default is "NONE" (AUC and AUCPR are not calculated). :examples: >>> fr = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip") >>> fr["CAPSULE"] = fr["CAPSULE"].asfactor() >>> fr["RACE"] = fr["RACE"].asfactor() >>> response = "AGE" >>> predictors = list(set(fr.names) - {"ID", response}) >>> for distr in ["gaussian", "poisson", "laplace", "gamma"]: ... print("distribution: %s" % distr) ... model = H2OGradientBoostingEstimator(distribution=distr, ... ntrees=2, ... max_depth=3, ... min_rows=1, ... learn_rate=0.1, ... nbins=20) ... model.train(x=predictors, ... y=response, ... training_frame=fr) ... predicted = h2o.assign(model.predict(fr), "pred") ... actual = fr[response] ... m0 = model.model_performance(train=True) ... 
m1 = h2o.make_metrics(predicted, actual, distribution=distr) ... m2 = h2o.make_metrics(predicted, actual) >>> print(m0) >>> print(m1) >>> print(m2) """ assert_is_type(predicted, H2OFrame) assert_is_type(actual, H2OFrame) assert_is_type(weights, H2OFrame, None) assert actual.ncol == 1, "`actual` frame should have exactly 1 column" assert_is_type(distribution, str, None) assert_satisfies(actual.ncol, actual.ncol == 1) assert_is_type(auc_type, str) allowed_auc_types = ["MACRO_OVO", "MACRO_OVR", "WEIGHTED_OVO", "WEIGHTED_OVR", "AUTO", "NONE"] assert auc_type in allowed_auc_types, "auc_type should be "+(" ".join([str(type) for type in allowed_auc_types])) if domain is None and any(actual.isfactor()): domain = actual.levels()[0] params = {"domain": domain, "distribution": distribution} if weights is not None: params["weights_frame"] = weights.frame_id params["auc_type"] = auc_type res = api("POST /3/ModelMetrics/predictions_frame/%s/actuals_frame/%s" % (predicted.frame_id, actual.frame_id), data=params) return res["model_metrics"] def flow(): """ Open H2O Flow in your browser. :examples: >>> python >>> import h2o >>> h2o.init() >>> h2o.flow() """ webbrowser.open(connection().base_url, new = 1) def _put_key(file_path, dest_key=None, overwrite=True): """ Upload given file into DKV and save it under give key as raw object. :param dest_key: name of destination key in DKV :param file_path: path to file to upload :return: key name if
self.datFile.write("%12.5e %12.5e %12.5e %12.5e \n" % (xtmp[1],xtmp[2], vtmp[1]/scale,vtmp[2]/scale)) self.datFile.write("\n \n") cmd = "set term x11 %i; plot \'%s\' index %i with vectors title \"%s\" \n" % (self.windowNumber(), self.datFilename, self.plotNumber(), title+" max=(%s,%s,%s) " % (max_u,max_v,max_w)+" : x-slice") self.cmdFile.write(cmd) self.viewerPipe.write(cmd) newPlot() newWindow() #y slice for ebN in range(vt.ebq_global[ckey].shape[0]): for k in range(vt.ebq_global[ckey].shape[1]): xtmp = vt.ebq_global['x'][ebN,k,:]; vtmp = vt.ebq_global[ckey][ebN,k,:] if abs(xtmp[0]-slice_y) < vt.mesh.h: self.datFile.write("%12.5e %12.5e %12.5e %12.5e \n" % (xtmp[1],xtmp[2], vtmp[1]/scale,vtmp[2]/scale)) self.datFile.write("\n \n") cmd = "set term x11 %i; plot \'%s\' index %i with vectors title \"%s\" \n" % (self.windowNumber(), self.datFilename, self.plotNumber(), title+" max=(%s,%s,%s) " % (max_u,max_v,max_w)+" : y-slice") self.cmdFile.write(cmd) self.viewerPipe.write(cmd) newPlot() newWindow() #z slice for ebN in range(vt.ebq_global[ckey].shape[0]): for k in range(vt.ebq_global[ckey].shape[1]): xtmp = vt.ebq_global['x'][ebN,k,:]; vtmp = vt.ebq_global[ckey][ebN,k,:] if abs(xtmp[0]-slice_z) < vt.mesh.h: self.datFile.write("%12.5e %12.5e %12.5e %12.5e \n" % (xtmp[1],xtmp[2], vtmp[1]/scale,vtmp[2]/scale)) self.datFile.write("\n \n") cmd = "set term x11 %i; plot \'%s\' index %i with vectors title \"%s\" \n" % (self.windowNumber(), self.datFilename, self.plotNumber(), title+" max=(%s,%s,%s) " % (max_u,max_v,max_w)+" : z-slice") self.cmdFile.write(cmd) self.viewerPipe.write(cmd) newPlot() newWindow() #3d #gnuplot elif self.viewerType == 'vtk': title = """ebq_global[%s]""" % (ckey,) if vt.nSpace_global == 1: max_u=max(numpy.absolute(numpy.take(vt.ebq_global[ckey],[0],2).flat)) L = max(vt.mesh.nodeArray[:,0]) scale = 10.*max_u/L if abs(scale) < 1.0e-12: scale = 1.0 npoints = vt.ebq_global['x'].shape[0]*vt.ebq_global['x'].shape[1] xvals = [vt.ebq_global['x'].flat[i*3+0] 
for i in range(npoints)] yvals = [vt.ebq_global[ckey].flat[i]/scale for i in range(npoints)] vtkViewers.viewScalar_1D(xvals,yvals,"x",ckey[0],title,self.windowNumber(), Pause=self.s.viewerPause,sortPoints=True) newPlot() newWindow() elif vt.nSpace_global == 2: max_u=max(numpy.absolute(numpy.take(vt.ebq_global[ckey],[0],2).flat)) max_v=max(numpy.absolute(numpy.take(vt.ebq_global[ckey],[1],2).flat)) L = min((max(vt.mesh.nodeArray[:,0]),max(vt.mesh.nodeArray[:,1]))) scale =10.0*max((max_u,max_v,1.0e-16))/L if abs(scale) < 1.e-12: scale = 1.0 npoints = vt.ebq_global['x'].shape[0]*vt.ebq_global['x'].shape[1] # x = [vt.ebq_global['x'].flat[i*3+0] for i in range(npoints)] # y = [vt.ebq_global['x'].flat[i*3+1] for i in range(npoints)] # z = [vt.ebq_global['x'].flat[i*3+2] for i in range(npoints)] xvals= [vt.ebq_global[ckey].flat[i*2+0]/scale for i in range(npoints)] yvals= [vt.ebq_global[ckey].flat[i*2+1]/scale for i in range(npoints)] nodes = vt.ebq_global['x'].flat[:] vtkViewers.viewVector_pointSet_2D(nodes,xvals,yvals,None,title,self.windowNumber(), arrows=True,streamlines=False, Pause=self.s.viewerPause) # vtkDisplay2DVectorMeshGeneric(x,y,z,xvals,yvals,None,title,self.windowNumber(), # arrows=True,streamlines=False, # Pause=self.flags['plotOptions']['vtk']['pause']) newPlot() newWindow() elif vt.nSpace_global == 3: max_u=max(numpy.absolute(numpy.take(vt.ebq_global[ckey],[0],2).flat)) max_v=max(numpy.absolute(numpy.take(vt.ebq_global[ckey],[1],2).flat)) max_w=max(numpy.absolute(numpy.take(vt.ebq_global[ckey],[2],2).flat)) L = min((max(vt.mesh.nodeArray[:,0]),max(vt.mesh.nodeArray[:,1]),max(vt.mesh.nodeArray[:,2]))) scale =10.0*max((max_u,max_v,max_w,1.0e-16))/L if abs(scale) < 1.e-12: scale = 1.0 npoints = vt.ebq_global['x'].shape[0]*vt.ebq_global['x'].shape[1] # x = [vt.ebq_global['x'].flat[i*3+0] for i in range(npoints)] # y = [vt.ebq_global['x'].flat[i*3+1] for i in range(npoints)] # z = [vt.ebq_global['x'].flat[i*3+2] for i in range(npoints)] nodes = 
vt.ebq_global['x'].flat[:] xvals= [vt.ebq_global[ckey].flat[i*3+0]/scale for i in range(npoints)] yvals= [vt.ebq_global[ckey].flat[i*3+1]/scale for i in range(npoints)] zvals= [vt.ebq_global[ckey].flat[i*3+2]/scale for i in range(npoints)] vtkViewers.viewVector_pointSet_3D(nodes,xvals,yvals,zvals,title,self.windowNumber(), arrows=True,streamlines=False, Pause=self.s.viewerPause) # vtkDisplay3DVectorMeshGeneric(x,y,z,xvals,yvals,zvals,title,self.windowNumber(), # arrows=True,streamlines=False, # Pause=self.flags['plotOptions']['vtk']['pause']) newPlot() newWindow() #def def plotVectorElementQuantity(self,ckey,mlvt,tsim,nVectorPlotPointsPerElement=1): """ plotting routine to look at vector quantity stored in global element quad dictionary q input : ckey --- what should be plotted p --- problem definition n --- numerics definition mlvt --- multilevel vector transport that holds the quantities to measure tsim --- simulation time assumes this is the correct time to plot and plotOffSet is set correctly """ from proteusGraphical import vtkViewers p = self.p; n = self.n vt = mlvt.levelModelList[-1] title = """q[%s] t= %s""" % (ckey,tsim) assert vt.q.has_key(ckey) if self.viewerType == 'gnuplot': if vt.nSpace_global == 1: max_u=max(numpy.absolute(numpy.take(vt.q[ckey],[0],2).flat)) L = max(vt.mesh.nodeArray[:,0]) scale = 10.*max_u/L if abs(scale) < 1.0e-12: scale = 1.0 npoints = vt.q['x'].shape[0]*vt.q['x'].shape[1] xandu = [(vt.q['x'].flat[i*3+0],vt.q[ckey].flat[i]) for i in range(npoints)] xandu.sort() for xu in xandu: self.datFile.write("%12.5e %12.5e \n" % (xu[0],xu[1]/scale)) self.datFile.write("\n \n") ptitle = title+" max= %g" % max_u cmd = "set term x11 %i; plot \'%s\' index %i with linespoints title \"%s\" \n" % (self.windowNumber(), self.datFilename, self.plotNumber(), ptitle) self.cmdFile.write(cmd) self.viewerPipe.write(cmd) newPlot() newWindow() elif vt.nSpace_global == 2: max_u=max(numpy.absolute(numpy.take(vt.q[ckey],[0],2).flat)) 
max_v=max(numpy.absolute(numpy.take(vt.q[ckey],[1],2).flat)) L = min((max(vt.mesh.nodeArray[:,0]),max(vt.mesh.nodeArray[:,1]))) scale =10.0*max((max_u,max_v,1.0e-16))/L if abs(scale) < 1.e-12: scale = 1.0 for eN in range(vt.mesh.nElements_global): #mwf what about just 1 point per element for k in range(vt.nQuadraturePoints_element): for k in range(min(nVectorPlotPointsPerElement,vt.nQuadraturePoints_element)): xtmp = vt.q['x'][eN,k,:]; vtmp = vt.q[ckey][eN,k,:] self.datFile.write("%12.5e %12.5e %12.5e %12.5e \n" % (xtmp[0],xtmp[1], vtmp[0]/scale,vtmp[1]/scale)) self.datFile.write("\n \n") ptitle = title + "max=(%s,%s)" % (max_u,max_v) cmd = "set term x11 %i; plot \'%s\' index %i with vectors title \"%s\" \n" % (self.windowNumber(), self.datFilename, self.plotNumber(), ptitle) self.cmdFile.write(cmd) self.viewerPipe.write(cmd) newPlot() newWindow() elif vt.nSpace_global == 3: (slice_x,slice_y,slice_z) = vt.mesh.nodeArray[vt.mesh.nodeArray.shape[0]/2,:] max_u=max(numpy.absolute(numpy.take(vt.q[ckey],[0],2).flat)) max_v=max(numpy.absolute(numpy.take(vt.q[ckey],[1],2).flat)) max_w=max(numpy.absolute(numpy.take(vt.q[ckey],[2],2).flat)) L = min((max(vt.mesh.nodeArray[:,0]),max(vt.mesh.nodeArray[:,1]), max(vt.mesh.nodeArray[:,1]))) scale = 10.0*max((max_u,max_v,max_w,1.e-16))/L if abs(scale) < 1.e-12: scale = 1.0 for eN in range(vt.mesh.nElements_global): #mwf now try one point per element for k in range(vt.nQuadraturePoints_element): for k in range(min(nVectorPlotPointsPerElement,vt.nQuadraturePoints_element)): xtmp = vt.q['x'][eN,k,:]; vtmp = vt.q[ckey][eN,k,:] if abs(xtmp[0]-slice_x) < vt.mesh.h: self.datFile.write("%12.5e %12.5e %12.5e %12.5e \n" % (xtmp[1],xtmp[2], vtmp[1]/scale,vtmp[2]/scale)) self.datFile.write("\n \n") ptitle = title + " max=(%s,%s,%s) " % (max_u,max_v,max_w)+" : x-slice" cmd = "set term x11 %i; plot \'%s\' index %i with vectors title \"%s\" \n" % (self.windowNumber(), self.datFilename, self.plotNumber(), ptitle) self.cmdFile.write(cmd) 
self.viewerPipe.write(cmd) newPlot() newWindow() #y slice for eN in range(vt.mesh.nElements_global): #mwf now try one point per element for k in range(vt.nQuadraturePoints_element): for k in range(min(nVectorPlotPointsPerElement,vt.nQuadraturePoints_element)): xtmp = vt.q['x'][eN,k,:]; vtmp = vt.q[ckey][eN,k,:] if abs(xtmp[1]-slice_y) < vt.mesh.h: self.datFile.write("%12.5e %12.5e %12.5e %12.5e \n" % (xtmp[0],xtmp[2], vtmp[0]/scale,vtmp[2]/scale)) self.datFile.write("\n \n") ptitle = title + " max=(%s,%s,%s) " % (max_u,max_v,max_w)+" : y-slice" cmd = "set term x11 %i; plot \'%s\' index %i with vectors title \"%s\" \n" % (self.windowNumber(), self.datFilename, self.plotNumber(), ptitle) self.cmdFile.write(cmd) self.viewerPipe.write(cmd) newPlot() newWindow() #z slice for eN in range(vt.mesh.nElements_global): #mwf now try one point per element for k in range(vt.nQuadraturePoints_element): for k in range(min(nVectorPlotPointsPerElement,vt.nQuadraturePoints_element)): xtmp = vt.q['x'][eN,k,:]; vtmp = vt.q[ckey][eN,k,:] if abs(xtmp[2]-slice_z) < vt.mesh.h: self.datFile.write("%12.5e %12.5e %12.5e %12.5e \n" % (xtmp[0],xtmp[1], vtmp[0]/scale,vtmp[1]/scale)) self.datFile.write("\n \n") ptitle = title + " max=(%s,%s,%s) " % (max_u,max_v,max_w)+" : z-slice" cmd = "set term x11 %i; plot \'%s\' index %i with vectors title \"%s\" \n" % (self.windowNumber(), self.datFilename, self.plotNumber(), ptitle) self.cmdFile.write(cmd) self.viewerPipe.write(cmd) newPlot() newWindow() #end 3d #gnuplot elif self.viewerType == 'matlab': name = ckey[0]; for i in range(len(ckey)-1): name += "_%s" % ckey[1+i] title = "%s t = %g " % (name,tsim) #does not handle window number counting internally writer = MatlabWriter(nxgrid=50,nygrid=50,nzgrid=50) nplotted = writer.viewVectorPointData(self.cmdFile,vt.nSpace_global,vt.q,ckey,name=name, storeMeshData=not self.meshDataStructuresWritten, useLocal=False,#not implemented yed figureNumber =self.windowNumber()+1,title=title) windowNumber += nplotted 
elif self.viewerType == 'vtk': title = """q[%s]""" % (ckey,) if vt.nSpace_global == 1: max_u=max(numpy.absolute(numpy.take(vt.q[ckey],[0],2).flat)) L = max(vt.mesh.nodeArray[:,0]) scale = 1.0 if abs(scale) < 1.0e-12: scale = 1.0 npoints = vt.q['x'].shape[0]*vt.q['x'].shape[1] xvals = [vt.q['x'].flat[i*3+0] for i in range(npoints)] yvals = [vt.q[ckey].flat[i]/scale for i in range(npoints)] vtkViewers.viewVector_1D(xvals,yvals,"x",ckey[0],title,self.windowNumber(), Pause=self.s.viewerPause) newPlot() newWindow() #1d elif vt.nSpace_global == 2: vtkViewers.viewVector_pointSet_2D(vt.q['x'],vt.q[ckey],title) newPlot() newWindow() elif vt.nSpace_global == 3: vtkViewers.viewVector_pointSet_3D(vt.q['x'],vt.q[ckey],title,self.windowNumber(), Pause=self.s.viewerPause) newPlot() newWindow() #def class MatlabWriter: """ collect functionality for generating visualation data and commands in matlab TODO: C0P2 in 3d DG monomials """ def __init__(self,nxgrid=50,nygrid=50,nzgrid=10,verbose=0): self.verbose = 0 self.ngrid=[nxgrid,nygrid,nzgrid] #default grid size if converting to regular mesh def storePointMeshData(self,cmdFile,x,name): """ write out spatial locations for generic point data """ cmdFile.write("%s_x_q = [ ... \n" % name) for eN in range(x.shape[0]): for k in range(x.shape[1]): cmdFile.write("%g %g %g \n" % (x[eN,k,0],x[eN,k,1],x[eN,k,2])) cmdFile.write("];") # def viewScalarPointData(self,cmdFile,nSpace,q,ckey,name=None, storeMeshData=True,useLocal=True, figureNumber=1,title=None): """ wrapper for visualling element quadrature points, can try to use a local representation or build a global one depending on useLocal. If useLocal and nPoints_elemnet < nSpace+1 calls global routine """ if not useLocal: return self.viewGlobalScalarPointData(cmdFile,nSpace,q,ckey,name=name, storeMeshData=storeMeshData, figureNumber=figureNumber,title=title) nPoints_element = q['x'].shape[1] if nPoints_element <= nSpace: print """ Warning! 
viewScalarPointData nPoints_element=%s < %s too small for useLocal, using global interp""" % (nPoints_element, nSpace+1) return self.viewGlobalScalarPointData(cmdFile,nSpace,q,ckey,name=name, storeMeshData=storeMeshData, figureNumber=figureNumber,title=title) return self.viewLocalScalarPointData(cmdFile,nSpace,q,ckey,name=name, storeMeshData=storeMeshData, figureNumber=figureNumber,title=title) def viewVectorPointData(self,cmdFile,nSpace,q,ckey,name=None, storeMeshData=True,useLocal=True, figureNumber=1,title=None): """ wrapper for visualling element quadrature points, can try to use a local representation or build a global one depending on useLocal. TODO: implement local view for vectors If useLocal and nPoints_elemnet < nSpace+1 calls global routine """ if not useLocal: return self.viewGlobalVectorPointData(cmdFile,nSpace,q,ckey,name=name, storeMeshData=storeMeshData, figureNumber=figureNumber,title=title) else: print "viewLocalVectorPointData not implemented, using global!" return self.viewGlobalVectorPointData(cmdFile,nSpace,q,ckey,name=name, storeMeshData=storeMeshData, figureNumber=figureNumber,title=title) # nPoints_element = q['x'].shape[1] # if nPoints_element <= nSpace: # print """ # Warning! 
viewScalarPointData nPoints_element=%s < %s too small for useLocal, using global interp""" % (nPoints_element, # nSpace+1) # return self.viewGlobalScalarPointData(cmdFile,nSpace,q,ckey,name=name, # storeMeshData=storeMeshData, # figureNumber=figureNumber,title=title) # return self.viewLocalScalarPointData(cmdFile,nSpace,q,ckey,name=name, # storeMeshData=storeMeshData, # figureNumber=figureNumber,title=title) def viewGlobalScalarPointData(self,cmdFile,nSpace,q,ckey,name=None, storeMeshData=True,figureNumber=1,title=None): """ input scalar variable and coordinates stored in dictionary q['x'], q[ckey] respectively, generate global continuous interpolant should work for q, ebq_global, and ebqe quadrature dictionaries uses delaunay triangulation in 2d and 3d scalar data is stored in name_q if storeMeshData = True, writes out name_x_q -- point data tri_name_q -- Delaunay representation (2d,3d) returns number of figures actually plotted """ nplotted = 0 ###simple visualization commands (%s --> name) #1d cmd1dData = """ [x_tmp,i_tmp] = sort(%s_x_q(:,1)); %s_x_q = %s_x_q(i_tmp); %s_q = %s_q(i_tmp); """ cmd1dView = """ figure(%i) ;
# -*- coding: utf-8 -*-
import os
import time
import unittest
import inspect
from mock import patch
import requests
from configparser import ConfigParser

from kb_Amplicon.kb_AmpliconImpl import kb_Amplicon
from kb_Amplicon.kb_AmpliconServer import MethodContext
from installed_clients.authclient import KBaseAuth as _KBaseAuth
from kb_Amplicon.Utils.MDSUtils import MDSUtils
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.WorkspaceClient import Workspace
from installed_clients.AbstractHandleClient import AbstractHandle as HandleService


class kb_AmpliconTest(unittest.TestCase):
    """Integration tests for kb_Amplicon.run_metaMDS against a live KBase
    deployment (workspace, shock, and handle services are reached over the
    network using KB_AUTH_TOKEN / KB_DEPLOYMENT_CONFIG from the environment)."""

    @classmethod
    def setUpClass(cls):
        # Read deployment config and build authenticated service clients.
        cls.token = os.environ.get('KB_AUTH_TOKEN', None)
        config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
        cls.cfg = {}
        config = ConfigParser()
        config.read(config_file)
        for nameval in config.items('kb_Amplicon'):
            cls.cfg[nameval[0]] = nameval[1]
        # Getting username from Auth profile for token
        auth_service_url = cls.cfg['auth-service-url']
        auth_client = _KBaseAuth(auth_service_url)
        user_id = auth_client.get_user(cls.token)
        # WARNING: don't call any logging methods on the context object,
        # it'll result in a NoneType error
        cls.ctx = MethodContext(None)
        cls.ctx.update({'token': cls.token,
                        'user_id': user_id,
                        'provenance': [
                            {'service': 'kb_Amplicon',
                             'method': 'please_never_use_it_in_production',
                             'method_params': []
                             }],
                        'authenticated': 1})
        cls.wsURL = cls.cfg['workspace-url']
        cls.wsClient = Workspace(cls.wsURL)
        cls.shockURL = cls.cfg['shock-url']
        cls.serviceImpl = kb_Amplicon(cls.cfg)
        cls.scratch = cls.cfg['scratch']
        cls.callback_url = os.environ['SDK_CALLBACK_URL']
        cls.dfu = DataFileUtil(cls.callback_url)
        cls.mds_util = MDSUtils(cls.cfg)
        cls.hs = HandleService(url=cls.cfg['handle-service-url'],
                               token=cls.token)
        # Unique workspace per test run; cleaned up in tearDownClass.
        suffix = int(time.time() * 1000)
        cls.wsName = "test_kb_Amplicon_" + str(suffix)
        ret = cls.wsClient.create_workspace({'workspace': cls.wsName})
        cls.wsId = ret[0]
        # Upload a tiny placeholder file to shock; mock_file_to_shock returns
        # this node so tests never upload real report artifacts.
        small_file = os.path.join(cls.scratch, 'test.txt')
        with open(small_file, "w") as f:
            f.write("empty content")
        cls.test_shock = cls.dfu.file_to_shock({'file_path': small_file,
                                                'make_handle': True})
        cls.handles_to_delete = []
        cls.nodes_to_delete = []
        cls.handles_to_delete.append(cls.test_shock['handle']['hid'])
        cls.nodes_to_delete.append(cls.test_shock['shock_id'])

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of workspace, shock nodes, and handles created
        # during the run; guarded so a failed setUpClass does not raise here.
        if hasattr(cls, 'wsName'):
            cls.wsClient.delete_workspace({'workspace': cls.wsName})
            print('Test workspace was deleted')
        if hasattr(cls, 'nodes_to_delete'):
            for node in cls.nodes_to_delete:
                cls.delete_shock_node(node)
        if hasattr(cls, 'handles_to_delete'):
            cls.hs.delete_handles(cls.hs.hids_to_handles(cls.handles_to_delete))
            print('Deleted handles ' + str(cls.handles_to_delete))

    @classmethod
    def delete_shock_node(cls, node_id):
        # Delete a single shock node via the raw REST API.
        header = {'Authorization': 'Oauth {0}'.format(cls.token)}
        requests.delete(cls.shockURL + '/node/' + node_id, headers=header,
                        allow_redirects=True)
        print('Deleted shock node ' + node_id)

    def getMDSUtil(self):
        # Accessor for the shared MDSUtils instance built in setUpClass.
        return self.__class__.mds_util

    def mock_file_to_shock(params):
        # NOTE: deliberately has no `self` — this function is passed to
        # @patch.object(..., side_effect=...) which calls it with the patched
        # method's own arguments. Returns the placeholder node from setUpClass.
        print('Mocking DataFileUtilClient.file_to_shock')
        print(params)
        return kb_AmpliconTest().test_shock

    def loadExpressionMatrix(self):
        """Lazily create and cache the fixture objects used by the tests:
        column/row AttributeMappings, a 5x4 ExpressionMatrix, and an
        'associated' ExpressionMatrix with the same ids but scaled values.
        Object refs are cached on the class so they are only saved once."""
        if hasattr(self.__class__, 'expr_matrix_ref'):
            return self.__class__.expr_matrix_ref
        # matrix_file_name = 'test_import.xlsx'
        col_attribute = {'attributes': [{'attribute': 'test_attribute_1',
                                         'attribute_ont_id': 'OBI_0500020',
                                         'source': 'upload',
                                         'unit': 'Hour',
                                         'unit_ont_id': 'UO_0000032'},
                                        {'attribute': 'test_attribute_2',
                                         'attribute_ont_id': 'CHEBI:9168',
                                         'source': 'upload',
                                         'unit': 'nanogram per milliliter',
                                         'unit_ont_id': 'UO_0000275'},
                                        {'attribute': 'test_attribute_3',
                                         'attribute_ont_id': 'CHEBI:9168',
                                         'source': 'upload',
                                         'unit': 'nanogram per milliliter',
                                         'unit_ont_id': 'UO_0000275'}],
                         'instances': {'instance_1': ['1', '5', '9'],
                                       'instance_2': ['2', '6', '10'],
                                       'instance_3': ['3', '7', '11'],
                                       'instance_4': ['4', '8', '12']},
                         'ontology_mapping_method': 'User Curation'}
        info = self.dfu.save_objects({
            'id': self.wsId,
            'objects': [{'type': 'KBaseExperiments.AttributeMapping',
                         'data': col_attribute,
                         'name': 'test_ExpressionMatrix_col_attribute_mapping'}]})[0]
        # Workspace ref format: wsid/objid/version
        col_attributemapping_ref = "%s/%s/%s" % (info[6], info[0], info[4])
        self.__class__.col_attributemapping_ref = col_attributemapping_ref
        print('Loaded Col AttributeMapping: ' + col_attributemapping_ref)

        row_attribute = {'attributes': [{'attribute': 'test_attribute_1',
                                         'attribute_ont_id': 'OBI_0500020',
                                         'source': 'upload',
                                         'unit': 'Hour',
                                         'unit_ont_id': 'UO_0000032'},
                                        {'attribute': 'test_attribute_2',
                                         'attribute_ont_id': 'CHEBI:9168',
                                         'source': 'upload',
                                         'unit': 'nanogram per milliliter',
                                         'unit_ont_id': 'UO_0000275'},
                                        {'attribute': 'test_attribute_3',
                                         'attribute_ont_id': 'CHEBI:9168',
                                         'source': 'upload',
                                         'unit': 'nanogram per milliliter',
                                         'unit_ont_id': 'UO_0000275'}],
                         'instances': {'WRI_RS00010_CDS_1': ['1', '4', '7'],
                                       'WRI_RS00015_CDS_1': ['3', '4', '8'],
                                       'WRI_RS00025_CDS_1': ['3', '6', '7'],
                                       'WRI_RS00030_CDS_1': ['3', '6', '7'],
                                       'WRI_RS00035_CDS_1': ['3', '6', '7']},
                         'ontology_mapping_method': 'User Curation'}
        info = self.dfu.save_objects({
            'id': self.wsId,
            'objects': [{'type': 'KBaseExperiments.AttributeMapping',
                         'data': row_attribute,
                         'name': 'test_ExpressionMatrix_row_attribute_mapping'}]})[0]
        row_attributemapping_ref = "%s/%s/%s" % (info[6], info[0], info[4])
        self.__class__.row_attributemapping_ref = row_attributemapping_ref
        print('Loaded Row AttributeMapping: ' + row_attributemapping_ref)

        matrix_data = {'attributes': {'Instrument': 'Old Faithful',
                                      'Scientist': '<NAME>'},
                       'col_attributemapping_ref': col_attributemapping_ref,
                       'col_mapping': {'instance_1': 'instance_1',
                                       'instance_2': 'instance_2',
                                       'instance_3': 'instance_3',
                                       'instance_4': 'instance_4'},
                       'col_normalization': 'test_col_normalization',
                       'data': {'col_ids': ['instance_1', 'instance_2',
                                            'instance_3', 'instance_4'],
                                'row_ids': ['WRI_RS00010_CDS_1',
                                            'WRI_RS00015_CDS_1',
                                            'WRI_RS00025_CDS_1',
                                            'WRI_RS00030_CDS_1',
                                            'WRI_RS00035_CDS_1'],
                                'values': [[1, 2, 3, 4],
                                           [50, 60, 70, 80],
                                           [9, 10, 11, 12],
                                           [9, 10, 11, 12],
                                           [9, 10, 11, 12]]},
                       'description': 'test_desc',
                       'row_attributemapping_ref': row_attributemapping_ref,
                       'row_mapping': {'WRI_RS00010_CDS_1': 'WRI_RS00010_CDS_1',
                                       'WRI_RS00015_CDS_1': 'WRI_RS00015_CDS_1',
                                       'WRI_RS00025_CDS_1': 'WRI_RS00025_CDS_1',
                                       'WRI_RS00030_CDS_1': 'WRI_RS00030_CDS_1',
                                       'WRI_RS00035_CDS_1': 'WRI_RS00035_CDS_1'},
                       'row_normalization': 'test_row_normalization',
                       'scale': 'log2',
                       'search_attributes': ['Scientist | <NAME>',
                                             'Instrument | Old Faithful']}
        info = self.dfu.save_objects({'id': self.wsId,
                                      'objects': [{'type': 'KBaseMatrices.ExpressionMatrix',
                                                   'data': matrix_data,
                                                   'name': 'test_ExpressionMatrix'}]})[0]
        expr_matrix_ref = "%s/%s/%s" % (info[6], info[0], info[4])
        self.__class__.expr_matrix_ref = expr_matrix_ref
        print('Loaded ExpressionMatrix: ' + expr_matrix_ref)

        # load associated matrix (same ids, values scaled down by 10)
        matrix_data = {'attributes': {'Instrument': 'Old Faithful',
                                      'Scientist': '<NAME>'},
                       'col_attributemapping_ref': col_attributemapping_ref,
                       'col_mapping': {'instance_1': 'instance_1',
                                       'instance_2': 'instance_2',
                                       'instance_3': 'instance_3',
                                       'instance_4': 'instance_4'},
                       'col_normalization': 'test_col_normalization',
                       'data': {'col_ids': ['instance_1', 'instance_2',
                                            'instance_3', 'instance_4'],
                                'row_ids': ['WRI_RS00010_CDS_1',
                                            'WRI_RS00015_CDS_1',
                                            'WRI_RS00025_CDS_1',
                                            'WRI_RS00030_CDS_1',
                                            'WRI_RS00035_CDS_1'],
                                'values': [[0.1, 0.2, 0.3, 0.4],
                                           [0.5, 0.6, 0.7, 0.8],
                                           [0.9, 1, 1.1, 1.2],
                                           [0.9, 1, 1.1, 1.2],
                                           [0.9, 1, 1.1, 1.2]]},
                       'description': 'test_desc',
                       'row_attributemapping_ref': row_attributemapping_ref,
                       'row_mapping': {'WRI_RS00010_CDS_1': 'WRI_RS00010_CDS_1',
                                       'WRI_RS00015_CDS_1': 'WRI_RS00015_CDS_1',
                                       'WRI_RS00025_CDS_1': 'WRI_RS00025_CDS_1',
                                       'WRI_RS00030_CDS_1': 'WRI_RS00030_CDS_1',
                                       'WRI_RS00035_CDS_1': 'WRI_RS00035_CDS_1'},
                       'row_normalization': 'test_row_normalization',
                       'scale': 'log2',
                       'search_attributes': ['Scientist | <NAME>',
                                             'Instrument | Old Faithful']}
        info = self.dfu.save_objects({
            'id': self.wsId,
            'objects': [{'type': 'KBaseMatrices.ExpressionMatrix',
                         'data': matrix_data,
                         'name': 'test_associated_ExpressionMatrix'}]})[0]
        asso_matrix_ref = "%s/%s/%s" % (info[6], info[0], info[4])
        self.__class__.asso_matrix_ref = asso_matrix_ref
        print('Loaded Associated ExpressionMatrix: ' + asso_matrix_ref)

    def start_test(self):
        # Print the calling test's name (frame above this one on the stack).
        testname = inspect.stack()[1][3]
        print('\n*** starting test: ' + testname + ' **')

    def test_init_ok(self):
        self.start_test()
        class_attri = ['ws_url', 'callback_url', 'token', 'scratch', 'dfu',
                       'working_dir', 'output_dir']
        mds_util = self.getMDSUtil()
        self.assertTrue(set(class_attri) <= set(mds_util.__dict__.keys()))
        self.assertEqual(mds_util.scratch, self.cfg.get('scratch'))

    @patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
    def test_run_metaMDS_scale_by_attri_plot_associated_matrix_without_color(self, file_to_shock):
        self.start_test()
        self.loadExpressionMatrix()
        # testing col dimension with linked matrix
        params = {'workspace_name': self.wsName,
                  'input_obj_ref': self.expr_matrix_ref,
                  'n_components': 3,
                  'max_iter': 20,
                  'plot_script': 'plot(my_data.mds,type="t",display="sites")',
                  'plot_type': 'ps',
                  'plot_name': '',
                  'attribute_mapping_obj_ref': self.col_attributemapping_ref,
                  'associated_matrix_obj_ref': self.asso_matrix_ref,
                  'scale_size_by': {'attribute_size': ["test_attribute_1"]},
                  # 'color_marker_by': {'attribute_color': ['test_attribute_2']},
                  'mds_matrix_name': 'output_mds_from_obj',
                  'dimension': 'col'}
        ret = self.serviceImpl.run_metaMDS(self.ctx, params)[0]
        self.assertTrue('report_name' in ret)
        self.assertTrue('report_ref' in ret)
        self.assertTrue('mds_ref' in ret)
        # NOTE(review): variables named pca_* but hold the MDS output object.
        pca_matrix_ref = ret.get('mds_ref')
        pca_data = self.dfu.get_objects({"object_refs": [pca_matrix_ref]})['data'][0]['data']
        expected_values = ['distance_matrix', 'mds_parameters',
                           'original_matrix_ref', 'rotation_matrix',
                           'site_ordination', 'species_ordination']
        self.assertTrue(set(expected_values) <= set(pca_data.keys()))
        expected_row_ids = ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1',
                            'WRI_RS00025_CDS_1', 'WRI_RS00030_CDS_1',
                            'WRI_RS00035_CDS_1']
        expected_col_ids = ['instance_1', 'instance_2', 'instance_3',
                            'instance_4']
        result_row_ids = [value[0] for value in
                          pca_data.get('species_ordination').get('values')]
        result_col_ids = [value[0] for value in
                          pca_data.get('site_ordination').get('values')]
        self.assertCountEqual(result_row_ids, expected_row_ids)
        self.assertCountEqual(result_col_ids, expected_col_ids)
        mds_dir = '/kb/module/work/tmp/mds_output'
        expected_files = ['dist_matrix.csv', 'mds_script.R', 'others.json',
                          'plotly_fig.html', 'site_ordination.csv',
                          'species_ordination.csv', 'test_ExpressionMatrix.csv',
                          'usr_plt_name.ps']
        self.assertTrue(set(expected_files) <= set(os.listdir(mds_dir)))

    @patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
    def test_run_metaMDS_scale_by_attri_plot_associated_matrix(self, file_to_shock):
        self.start_test()
        self.loadExpressionMatrix()
        # testing col dimension with linked matrix
        params = {'workspace_name': self.wsName,
                  'input_obj_ref': self.expr_matrix_ref,
                  'n_components': 3,
                  'max_iter': 20,
                  'plot_script': 'plot(my_data.mds,type="t",display="sites")',
                  'plot_type': 'ps',
                  'plot_name': '',
                  'attribute_mapping_obj_ref': self.col_attributemapping_ref,
                  'associated_matrix_obj_ref': self.asso_matrix_ref,
                  'scale_size_by': {'attribute_size': ["test_attribute_1"]},
                  'color_marker_by': {'attribute_color': ['test_attribute_2']},
                  'mds_matrix_name': 'output_mds_from_obj',
                  'dimension': 'col'}
        ret = self.serviceImpl.run_metaMDS(self.ctx, params)[0]
        self.assertTrue('report_name' in ret)
        self.assertTrue('report_ref' in ret)
        self.assertTrue('mds_ref' in ret)
        pca_matrix_ref = ret.get('mds_ref')
        pca_data = self.dfu.get_objects({"object_refs": [pca_matrix_ref]})['data'][0]['data']
        expected_values = ['distance_matrix', 'mds_parameters',
                           'original_matrix_ref', 'rotation_matrix',
                           'site_ordination', 'species_ordination']
        self.assertTrue(set(expected_values) <= set(pca_data.keys()))
        expected_row_ids = ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1',
                            'WRI_RS00025_CDS_1', 'WRI_RS00030_CDS_1',
                            'WRI_RS00035_CDS_1']
        expected_col_ids = ['instance_1', 'instance_2', 'instance_3',
                            'instance_4']
        result_row_ids = [value[0] for value in
                          pca_data.get('species_ordination').get('values')]
        result_col_ids = [value[0] for value in
                          pca_data.get('site_ordination').get('values')]
        self.assertCountEqual(result_row_ids, expected_row_ids)
        self.assertCountEqual(result_col_ids, expected_col_ids)
        mds_dir = '/kb/module/work/tmp/mds_output'
        expected_files = ['dist_matrix.csv', 'mds_script.R', 'others.json',
                          'plotly_fig.html', 'site_ordination.csv',
                          'species_ordination.csv', 'test_ExpressionMatrix.csv',
                          'usr_plt_name.ps']
        self.assertTrue(set(expected_files) <= set(os.listdir(mds_dir)))

    @patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
    def test_run_metaMDS_with_linked_matrix_ok(self, file_to_shock):
        self.start_test()
        self.loadExpressionMatrix()
        # testing col dimension with linked matrix
        params = {'workspace_name': self.wsName,
                  'input_obj_ref': self.expr_matrix_ref,
                  'n_components': 3,
                  'max_iter': 20,
                  'plot_script': 'plot(my_data.mds,type="t",display="sites")',
                  'plot_type': 'ps',
                  'plot_name': '',
                  'attribute_mapping_obj_ref': self.col_attributemapping_ref,
                  'associated_matrix_obj_ref': self.asso_matrix_ref,
                  'scale_size_by': {'row_size': ['WRI_RS00010_CDS_1']},
                  'color_marker_by': {'attribute_color': ['test_attribute_2']},
                  'mds_matrix_name': 'output_mds_from_obj',
                  'dimension': 'col'}
        ret = self.serviceImpl.run_metaMDS(self.ctx, params)[0]
        self.assertTrue('report_name' in ret)
        self.assertTrue('report_ref' in ret)
        self.assertTrue('mds_ref' in ret)
        pca_matrix_ref = ret.get('mds_ref')
        pca_data = self.dfu.get_objects({"object_refs": [pca_matrix_ref]})['data'][0]['data']
        expected_values = ['distance_matrix', 'mds_parameters',
                           'original_matrix_ref', 'rotation_matrix',
                           'site_ordination', 'species_ordination']
        self.assertTrue(set(expected_values) <= set(pca_data.keys()))
        expected_row_ids = ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1',
                            'WRI_RS00025_CDS_1', 'WRI_RS00030_CDS_1',
                            'WRI_RS00035_CDS_1']
        expected_col_ids = ['instance_1', 'instance_2', 'instance_3',
                            'instance_4']
        result_row_ids = [value[0] for value in
                          pca_data.get('species_ordination').get('values')]
        result_col_ids = [value[0] for value in
                          pca_data.get('site_ordination').get('values')]
        self.assertCountEqual(result_row_ids, expected_row_ids)
        self.assertCountEqual(result_col_ids, expected_col_ids)
        mds_dir = '/kb/module/work/tmp/mds_output'
        expected_files = ['dist_matrix.csv', 'mds_script.R', 'others.json',
                          'plotly_fig.html', 'site_ordination.csv',
                          'species_ordination.csv', 'test_ExpressionMatrix.csv',
                          'usr_plt_name.ps']
        self.assertTrue(set(expected_files) <= set(os.listdir(mds_dir)))

    @patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
    def test_run_metaMDS_with_row_linked_matrix_ok(self, file_to_shock):
        self.start_test()
        self.loadExpressionMatrix()
        # testing row dimension with linked matrix; note expected row/col ids
        # swap relative to the 'col' dimension tests above.
        params = {'workspace_name': self.wsName,
                  'input_obj_ref': self.expr_matrix_ref,
                  'n_components': 3,
                  'max_iter': 20,
                  'plot_script': 'plot(my_data.mds,type="t",display="sites")',
                  'plot_type': 'ps',
                  'plot_name': '',
                  'attribute_mapping_obj_ref': self.row_attributemapping_ref,
                  'associated_matrix_obj_ref': self.asso_matrix_ref,
                  'scale_size_by': {'col_size': ['instance_2']},
                  'color_marker_by': {'attribute_color': ['test_attribute_2']},
                  'mds_matrix_name': 'output_mds_from_obj',
                  'dimension': 'row'}
        ret = self.serviceImpl.run_metaMDS(self.ctx, params)[0]
        self.assertTrue('report_name' in ret)
        self.assertTrue('report_ref' in ret)
        self.assertTrue('mds_ref' in ret)
        pca_matrix_ref = ret.get('mds_ref')
        pca_data = self.dfu.get_objects({"object_refs": [pca_matrix_ref]})['data'][0]['data']
        expected_values = ['distance_matrix', 'mds_parameters',
                           'original_matrix_ref', 'rotation_matrix',
                           'site_ordination', 'species_ordination']
        self.assertTrue(set(expected_values) <= set(pca_data.keys()))
        expected_row_ids = ['instance_1', 'instance_2', 'instance_3',
                            'instance_4']
        expected_col_ids = ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1',
                            'WRI_RS00025_CDS_1', 'WRI_RS00030_CDS_1',
                            'WRI_RS00035_CDS_1']
        result_row_ids = [value[0] for value in
                          pca_data.get('species_ordination').get('values')]
        result_col_ids = [value[0] for value in
                          pca_data.get('site_ordination').get('values')]
        self.assertCountEqual(result_row_ids, expected_row_ids)
        self.assertCountEqual(result_col_ids, expected_col_ids)
        mds_dir = '/kb/module/work/tmp/mds_output'
        expected_files = ['dist_matrix.csv', 'mds_script.R', 'others.json',
                          'plotly_fig.html', 'site_ordination.csv',
                          'species_ordination.csv', 'test_ExpressionMatrix.csv',
                          'usr_plt_name.ps']
        self.assertTrue(set(expected_files) <= set(os.listdir(mds_dir)))

    @patch.object(DataFileUtil, "file_to_shock", side_effect=mock_file_to_shock)
    def test_run_metaMDS_with_linked_matrix_ok_only_scale_size(self, file_to_shock):
        self.start_test()
        self.loadExpressionMatrix()
        # testing only scale_size_by with linked matrix
        params = {'workspace_name': self.wsName,
                  'input_obj_ref': self.expr_matrix_ref,
                  'n_components': 3,
                  'max_iter': 20,
                  'plot_script': 'plot(my_data.mds,type="t",display="sites")',
                  'plot_type': 'ps',
                  'plot_name': '',
                  'associated_matrix_obj_ref': self.asso_matrix_ref,
                  'scale_size_by': {'row_size': ['WRI_RS00010_CDS_1']},
                  'mds_matrix_name': 'output_mds_from_obj',
                  'dimension': 'col'}
        ret = self.serviceImpl.run_metaMDS(self.ctx, params)[0]
        self.assertTrue('report_name' in ret)
        self.assertTrue('report_ref' in ret)
        self.assertTrue('mds_ref' in ret)
        pca_matrix_ref = ret.get('mds_ref')
        pca_data = self.dfu.get_objects({"object_refs": [pca_matrix_ref]})['data'][0]['data']
        expected_values = ['distance_matrix', 'mds_parameters',
                           'original_matrix_ref', 'rotation_matrix',
                           'site_ordination', 'species_ordination']
        self.assertTrue(set(expected_values) <= set(pca_data.keys()))
        expected_row_ids = ['WRI_RS00010_CDS_1', 'WRI_RS00015_CDS_1',
                            'WRI_RS00025_CDS_1', 'WRI_RS00030_CDS_1',
                            'WRI_RS00035_CDS_1']
        expected_col_ids = ['instance_1', 'instance_2', 'instance_3',
                            'instance_4']
        result_row_ids = [value[0] for value in
                          pca_data.get('species_ordination').get('values')]
        result_col_ids = [value[0] for value
"bleach_scan_number", 0x40000014: "trigger_in", 0x40000015: "trigger_out", 0x40000016: "is_ratio_track", 0x40000017: "bleach_count", 0x40000018: "spi_center_wavelength", 0x40000019: "pixel_time", 0x40000021: "condensor_frontlens", 0x40000023: "field_stop_value", 0x40000024: "id_condensor_aperture", 0x40000025: "condensor_aperture", 0x40000026: "id_condensor_revolver", 0x40000027: "condensor_filter", 0x40000028: "id_transmission_filter1", 0x40000029: "id_transmission1", 0x40000030: "id_transmission_filter2", 0x40000031: "id_transmission2", 0x40000032: "repeat_bleach", 0x40000033: "enable_spot_bleach_pos", 0x40000034: "spot_bleach_posx", 0x40000035: "spot_bleach_posy", 0x40000036: "spot_bleach_posz", 0x40000037: "id_tubelens", 0x40000038: "id_tubelens_position", 0x40000039: "transmitted_light", 0x4000003a: "reflected_light", 0x4000003b: "simultan_grab_and_bleach", 0x4000003c: "bleach_pixel_time", # detection_channels 0x70000001: "integration_mode", 0x70000002: "special_mode", 0x70000003: "detector_gain_first", 0x70000004: "detector_gain_last", 0x70000005: "amplifier_gain_first", 0x70000006: "amplifier_gain_last", 0x70000007: "amplifier_offs_first", 0x70000008: "amplifier_offs_last", 0x70000009: "pinhole_diameter", 0x7000000a: "counting_trigger", 0x7000000b: "acquire", 0x7000000c: "point_detector_name", 0x7000000d: "amplifier_name", 0x7000000e: "pinhole_name", 0x7000000f: "filter_set_name", 0x70000010: "filter_name", 0x70000013: "integrator_name", 0x70000014: "detection_channel_name", 0x70000015: "detection_detector_gain_bc1", 0x70000016: "detection_detector_gain_bc2", 0x70000017: "detection_amplifier_gain_bc1", 0x70000018: "detection_amplifier_gain_bc2", 0x70000019: "detection_amplifier_offset_bc1", 0x70000020: "detection_amplifier_offset_bc2", 0x70000021: "detection_spectral_scan_channels", 0x70000022: "detection_spi_wavelength_start", 0x70000023: "detection_spi_wavelength_stop", 0x70000026: "detection_dye_name", 0x70000027: "detection_dye_folder", # 
illumination_channels 0x90000001: "name", 0x90000002: "power", 0x90000003: "wavelength", 0x90000004: "aquire", 0x90000005: "detchannel_name", 0x90000006: "power_bc1", 0x90000007: "power_bc2", # beam_splitters 0xb0000001: "filter_set", 0xb0000002: "filter", 0xb0000003: "name", # data_channels 0xd0000001: "name", 0xd0000003: "acquire", 0xd0000004: "color", 0xd0000005: "sample_type", 0xd0000006: "bits_per_sample", 0xd0000007: "ratio_type", 0xd0000008: "ratio_track1", 0xd0000009: "ratio_track2", 0xd000000a: "ratio_channel1", 0xd000000b: "ratio_channel2", 0xd000000c: "ratio_const1", 0xd000000d: "ratio_const2", 0xd000000e: "ratio_const3", 0xd000000f: "ratio_const4", 0xd0000010: "ratio_const5", 0xd0000011: "ratio_const6", 0xd0000012: "ratio_first_images1", 0xd0000013: "ratio_first_images2", 0xd0000014: "dye_name", 0xd0000015: "dye_folder", 0xd0000016: "spectrum", 0xd0000017: "acquire", # markers 0x14000001: "name", 0x14000002: "description", 0x14000003: "trigger_in", 0x14000004: "trigger_out", # timers 0x12000001: "name", 0x12000002: "description", 0x12000003: "interval", 0x12000004: "trigger_in", 0x12000005: "trigger_out", 0x12000006: "activation_time", 0x12000007: "activation_number", } # Map TIFF tag code to attribute name, default value, type, count, validator TIFF_TAGS = { 254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()), 255: ('subfile_type', None, 3, 1, {0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}), 256: ('image_width', None, 4, 1, None), 257: ('image_length', None, 4, 1, None), 258: ('bits_per_sample', 1, 3, 1, None), 259: ('compression', 1, 3, 1, TIFF_COMPESSIONS), 262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS), 266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}), 269: ('document_name', None, 2, None, None), 270: ('image_description', None, 2, None, None), 271: ('make', None, 2, None, None), 272: ('model', None, 2, None, None), 273: ('strip_offsets', None, 4, None, None), 274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS), 277: 
('samples_per_pixel', 1, 3, 1, None), 278: ('rows_per_strip', 2**32-1, 4, 1, None), 279: ('strip_byte_counts', None, 4, None, None), 280: ('min_sample_value', None, 3, None, None), 281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample 282: ('x_resolution', None, 5, 1, None), 283: ('y_resolution', None, 5, 1, None), 284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}), 285: ('page_name', None, 2, None, None), 286: ('x_position', None, 5, 1, None), 287: ('y_position', None, 5, 1, None), 296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}), 297: ('page_number', None, 3, 2, None), 305: ('software', None, 2, None, None), 306: ('datetime', None, 2, None, None), 315: ('artist', None, 2, None, None), 316: ('host_computer', None, 2, None, None), 317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}), 320: ('color_map', None, 3, None, None), 322: ('tile_width', None, 4, 1, None), 323: ('tile_length', None, 4, 1, None), 324: ('tile_offsets', None, 4, None, None), 325: ('tile_byte_counts', None, 4, None, None), 338: ('extra_samples', None, 3, None, {0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}), 339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS), 347: ('jpeg_tables', None, None, None, None), 530: ('ycbcr_subsampling', 1, 3, 2, None), 531: ('ycbcr_positioning', 1, 3, 1, None), 32997: ('image_depth', None, 4, 1, None), 32998: ('tile_depth', None, 4, 1, None), 33432: ('copyright', None, 1, None, None), 33445: ('md_file_tag', None, 4, 1, None), 33446: ('md_scale_pixel', None, 5, 1, None), 33447: ('md_color_table', None, 3, None, None), 33448: ('md_lab_name', None, 2, None, None), 33449: ('md_sample_info', None, 2, None, None), 33450: ('md_prep_date', None, 2, None, None), 33451: ('md_prep_time', None, 2, None, None), 33452: ('md_file_units', None, 2, None, None), 33550: ('model_pixel_scale', None, 12, 3, None), 33922: ('model_tie_point', None, 12, None, None), 37510: ('user_comment', None, None, None, None), 34665: 
('exif_ifd', None, None, 1, None), 34735: ('geo_key_directory', None, 3, None, None), 34736: ('geo_double_params', None, 12, None, None), 34737: ('geo_ascii_params', None, 2, None, None), 34853: ('gps_ifd', None, None, 1, None), 42112: ('gdal_metadata', None, 2, None, None), 42113: ('gdal_nodata', None, 2, None, None), 50838: ('imagej_byte_counts', None, None, None, None), 50289: ('mc_xy_position', None, 12, 2, None), 50290: ('mc_z_position', None, 12, 1, None), 50291: ('mc_xy_calibration', None, 12, 3, None), 50292: ('mc_lens_lem_na_n', None, 12, 3, None), 50293: ('mc_channel_name', None, 1, None, None), 50294: ('mc_ex_wavelength', None, 12, 1, None), 50295: ('mc_time_stamp', None, 12, 1, None), 65200: ('flex_xml', None, 2, None, None), # code: (attribute name, default value, type, count, validator) } # Map custom TIFF tag codes to attribute names and import functions CUSTOM_TAGS = { 700: ('xmp', read_bytes), 34377: ('photoshop', read_numpy), 33723: ('iptc', read_bytes), 34675: ('icc_profile', read_numpy), 33628: ('mm_uic1', read_mm_uic1), 33629: ('mm_uic2', read_mm_uic2), 33630: ('mm_uic3', read_mm_uic3), 33631: ('mm_uic4', read_mm_uic4), 34361: ('mm_header', read_mm_header), 34362: ('mm_stamp', read_mm_stamp), 34386: ('mm_user_block', read_bytes), 34412: ('cz_lsm_info', read_cz_lsm_info), 43314: ('nih_image_header', read_nih_image_header), # 40001: ('mc_ipwinscal', read_bytes), 40100: ('mc_id_old', read_bytes), 50288: ('mc_id', read_bytes), 50296: ('mc_frame_properties', read_bytes), 50839: ('imagej_metadata', read_bytes), 51123: ('micromanager_metadata', read_json), } # Max line length of printed output PRINT_LINE_LEN = 79 def imshow(data, title=None, vmin=0, vmax=None, cmap=None, bitspersample=None, photometric='rgb', interpolation='nearest', dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs): """Plot n-dimensional images using matplotlib.pyplot. Return figure, subplot and plot axis. Requires pyplot already imported ``from matplotlib import pyplot``. 
Parameters ---------- bitspersample : int or None Number of bits per channel in integer RGB images. photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'} The color space of the image data. title : str Window and subplot title. figure : matplotlib.figure.Figure (optional). Matplotlib to use for plotting. subplot : int A matplotlib.pyplot.subplot axis. maxdim : int maximum image size in any dimension. kwargs : optional Arguments for matplotlib.pyplot.imshow. """ #if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'): # raise ValueError("Can't handle %s photometrics" % photometric) # TODO: handle photometric == 'separated' (CMYK) isrgb = photometric in ('rgb', 'palette') data = numpy.atleast_2d(data.squeeze()) data = data[(slice(0, maxdim), ) * len(data.shape)] dims = data.ndim if dims < 2: raise ValueError("not an image") elif dims == 2: dims = 0 isrgb = False else: if isrgb and data.shape[-3] in (3, 4): data = numpy.swapaxes(data, -3, -2) data = numpy.swapaxes(data, -2, -1) elif not isrgb and data.shape[-1] in (3, 4): data = numpy.swapaxes(data, -3, -1) data = numpy.swapaxes(data, -2, -1) isrgb = isrgb and data.shape[-1] in (3, 4) dims -= 3 if isrgb else 2 if photometric == 'palette' and isrgb: datamax = data.max() if datamax > 255: data >>= 8 # possible precision loss data = data.astype('B') elif data.dtype.kind in 'ui': if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None: try: bitspersample = int(math.ceil(math.log(data.max(), 2))) except Exception: bitspersample = data.dtype.itemsize * 8 elif not isinstance(bitspersample, int): # bitspersample can be tuple, e.g. 
(5, 6, 5) bitspersample = data.dtype.itemsize * 8 datamax = 2**bitspersample if isrgb: if bitspersample < 8: data <<= 8 - bitspersample elif bitspersample > 8: data >>= bitspersample - 8 # precision loss data = data.astype('B') elif data.dtype.kind == 'f': datamax = data.max() if isrgb and datamax > 1.0: if data.dtype.char == 'd': data = data.astype('f') data /= datamax elif data.dtype.kind == 'b': datamax = 1 elif data.dtype.kind == 'c': raise NotImplementedError("complex type") # TODO: handle complex types if not isrgb: if vmax is None: vmax = datamax if vmin is None: if data.dtype.kind == 'i': dtmin = numpy.iinfo(data.dtype).min vmin = numpy.min(data) if vmin == dtmin: vmin = numpy.min(data > dtmin) if data.dtype.kind == 'f': dtmin = numpy.finfo(data.dtype).min vmin = numpy.min(data) if vmin == dtmin: vmin = numpy.min(data > dtmin) else: vmin = 0 pyplot = sys.modules['matplotlib.pyplot'] if figure is None: pyplot.rc('font', family='sans-serif', weight='normal', size=8) figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True, facecolor='1.0', edgecolor='w') try: figure.canvas.manager.window.title(title) except Exception: pass pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9, left=0.1, right=0.95, hspace=0.05, wspace=0.0) subplot = pyplot.subplot(subplot) if title: try: title = unicode(title, 'Windows-1252') except TypeError: pass pyplot.title(title, size=11) if cmap is None: if data.dtype.kind in 'ub' and vmin == 0: cmap = 'gray' else: cmap = 'coolwarm' if photometric == 'miniswhite': cmap += '_r' image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax, cmap=cmap, interpolation=interpolation, **kwargs) if not isrgb: pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05 def format_coord(x, y): # callback function to format coordinate display in
# command - string of command to issue
        #         cur_idx - index of the current column
        #         new_idx - index of the new column
        base_response = self._createBaseTable()
        table = self._getTableFromResponse(base_response)
        column = table.columnFromIndex(cur_idx)
        column_name = column.getName(is_global_name=False)
        num_rows = table.numRows()
        num_columns = table.numColumns()
        # Do the cell update
        ajax_cmd = self._helper_http.ajaxCommandFactory()
        ajax_cmd['target'] = 'Column'
        ajax_cmd['command'] = command
        ajax_cmd['columnName'] = column_name
        ajax_cmd['args[]'] = 'Yet_Another_Column'
        command_url = self._helper_http.createURLFromAjaxCommand(ajax_cmd,
                                                                 address=BASE_URL)
        response = self.client.get(command_url)
        # Check the table
        new_table = self._getTableFromResponse(response)
        self.assertEqual(new_table.numColumns(), num_columns + 1)
        self.assertEqual(new_table.numRows(), num_rows)
        # New column should have all None values
        new_column = new_table.getColumns()[new_idx]
        # NOTE(review): assertTrue here is passed two positional args — the
        # second acts as the failure message, not an equality check; confirm
        # whether assertEqual was intended.
        self.assertTrue(new_column.numCells(), num_rows)
        b = all([np.isnan(x) for x in new_column.getCells()])
        if not b:
            import pdb; pdb.set_trace()
        self.assertTrue(b)

    def testCommandColumnInsert(self):
        if IGNORE_TEST:
            return
        self._addColumn("Insert", 1, 1)
        self._addColumn("Insert", 2, 2)
        self._addColumn("Insert", NCOL, NCOL)

    def testCommandColumnAppend(self):
        if IGNORE_TEST:
            return
        self._addColumn("Append", 1, 2)
        self._addColumn("Append", 2, 3)
        self._addColumn("Append", NCOL, NCOL+1)

    def _moveColumn(self, column_idx_to_move, dest_column_name):
        # Inputs: column_idx_to_move - index of column to be moved
        #         dest_column_name - name of the dest column after
        #                            which the column is to be moved
        base_response = self._createBaseTable()
        table = self._getTableFromResponse(base_response)
        column = table.columnFromIndex(column_idx_to_move)
        column_name = column.getName(is_global_name=False)
        num_rows = table.numRows()
        num_columns = table.numColumns()
        moved_column = table.columnFromIndex(column_idx_to_move)
        dest_column = table.columnFromName(dest_column_name)
        expected_index = table.indexFromColumn(dest_column)
        # Do the cell update
        ajax_cmd = self._helper_http.ajaxCommandFactory()
        ajax_cmd['target'] = "Column"
        ajax_cmd['command'] = "Move"
        ajax_cmd['columnName'] = column_name
        ajax_cmd['args[]'] = dest_column_name
        command_url = self._helper_http.createURLFromAjaxCommand(ajax_cmd,
                                                                 address=BASE_URL)
        response = self.client.get(command_url)
        # Check the table
        new_table = self._getTableFromResponse(response)
        self.assertEqual(new_table.numColumns(), num_columns)
        self.assertEqual(new_table.numRows(), num_rows)
        # Moved column should sit at the destination index with same contents
        column = new_table.getColumns()[expected_index]
        self.assertEqual(column.getName(is_global_name=False),
                         moved_column.getName(is_global_name=False))
        b = all([column.getCells()[n] == moved_column.getCells()[n]
                 for n in range(column.numCells())])
        if not b:
            import pdb; pdb.set_trace()
        self.assertTrue(b)

    def _makeColumnName(self, index):
        # Column naming convention used by the base table fixture.
        return "Col_%d" % index

    def testCommandColumnMove(self):
        if IGNORE_TEST:
            return
        # The column names are "row", "Col_0", ...
        self._moveColumn(1, self._makeColumnName(NCOL-1))  # Make it the last column

    def _formulaColumn(self, column_idx, formula, isValid):
        # Inputs: column_idx - index of column whose formula is changed
        #         formula - new formula for column
        #         isValid - is a valid formula
        # Assumes that formula only changes column_idx
        base_response = self._createBaseTable()
        table = self._getTableFromResponse(base_response)
        old_table = table.copy()
        column = table.columnFromIndex(column_idx)
        column_name = column.getName(is_global_name=False)
        old_formula = column.getFormula()
        # Reset the formula
        ajax_cmd = self._helper_http.ajaxCommandFactory()
        ajax_cmd['target'] = "Column"
        ajax_cmd['command'] = "Formula"
        ajax_cmd['columnName'] = column_name
        ajax_cmd['args[]'] = formula
        command_url = self._helper_http.createURLFromAjaxCommand(ajax_cmd,
                                                                 address=BASE_URL)
        response = self.client.get(command_url)
        content = json.loads(response.content)
        self.assertTrue(content.has_key("success"))
        # Check the table
        new_table = self._getTableFromResponse(response)
        new_column = new_table.childFromName(column_name, is_relative=False)
        # The formula is stored on the column whether or not it is valid;
        # only the "success" flag distinguishes the two cases.
        if isValid:
            self.assertTrue(content["success"])
            self.assertEqual(formula, new_column.getFormula())
        else:
            self.assertFalse(content["success"])
            self.assertEqual(formula, new_column.getFormula())
        # Check the columns
        self.assertTrue(compareTableData(old_table, new_table,
                                         excludes=[column_idx]))

    def testCommandColumnFormula(self):
        if IGNORE_TEST:
            return
        self._formulaColumn(NCOL - 1, "np.sin(2.3)", True)  # Valid formula
        self._formulaColumn(NCOL - 1, "np.sin(2.3", False)  # Invalid formula

    def _evaluateTable(self, formula, isValid, col_idx=NCOL-1):
        # Inputs: formula - new formula for column
        #         isValid - is a valid formula
        base_response = self._createBaseTable()
        table = self._getTableFromResponse(base_response)
        column = table.columnFromIndex(col_idx)
        column_name = column.getName(is_global_name=False)
        # Change the formula
        ajax_cmd = self._helper_http.ajaxCommandFactory()
        ajax_cmd['target'] = "Column"
        ajax_cmd['command'] = "Formula"
        ajax_cmd['columnName'] = column_name
        ajax_cmd['args[]'] = formula
        command_url = self._helper_http.createURLFromAjaxCommand(ajax_cmd,
                                                                 address=BASE_URL)
        response = self.client.get(command_url)
        content = json.loads(response.content)
        self.assertTrue(content.has_key("success"))
        # Check the table
        # NOTE(review): `error` from evaluate() is never asserted on — only
        # the AJAX "success" flag is checked.
        new_table = self._getTableFromResponse(response)
        error = new_table.evaluate(user_directory=TEST_DIR)
        if isValid:
            self.assertTrue(content["success"])
        else:
            self.assertFalse(content["success"])

    def testTableEvaluate(self):
        if IGNORE_TEST:
            return
        self._evaluateTable("np.sin(3.2)", True)  # Valid formula
        self._evaluateTable("range(1000)", True)  # Test large
        formula = "Col_2 = np.sin(np.array(range(10), dtype=float));B = Col_1**3"
        self._evaluateTable(formula, True)  # Compound formula
        self._evaluateTable("np.sin(x)", False)  # Invalid formula

    def testTableExport(self):
        if IGNORE_TEST:
            return
        # Populate the table with a couple of formulas
        FORMULA = "range(10)"
        FUNC_NAME = "ss_export_test"
        self._evaluateTable(FORMULA, True)
        # Do the export
        ajax_cmd = self._helper_http.ajaxCommandFactory()
        ajax_cmd['target'] = "Sheet"
        ajax_cmd['command'] = "Export"
        inputs = "Col_1"
        outputs = "Col_%d, Col_%d" % (NCOL-1, NCOL-2)
        arg_list = [FUNC_NAME, inputs, outputs]
        ajax_cmd['args[]'] = arg_list
        command_url = self._helper_http.createURLFromAjaxCommand(ajax_cmd,
                                                                 address=BASE_URL)
        response = self.client.get(command_url)
        content = json.loads(response.content)
        self.assertTrue(content.has_key("success"))

    def _tableTrim(self, row_idx, expected_number_rows):
        base_response = self._createBaseTable()
        table = self._getTableFromResponse(base_response)
        row_name = table._rowNameFromIndex(row_idx)
        # Add the row
        ajax_cmd = self._helper_http.ajaxCommandFactory()
        ajax_cmd['target'] = 'Row'
        ajax_cmd['command'] = 'Append'
        ajax_cmd['row'] = row_name
        command_url = self._helper_http.createURLFromAjaxCommand(ajax_cmd,
                                                                 address=BASE_URL)
        response = self.client.get(command_url)
        # Do the trim
        ajax_cmd
= self._helper_http.ajaxCommandFactory() ajax_cmd['target'] = 'Table' ajax_cmd['columnName'] = table.getName() ajax_cmd['command'] = 'Trim' command_url = self._helper_http.createURLFromAjaxCommand(ajax_cmd, address=BASE_URL) response = self.client.get(command_url) # Check the table new_table = self._getTableFromResponse(response) self.assertEqual(new_table.numRows(), expected_number_rows) def testTableTrim(self): if IGNORE_TEST: return self._tableTrim(0, NROW+1) self._tableTrim(NROW, NROW) def _tableRename(self, new_name, is_valid_name): base_response = self._createBaseTable() table = self._getTableFromResponse(base_response) old_name = table.getName(is_global_name=False) # Rename the table ajax_cmd = self._helper_http.ajaxCommandFactory() ajax_cmd['target'] = 'Table' ajax_cmd['command'] = 'Rename' ajax_cmd['columnName'] = table.getName() ajax_cmd['args[]'] = new_name command_url = self._helper_http.createURLFromAjaxCommand(ajax_cmd, address=BASE_URL) response = self.client.get(command_url) # Check the result new_table = self._getTableFromResponse(response) content = json.loads(response.content) self.assertTrue(content.has_key("success")) if is_valid_name: content = json.loads(response.content) if not content["success"]: import pdb; pdb.set_trace() self.assertTrue(content["success"]) self.assertEqual(new_table.getName(is_global_name=False), new_name) else: self.assertFalse(content["success"]) self.assertEqual(new_table.getName(is_global_name=False), old_name) def testTableRename(self): if IGNORE_TEST: return self._tableRename("valid_name", True) self._tableRename("invalid_name!", False) def testTableListSheetFiles(self): if IGNORE_TEST: return filename = "dummy" helper = TableFileHelper(filename, st.SCISHEETS_USER_TBLDIR) helper.create() base_response = self._createBaseTable() table = self._getTableFromResponse(base_response) ajax_cmd = self._helper_http.ajaxCommandFactory() ajax_cmd['target'] = 'Sheet' ajax_cmd['command'] = 'ListSheetFiles' command_url = 
self._helper_http.createURLFromAjaxCommand(ajax_cmd, address=BASE_URL) response = self.client.get(command_url) content = json.loads(response.content) self.assertTrue("success" in content) self.assertTrue(content["success"]) self.assertTrue("data" in content) self.assertTrue(filename in content["data"]) helper.destroy() def testTableOpenSheetFiles(self): if IGNORE_TEST: return filename = "dummy" helper = TableFileHelper(filename, st.SCISHEETS_USER_TBLDIR) helper.create() ajax_cmd = self._helper_http.ajaxCommandFactory() ajax_cmd['target'] = 'Sheet' ajax_cmd['command'] = 'OpenSheetFile' ajax_cmd['args[]'] = filename command_url = self._helper_http.createURLFromAjaxCommand(ajax_cmd, address=BASE_URL) response = self.client.get(command_url) content = json.loads(response.content) self.assertTrue("success" in content) self.assertTrue(content["success"]) helper.destroy() def _tableSave(self, filename): """ Saves a table to file :param filename: - file name to save to """ base_response = self._createBaseTable() table = self._getTableFromResponse(base_response) ajax_cmd = self._helper_http.ajaxCommandFactory() ajax_cmd['target'] = 'Sheet' ajax_cmd['command'] = 'SaveAs' ajax_cmd['columnName'] = table.getName() ajax_cmd['args[]'] = filename command_url = self._helper_http.createURLFromAjaxCommand(ajax_cmd, address=BASE_URL) response = self.client.get(command_url) content = json.loads(response.content) self.assertTrue("success" in content) self.assertTrue(content["success"]) def testTableSave(self): if IGNORE_TEST: return filename = "dummy" _ = self._createBaseTable() helper = TableFileHelper(filename, st.SCISHEETS_USER_TBLDIR) helper.create() self._tableSave(filename) helper.destroy() def testTableDelete(self): if IGNORE_TEST: return filename = "dummy" helper = TableFileHelper(filename, st.SCISHEETS_USER_TBLDIR) _ = self._createBaseTable() helper.create() self._tableSave(filename) ajax_cmd = self._helper_http.ajaxCommandFactory() ajax_cmd['target'] = 'Sheet' 
ajax_cmd['command'] = 'Delete' command_url = self._helper_http.createURLFromAjaxCommand(ajax_cmd, address=BASE_URL) response = self.client.get(command_url) content = json.loads(response.content) self.assertTrue("success" in content) self.assertTrue(content["success"]) self.assertTrue(TableFileHelper.doesFilepathExist( st.SCISHEETS_DEFAULT_TABLEFILE)) #helper.destroy() def testTableNew(self): if IGNORE_TEST: return filename = st.SCISHEETS_DEFAULT_TABLEFILE _ = self._createBaseTable() ajax_cmd = self._helper_http.ajaxCommandFactory() ajax_cmd['target'] = 'Sheet' ajax_cmd['command'] = 'New' command_url = self._helper_http.createURLFromAjaxCommand(ajax_cmd, address=BASE_URL) response = self.client.get(command_url) content = json.loads(response.content) self.assertTrue("success" in content) self.assertTrue(content["success"]) self.assertTrue(TableFileHelper.doesFilepathExist( st.SCISHEETS_DEFAULT_TABLEFILE)) def testHierarchicalTable(self): if IGNORE_TEST: return base_response = self._createBaseTable(params=[-NCOL, NROW]) table = self._getTableFromResponse(base_response) self.assertEqual(table.numRows(), NROW) for col in table.getDataColumns(): self.assertEqual(len(col.getCells()), NROW) def testFormulaRowAddition(self): if IGNORE_TEST: return column_idx = 1 base_response = self._createBaseTable() table = self._getTableFromResponse(base_response) column = table.columnFromIndex(column_idx) column_name = column.getName(is_global_name=False) # Change the formula ajax_cmd = self._helper_http.ajaxCommandFactory() ajax_cmd['target'] = "Column" ajax_cmd['command'] = "Formula" ajax_cmd['columnName'] = column_name num_rows = 2*NROW ajax_cmd['args[]'] = "range(%d)" % num_rows command_url = self._helper_http.createURLFromAjaxCommand(ajax_cmd, address=BASE_URL) response = self.client.get(command_url) content = json.loads(response.content) self.assertTrue(content.has_key("success")) # Check the table table = self._getTableFromResponse(response) error = 
table.evaluate(user_directory=TEST_DIR) self.assertTrue(content["success"]) self.assertEqual(table.numRows(), num_rows) def _setFormula(self, table, formula, column_idx): """ Sets the formula for the column index :param Table table: :param str formula: :param int column_idx :return HTTP response: """ column = table.columnFromIndex(column_idx) column_name = column.getName(is_global_name=False) ajax_cmd = self._helper_http.ajaxCommandFactory() ajax_cmd['target'] = "Column" ajax_cmd['command'] = "Formula" ajax_cmd['columnName'] = column_name ajax_cmd['args[]'] = formula command_url = self._helper_http.createURLFromAjaxCommand(ajax_cmd, address=BASE_URL) response = self.client.get(command_url) content = json.loads(response.content) self.assertTrue(content.has_key("success")) if not content["success"]: import pdb; pdb.set_trace() self.assertTrue(content["success"]) #table = self._getTableFromResponse(response) return response def testTableWithLists(self): if IGNORE_TEST: return nrow = NROW + 1 ncol = 4 formula_columns = [3, 2] formula1 = '''xx = range(1,%d) Col_2 = [] for x in xx: Col_2.append(range(x)) ''' % nrow formula2 = """Col_1 = [] for x in Col_2: Col_1.append(np.average(x)) """ base_response = self._createBaseTable(params=[NROW, ncol]) old_table = self._getTableFromResponse(base_response) # Change the first formula response = self._setFormula(old_table, formula1, formula_columns[0]) # Change the second formula response = self._setFormula(old_table, formula2, formula_columns[1]) # Check the table new_table = self._getTableFromResponse(response) error = new_table.evaluate(user_directory=TEST_DIR) self.assertEqual(new_table.numColumns(), old_table.numColumns()) self.assertTrue(compareTableData(old_table, new_table, excludes=formula_columns)) val = new_table.getColumns()[formula_columns[1]].getCells()[1] self.assertEqual(val, 0.5) def testImportExcelToTable(self): if IGNORE_TEST: return column_idx = 1 filepath = os.path.join(TEST_DIR, 'RawData.xlsx') formula = "a = 
importExcelToTable(s, '%s')" % filepath base_response = self._createBaseTable() table = self._getTableFromResponse(base_response) column = table.columnFromIndex(column_idx) column_name = column.getName(is_global_name=False) # Reset the formula ajax_cmd = self._helper_http.ajaxCommandFactory() ajax_cmd['target'] =
# (tail of generate_from_spec -- the method header and the keyword introducing
#  this first branch lie before this chunk boundary)
"ref" in spec:
            value = self.generate(
                spec["ref"],
                expected_type=type_,
                spec_name_stack=spec_name_stack,
                value_constraint=value_constraint
            )
        elif "oneOf" in spec:
            # value of the "oneOf" property should be a list of specs.
            sub_spec = random.choice(spec["oneOf"])
            value = self.generate_from_spec(
                sub_spec,
                expected_type=type_,
                spec_name_stack=spec_name_stack,
                value_constraint=value_constraint
            )
        else:
            # Plain spec: dispatch on the JSON type name.
            generator = self.__GENERATOR_METHOD_MAP[type_]
            value = generator(self, spec, spec_name_stack)

        return value

    def generate_object(self, object_spec, spec_name_stack=None):
        """
        Generate a JSON object from the given specification.

        :param object_spec: A JSON object specification
        :param spec_name_stack: Specification name stack, for reference loop
            detection.  If None, use an empty stack.
        :return: A dict
        :raises stix2generator.exceptions.ObjectGenerationError: If a
            generation error occurs
        """
        # Handle imports: an "import" key seeds this object from another
        # named specification before local properties are filled in.
        if "import" in object_spec:
            imported_spec_name = object_spec["import"]
            gen_object = self.generate(
                imported_spec_name,
                expected_type="object",
                spec_name_stack=spec_name_stack
            )
        else:
            gen_object = {}

        # First, determine which properties to include
        names_to_include = _get_properties_to_include(
            object_spec,
            self.config.optional_property_probability,
            self.config.minimize_ref_properties
        )

        if names_to_include:
            # Then, find values for the included properties, according to
            # their specs and co-constraints.
            value_coconstraints = _get_value_coconstraints(object_spec)

            # At this point, if there were any names to include, there must
            # have been some properties defined!
            prop_specs = object_spec["properties"]

            for prop_name in names_to_include:
                # Generate constraint if necessary
                constraint = _get_value_constraint(
                    prop_name, value_coconstraints, gen_object
                )

                gen_object[prop_name] = self.generate_from_spec(
                    prop_specs[prop_name],
                    spec_name_stack=spec_name_stack,
                    value_constraint=constraint
                )

        return gen_object

    def generate_array(self, array_spec, spec_name_stack=None):
        """
        Generate a JSON array from the given specification.

        :param array_spec: A JSON array specification
        :param spec_name_stack: Specification name stack, for reference loop
            detection.  If None, use an empty stack.
        :return: A list
        :raises stix2generator.exceptions.ObjectGenerationError: If a
            generation error occurs
        """
        item_spec = array_spec["items"]

        # minItems/maxItems must be given together or not at all; a lone
        # bound would silently combine with a config default.
        has_min = "minItems" in array_spec
        has_max = "maxItems" in array_spec
        if (has_min and not has_max) or (not has_min and has_max):
            raise ObjectGenerationError(
                "Specification must include both or neither of the properties: "
                "minItems, maxItems",
                "array"
            )

        min_items = array_spec.get("minItems", self.config.array_length_min)
        max_items = array_spec.get("maxItems", self.config.array_length_max)

        if min_items > max_items:
            raise ObjectGenerationError(
                "minItems must be less than or equal to maxItems",
                "array"
            )

        if min_items < 0 or max_items < 0:
            raise ObjectGenerationError(
                "minItems and maxItems must be non-negative: {}".format(
                    min_items if min_items < 0 else max_items
                ),
                "array"
            )

        array = [
            self.generate_from_spec(item_spec, spec_name_stack=spec_name_stack)
            for _ in range(
                random.randint(min_items, max_items)
            )
        ]

        return array

    def generate_string(self, string_spec, spec_name_stack=None):
        """
        Generate a string from the given specification.

        :param string_spec: A string specification
        :param spec_name_stack: A specification name stack, for reference loop
            detection.  Unused but included for API compatibility with
            object/array generators.
        :return: A string
        :raises stix2generator.exceptions.ObjectGenerationError: If a
            generation error occurs
        """
        # minLength/maxLength must be given together or not at all.
        has_min = "minLength" in string_spec
        has_max = "maxLength" in string_spec
        if (has_min and not has_max) or (not has_min and has_max):
            raise ObjectGenerationError(
                "Specification must include both or neither of the properties: "
                "minLength, maxLength",
                "string"
            )

        min_length = string_spec.get("minLength", self.config.string_length_min)
        max_length = string_spec.get("maxLength", self.config.string_length_max)

        if min_length > max_length:
            raise ObjectGenerationError(
                "minLength must be less than or equal to maxLength: {} <= {}"
                .format(min_length, max_length),
                "string"
            )

        if min_length < 0 or max_length < 0:
            raise ObjectGenerationError(
                "minLength and maxLength must be non-negative: {}".format(
                    min_length if min_length < 0 else max_length
                ),
                "string"
            )

        # Uniform random characters drawn from the configured alphabet.
        s = "".join(
            random.choice(self.config.string_chars)
            for _ in range(
                random.randint(min_length, max_length)
            )
        )

        return s

    def generate_integer(self, integer_spec, spec_name_stack=None):
        """
        Generate an integer from the given specification.

        :param integer_spec: An integer specification
        :param spec_name_stack: A specification name stack, for reference loop
            detection.  Unused but included for API compatibility with
            object/array generators.
        :return: An int
        :raises stix2generator.exceptions.ObjectGenerationError: If a
            generation error occurs
        """
        min_, is_min_exclusive, max_, is_max_exclusive = \
            _process_numeric_min_max_properties(
                integer_spec,
                self.config.number_min,
                self.config.is_number_min_exclusive,
                self.config.number_max,
                self.config.is_number_max_exclusive
            )

        # Guess I won't assume the user expressed the bounds as ints, so I
        # need to convert to ints and check the resulting bounds.  The
        # call above to process min/max properties doesn't assume we require
        # ints.
        if int(min_) == min_:
            # Bound is integral: an exclusive bound shifts inward by one.
            min_ = int(min_)
            if is_min_exclusive:
                min_ += 1
        else:
            # Fractional bound: the nearest integer inside the interval.
            min_ = int(math.ceil(min_))

        if int(max_) == max_:
            max_ = int(max_)
            if is_max_exclusive:
                max_ -= 1
        else:
            max_ = int(math.floor(max_))

        if min_ > max_:
            raise ObjectGenerationError(
                "no integers exist in the specified interval",
                "integer"
            )

        return random.randint(min_, max_)

    def generate_number(self, number_spec, spec_name_stack=None):
        """
        Generate a number (float) from the given specification.

        :param number_spec: A number specification
        :param spec_name_stack: A specification name stack, for reference loop
            detection.  Unused but included for API compatibility with
            object/array generators.
        :return: A float
        :raises stix2generator.exceptions.ObjectGenerationError: If a
            generation error occurs
        """
        min_, is_min_exclusive, max_, is_max_exclusive = \
            _process_numeric_min_max_properties(
                number_spec,
                self.config.number_min,
                self.config.is_number_min_exclusive,
                self.config.number_max,
                self.config.is_number_max_exclusive
            )

        # Pick the sampler matching the interval's open/closed endpoints.
        if is_min_exclusive and is_max_exclusive:
            n = _random_open(min_, max_)
        elif is_min_exclusive:
            n = _random_half_open_lower(min_, max_)
        elif is_max_exclusive:
            n = _random_half_open_upper(min_, max_)
        else:
            n = _random_closed(min_, max_)

        return n

    def generate_boolean(self, boolean_spec, spec_name_stack=None):
        """
        Generate a boolean from the given specification.

        :param boolean_spec: A boolean specification (ignored; there's nothing
            to configure for now)
        :param spec_name_stack: A specification name stack, for reference loop
            detection.  Unused but included for API compatibility with
            object/array generators.
        :return: True or False
        """
        # Fair coin flip.
        if random.random() < 0.5:
            return True
        return False

    def generate_null(self, null_spec, spec_name_stack=None):
        """
        Generate null (None).

        :param null_spec: A null specification (ignored; there's nothing to
            configure for now)
        :param spec_name_stack: A specification name stack, for reference loop
            detection.  Unused but included for API compatibility with
            object/array generators.
        :return: None
        """
        return None

    # This has to be at the bottom, after the methods are defined.  Would it
    # have been better to store method names and use getattr() to get the
    # methods instead?  Or generate a function name from a template?  This is
    # yet another map to keep sync'd up with others.  Think about ways of
    # improving this situation...
    __GENERATOR_METHOD_MAP = {
        "object": generate_object,
        "array": generate_array,
        "string": generate_string,
        "integer": generate_integer,
        "number": generate_number,
        "boolean": generate_boolean,
        "null": generate_null
    }


def _get_logger():
    # Lazily create and cache the module logger in the _log global.
    global _log
    if _log is None:
        _log = logging.getLogger(__name__)

    return _log


def _get_value_coconstraints(object_spec):
    """
    Get the value coconstraints, if any, from the given object specification.
    This also does some error checking.

    :param object_spec: The object specification whose value coconstraints
        should be checked.
    :return: A list of ValueCoconstraint objects; will be empty if there are
        none defined.
    :raises stix2generator.exceptions.ValueCoconstraintError: If an invalid
        value co-constraint is found.
    """
    # This function shouldn't be called if object_spec has no properties, but
    # just in case...
    assert "properties" in object_spec

    value_coconstraints = object_spec.get("value-coconstraints", [])
    prop_specs = object_spec["properties"]

    coconstraint_objs = []
    for coconstraint in value_coconstraints:
        coconstraint_obj = \
            stix2generator.generation.constraints.make_value_coconstraint(
                coconstraint
            )
        coconstraint_objs.append(coconstraint_obj)

        # Both sides of a co-constraint must name declared properties.
        if coconstraint_obj.prop_name_left not in prop_specs:
            raise ValueCoconstraintError(
                coconstraint,
                "Property '{}' undefined in specification".format(
                    coconstraint_obj.prop_name_left
                )
            )

        if coconstraint_obj.prop_name_right not in prop_specs:
            raise ValueCoconstraintError(
                coconstraint,
                "Property '{}' undefined in specification".format(
                    coconstraint_obj.prop_name_right
                )
            )

    # Another scan through the coconstraints to check for properties
    # referenced more than once.
    prop_occurrence_counts = {}

    def inc_count_for_key(d, k):
        # Increment d[k], treating a missing key as zero.
        count = d.setdefault(k, 0) + 1
        d[k] = count

    for coconstraint_obj in coconstraint_objs:
        inc_count_for_key(
            prop_occurrence_counts, coconstraint_obj.prop_name_left
        )
        inc_count_for_key(
            prop_occurrence_counts, coconstraint_obj.prop_name_right
        )

    props_with_count_gt_1 = [
        k for k, v in prop_occurrence_counts.items()
        if v > 1
    ]

    if props_with_count_gt_1:
        # Duplicates are legal but only the first co-constraint wins, so warn.
        log = _get_logger()
        log.warning(
            "Some properties are referenced in more than one value"
            " co-constraint.  If such a property requires constraining based on"
            " another property value, only the first such co-constraint will be"
            " consulted: %s",
            ", ".join(props_with_count_gt_1)
        )

    return coconstraint_objs


def _check_property_groups(groups_spec, property_specs):
    """
    Do some sanity checks on the given property groups: empty groups, bad
    property names, naming conflicts, etc.  Runs for side-effects (exceptions)
    and doesn't return anything.

    :param groups_spec: The groups spec from a presence coconstraint
        specification from an object specification
    :param property_specs: The properties specifications from an object
        specification
    :raises stix2generator.exceptions.InvalidPropertyGroupError: If there is a
        problem with a property group
    """
    for group_name, prop_names in groups_spec.items():
        if not prop_names:
            raise InvalidPropertyGroupError(
                group_name,
                "group can't be empty"
            )

        if group_name in property_specs:
            raise InvalidPropertyGroupError(
                group_name,
                "group name conflicts with a property name"
            )

        undef_props = set(prop_names) - property_specs.keys()
        if undef_props:
            raise InvalidPropertyGroupError(
                group_name,
                'undefined property(s): {}'.format(
                    ", ".join(undef_props)
                )
            )

    # check pairwise intersections to ensure all groups are disjoint
    # (truncated at the chunk boundary)
    if len(groups_spec) > 1:
/alfacase_definitions/list_of_unit_for_length.txt .. include:: /alfacase_definitions/list_of_unit_for_pressure.txt .. include:: /alfacase_definitions/list_of_unit_for_dimensionless.txt """ reference_coordinate: Scalar = attr.ib(default=Scalar(0.0, "m")) positions: Array = attr.ib(default=Array([], "m")) fractions: Dict[PhaseName, Array] = attr.ib(default={}) @attr.s(frozen=True, slots=True) class VolumeFractionsContainerDescription: """ .. include:: /alfacase_definitions/VolumeFractionsContainerDescription.txt .. include:: /alfacase_definitions/list_of_unit_for_length.txt .. include:: /alfacase_definitions/list_of_unit_for_dimensionless.txt """ positions: Array = attr.ib(default=Array([0.0], "m")) fractions: Dict[PhaseName, Array] = attr.ib( default={ constants.FLUID_GAS: Array([0.1], "-"), constants.FLUID_OIL: Array([0.9], "-"), }, validator=dict_of_array, ) @attr.s(frozen=True, slots=True) class InitialVolumeFractionsDescription: """ .. include:: /alfacase_definitions/InitialVolumeFractionsDescription.txt """ position_input_type = attrib_enum(default=constants.TableInputType.length) table_x: ReferencedVolumeFractionsContainerDescription = attr.ib( default=ReferencedVolumeFractionsContainerDescription() ) table_y: ReferencedVolumeFractionsContainerDescription = attr.ib( default=ReferencedVolumeFractionsContainerDescription() ) table_length: VolumeFractionsContainerDescription = attr.ib( default=VolumeFractionsContainerDescription() ) @attr.s(frozen=True, slots=True) class ReferencedTracersMassFractionsContainerDescription: """ .. include:: /alfacase_definitions/ReferencedTracersMassFractionsContainerDescription.txt .. include:: /alfacase_definitions/list_of_unit_for_length.txt .. 
include:: /alfacase_definitions/list_of_unit_for_dimensionless.txt """ reference_coordinate: Scalar = attr.ib(default=Scalar(0.0, "m")) positions: Array = attr.ib(default=Array([], "m")) tracers_mass_fractions: List[Array] = attr.ib(default=[]) @attr.s(frozen=True, slots=True) class TracersMassFractionsContainerDescription: """ .. include:: /alfacase_definitions/TracersMassFractionsContainerDescription.txt .. include:: /alfacase_definitions/list_of_unit_for_length.txt .. include:: /alfacase_definitions/list_of_unit_for_dimensionless.txt """ positions: Array = attr.ib(default=Array([], "m")) tracers_mass_fractions: List[Array] = attr.ib(default=[]) @attr.s(frozen=True, slots=True) class InitialTracersMassFractionsDescription: """ .. include:: /alfacase_definitions/InitialTracersMassFractionsDescription.txt """ position_input_type = attrib_enum(default=constants.TableInputType.length) table_x: ReferencedTracersMassFractionsContainerDescription = attr.ib( default=ReferencedTracersMassFractionsContainerDescription() ) table_y: ReferencedTracersMassFractionsContainerDescription = attr.ib( default=ReferencedTracersMassFractionsContainerDescription() ) table_length: TracersMassFractionsContainerDescription = attr.ib( default=TracersMassFractionsContainerDescription() ) @attr.s(frozen=True, slots=True) class ReferencedVelocitiesContainerDescription: """ .. include:: /alfacase_definitions/ReferencedVelocitiesContainerDescription.txt .. include:: /alfacase_definitions/list_of_unit_for_length.txt .. include:: /alfacase_definitions/list_of_unit_for_velocity.txt """ reference_coordinate: Scalar = attr.ib(default=Scalar(0.0, "m")) positions: Array = attr.ib(default=Array([], "m")) velocities: Dict[PhaseName, Array] = attr.ib(default={}) @attr.s(frozen=True, slots=True) class VelocitiesContainerDescription: """ .. include:: /alfacase_definitions/VelocitiesContainerDescription.txt .. include:: /alfacase_definitions/list_of_unit_for_length.txt .. 
include:: /alfacase_definitions/list_of_unit_for_velocity.txt """ positions: Array = attr.ib(default=Array([0.0], "m")) velocities: Dict[PhaseName, Array] = attr.ib( default={ constants.FLUID_GAS: Array([1e-8], "m/s"), constants.FLUID_OIL: Array([1e-8], "m/s"), }, validator=dict_of_array, ) @attr.s(frozen=True, slots=True) class InitialVelocitiesDescription: """ .. include:: /alfacase_definitions/InitialVelocitiesDescription.txt """ position_input_type = attrib_enum(default=constants.TableInputType.length) table_x: ReferencedVelocitiesContainerDescription = attr.ib( default=ReferencedVelocitiesContainerDescription() ) table_y: ReferencedVelocitiesContainerDescription = attr.ib( default=ReferencedVelocitiesContainerDescription() ) table_length: VelocitiesContainerDescription = attr.ib( default=VelocitiesContainerDescription() ) @attr.s(frozen=True, slots=True) class ReferencedTemperaturesContainerDescription: """ .. include:: /alfacase_definitions/ReferencedTemperaturesContainerDescription.txt .. include:: /alfacase_definitions/list_of_unit_for_length.txt .. include:: /alfacase_definitions/list_of_unit_for_temperature.txt """ reference_coordinate: Scalar = attr.ib(default=Scalar(0.0, "m")) positions: Array = attr.ib(default=Array([], "m")) temperatures: Array = attr.ib(default=Array([], "K")) @attr.s(frozen=True, slots=True) class TemperaturesContainerDescription: """ .. include:: /alfacase_definitions/TemperaturesContainerDescription.txt .. include:: /alfacase_definitions/list_of_unit_for_length.txt .. include:: /alfacase_definitions/list_of_unit_for_temperature.txt """ positions: Array = attr.ib(default=Array([0.0], "m")) temperatures: Array = attr.ib( default=Array([constants.DEFAULT_TEMPERATURE_IN_K], "K") ) @attr.s(frozen=True, slots=True) class InitialTemperaturesDescription: """ .. 
include:: /alfacase_definitions/InitialTemperaturesDescription.txt """ position_input_type = attrib_enum(default=constants.TableInputType.length) table_x: ReferencedTemperaturesContainerDescription = attr.ib( default=ReferencedTemperaturesContainerDescription() ) table_y: ReferencedTemperaturesContainerDescription = attr.ib( default=ReferencedTemperaturesContainerDescription() ) table_length: TemperaturesContainerDescription = attr.ib( default=TemperaturesContainerDescription() ) @attr.s(slots=True, kw_only=True) class InitialConditionsDescription: """ .. include:: /alfacase_definitions/InitialConditionsDescription.txt """ pressures: InitialPressuresDescription = attr.ib( default=InitialPressuresDescription() ) volume_fractions: InitialVolumeFractionsDescription = attr.ib( default=InitialVolumeFractionsDescription() ) tracers_mass_fractions: InitialTracersMassFractionsDescription = attr.ib( default=InitialTracersMassFractionsDescription() ) velocities: InitialVelocitiesDescription = attr.ib( default=InitialVelocitiesDescription() ) temperatures: InitialTemperaturesDescription = attr.ib( default=InitialTemperaturesDescription() ) fluid: Optional[str] = attr.ib(default=None, validator=optional(instance_of(str))) @attr.s(frozen=True) class InitialConditionArrays: """ .. include:: /alfacase_definitions/InitialConditionArrays.txt .. include:: /alfacase_definitions/list_of_unit_for_pressure.txt .. include:: /alfacase_definitions/list_of_unit_for_velocity.txt .. include:: /alfacase_definitions/list_of_unit_for_volume_fraction.txt .. 
include:: /alfacase_definitions/list_of_unit_for_temperature.txt """ pressure: Array = attr.ib(validator=instance_of(Array)) volume_fractions: Dict[PhaseName, Array] = attr.ib(validator=dict_of_array) velocity: Dict[PhaseName, Array] = attr.ib(validator=dict_of_array) temperature: Dict[str, Array] = attr.ib(validator=dict_of_array) x_coord_center: Optional[Array] = attr.ib( default=None, validator=optional(instance_of(Array)) ) x_coord_face: Optional[Array] = attr.ib( default=None, validator=optional(instance_of(Array)) ) value_and_unit = Tuple[Number, str] @attr.s(frozen=True, slots=True) class LengthAndElevationDescription: """ Describe a pipe with length and elevation. .. include:: /alfacase_definitions/LengthAndElevationDescription.txt .. include:: /alfacase_definitions/list_of_unit_for_length.txt """ length: Optional[Array] = attr.ib( default=None, validator=optional(instance_of(Array)) ) elevation: Optional[Array] = attr.ib( default=None, validator=optional(instance_of(Array)) ) def iter_values_and_unit( self, ) -> Iterator[Tuple[value_and_unit, value_and_unit]]: """Returns an iterator containing a pair of values with length and elevation along with their units.""" if self.length and self.elevation: length_values = self.length.GetValues(self.length.unit) elevation_values = self.elevation.GetValues(self.elevation.unit) for length, elevation in zip(length_values, elevation_values): yield (length, self.length.unit), (elevation, self.elevation.unit) return iter(()) @attr.s(frozen=True, slots=True) class XAndYDescription: """ Describe a pipe with a sequence of coordinates. .. include:: /alfacase_definitions/XAndYDescription.txt .. 
include:: /alfacase_definitions/list_of_unit_for_length.txt """ x: Optional[Array] = attr.ib(default=None, validator=optional(instance_of(Array))) y: Optional[Array] = attr.ib(default=None, validator=optional(instance_of(Array))) def iter_values_and_unit( self, ) -> Iterator[Tuple[value_and_unit, value_and_unit]]: """Returns a pair of values with the x and y value along with their units.""" for x, y in zip(self.x.GetValues(self.x.unit), self.y.GetValues(self.y.unit)): yield (x, self.x.unit), (y, self.y.unit) @attr.s() class ProfileDescription: """ Describe a pipe by either length and inclination or by X and Y coordinates. :ivar length_and_elevation: A list of points with the length and elevation. The first item *MUST* always be (0, 0), otherwise a ValueError is raised. :ivar x_and_y: A list of points (X, Y), describing the coordinates. .. note:: x_and_y and length_and_elevation are mutually exclusive. .. include:: /alfacase_definitions/ProfileDescription.txt """ x_and_y: Optional[XAndYDescription] = attr.ib(default=None) length_and_elevation: Optional[LengthAndElevationDescription] = attr.ib( default=None ) def __attrs_post_init__(self): if self.length_and_elevation and self.x_and_y: msg = ( f"length_and_elevation and x_and_y are mutually exclusive and you must configure only one of them, got " f"length_and_elevation={self.length_and_elevation} and x_and_y={self.x_and_y}" ) raise ValueError(msg) @attr.s() class EquipmentDescription: """ .. 
include:: /alfacase_definitions/EquipmentDescription.txt
    """

    # Each attribute maps an equipment name -> its description object.
    mass_sources = attrib_dict_of(MassSourceEquipmentDescription)
    pumps = attrib_dict_of(PumpEquipmentDescription)
    valves = attrib_dict_of(ValveEquipmentDescription)
    reservoir_inflows = attrib_dict_of(ReservoirInflowEquipmentDescription)
    heat_sources = attrib_dict_of(HeatSourceEquipmentDescription)
    compressors = attrib_dict_of(CompressorEquipmentDescription)
    leaks = attrib_dict_of(LeakEquipmentDescription)
    pigs = attrib_dict_of(PigEquipmentDescription)


@attr.s(frozen=True, slots=True, kw_only=True)
class EnvironmentPropertyDescription:
    """
    Thermal properties of the environment at one position along the pipe.

    .. include:: /alfacase_definitions/EnvironmentPropertyDescription.txt

    .. include:: /alfacase_definitions/list_of_unit_for_length.txt
    .. include:: /alfacase_definitions/list_of_unit_for_temperature.txt
    .. include:: /alfacase_definitions/list_of_unit_for_heat_transfer_coefficient.txt
    .. include:: /alfacase_definitions/list_of_unit_for_velocity.txt
    """

    position = attrib_scalar(category="length")
    temperature = attrib_scalar(category="temperature")
    # Heat-transfer-coefficient model used for this entry ("type" mirrors the
    # alfacase schema name, hence the builtin shadowing).
    type = attrib_enum(type_=constants.PipeEnvironmentHeatTransferCoefficientModelType)
    heat_transfer_coefficient = attrib_scalar(default=Scalar(0.0, "W/m2.K"))
    overall_heat_transfer_coefficient = attrib_scalar(default=Scalar(0.0, "W/m2.K"))
    fluid_velocity = attrib_scalar(default=Scalar(0.0, "m/s"))


@attr.s(frozen=True, slots=True, kw_only=True)
class EnvironmentDescription:
    """
    .. include:: /alfacase_definitions/EnvironmentDescription.txt

    ..
include:: /alfacase_definitions/list_of_unit_for_length.txt
    """

    thermal_model = attrib_enum(default=constants.PipeThermalModelType.SteadyState)
    # Whether environment property positions are given as MD or TVD.
    position_input_mode = attrib_enum(default=constants.PipeThermalPositionInput.Md)
    reference_y_coordinate = attrib_scalar(default=Scalar("length", 0.0, "m"))
    md_properties_table = attrib_instance_list(EnvironmentPropertyDescription)
    tvd_properties_table = attrib_instance_list(EnvironmentPropertyDescription)

    @property
    def properties_table(self):  # pragma: no cover
        # Convenience accessor: pick the table matching the configured
        # position input mode.
        if self.position_input_mode == constants.PipeThermalPositionInput.Md:
            return self.md_properties_table
        else:
            return self.tvd_properties_table


@attr.s(slots=True)
class PipeDescription:
    """
    .. include:: /alfacase_definitions/PipeDescription.txt
    """

    name: str = attr.ib(validator=instance_of(str))
    # Names of the network elements this pipe connects.
    source: str = attr.ib(validator=instance_of(str))
    target: str = attr.ib(validator=instance_of(str))
    source_port: Optional[constants.WellConnectionPort] = attr.ib(
        default=None, validator=optional(in_(constants.WellConnectionPort))
    )
    target_port: Optional[constants.WellConnectionPort] = attr.ib(
        default=None, validator=optional(in_(constants.WellConnectionPort))
    )
    # Name of the PVT model to use; None means inherit the case default.
    pvt_model: Optional[str] = attr.ib(
        default=None, validator=optional(instance_of(str))
    )
    profile = attrib_instance(ProfileDescription)
    equipment = attrib_instance(EquipmentDescription)
    environment = attrib_instance(EnvironmentDescription)
    segments = attrib_instance(PipeSegmentsDescription)
    # Initial Condition Section
    initial_conditions = attrib_instance(InitialConditionsDescription)


@attr.s(slots=True, kw_only=True)
class PressureNodePropertiesDescription(_PressureSourceCommon):
    """
    .. include:: /alfacase_definitions/PressureNodePropertiesDescription.txt
    """


@attr.s(slots=True, kw_only=True)
class MassSourceNodePropertiesDescription(_MassSourceCommon):
    """
    .. include:: /alfacase_definitions/MassSourceNodePropertiesDescription.txt
    """


@attr.s(slots=True, kw_only=True)
class InternalNodePropertiesDescription:
    """
    ..
include:: /alfacase_definitions/InternalNodePropertiesDescription.txt """ fluid: Optional[str] = attr.ib(default=None, validator=optional(instance_of(str))) @attr.s(slots=True, kw_only=True) class SeparatorNodePropertiesDescription: """ :ivar overall_heat_transfer_coefficient: η such that the overall heat transferred to the separator is Q = η A (T_amb - T_sep) .. include:: /alfacase_definitions/SeparatorNodePropertiesDescription.txt .. include:: /alfacase_definitions/list_of_unit_for_length.txt .. include:: /alfacase_definitions/list_of_unit_for_temperature.txt .. include:: /alfacase_definitions/list_of_unit_for_heat_transfer_coefficient.txt .. include:: /alfacase_definitions/list_of_unit_for_volume_fraction.txt """ environment_temperature = attrib_scalar(default=Scalar(25.0, "degC")) geometry = attrib_enum(default=constants.SeparatorGeometryType.VerticalCylinder) length = attrib_scalar(default=Scalar(1.0, "m")) overall_heat_transfer_coefficient = attrib_scalar(default=Scalar(0.0, "W/m2.K")) diameter = attrib_scalar(default=Scalar(1.0, "m")) nozzles: Dict[str, Scalar] = attr.ib( default=attr.Factory(dict), validator=optional(dict_with_scalar) ) initial_phase_volume_fractions: Dict[str, Scalar] = attr.ib( default={ constants.FLUID_GAS: Scalar("volume fraction", 0.5, "-"), constants.FLUID_OIL: Scalar("volume fraction", 0.5, "-"), } ) gas_separation_efficiency = attrib_scalar(default=Scalar("dimensionless", 1.0, "-")) liquid_separation_efficiency = attrib_scalar( default=Scalar("dimensionless", 1.0, "-") ) @diameter.validator def _validate_diameter(self, attribute, value): assert ( isinstance(value, Scalar) and value.GetCategory() == "length" ), "Invalid diameter" @length.validator def _validate_length(self, attribute, value): assert ( isinstance(value, Scalar) and value.GetCategory() == "length" ), "Invalid length" @gas_separation_efficiency.validator def _validate_gas_separation_efficiency(self, attribute, value): assert isinstance(value, Scalar) and 0.6 <= 
value.GetValue("-") <= 1.0 @liquid_separation_efficiency.validator def _validate_liquid_separation_efficiency(self, attribute, value): assert isinstance(value, Scalar) and 0.6 <= value.GetValue("-") <= 1.0 @attr.s(slots=True, kw_only=True) class ControllerInputSignalPropertiesDescription: """ :ivar target_variable: Measured variable target of controller setpoint :ivar unit: Measuring unit of target variable :ivar input_trend_name: Name of input trend where target variable is measured .. include:: /alfacase_definitions/ControllerInputSignalPropertiesDescription.txt """ target_variable: Optional[str] = attr.ib( default=None, validator=optional(instance_of(str)) ) input_trend_name: Optional[str] = attr.ib( default=None, validator=optional(instance_of(str)) ) unit: Optional[str] = attr.ib(default=None, validator=optional(instance_of(str))) @attr.s(slots=True, kw_only=True) class ControllerOutputSignalPropertiesDescription: """ :ivar controlled_property: Property under control to make target variable reach setpoint :ivar unit: Measuring unit of controlled property :ivar network_element_name: Name of network element that has controlled property :ivar min_value: Minimum value of output signal :ivar max_value: Maximum value of output signal :ivar max_rate_of_change: Maximum rate of change of output signal .. 
include:: /alfacase_definitions/ControllerOutputSignalPropertiesDescription.txt
    """

    controlled_property: Optional[str] = attr.ib(
        default=None, validator=optional(instance_of(str))
    )
    unit: Optional[str] = attr.ib(default=None, validator=optional(instance_of(str)))
    network_element_name: Optional[str] = attr.ib(
        default=None, validator=optional(instance_of(str))
    )
    # Output saturation limits and slew-rate limit (±1e50 ≈ unbounded).
    min_value: float = attr.ib(default=-1.0e50, converter=float)
    max_value: float = attr.ib(default=1.0e50, converter=float)
    max_rate_of_change: float = attr.ib(default=1.0e50, converter=float)

    @max_rate_of_change.validator
    def _validate_max_rate_of_change(self, attribute, value):
        # Rate of change must be non-negative.
        assert isinstance(value, float) and value >= 0.0


@attr.s(slots=True, kw_only=True)
class ControllerNodePropertiesDescription:
    """
    :ivar type:
        Type of controlling model

    :ivar gain:
        Proportional constant of PID controller

    :ivar setpoint:
        Target value for input signal

    :ivar integral_time:
        Integral constant of PID controller

    :ivar derivative_time:
        Derivative constant of PID controller

    :ivar input_signal_properties:
        Properties of input signal

    :ivar output_signal_properties:
        Properties of output signal

    .. include:: /alfacase_definitions/ControllerNodePropertiesDescription.txt
    """

    type = attrib_enum(default=constants.ControllerType.PID)
    gain: float = attr.ib(default=1e-4, converter=float)
    setpoint: float = attr.ib(default=0.0, converter=float)
    integral_time = attrib_scalar(default=Scalar(10, "s"))
    derivative_time = attrib_scalar(default=Scalar(1, "s"))
    input_signal_properties = attrib_instance(
        ControllerInputSignalPropertiesDescription
    )
    output_signal_properties = attrib_instance(
        ControllerOutputSignalPropertiesDescription
    )

    @integral_time.validator
    def _validate_integral_time(self, attribute, value):
        assert (
            isinstance(value, Scalar)
            and value.GetCategory() == "time"
            and value.GetValue("s") >
<reponame>pshchelo/vampy<gh_stars>1-10
#!/usr/bin/env python
'''Top frame of the VamPy application
'''
import glob, os

import wx
from numpy import empty

import matplotlib as mplt
mplt.use('WXAgg', warn=False)
from matplotlib import cm
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as NavigationToolbar2
from matplotlib.figure import Figure

from calc import analysis, features, load, smooth
import tension, debug, geometry, widgets
from resources import MICROSCOPE, SAVETXT, OPENFOLDER
from calc.common import OWNPATH, SIDES, DATWILDCARD, CFG_FILENAME
from calc.common import split_to_int
from dialogs import VampyOtherUserDataDialog


class VampyImageConfigPanel(wx.Panel):
    '''Sets parameters to configure the image properties'''
    def __init__(self, parent):
        # Builds the "Image Config" box: one crop text field per image side,
        # choice controls for orientation/mode/polarization, and boolean
        # checkboxes.  All children start disabled until Initialize() is
        # called with a loaded image config.
        wx.Panel.__init__(self, parent, -1)
        box = wx.StaticBox(self, -1, 'Image Config')
        boxsizer = wx.StaticBoxSizer(box, wx.VERTICAL)
        cropbox = wx.StaticBox(self, -1, 'Croppings')
        cropboxsizer = wx.StaticBoxSizer(cropbox, wx.VERTICAL)
        cropsizer = wx.FlexGridSizer(cols=2)
        for index,side in enumerate(SIDES):
            title = wx.StaticText(self, -1, side)
            # Controls are looked up later by name '<side>crop'.
            cropping = wx.TextCtrl(self, -1, '0', style = wx.TE_PROCESS_ENTER,
                                   name = side+'crop',
                                   validator = widgets.NumValidator('int', min=0))
            self.Bind(wx.EVT_TEXT_ENTER, parent.OnConfigImage, cropping)
            cropsizer.Add(title, 1, wx.GROW|wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL)
            cropsizer.Add(cropping, 1, wx.ALIGN_LEFT|wx.GROW)
        cropsizer.AddGrowableCol(1)
        cropboxsizer.Add(cropsizer, 1, wx.GROW)
        boxsizer.Add(cropboxsizer, 1, wx.GROW)
        sizer = wx.FlexGridSizer(cols=2)
        for value in self.ChoiceParams():
            paramname, longName, choices = value
            label = wx.StaticText(self, -1, paramname)
            chbox = wx.Choice(self, -1, choices=choices, name=paramname)
            sizer.Add(label, 1, wx.GROW|wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL)
            sizer.Add(chbox, 1, wx.GROW|wx.ALIGN_LEFT)
        sizer.AddGrowableCol(0)
        sizer.AddGrowableCol(1)
        boxsizer.Add(sizer, 0, wx.GROW)
        self.Bind(wx.EVT_CHOICE, parent.OnConfigImage, wx.FindWindowByName('orient'))
        self.Bind(wx.EVT_CHOICE, self.OnModeChoice, wx.FindWindowByName('mode'))
        for boolparam in self.BoolParams():
            cb = wx.CheckBox(self, -1, boolparam+"?", style=wx.ALIGN_LEFT, name=boolparam)
            boxsizer.Add(cb, 0, wx.ALIGN_LEFT)
        self.SetSizer(boxsizer)
        self.Fit()
        # Everything stays disabled until a folder of images is opened.
        for child in self.GetChildren():
            child.Enable(False)

    def ChoiceParams(self):
        # (name, long name, allowed choices) for every choice control.
        return (
            ('orient', 'Pipette orientation', SIDES),
            ('mode', 'Image mode', ('phc', 'dic')),
            ('polar', 'DiC polarization', ('left', 'right'))
            )

    def BoolParams(self):
        # Names of the boolean checkbox parameters.
        return ('fromnames', 'darktip')

    def Initialize(self, imgcfg):
        # Enable all controls and populate them from imgcfg (a dict of
        # previously stored settings); missing keys fall back to defaults.
        for child in self.GetChildren():
            child.Enable(True)
        for side in SIDES:
            cropctrl = wx.FindWindowByName(side+'crop')
            cropctrl.SetValue(str(imgcfg.get(side, 0)))
        for value in self.ChoiceParams():
            paramname, LongName, choices = value
            ctrl = wx.FindWindowByName(paramname)
            ctrl.SetStringSelection(imgcfg.get(paramname, choices[0]))
        # Sync the 'polar' control enabled-state with the chosen mode.
        self.OnModeChoice(wx.EVT_CHOICE)
        for boolparam in self.BoolParams():
            cb = wx.FindWindowByName(boolparam)
            cb.SetValue(int(imgcfg.get(boolparam, 0)))

    def GetCrop(self):
        # Return {side: int crop} read from the crop text controls.
        crops = {}
        for side in SIDES:
            ctrl = wx.FindWindowByName(side+'crop')
            crop = ctrl.GetValue()
            crops[side] = int(crop)
        return crops

    def GetOrient(self):
        return wx.FindWindowByName('orient').GetStringSelection()

    def GetChoices(self):
        params = {}
        for value in self.ChoiceParams():
            paramname, longName, choices = value
            params[paramname] = wx.FindWindowByName(paramname).GetStringSelection() #to convert from unicode
        return params

    def GetBools(self):
        params = {}
        for boolparam in self.BoolParams():
            params[boolparam] = wx.FindWindowByName(boolparam).GetValue()
        return params

    def GetParams(self):
        # Aggregate all image-config parameters into a single dict.
        params = self.GetChoices()
        params.update(self.GetBools())
        params.update(self.GetCrop())
        return params

    def OnModeChoice(self, evt):
        # 'polar' only makes sense for DiC images.
        modectrl = wx.FindWindowByName('mode')
        polarctrl = wx.FindWindowByName('polar')
        if modectrl.GetStringSelection()
== 'dic': polarctrl.Enable(True) else: polarctrl.Enable(False) class VampyAnalysisPanel(wx.Panel): """Shows other parameters needed for starting processing of images.""" def __init__(self, parent, id): wx.Panel.__init__(self, parent, id) box = wx.StaticBox(self, -1, 'Analysis Options') vsizer = wx.StaticBoxSizer(box, wx.VERTICAL) paramsizer = wx.FlexGridSizer(cols=2) label = wx.StaticText(self, -1, 'Smoothing') self.smoothchoice = wx.Choice(self, -1, choices = smooth.SMOOTHFILTERS.keys()) paramsizer.AddMany([(label,0,0), (self.smoothchoice,0,0)]) self.numparams = {'order':'2','window':'11','mismatch':'3'} self.boolparams = {'subpix':False,'extra':False} self.params = {} self.params.update(self.numparams) self.params.update(self.boolparams) for param in sorted(self.numparams, reverse=1): label = wx.StaticText(self, -1, param) val = wx.TextCtrl(self, -1, '0', name = param, validator = widgets.NumValidator('float', min = 0)) paramsizer.AddMany([(label,0,0), (val,0,0)]) for param in self.boolparams: cb = wx.CheckBox(self, -1, param+"?", style=wx.ALIGN_LEFT, name=param) paramsizer.Add(cb,0,0) subpixcb = self.FindWindowByName('subpix') subpixcb.Bind(wx.EVT_CHECKBOX, self.OnSubpix) vsizer.Add(paramsizer) btn = wx.Button(self, -1, 'Analyse') self.Bind(wx.EVT_BUTTON, parent.OnAnalyse, btn) vsizer.Add(btn) self.SetSizer(vsizer) self.Fit() self.SetState(False) def Initialize(self): self.SetState(True) self.smoothchoice.SetSelection(0) for param, val in self.params.items(): ctrl = wx.FindWindowByName(param) ctrl.SetValue(val) ### temporarily disabled, since not well implemented yet # for cb in self.boolparams: # ctrl = wx.FindWindowByName(cb) # ctrl.Enable(False) def SetState(self, state): for child in self.GetChildren(): child.Enable(state) def GetParams(self): params = {} params['smoothing']=self.smoothchoice.GetStringSelection() for param in self.numparams: ctrl = wx.FindWindowByName(param) params[param] = float(ctrl.GetValue()) for param in self.boolparams: params[param] = 
wx.FindWindowByName(param).GetValue() return params def OnSubpix(self, evt): evt.Skip() mismatchctrl = self.FindWindowByName('mismatch') subpix = self.FindWindowByName('subpix').GetValue() if subpix: mismatchctrl.SetState(True) else: mismatchctrl.SetState(False) class VampyImagePanel(wx.Panel): '''Shows image and sliders affecting image''' def __init__(self, parent, id): wx.Panel.__init__(self, parent, id, style = wx.BORDER_SUNKEN) self.Imgs = None vsizer = wx.BoxSizer(wx.VERTICAL) self.figure = Figure(facecolor = widgets.rgba_wx2mplt(self.GetBackgroundColour())) self.axes = self.figure.add_subplot(111) self.canvas = FigureCanvas(self, -1, self.figure) self.canvas.mpl_connect('motion_notify_event', parent.statusbar.SetPosition) vsizer.Add(self.canvas, 1, wx.ALIGN_LEFT|wx.ALIGN_TOP|wx.GROW) navtoolbar = NavigationToolbar2(self.canvas) navtoolbar.Realize() vsizer.Add(navtoolbar, 0, wx.ALIGN_LEFT|wx.GROW) slidersizer = wx.FlexGridSizer(cols=2) self.ImgNoTxt = wx.TextCtrl(self, -1, "0", size=(50,20), style = wx.TE_READONLY | wx.TE_CENTER) slidersizer.Add(self.ImgNoTxt, 0) self.ImgNoSlider = wx.Slider(self, -1, 1, 0, 1) self.Bind(wx.EVT_SCROLL, self.OnSlide, self.ImgNoSlider) slidersizer.Add(self.ImgNoSlider, 1, wx.GROW) self.paramsliders = [] regionlabel = wx.StaticText(self, -1, 'Aspirated\nVesicle') slidersizer.Add(regionlabel, 0, wx.ALIGN_RIGHT) self.regionslider = widgets.DoubleSlider(self, -1, (0,1), 0, 1, gap=2, name='aspves') self.Bind(wx.EVT_SLIDER, self.OnSlide, self.regionslider) self.paramsliders.append(self.regionslider) slidersizer.Add(self.regionslider, 1, wx.GROW|wx.ALIGN_LEFT) tiplabel = wx.StaticText(self, -1, 'Pipette Tip') slidersizer.Add(tiplabel, 0, wx.ALIGN_RIGHT) self.tipslider = widgets.DoubleSlider(self, -1, (0,1), 0, 1, gap=2, name='tip') self.Bind(wx.EVT_SLIDER, self.OnSlide, self.tipslider) self.paramsliders.append(self.tipslider) slidersizer.Add(self.tipslider, 1, wx.GROW|wx.ALIGN_LEFT) slidersizer.AddGrowableCol(1,1) 
        vsizer.Add(slidersizer, 0, wx.GROW)
        hsizer = wx.BoxSizer()
        axslidersizer = wx.FlexGridSizer(rows=2)
        # Two vertical double-sliders: pipette axis position and pipette
        # radius/wall thickness.
        name = 'axis'
        label = wx.StaticText(self, -1, name)
        axslidersizer.Add(label)
        self.axisslider = widgets.DoubleSlider(self, -1, (0,1), 0, 1, style=wx.SL_VERTICAL, name=name)
        self.Bind(wx.EVT_SLIDER, self.OnSlide, self.axisslider)
        name = 'pipette'
        label = wx.StaticText(self, -1, name)
        self.pipetteslider = widgets.DoubleSlider(self, -1, (0,1), 0, 1, style=wx.SL_VERTICAL, name=name)
        self.Bind(wx.EVT_SLIDER, self.OnSlide, self.pipetteslider)
        axslidersizer.Add(label)
        axslidersizer.Add(self.axisslider, 1, wx.GROW|wx.ALIGN_TOP)
        axslidersizer.Add(self.pipetteslider, 1, wx.GROW|wx.ALIGN_TOP)
        self.paramsliders.append(self.axisslider)
        self.paramsliders.append(self.pipetteslider)
        axslidersizer.AddGrowableRow(1,1)
        hsizer.Add(axslidersizer, 0, wx.GROW)
        hsizer.Add(vsizer, 1, wx.GROW)
        # Disabled until images are loaded.
        for child in self.GetChildren():
            child.Enable(False)
        self.SetSizer(hsizer)
        self.Fit()

    def GetImgNo(self):
        # 1-based index of the currently displayed image.
        return self.ImgNoSlider.GetValue()

    def SetImgNo(self):
        self.ImgNoTxt.SetValue(str(self.GetImgNo()))
#
    def SetRanges(self):
        # Reset slider ranges from the loaded image stack shape.
        imgno, ysize, xsize = self.Imgs.shape
        self.regionslider.SetRange(0,xsize-1)
        self.tipslider.SetRange(0,xsize-1)
        self.axisslider.SetRange(0, ysize-1)
        self.pipetteslider.SetRange(0, ysize/2)

    def Initialize(self):
        '''Draw the first image and initialise sliders'''
        for child in self.GetChildren():
            child.Enable(True)
        self.ImgNoSlider.SetRange(1, len(self.Imgs))
        self.ImgNoSlider.SetValue(1)
        self.SetImgNo()
        ImgsNo, ysize, xsize = self.Imgs.shape
        # NOTE(review): integer-style division (xsize/2 etc.) suggests this
        # code targets Python 2; under Python 3 these become floats — confirm.
        self.regionslider.SetRange(0, xsize-1)
        self.regionslider.SetValue((1, xsize-2))
        self.tipslider.SetRange(0, xsize-1)
        self.tipslider.SetValue((xsize/2, xsize/2+1))
        self.axisslider.SetRange(0, ysize-1)
        self.axisslider.SetValue((ysize/2, ysize/2))
        self.pipetteslider.SetRange(0, ysize/2)
        self.pipetteslider.SetValue((ysize/4, ysize/4))
        self.Draw()

    def GetParams(self):
        # Slider positions plus the raw image stack.
        params = self.GetSlidersPos()
        params['images'] = self.Imgs
        return params

    def GetSlidersPos(self):
        # {slider name: (low, high) value} for every parameter slider.
        params = {}
        for slider in self.paramsliders:
            key = str(slider.GetName())
            params[key] = slider.GetValue()
        return params

    def SetSlidersPos(self, imgcfg):
#===============================================================================
#        Here the actual names of parameters are referenced by hardcoding
#===============================================================================
        imgno, ysize, xsize = self.Imgs.shape
        strvalue = imgcfg.get('aspves', '')
        value, mesg = split_to_int(strvalue, (0, xsize-1))
        if mesg:
            self.GetParent().OnError(mesg)
        self.regionslider.SetValue(value)
        strvalue = imgcfg.get('tip', '')
        value, mesg = split_to_int(strvalue, (xsize/2, xsize/2+1))
        if mesg:
            self.GetParent().OnError(mesg)
        self.tipslider.SetValue(value)
        for key in ('axis','pipette'):
            strvalue = imgcfg.get(key, '')
            value, mesg = split_to_int(strvalue, (0, ysize/4))
            if mesg:
                self.GetParent().OnError(mesg)
            wx.FindWindowByName(key).SetValue(value)
        self.Draw()

    def OnSlide(self, evt):
        self.SetImgNo()
        self.Draw()

    def Draw(self):
        '''refresh image pane'''
        ImgNo = self.GetImgNo()
        self.axes.clear()
        # Vertical lines delimiting the aspirated-vesicle region.
        for value in self.regionslider.GetValue():
            self.axes.axvline(value)
        # Dashed pipette axis line.
        ydots = self.axisslider.GetValue()
        xdots = [0, self.regionslider.GetLow()]
        self.axes.plot(xdots, ydots, 'y--')
        # Shaded pipette-tip band.
        tiplimleft, tiplimright = self.tipslider.GetValue()
        self.axes.axvspan(tiplimleft, tiplimright, fc='g', alpha=0.5)
        # Four lines marking inner/outer pipette walls on both sides of axis.
        piprad, pipthick = self.pipetteslider.GetValue()
        line1 = [ydots[0]+piprad+pipthick, ydots[1]+piprad+pipthick]
        line2 = [ydots[0]+piprad, ydots[1]+piprad]
        line3 = [ydots[0]-piprad, ydots[1]-piprad]
        line4 = [ydots[0]-piprad-pipthick, ydots[1]-piprad-pipthick]
        self.axes.plot(xdots, line1, 'y-')
        self.axes.plot(xdots, line2, 'y-')
        self.axes.plot(xdots, line3, 'y-')
        self.axes.plot(xdots, line4, 'y-')
        self.axes.imshow(self.Imgs[ImgNo-1], aspect='equal', cmap=cm.get_cmap('gray'))
        self.canvas.draw()


class VampyFrame(wx.Frame):
    '''wxPython VAMP frontend'''
    def __init__(self, parent, id):
        self.maintitle =
'VamPy'
        wx.Frame.__init__(self, parent, id, title=self.maintitle)
        self.folder = None
        # Menu bar, toolbar and plot-aware status bar come from the shared
        # widgets module.
        self.menubar = widgets.SimpleMenuBar(self, self.MenuData())
        self.SetMenuBar(self.menubar)
        self.toolbar = widgets.SimpleToolbar(self, *self.ToolbarData())
        self.SetToolBar(self.toolbar)
        self.toolbar.Realize()
        self.statusbar = widgets.PlotStatusBar(self)
        self.SetStatusBar(self.statusbar)
        # Layout: image panel on the left, config/analysis panels stacked on
        # the right.
        hsizer = wx.BoxSizer(wx.HORIZONTAL)
        self.imgpanel = VampyImagePanel(self, -1)
        hsizer.Add(self.imgpanel, 1, wx.GROW)
        paramssizer = wx.BoxSizer(wx.VERTICAL)
        self.imgconfpanel = VampyImageConfigPanel(self)
        paramssizer.Add(self.imgconfpanel, 0, wx.ALL|wx.GROW)
        self.analysispanel = VampyAnalysisPanel(self, -1)
        paramssizer.Add(self.analysispanel, 0, wx.ALL|wx.GROW)
        emptypanel = wx.Panel(self, -1)
        paramssizer.Add(emptypanel, 1, wx.ALL|wx.GROW)
        hsizer.Add(paramssizer, 0, wx.GROW)
        self.SetSizer(hsizer)
        self.Fit()
        self.Centre()
        self.SetFrameIcons(MICROSCOPE, (16,24,32))

    def SetFrameIcons(self, artid, sizes):
        # Install the app icon at several sizes from the art provider.
        ib = wx.IconBundle()
        for size in sizes:
            ib.AddIcon(wx.ArtProvider.GetIcon(artid, size = (size,size)))
        self.SetIcons(ib)

    def ToolbarData(self):
        # ((bitmap, label, tooltip, toggle?), handler) tuples for the toolbar.
        bmpsavetxt = wx.ArtProvider.GetBitmap(SAVETXT, wx.ART_TOOLBAR, (32,32))
        bmpopenfolder = wx.ArtProvider.GetBitmap(OPENFOLDER, wx.ART_TOOLBAR, (32,32))
        return (
                ((bmpopenfolder,
                'Open Images Folder',
                'Open folder with images to analyse',
                False),
                self.OnOpenFolder),
                ((bmpsavetxt,
                'Save Image Info',
                'Save image Settings file',
                False),
                self.OnSave),
                )

    def MenuData(self):
        # Nested (label, tooltip, handler) structure consumed by
        # widgets.SimpleMenuBar.
        return [["&File", [
                    ("&Open Folder...\tCtrl+O", "Open folder with images", self.OnOpenFolder),
                    ("", "", ""),
                    ("&Exit", "Exit application", self.OnExit)]],
                ["&Help", [
                    ("&Help", "Display help", self.OnHelp),
                    ("&About...", "Show info about application", self.OnAbout)]],
                ["Debug", [
                    ("Reload", "Reload all dependencies", self.OnReload),
                    ("Debug image", "Debug current image", self.OnDebugImage)]]]

    def OnOpenFolder(self, evt):
        """
        Open directory of files, load them and initialise GUI
        @param evt: incoming event from caller
        """
        if not self.folder:
            self.folder = OWNPATH
        dirDlg = wx.DirDialog(self, message="Choose a directory",
                              defaultPath = self.folder)
        if dirDlg.ShowModal() != wx.ID_OK:
            dirDlg.Destroy()
            return
        self.folder = dirDlg.GetPath()
        dirDlg.Destroy()
        # NOTE(review): chdir mutates process-wide state; subsequent relative
        # paths depend on it — confirm this is intended.
        os.chdir(self.folder)
        extensions = ['png','tif']
        extDlg = wx.SingleChoiceDialog(self, 'Choose image file type', 'File type', extensions)
        if extDlg.ShowModal()
<reponame>kavanase/DefectsWithTheBoys
"""
This module is Freysoldt correction for isotropic systems
1) Freysoldt correction for isotropic systems.
   includes
   a) PC energy
   b) potential alignment by planar averaging.
If you use the corrections implemented in this module, cite
   Freysoldt, Neugebauer, and <NAME>,
   Phys. Rev. Lett. 102, 016402 (2009) [Optionally Phys. Status Solidi B.
   248, 1067-1076 (2011) ]
in addition to the pycdt paper
"""

import sys
import math
import logging

import numpy as np

from pymatgen.io.vasp.outputs import Locpot
from pymatgen.core.structure import Structure

from doped.pycdt.corrections.utils import *
from doped.pycdt.utils.units import hart_to_ev

norm = np.linalg.norm

import warnings

warnings.warn("Replacing PyCDT usage of Freysoldt base classes with calls to "
              "corresponding objects in pymatgen.analysis.defects.corrections\n"
              "Will remove QModel with Version 2.5 of PyCDT.",
              DeprecationWarning)

class QModel():
    """
    Model for the defect charge distribution.
    A combination of exponential tail and gaussian distribution is used
    (see Freysoldt (2011), DOI: 10.1002/pssb.201046289 )
    q_model(r) = q [x exp(-r/gamma) + (1-x) exp(-r^2/beta^2)]
            without normalization constants
    By default, gaussian distribution with 1 Bohr width is assumed.
    If defect charge is more delocalized, exponential tail is suggested.
    """
    def __init__(self, beta=1.0, expnorm=0.0, gamma=1.0):
        """
        Args:
            beta: Gaussian decay constant. Default value is 1 Bohr.
                  When delocalized (eg. diamond), 2 Bohr is more appropriate.
            expnorm: Weight for the exponential tail in the range of [0-1].
                     Default is 0.0 indicating no tail .
                     For delocalized charges ideal value is around 0.54-0.6.
            gamma: Exponential decay constant
        """
        # Squares are precomputed since only beta^2 / gamma^2 appear in the
        # reciprocal-space expressions below.
        self.beta2 = beta * beta
        self.x = expnorm
        self.gamma2 = gamma * gamma
        if expnorm and not gamma:
            raise ValueError("Please supply exponential decay constant.")

    def rho_rec(self, g2):
        """
        Reciprocal space model charge value
        for input squared reciprocal vector.
        Args:
            g2: Square of reciprocal vector

        Returns:
            Charge density at the reciprocal vector magnitude
        """
        return (self.x / np.sqrt(1+self.gamma2*g2)
                + (1-self.x) * np.exp(-0.25*self.beta2*g2))

    def rho_rec_limit0(self):
        """
        Reciprocal space model charge value
        close to reciprocal vector 0 .
        rho_rec(g->0) -> 1 + rho_rec_limit0 * g^2
        """
        return -2*self.gamma2*self.x - 0.25*self.beta2*(1-self.x)


warnings.warn("Replacing PyCDT usage of Freysoldt base classes with calls to "
              "corresponding objects in pymatgen.analysis.defects.corrections\n"
              "All correction plotting functionalities exist within pymatgen v2019.5.1."
              "Version 2.5 of PyCDT will remove pycdt.corrections.freysoldt_correction.FreysoldtCorrPlotter.",
              DeprecationWarning)

class FreysoldtCorrPlotter(object):
    # Plots the planar-averaged model vs DFT potentials used in the
    # Freysoldt alignment step.
    def __init__(self, x, v_R, dft_diff, final_shift, check):
        # x: positions along the averaging axis; check: (start, end) indices
        # of the sampling region used for alignment.
        self.x = x
        self.v_R = v_R
        self.dft_diff = dft_diff
        self.final_shift = final_shift
        self.check = check

    def plot(self, title='default'):
        """ """
        # Agg backend: render to file only, no display needed.
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        plt.figure()
        plt.clf()
        plt.plot(self.x, self.v_R, c="green", zorder=1, label="long range from model")
        plt.plot(self.x, self.dft_diff, c="red", label="DFT locpot diff")
        plt.plot(self.x, self.final_shift, c="blue", label="short range (aligned)")
        tmpx = [self.x[i] for i in range(self.check[0], self.check[1])]
        plt.fill_between(tmpx, -100, 100, facecolor='red', alpha=0.15, label='sampling region')
        plt.xlim(round(self.x[0]), round(self.x[-1]))
        ymin = min(min(self.v_R), min(self.dft_diff), min(self.final_shift))
        ymax = max(max(self.v_R), max(self.dft_diff), max(self.final_shift))
        plt.ylim(-0.2+ymin, 0.2+ymax)
        plt.xlabel('distance along axis ' + str(1) + ' ($\AA$)', fontsize=20)
        plt.ylabel('Potential (V)', fontsize=20)
        plt.legend(loc=9)
        plt.axhline(y=0, linewidth=0.2, color='black')
        plt.title(str(title) + ' defect potential')
        plt.xlim(0, max(self.x))
        plt.savefig(str(title) + 'FreyplnravgPlot.pdf')

    def to_datafile(self, file_name='FreyAxisData'):
np.savez(file_name, x=self.x, v_R=self.v_R, dft_diff=self.dft_diff, #defavg-pureavg, final_shift=self.final_shift, #finalshift, check_range=self.check) #np.array([mid-checkdis, mid+checkdis])) @classmethod def plot_from_datfile(cls, file_name='FreyAxisData.npz', title='default'): """ Takes data file called 'name' and does plotting. Good for later plotting of locpot data after running run_correction() """ with open(file_name) as f: plotvals = np.load(f) x = plotvals['x'] v_R = plotvals['v_R'] dft_diff = plotvals['dft_diff'] final_shift = plotvals['final_shift'] check = plotvals['check_range'] plotter = cls(x, v_R, dft_diff, final_shift, check) plotter.plot(title) warnings.warn("Replacing PyCDT usage of Freysoldt base classes with calls to " "corresponding objects in pymatgen.analysis.defects.corrections\n" "Will remove pycdt.corrections.freysoldt_correction.FreysoldtCorrection " "with Version 2.5 of PyCDT. (Corrections will all come from pymatgen for " "longer term maintenance).", DeprecationWarning) class FreysoldtCorrection(object): def __init__(self, axis, dielectricconst, pure_locpot_path, defect_locpot_path, q, energy_cutoff=520, madetol=0.0001, q_model=None, **kw): """ Args: axis: axis to do Freysoldt averaging over (zero-defined). dielectric_tensor: Macroscopic dielectric tensor. Include ionic also if defect is relaxed, otherwise use ion clamped. Can be a matrix array or scalar. pure_locpot_path: Bulk Locpot file path or locpot object defect_locpot_path: Defect Locpot file path or locpot object q (int or float): Charge associated with the defect (not of the homogeneous background). Typically integer energy_cutoff: Energy for plane wave cutoff (in eV). If not given, Materials Project default 520 eV is used. madetol (float): Tolerance for convergence of energy terms (in eV) q_model (QModel object): User defined charge for correction. If not given, highly localized charge is assumed. 
def correction(self, title=None, partflag='All'):
    """Compute the Freysoldt charge correction.

    Args:
        title: if set, the planar-averaged potential is plotted under this
            title (passed through to ``potalign``).
        partflag: which parts to compute/return --
            'pc'       -> point-charge (lattice) energy only,
            'potalign' -> potential-alignment term only,
            'All'      -> sum of both,
            'AllSplit' -> list of the individual parts [PC, potalign, total].

    Returns:
        A float rounded to 5 decimals for 'pc'/'potalign'/'All', or a list of
        three such floats for 'AllSplit'.
    """
    logger = logging.getLogger(__name__)
    logger.info('This is Freysoldt Correction.')

    # A neutral defect (q == 0) needs no correction at all.
    if not self._q:
        if partflag == 'AllSplit':
            return [0.0, 0.0, 0.0]
        else:
            return 0.0

    # Lazily load the bulk LOCPOT if a path (rather than an object) was given.
    if not type(self._purelocpot) is Locpot:
        logger.debug('Load bulk locpot')
        self._purelocpot = Locpot.from_file(self._purelocpot)

    logger.debug('\nRun PC energy')
    if partflag != 'potalign':
        energy_pc = self.pc()
        logger.debug('PC calc done, correction = %f', round(energy_pc, 4))

    logger.debug('Now run potenttial alignment script')
    if partflag != 'pc':
        # Lazily load the defect LOCPOT as well.
        if not type(self._deflocpot) is Locpot:
            logger.debug('Load defect locpot')
            self._deflocpot = Locpot.from_file(self._deflocpot)
        potalign = self.potalign(title=title)

    logger.info('\n\nFreysoldt Correction details:')
    if partflag != 'potalign':
        logger.info('PCenergy (E_lat) = %f', round(energy_pc, 5))
    if partflag != 'pc':
        logger.info('potential alignment (-q*delta V) = %f', round(potalign, 5))
    if partflag in ['All', 'AllSplit']:
        logger.info('TOTAL Freysoldt correction = %f',
                    round(energy_pc + potalign, 5))

    if partflag == 'pc':
        return round(energy_pc, 5)
    elif partflag == 'potalign':
        return round(potalign, 5)
    elif partflag == 'All':
        return round(energy_pc + potalign, 5)
    else:
        # BUG FIX: the original returned ``map(...)``, which in Python 3 is a
        # lazy iterator, not the documented list form [PC, potterm, full] --
        # and was inconsistent with the q == 0 branch above, which returns a
        # real list.
        return [round(x, 5) for x in (energy_pc, potalign, energy_pc + potalign)]
gcut = eV_to_k(encut1) #gcut is in units of 1/A g = step #initalize while g < (gcut + step): #simpson integration eiso += 4*(self._q_model.rho_rec(g*g) ** 2) eiso += 2*(self._q_model.rho_rec((g+step) ** 2) ** 2) g += 2 * step eiso -= self._q_model.rho_rec(gcut ** 2) ** 2 eiso *= (self._q ** 2) * step / (3 * round(np.pi, 6)) converge.append(eiso) if len(converge) > 2: if abs(converge[-1] - converge[-2]) < self._madetol: flag = 1 elif encut1 > self._encut: logger.error('Eiso did not converge before ' \ + str(self._encut) + ' eV') raise encut1 += 20 eiso = converge[-1] logger.debug('Eisolated : %f, converged at encut: %d', round(eiso, 5), encut1-20) #compute periodic energy; encut1 = 20 #converge to some smaller encut flag = 0 converge = [] while flag != 1: eper = 0.0 for g2 in generate_reciprocal_vectors_squared(a1, a2, a3, encut1): eper += (self._q_model.rho_rec(g2) ** 2) / g2 eper *= (self._q**2) *2* round(np.pi, 6) / vol eper += (self._q**2) *4* round(np.pi, 6) \ * self._q_model.rho_rec_limit0() / vol converge.append(eper) if
def authdata_lookup(self, authdata):
    """Turn an authdata string into a Vendor ID UUID and a
    human-readable label.

    The preferred path decodes the authdata as a JWT (issued by another
    library's circulation manager) and stores the Adobe account ID in a
    DelegatedPatronIdentifier. For backwards compatibility, an authdata
    that is not a valid JWT is treated as a Credential token identifying
    a local Patron instead.
    """
    if not authdata:
        return None, None

    library_uri = None
    patron_id_in_library = None

    utility = AuthdataUtility.from_config(self.library, self._db)
    if utility:
        # Hopefully this is an authdata JWT generated by another
        # library's circulation manager; if decoding fails we simply
        # fall through to the legacy Credential path.
        try:
            library_uri, patron_id_in_library = utility.decode(authdata)
        except Exception:
            pass

    if library_uri and patron_id_in_library:
        # JWT decode succeeded: we know which library the patron is from
        # and which (hopefully anonymized) ID identifies them there.
        return self.to_delegated_patron_identifier_uuid(
            library_uri, patron_id_in_library
        )

    # Legacy path: maybe this is an old-style authdata stored as a
    # Credential associated with a specific patron.
    patron = self.patron_from_authdata_lookup(authdata)
    if patron is not None:
        return self.uuid_and_label(patron)

    # This alleged authdata fits neither category; give up.
    return (None, None)
def to_delegated_patron_identifier_uuid(
    self, library_uri, foreign_patron_identifier, value_generator=None
):
    """Create or look up a DelegatedPatronIdentifier containing an Adobe
    account ID for the given library and foreign patron ID.

    :return: A 2-tuple (UUID, label); (None, None) when either key part
        is missing or no identifier could be created.
    """
    # Both halves of the lookup key are required.
    if not library_uri or not foreign_patron_identifier:
        return None, None

    make_value = value_generator or self.uuid
    identifier, _ = DelegatedPatronIdentifier.get_one_or_create(
        self._db,
        library_uri,
        foreign_patron_identifier,
        DelegatedPatronIdentifier.ADOBE_ACCOUNT_ID,
        make_value,
    )
    if identifier is None:
        return None, None
    delegated = identifier.delegated_identifier
    return delegated, self.urn_to_label(delegated)


def patron_from_authdata_lookup(self, authdata):
    """Look up a patron by their persistent authdata token."""
    credential = Credential.lookup_by_token(
        self._db,
        self.data_source,
        self.AUTHDATA_TOKEN_TYPE,
        authdata,
        allow_persistent_token=True,
    )
    return credential.patron if credential else None


def urn_to_label(self, urn):
    """We have no information about patrons, so labels are sparse."""
    return "Delegated account ID %s" % urn
def __init__(
    self, vendor_id, library_uri, library_short_name, secret, other_libraries=None
):
    """Basic constructor.

    :param vendor_id: The Adobe Vendor ID that should accompany authdata
        generated by this utility. If this library has its own Adobe
        Vendor ID, it should go here. If this library is delegating
        authdata control to some other library, that library's Vendor ID
        should go here.
    :param library_uri: A URI identifying this library. This is used
        when generating JWTs.
    :param library_short_name: A short string identifying this library.
        This is used when generating short client tokens, which must be
        as short as possible (thus the name).
    :param secret: A secret used to sign this library's authdata.
    :param other_libraries: A dictionary mapping other libraries'
        canonical URIs to their (short name, secret) 2-tuples. An
        instance of this class will be able to decode an authdata from
        any library in this dictionary (plus the library it was
        initialized for).
    """
    # BUG FIX: the original default was a shared mutable dict ({}) -- a
    # classic Python pitfall. None is a safe, backward-compatible default.
    if other_libraries is None:
        other_libraries = {}

    self.vendor_id = vendor_id

    # This is used to _encode_ JWTs and send them to the
    # delegation authority.
    self.library_uri = library_uri

    # This is used to _encode_ short client tokens.
    self.short_name = library_short_name.upper()

    # This is used to encode both JWTs and short client tokens.
    self.secret = secret

    # This is used by the delegation authority to _decode_ JWTs.
    self.secrets_by_library_uri = {self.library_uri: secret}

    # This is used by the delegation authority to _decode_ short
    # client tokens.
    self.library_uris_by_short_name = {self.short_name: self.library_uri}

    # Fill in secrets_by_library_uri and library_uris_by_short_name
    # for other libraries.
    for uri, (other_short_name, other_secret) in list(other_libraries.items()):
        other_short_name = other_short_name.upper()
        if other_short_name in self.library_uris_by_short_name:
            # This can happen if the same library is in the list
            # twice, capitalized differently.
            raise ValueError("Duplicate short name: %s" % other_short_name)
        self.library_uris_by_short_name[other_short_name] = uri
        self.secrets_by_library_uri[uri] = other_secret

    self.log = logging.getLogger("Adobe authdata utility")

    self.short_token_signer = HMACAlgorithm(HMACAlgorithm.SHA256)
    self.short_token_signing_key = self.short_token_signer.prepare_key(self.secret)
The library must be successfully registered with a discovery integration in order for that integration to be a candidate to provide configuration for the AuthdataUtility. :return: An AuthdataUtility if one is configured; otherwise None. :raise CannotLoadConfiguration: If an AuthdataUtility is incompletely configured. """ _db = _db or Session.object_session(library) if not _db: raise ValueError( "No database connection provided and could not derive one from Library object!" ) # Use a version of the library library = _db.merge(library, load=False) # Try to find an external integration with a configured Vendor ID. integrations = ( _db.query(ExternalIntegration) .outerjoin(ExternalIntegration.libraries) .filter( ExternalIntegration.protocol == ExternalIntegration.OPDS_REGISTRATION, ExternalIntegration.goal == ExternalIntegration.DISCOVERY_GOAL, Library.id == library.id, ) ) for possible_integration in integrations: vendor_id = ConfigurationSetting.for_externalintegration( cls.VENDOR_ID_KEY, possible_integration ).value registration_status = ( ConfigurationSetting.for_library_and_externalintegration( _db, RegistrationConstants.LIBRARY_REGISTRATION_STATUS, library, possible_integration, ).value ) if
@parametric_function_api("bn")
def BN(inp, axes=[1], decay_rate=0.9, eps=1e-5,
       batch_stat=True, output_stat=False, fix_parameters=False):
    """Batch Normalization with learnable scale/bias and running statistics."""
    shape_stat = [1 for _ in inp.shape]
    shape_stat[axes[0]] = inp.shape[axes[0]]
    beta = get_parameter_or_create(
        "beta", shape_stat, ConstantInitializer(0), not fix_parameters)
    gamma = get_parameter_or_create(
        "gamma", shape_stat, ConstantInitializer(1), not fix_parameters)
    # Running statistics are maintained by the op, never trained directly.
    mean = get_parameter_or_create(
        "mean", shape_stat, ConstantInitializer(0), False)
    var = get_parameter_or_create(
        "var", shape_stat, ConstantInitializer(0), False)
    return F.batch_normalization(inp, beta, gamma, mean, var, axes,
                                 decay_rate, eps, batch_stat, output_stat)


@parametric_function_api("in")
def IN(inp, axes=[1], decay_rate=0.9, eps=1e-5, fix_parameters=True):
    """Instance Normalization: normalize each sample over its spatial
    dimensions, then apply per-channel affine parameters."""
    if inp.shape[0] == 1:
        return INByBatchNorm(inp, axes, decay_rate, eps, fix_parameters)

    b, c = inp.shape[0:2]
    spacial_shape = inp.shape[2:]
    shape_stat = [1 for _ in inp.shape]
    shape_stat[axes[0]] = inp.shape[axes[0]]
    beta = get_parameter_or_create(
        "beta", shape_stat, ConstantInitializer(0), not fix_parameters)
    gamma = get_parameter_or_create(
        "gamma", shape_stat, ConstantInitializer(1), not fix_parameters)

    # Instance normalization: normalize over spatial dimensions only.
    axis = [i for i in range(len(inp.shape)) if i > 1]
    # BUG FIX: the original divided by np.prod(axis) -- the product of the
    # *axis indices* (e.g. 2*3 = 6 for NCHW), not the number of spatial
    # elements being averaged over.
    n_spatial = np.prod(spacial_shape)
    mean = F.sum(inp, axis=axis, keepdims=True) / n_spatial
    # BUG FIX: the original computed (sum(x - mean))**2 / N, i.e. squared the
    # *sum* of deviations (which is ~0 by construction) instead of summing
    # the squared deviations.
    var = F.sum(F.pow_scalar(inp - mean, 2.0), axis=axis,
                keepdims=True) / n_spatial
    h = (inp - mean) / F.pow_scalar(var + eps, 0.5)
    # BUG FIX: the original returned ``gamma * inp + beta``, discarding the
    # normalized tensor ``h`` entirely.
    return gamma * h + beta
@parametric_function_api("adain")
def AdaIN(h, style, fix_parameters=False):
    """Adaptive Instance Normalization: instance-normalize ``h`` and
    re-scale/shift it with per-sample (gamma, beta) taken from ``style``.
    """
    batch, channels, _, _ = h.shape
    # First half of the style vector is gamma, second half is beta.
    scale = style[:, :channels].reshape([batch, channels, 1, 1])
    shift = style[:, channels:].reshape([batch, channels, 1, 1])
    normalized = IN(h, name="in", fix_parameters=True)
    return scale * normalized + shift


def f_layer_normalization(inp, beta, gamma, eps=1e-5):
    """Layer-normalize ``inp`` over all non-batch axes, then apply the
    broadcast affine parameters ``gamma``/``beta``."""
    reduce_axes = [ax for ax in range(1, inp.ndim)]
    centered = F.sub2(inp, F.mean(inp, axis=reduce_axes, keepdims=True))
    std = F.pow_scalar(
        F.mean(F.pow_scalar(centered, 2), axis=reduce_axes, keepdims=True), 0.5)
    normalized = F.div2(centered, std + eps)
    return normalized * F.broadcast(gamma, inp.shape) \
        + F.broadcast(beta, inp.shape)
def convolution(x, maps, kernel=(3, 3), pad=(0, 0, 0, 0), stride=(1, 1),
                pad_mode="reflect", name="conv"):
    """Convolution wrapper: explicit padding (reflect by default) followed by
    a PF.convolution with He-backward normal weight initialization.

    Scalar ``kernel``/``pad``/``stride`` arguments are broadcast to the tuple
    shapes NNabla expects.
    """
    # IDIOM FIX: use isinstance() rather than ``type(x) == int``.
    if isinstance(kernel, int):
        kernel = tuple([kernel] * 2)
    if isinstance(pad, int):
        pad = tuple([pad] * 4)
    if isinstance(stride, int):
        stride = tuple([stride] * 2)
    h = x
    s = nn.initializer.calc_normal_std_he_backward(
        h.shape[1], maps, kernel=kernel)
    init = nn.initializer.NormalInitializer(s)
    h = F.pad(h, pad, mode=pad_mode)
    h = PF.convolution(h, maps, kernel, stride=stride, with_bias=True,
                       w_init=init, name=name)
    return h


def normalize(x, style=None, norm=""):
    """Apply the normalization selected by ``norm``: 'in', 'adain', 'ln',
    or '' (identity)."""
    if norm == "in":
        return IN(x, fix_parameters=True)
    if norm == "adain":
        return AdaIN(x, style, fix_parameters=True)
    if norm == "ln":
        return LN(x, fix_parameters=False)
    return x


def convblock(x, maps, kernel=(3, 3), pad=(0, 0, 0, 0), stride=1,
              with_bias=True, pad_mode="reflect", norm="", leaky=False,
              name="convblock"):
    """Convolution block: conv -> norm -> activation (ReLU, or
    LeakyReLU(0.2) when ``leaky``)."""
    h = x
    with nn.parameter_scope(name):
        h = convolution(h, maps, kernel, pad, stride, pad_mode)
        h = normalize(h, norm=norm)
        h = F.relu(h, True) if not leaky else F.leaky_relu(h, 0.2, True)
    return h


def mlp(x, maps, num_res=4, num_layers=2, name="mlp"):
    """Three-layer MLP producing 2*maps*num_res*num_layers AdaIN parameters
    from a style code."""
    h = x
    with nn.parameter_scope(name):
        h = PF.affine(h, maps, name="affine-first")
        h = F.relu(h, True)
        h = PF.affine(h, maps, name="affine-mid")
        h = F.relu(h, True)
        h = PF.affine(h, 2 * maps * num_res * num_layers, name="affine-last")
    return h
def content_encoder(x, maps=64, pad_mode="reflect", name="content-encoder"):
    """Content encoder: a downsampling conv stack followed by four
    instance-normalized residual blocks."""
    with nn.parameter_scope("generator"):
        with nn.parameter_scope(name):
            h = convblock(x, maps * 1, 7, 3, 1, norm="in",
                          pad_mode=pad_mode, name="convblock-1")
            h = convblock(h, maps * 2, 4, 1, 2, norm="in",
                          pad_mode=pad_mode, name="convblock-2")
            h = convblock(h, maps * 4, 4, 1, 2, norm="in",
                          pad_mode=pad_mode, name="convblock-3")
            for idx in range(1, 5):
                h = resblock(h, None, maps * 4, norm="in",
                             pad_mode=pad_mode,
                             name="resblock-{}".format(idx))
    return h


def resblock(x, style=None, maps=256, pad_mode="reflect", norm="",
             name="resblock"):
    """Residual block: two 3x3 convs with optional (Ada)IN/LN normalization
    and a skip connection. When ``style`` is given it carries 2*maps AdaIN
    parameters per conv."""
    def slice_style(pos):
        # Each conv consumes its own 2*maps-wide slice of the style vector.
        if style is None:
            return None
        return style[:, pos * maps * 2:(pos + 1) * maps * 2]

    h = x
    with nn.parameter_scope(name):
        with nn.parameter_scope("conv-1"):
            h = convolution(h, maps, 3, 1, 1, pad_mode=pad_mode)
            h = normalize(h, slice_style(0), norm)
            h = F.relu(h, True)
        with nn.parameter_scope("conv-2"):
            h = convolution(h, maps, 3, 1, 1, pad_mode=pad_mode)
            h = normalize(h, slice_style(1), norm)
    return h + x
def upsample(x, maps, norm="ln", pad_mode="reflect", name="upsample"):
    """2x upsampling (unpooling) followed by a 5x5 conv block."""
    with nn.parameter_scope(name):
        up = F.unpooling(x, (2, 2))
        out = convblock(up, maps, 5, 2, 1, norm=norm, pad_mode=pad_mode)
    return out


def discriminator(x, maps=64, name="discriminator"):
    """PatchGAN-style discriminator: four strided leaky conv blocks and a
    1x1 conv producing the score map."""
    with nn.parameter_scope(name):
        h = convblock(x, maps * 1, 4, 1, 2, leaky=True, name="convblock-1")
        h = convblock(h, maps * 2, 4, 1, 2, leaky=True, name="convblock-2")
        h = convblock(h, maps * 4, 4, 1, 2, leaky=True, name="convblock-3")
        h = convblock(h, maps * 8, 4, 1, 2, leaky=True, name="convblock-4")
        h = convolution(h, 1, 1, 0, 1, name="last-conv")
    return h


def discriminators(x, maps=64, n=3):
    """Multi-scale discriminators: score ``x`` at ``n`` successively
    average-pooled resolutions."""
    # Local renamed from 'discriminators', which shadowed this function.
    outputs = []
    with nn.parameter_scope("discriminators"):
        for scale in range(n):
            outputs.append(
                discriminator(x, maps,
                              name="discriminator-{}x".format(2 ** scale)))
            x = F.average_pooling(x, kernel=(3, 3), stride=(2, 2),
                                  pad=(1, 1), including_pad=False)
    return outputs


def recon_loss(x, y):
    """Mean absolute (L1) reconstruction error."""
    return F.mean(F.absolute_error(x, y))


def lsgan_loss(d_fake, d_real=None, persistent=True):
    """LSGAN loss: discriminator form when ``d_real`` is given, otherwise
    the generator form. Both are written for minimization."""
    if d_real:
        # Discriminator: push reals toward 1 and fakes toward 0.
        return F.mean((d_real - 1.0) ** 2.0) + F.mean(d_fake ** 2.0)
    # Generator: push fakes toward 1.
    return F.mean((d_fake - 1.0) ** 2.0)
print("Style shape: ", style.shape) # Decoder x_fake = decoder(content, style, name="decoder") print("X_fake shape: ", x_fake.shape) for k, v in nn.get_parameters().items(): if "gamma" in k or "beta" in k: print(k, np.prod(v.shape)) # Discriminator p_reals = discriminators(x_real) for i, p_real in enumerate(p_reals): print("Scale: ", i,
def levelf(x):
    """Map a quota-utilization percentage to a level name."""
    if x > 80:
        return 'Alert'
    if x > 60:
        return 'Warning'
    return 'Safe'


def drawSub(text, substr, cards_dd_col, nd):
    """Render one sublimit card; the card matching ``nd`` gets the info
    border highlight."""
    css_class = 'm-0 p-0 border-0'
    bottom_border = 0 if cards_dd_col else '3px solid'
    if text == nd:
        css_class = 'm-0 p-0 border-info'
    header = dbc.CardHeader(
        f'Sublimit {text}',
        className='p-0 m-0 border-0 bg-info text-white text-left font-weight-bold')
    body = dbc.CardBody(substr, className='p-0 m-0 text-right font-weight-bold')
    return dbc.Card(
        [header, body],
        className=css_class,
        style={'border-left': '3px solid',
               'border-bottom': bottom_border,
               'border-width': '3px', })


def drawSubdd(text, nd, dfm):
    """Render a per-insurer card showing its ``nd`` sublimit usage against
    its quota."""
    rows = dfm.loc[dfm.suppliercode == text]
    used = rows[nd].sum()
    quota = rows['q_' + nd].sum()
    pct = round((used / quota) * 100, 2)
    return dbc.Card([
        dbc.CardHeader(text, className='p-0 m-0 text-left'),
        dbc.CardBody([html.Small(f'{used:,.0f} THB ({pct}%)')],
                     className='p-0 m-0 text-right'),
    ], className='p-0 m-0 border-0')
dfm['num_district'] = 0 dfm.loc[dfm.future == 0, 'num_district'] = 1 dfm = dfm.groupby(gb[:5])[fc].sum().reset_index() # print('dfm.sum()', dfm.sum()) print('\ndfm filtered', len(dfm), dfm.columns, dfm.nunique()) active = f'{dfm.loc[dfm.future==0].saleid.sum():,.0f} Policies' inactive = f'{dfm.loc[dfm.future==1].saleid.sum():,.0f} Policies' # future print('future', future) fg = gb[1:] if level_district else gb[1:4] print('fg', fg) if len(future): if len(future) == 1: cf = fg + num + ndl if level_district else \ fg + num + ndl + ['num_district'] dfm = dfm.loc[dfm.future == future[0] ][cf] else: cf = num + ndl if level_district else \ num + ndl + ['num_district'] dfm = dfm.groupby( fg)[cf].sum().reset_index() else: dfm = dfm.iloc[:0][fg + num + ndl] # .loc[dfm.future == future] # print('dfm futured', len(dfm), dfm.columns, dfm.nunique()) # Percent dfm['Percent'] = round((dfm[nd]/dfm['q_'+nd])*100, 2) dfm['Level'] = [levelf(x) for x in dfm.Percent] dfm['Count'] = 1 # Quota Level dfm['Quota'] = 'Safe' dfm.loc[dfm.Percent > 60, 'Quota'] = 'Warning' dfm.loc[dfm.Percent > 80, 'Quota'] = 'Alert' dfm = dfm.loc[dfm.Quota.isin(quota)] # Summary / Sublimit / Insurer Cards sum_premium = f'{dfm.sum_premium.sum():,.0f} THB' coveramount = f'{dfm.coveramount.sum():,.0f} THB' return dfm, active, inactive, sum_premium, coveramount def init_callback(dashapp): # login @dashapp.callback([Output('test', 'children'), Output('img', 'src')], Input('location', 'pathname')) def test(path): if session: print(session, path) x = session['profile'] return x['email'], x['picture'] return None, None # logout @dashapp.callback(Output('location', 'pathname'), Input('logout', 'n_clicks')) def click_logout(nc): if nc: session.clear() print(session) # ; redirect('/')#; layout = layout0 return '/tqm' # add callback for toggling the collapse on small screens @dashapp.callback(Output("sidebar", "className"), [Input("sidebar-toggle", "n_clicks")], [State("sidebar", "className")]) def toggle_classname(n, 
classname): if n and classname == "": return "collapsed" return "" @dashapp.callback(Output("collapse", "is_open"), [Input("navbar-toggle", "n_clicks")], [State("collapse", "is_open")]) def toggle_collapse(n, is_open): if n: return not is_open return is_open @dashapp.callback(Output("navbar-collapse", "is_open"), [Input("navbar-toggler", "n_clicks")], [State("navbar-collapse", "is_open")]) def toggle_navbar_collapse(n, is_open): if n: return not is_open return is_open # popover_insurer @dashapp.callback(Output("popover_insurer", "is_open"), Output("popover_insurer", "children"), Input("Insurer", "n_clicks"), State("dd1", "value")) def popover_insurer(test, dd1): if test: # print('dd1', dd1, len(dd1)) sc = pd.read_pickle('data/sc.pickle') sc = sc.loc[sc.suppliercode.isin(dd1)] # print('dd2', len(sc)) sc = dbc.Table.from_dataframe(sc, striped=True, bordered=True, hover=True, style={'background-color': 'white', 'width': '385px'}, id='sc') return True, sc # , 'info' , Output('sc','color') else: return False, None # hide summary @ dashapp.callback(Output("cc", "is_open"), # Output("heatmap_si", "style"), # Output("heatmap_nd", "style"), # Input('radio_layout', 'value'), Input('summary', 'value')) def hide_summary(summary): hopen = { 'height': 'calc(100vh - 109px)'} # if radio_layout == 'Map' else None hclose = { 'height': 'calc(100vh - 50px)'} # if radio_layout == 'Map' else None # h = hopen if summary else hclose if summary: return True, # hopen, hopen else: return False, # hclose, hclose # hide insurer_col @ dashapp.callback(Output("cards_dd_col", "is_open"), Input('insurer_col', 'value'), Input('dd1', 'value')) def hide_insurer(insurer_col, dd1): if insurer_col: if len(dd1) > 1: return True else: return False else: return False # hide district @ dashapp.callback(Output("level_district_click", "color"), Output("level_district", "is_open"), Output("level_district_c", "is_open"), Output('level_district_click', 'n_clicks'), Input('level_district_click', 'n_clicks'), 
State('level_district', 'is_open')) def hide_district(ncs, is_open): not_open = not is_open not_color = 'primary' if not_open else 'secondary' if ncs: return not_color, not_open, not_open, 0 else: return 'secondary', False, False, 0 # dynamic checkbox @dashapp.callback(Output("province", "options"), # Output("province", "value"), # Output("region", "value"), Input("region", "value"), State("region", "value")) # , Input("province", "value") def dynamic_ckl_p(region, regions): # , province # region = regions if not region else region # print('region', region, regions) dfm = pd.read_pickle('data/gi.pickle')[['region_c', 'R_P']] provincel = dfm.loc[dfm.region_c.isin(region), 'R_P'].unique() provincel = np.sort(provincel) provinced = [{'label': i, 'value': i} for i in provincel] return provinced @ dashapp.callback(Output("district", "options"), # Output("district", "value"), Input("province", "value")) # , Input("district", "value") def dynamic_ckl_d(province): # , district dfm = pd.read_pickle('data/gi.pickle')[['R_P', 'R_P_A']] # province = [] if not province else province # Is this line important? 
districtl = dfm.loc[dfm.R_P.isin(province), 'R_P_A'] districtl = np.sort(districtl) districtd = [{'label': i, 'value': i} for i in districtl] return districtd # all opa = [Output("region", "value"), Output("province", "value"), Output("district", "value")] ipa = [Input("all_region", "value"), Input("all_province", "value"), Input("all_district", "value")] spa = [State("region", "value"), State("province", "value"), State("province", "options"), State("district", "value"), State("district", "options")] @dashapp.callback(opa, ipa, spa) def all_chk(all_region, all_province, all_district, region, province, provinceo, district, districto): print('all_chk', all_region, all_province, all_district, region, province, provinceo, district, districto) changed_id = [p['prop_id'] for p in callback_context.triggered][0] print('changed_id', changed_id) if 'all_region' in changed_id: if all_region: return rcl, province, district else: return [], [], [] elif 'all_province' in changed_id: provincel = [p['value'] for p in provinceo] if all_province: return region, provincel, district else: return region, [], district elif 'all_district' in changed_id: districtl = [d['value'] for d in districto] if all_district: return region, province, districtl else: return region, province, [] elif changed_id == '.': return rcl, pd.read_pickle('data/gi.pickle').R_P.unique(), [] else: return region, province, district # select table / graph nmo = [Output('active', 'children'), Output('inactive', 'children'), Output('sum_premium', 'children'), Output('coveramount', 'children')] ndfl = ['Flood', 'Storm', 'Hail', 'Quake'] cgo = [Output('cards_sublimit', 'children'), Output('cards_dd_col', 'children'), ] ipl = [Input('radio_layout', 'value'), Input('radio_nd', 'value'), Input('future', 'value'), Input('dd1', 'value'), Input('province', 'value'), Input('district', 'value'), Input('quota', 'value'), Input("cc", "is_open"), Input("level_district", "is_open"), Input("cards_dd_col", "is_open")] @ 
dashapp.callback([Output('main', 'children')]+nmo+cgo, ipl) def select_content(layout, nd, future, dd1, province, district, quota, cc_is_open, level_district, cards_dd_col): # print(future, dd1, district, nd) gb = ['future', 'suppliercode', 'region_c', 'PROVINCE', 'R_P', 'DISTRICT', 'R_P_A'] dfm, active, inactive, sum_premium, coveramount = prep_data( gb, nd, future, dd1, province, district, quota, level_district) s, p, quota = {}, {}, {} for ndf in ndfl: s[ndf] = dfm[ndf].sum() quota[ndf] = dfm["q_"+ndf].sum() p[ndf] = round((s[ndf]/quota[ndf])*100, 2) s[ndf] = f'{s[ndf]:,.0f} THB ({p[ndf]:,.2f}%)' s = [drawSub(ndf, s[ndf], cards_dd_col, nd) for ndf in ndfl] cdd = dbc.Card( dbc.CardGroup([drawSubdd(dd, nd, dfm) for dd in dd1]), className='p-0 m-0 border-info', style={'border-width': '3px'}) ndl = [nd, 'q_'+nd] if layout == 'Table': from table import table if level_district: tc = gb[1:]+num+ndl+['Percent'] main = table(dfm[tc], cc_is_open, nd) else: tc = gb[1:4]+['num_district']+num+ndl+['Percent'] # dbct = dfm[tc].groupby('PROVINCE')[tc] def dbc_table(ql, color): dfm_dbct = dfm[tc].loc[dfm.Quota == ql].copy() dfm_dbct = dfm_dbct.sort_values( by='Percent', ascending=False) dfm_dbct.Percent = dfm_dbct.Percent.astype(str) + '%' dfm_dbct.saleid = [ f'{x:,.0f}' for x in dfm_dbct.saleid] dfm_dbct.sum_premium = [ f'{x:,.0f}' for x in dfm_dbct.sum_premium] dfm_dbct.coveramount = [ f'{x:,.0f}' for x in dfm_dbct.coveramount] dfm_dbct[nd] = [f'{x:,.0f}' for x in dfm_dbct[nd]] dfm_dbct['q_' + nd] = [f'{x:,.0f}' for x in dfm_dbct['q_'+nd]] cd = {'suppliercode': 'Insurer', 'region_c': 'Region', 'PROVINCE': 'Province', 'num_district': 'Total Districts', 'saleid': 'Total Policies', 'sum_premium': 'Total Premiums', 'coveramount': 'Total Sum Insured', nd: 'Sublimit ' + nd, 'q_'+nd: 'Quota Sublimit ' + nd} dfm_dbct.rename(columns=cd, inplace=True) return dbc.Card( dbc.Table.from_dataframe( dfm_dbct, className='p-0 m-0 text-right', striped=True, bordered=True, hover=True), 
className=f'p-0 m-0 border-{color}', style={'border-width': '3px'}) main = html.Div([ dbc_table('Alert', 'danger'), dbc_table('Warning', 'warning'), dbc_table('Safe', 'success'), # html.Br(), html.Br() ], style={'padding-bottom': '55px'}) # style={'background-color': 'white'}) # main = dbc.table(dfm[tc], cc_is_open, nd) elif layout == 'Chart': import dash_table from data import stylesall, df_data_all, data_columns topen = {'overflowY': 'scroll', 'height': 'calc(100vh - 215px)'} tclose = {'overflowY': 'scroll', 'height': 'calc(100vh - 50px)'} t = topen if cc_is_open else tclose main = html.Div([ dash_table.DataTable( data=df_data_all, columns=data_columns, merge_duplicate_headers=True, # page_action='none', #
# %% [markdown]
# # Overview
# The notebook shows how to extract the segmentation map for the ships, augment the images and train a simple DNN model to detect them. A few additional tweaks like balancing the ship-count out a little better have been done.

# %% [markdown]
# ## Model Parameters
# We might want to adjust these later (or do some hyperparameter optimizations)

# %%
BATCH_SIZE = 4
EDGE_CROP = 16          # pixels cropped (then zero-padded) at the output edge of the network
NB_EPOCHS = 5
GAUSSIAN_NOISE = 0.1
UPSAMPLE_MODE = 'SIMPLE'  # 'SIMPLE' -> UpSampling2D, 'DECONV' -> Conv2DTranspose
# downsampling inside the network
NET_SCALING = None
# downsampling in preprocessing
IMG_SCALING = (1, 1)
# number of validation images to use
VALID_IMG_COUNT = 400
# maximum number of steps_per_epoch in training
MAX_TRAIN_STEPS = 200
AUGMENT_BRIGHTNESS = False

#%%
import logging
import sys

logger = logging.getLogger()
logger.handlers = []

# Set level
logger.setLevel(logging.INFO)
logger.setLevel(logging.DEBUG)  # NOTE(review): immediately overrides the INFO level above — keep only one?

# Create formatter
FORMAT = "%(levelno)-2s %(asctime)s : %(message)s"
DATE_FMT = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(FORMAT, DATE_FMT)

# Create handler and assign
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
logger.handlers = [handler]
logging.info("Logging started")

# %%
import os
import logging  # NOTE(review): duplicate import (already imported above)
from pathlib import Path

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt

# SK
from skimage.io import imread
from skimage.segmentation import mark_boundaries
from skimage.util import montage
from skimage.morphology import label
from sklearn.model_selection import train_test_split

# Montage a batch of RGB images: montage each channel separately, then re-stack.
montage_rgb = lambda x: np.stack([montage(x[:, :, :, i]) for i in range(x.shape[3])], -1)

# TensorFlow
import tensorflow as tf
from tensorflow.keras import models, layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import binary_crossentropy
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau

# ship_dir = '../input'
# train_image_dir = os.path.join(ship_dir, 'train_v2')
# test_image_dir = os.path.join(ship_dir, 'test_v2')

import gc; gc.enable() # memory is tight


def multi_rle_encode(img):
    """Split a binary mask into connected components and RLE-encode each.

    Returns one RLE string per labeled component (background label 0 excluded).
    """
    labels = label(img[:, :, 0])
    return [rle_encode(labels==k) for k in np.unique(labels[labels>0])]

# ref: https://www.kaggle.com/paulorzp/run-length-encode-and-decode
def rle_encode(img):
    '''
    img: numpy array, 1 - mask, 0 - background
    Returns run length as string formatted "start length start length ..."
    (1-indexed, column-major / Fortran order via the transpose).
    '''
    pixels = img.T.flatten()
    # Pad with zeros so runs touching the borders are detected.
    pixels = np.concatenate([[0], pixels, [0]])
    # Indices where the value changes mark run starts/ends (1-indexed).
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    # Convert (start, end) pairs into (start, length).
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)

def rle_decode(mask_rle, shape=(768, 768)):
    '''
    mask_rle: run-length as string formatted (start length)
    shape: (height,width) of array to return
    Returns numpy array, 1 - mask, 0 - background
    '''
    s = mask_rle.split()
    # Even entries are starts, odd entries are run lengths.
    starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
    starts -= 1  # RLE is 1-indexed
    ends = starts + lengths
    img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
    for lo, hi in zip(starts, ends):
        img[lo:hi] = 1
    return img.reshape(shape).T # Needed to align to RLE direction

def masks_as_image(in_mask_list):
    """Combine a list of per-ship RLE strings into one (768, 768, 1) mask.

    Non-string entries (e.g. NaN for ship-free images) are skipped.
    """
    # Take the individual ship masks and create a single mask array for all ships
    all_masks = np.zeros((768, 768), dtype = np.int16)
    # NOTE(review): += sums overlapping masks rather than OR-ing them;
    # assumes ship masks in the dataset never overlap — confirm.
    #if isinstance(in_mask_list, list):
    for mask in in_mask_list:
        if isinstance(mask, str):
            all_masks += rle_decode(mask)
    return np.expand_dims(all_masks, -1)

# %%
DIR_INPUT = Path("~/DATA/airbus-ship-detection-sample").expanduser()
assert DIR_INPUT.exists()
PATH_CSV = DIR_INPUT / 'train_ship_segmentations_v2.csv'
assert PATH_CSV.exists()
DIR_IMAGES = DIR_INPUT / 'images'
assert DIR_IMAGES.exists()

masks = pd.read_csv(PATH_CSV)
print(masks.shape[0], 'masks found')
print(masks['ImageId'].value_counts().shape[0])

DIR_WEIGHTS = DIR_INPUT / 'weights'

#%% Align the df with the actual sampled data masks
# NOTE(review): the next line is a no-op expression ('teas' looks like a
# leftover debug check) — consider removing.
DIR_IMAGES.joinpath('teas').exists()
masks['exists'] = masks['ImageId'].apply(lambda image_id: DIR_IMAGES.joinpath(image_id).exists())
# r = masks.head(10)
masks = masks[masks['exists']]
logging.info("Resampled df to match existing images, {} records".format(len(masks)))

# %% [markdown]
# # Make sure encode/decode works
# Given the process
# $$ RLE_0 \stackrel{Decode}{\longrightarrow} \textrm{Image}_0 \stackrel{Encode}{\longrightarrow} RLE_1 \stackrel{Decode}{\longrightarrow} \textrm{Image}_1 $$
# We want to check if/that
# $ \textrm{Image}_0 \stackrel{?}{=} \textrm{Image}_1 $
# We could check the RLEs as well but that is more tedious. Also depending on how the objects have been labeled we might have different counts.
# # # %% fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (10, 5)) rle_0 = masks.query('ImageId=="00021ddc3.jpg"')['EncodedPixels'] img_0 = masks_as_image(rle_0) ax1.imshow(img_0[:, :, 0]) ax1.set_title('Image$_0$') rle_1 = multi_rle_encode(img_0) img_1 = masks_as_image(rle_1) ax2.imshow(img_1[:, :, 0]) ax2.set_title('Image$_1$') print('Check Decoding->Encoding', 'RLE_0:', len(rle_0), '->', 'RLE_1:', len(rle_1)) # plt.show() # %% [markdown] # # Split into training and validation groups # We stratify by the number of boats appearing so we have nice balances in each set # %% masks['ships'] = masks['EncodedPixels'].map(lambda c_row: 1 if isinstance(c_row, str) else 0) unique_img_ids = masks.groupby('ImageId').agg({'ships': 'sum'}).reset_index() unique_img_ids['has_ship'] = unique_img_ids['ships'].map(lambda x: 1.0 if x>0 else 0.0) unique_img_ids['has_ship_vec'] = unique_img_ids['has_ship'].map(lambda x: [x]) # some files are too small/corrupt masks['ImageId'].apply(lambda image_id: DIR_IMAGES.joinpath(image_id).exists()) unique_img_ids['file_size_kb'] = unique_img_ids['ImageId'].map(lambda image_id: os.stat(DIR_IMAGES.joinpath(image_id)).st_size/1024) unique_img_ids = unique_img_ids[unique_img_ids['file_size_kb']>50] # keep only 50kb files unique_img_ids['file_size_kb'].hist() # plt.show() masks.drop(['ships'], axis=1, inplace=True) unique_img_ids.sample(5) logging.info("Unique records: {}".format(len(unique_img_ids))) logging.info("Total records: {}".format(len(masks))) # %% train_ids, valid_ids = train_test_split(unique_img_ids, test_size = 0.3, stratify = unique_img_ids['ships']) train_df = pd.merge(masks, train_ids) valid_df = pd.merge(masks, valid_ids) print(train_df.shape[0], 'training masks') print(valid_df.shape[0], 'validation masks') # %% [markdown] # ### Examine Number of Ship Images # Here we examine how often ships appear and replace the ones without any ships with 0 # %% train_df['ships'].hist() # plt.show() # %% [markdown] {"_uuid": 
"ef8115a80749ac47f295e9a70217a5553970c2b3"} # # Undersample Empty Images # Here we undersample the empty images to get a better balanced group with more ships to try and segment # %% train_df['grouped_ship_count'] = train_df['ships'].map(lambda x: (x+1)//2).clip(0, 7) def sample_ships(in_df, base_rep_val=1500): if in_df['ships'].values[0]==0: return in_df.sample(base_rep_val//3) # even more strongly undersample no ships else: return in_df.sample(base_rep_val, replace=(in_df.shape[0]<base_rep_val)) balanced_train_df = train_df.groupby('grouped_ship_count').apply(sample_ships) balanced_train_df['ships'].hist(bins=np.arange(10)) logging.info("Balanced data frame with {} records".format(len(balanced_train_df))) # plt.show() balanced_train_df['ships'].value_counts().sort_index() # %% [markdown] # # Decode all the RLEs into Images # We make a generator to produce batches of images # %% def make_image_gen(in_df, batch_size = BATCH_SIZE): all_batches = list(in_df.groupby('ImageId')) out_rgb = [] out_mask = [] while True: np.random.shuffle(all_batches) for c_img_id, c_masks in all_batches: rgb_path = DIR_IMAGES / c_img_id c_img = imread(rgb_path) c_mask = masks_as_image(c_masks['EncodedPixels'].values) if IMG_SCALING is not None: c_img = c_img[::IMG_SCALING[0], ::IMG_SCALING[1]] c_mask = c_mask[::IMG_SCALING[0], ::IMG_SCALING[1]] out_rgb += [c_img] out_mask += [c_mask] if len(out_rgb)>=batch_size: yield np.stack(out_rgb, 0)/255.0, np.stack(out_mask, 0) out_rgb, out_mask=[], [] # %% train_gen = make_image_gen(balanced_train_df) # Get a single sample train_x, train_y = next(train_gen) print('x', train_x.shape, train_x.min(), train_x.max()) print('y', train_y.shape, train_y.min(), train_y.max()) # %% fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize = (30, 10)) batch_rgb = montage_rgb(train_x) batch_seg = montage(train_y[:, :, :, 0]) ax1.imshow(batch_rgb) ax1.set_title('Images') ax2.imshow(batch_seg) ax2.set_title('Segmentations') ax3.imshow(mark_boundaries(batch_rgb, 
batch_seg.astype(int))) ax3.set_title('Outlined Ships') plt.show() # fig.savefig('overview.png') # %% [markdown] # # Make the Validation Set # %% valid_x, valid_y = next(make_image_gen(valid_df, VALID_IMG_COUNT)) print(valid_x.shape, valid_y.shape) # %% [markdown] # # Augment Data # %% dg_args = dict(featurewise_center = False, samplewise_center = False, rotation_range = 15, width_shift_range = 0.1, height_shift_range = 0.1, shear_range = 0.01, zoom_range = [0.9, 1.25], horizontal_flip = True, vertical_flip = True, fill_mode = 'reflect', data_format = 'channels_last') # brightness can be problematic since it seems to change the labels differently from the images if AUGMENT_BRIGHTNESS: dg_args[' brightness_range'] = [0.5, 1.5] image_gen = ImageDataGenerator(**dg_args) if AUGMENT_BRIGHTNESS: dg_args.pop('brightness_range') label_gen = ImageDataGenerator(**dg_args) def create_aug_gen(in_gen, seed = None): np.random.seed(seed if seed is not None else np.random.choice(range(9999))) for in_x, in_y in in_gen: seed = np.random.choice(range(9999)) # keep the seeds syncronized otherwise the augmentation to the images is different from the masks g_x = image_gen.flow(255*in_x, batch_size = in_x.shape[0], seed = seed, shuffle=True) g_y = label_gen.flow(in_y, batch_size = in_x.shape[0], seed = seed, shuffle=True) yield next(g_x)/255.0, next(g_y) # %% cur_gen = create_aug_gen(train_gen) t_x, t_y = next(cur_gen) print('x', t_x.shape, t_x.dtype, t_x.min(), t_x.max()) print('y', t_y.shape, t_y.dtype, t_y.min(), t_y.max()) # only keep first 9 samples to examine in detail t_x = t_x[:9] t_y = t_y[:9] fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (20, 10)) ax1.imshow(montage_rgb(t_x), cmap='gray') ax1.set_title('images') ax2.imshow(montage(t_y[:, :, :, 0]), cmap='gray_r') ax2.set_title('ships') plt.show() # %% gc.collect() # %% [markdown] # # Build a Model # Here we use a slight deviation on the U-Net standard # %% # Build U-Net model def upsample_conv(filters, kernel_size, strides, 
padding): return layers.Conv2DTranspose(filters, kernel_size, strides=strides, padding=padding) def upsample_simple(filters, kernel_size, strides, padding): return layers.UpSampling2D(strides) if UPSAMPLE_MODE=='DECONV': upsample=upsample_conv else: upsample=upsample_simple input_img = layers.Input(t_x.shape[1:], name = 'RGB_Input') pp_in_layer = input_img if NET_SCALING is not None: pp_in_layer = layers.AvgPool2D(NET_SCALING)(pp_in_layer) pp_in_layer = layers.GaussianNoise(GAUSSIAN_NOISE)(pp_in_layer) pp_in_layer = layers.BatchNormalization()(pp_in_layer) c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same') (pp_in_layer) c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same') (c1) p1 = layers.MaxPooling2D((2, 2)) (c1) c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same') (p1) c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same') (c2) p2 = layers.MaxPooling2D((2, 2)) (c2) c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same') (p2) c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same') (c3) p3 = layers.MaxPooling2D((2, 2)) (c3) c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same') (p3) c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same') (c4) p4 = layers.MaxPooling2D(pool_size=(2, 2)) (c4) c5 = layers.Conv2D(128, (3, 3), activation='relu', padding='same') (p4) c5 = layers.Conv2D(128, (3, 3), activation='relu', padding='same') (c5) u6 = upsample(64, (2, 2), strides=(2, 2), padding='same') (c5) u6 = layers.concatenate([u6, c4]) c6 = layers.Conv2D(64, (3, 3), activation='relu', padding='same') (u6) c6 = layers.Conv2D(64, (3, 3), activation='relu', padding='same') (c6) u7 = upsample(32, (2, 2), strides=(2, 2), padding='same') (c6) u7 = layers.concatenate([u7, c3]) c7 = layers.Conv2D(32, (3, 3), activation='relu', padding='same') (u7) c7 = layers.Conv2D(32, (3, 3), activation='relu', padding='same') (c7) u8 = upsample(16, (2, 2), strides=(2, 2), padding='same') (c7) u8 = 
layers.concatenate([u8, c2]) c8 = layers.Conv2D(16, (3, 3), activation='relu', padding='same') (u8) c8 = layers.Conv2D(16, (3, 3), activation='relu', padding='same') (c8) u9 = upsample(8, (2, 2), strides=(2, 2), padding='same') (c8) u9 = layers.concatenate([u9, c1], axis=3) c9 = layers.Conv2D(8, (3, 3), activation='relu', padding='same') (u9) c9 = layers.Conv2D(8, (3, 3), activation='relu', padding='same') (c9) d = layers.Conv2D(1, (1, 1), activation='sigmoid') (c9) d = layers.Cropping2D((EDGE_CROP, EDGE_CROP))(d) d = layers.ZeroPadding2D((EDGE_CROP, EDGE_CROP))(d) if NET_SCALING is not None: d
__copyright__ = "Copyright (C) 2019 <NAME>" __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import numpy as np import loopy as lp from pystella.field import Field, index_fields from pystella.elementwise import ElementWiseMap from pymbolic import var from pymbolic.primitives import Subscript, Variable __doc__ = """ .. currentmodule:: pystella.step .. autoclass:: Stepper .. currentmodule:: pystella Low-storage Runge-Kutta methods ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. currentmodule:: pystella.step .. autoclass:: LowStorageRKStepper .. currentmodule:: pystella .. autoclass:: LowStorageRK54 .. autoclass:: LowStorageRK3Williamson .. autoclass:: LowStorageRK3Inhomogeneous .. autoclass:: LowStorageRK3SSP Classical Runge-Kutta methods ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ "Classical" Runge-Kutta methods are also implemented, though are not recommended over the low-storage methods above. .. currentmodule:: pystella.step .. autoclass:: RungeKuttaStepper .. currentmodule:: pystella .. autoclass:: RungeKutta4 .. 
autoclass:: RungeKutta3SSP .. autoclass:: RungeKutta3Heun .. autoclass:: RungeKutta3Nystrom .. autoclass:: RungeKutta3Ralston .. autoclass:: RungeKutta2Midpoint .. autoclass:: RungeKutta2Ralston """ class Stepper: """ The base class for time steppers, with no implementation of a particular time stepper. :arg input: May be one of the following: * a :class:`dict` whose values represent the right-hand side of the ODEs to solve, i.e., `(key, value)` pairs corresponding to :math:`(y, f)` such that .. math:: \\frac{\\mathrm{d} y}{\\mathrm{d} t} = f, where :math:`f` is an arbitrary function of kernel data. Both keys and values must be :mod:`pymbolic` expressions. * a :class:`~pystella.Sector`. In this case, the right-hand side dictionary will be obtained from :attr:`~pystella.Sector.rhs_dict`. * a :class:`list` of :class:`~pystella.Sector`\\ s. In this case, the input obtained from each :class:`~pystella.Sector` (as described above) will be combined. The following keyword arguments are recognized: :arg MapKernel: The kernel class which each substep/stage will be an instance of---i.e., one of :class:`~pystella.ElementWiseMap` or its subclasses. Defaults to :class:`~pystella.ElementWiseMap`. :arg dt: A :class:`float` fixing the value of the timestep interval. Defaults to *None*, in which case it is not fixed at kernel creation. The remaining arguments are passed to :meth:`MapKernel` for each substep of the timestepper (i.e., see the documentation of :class:`~pystella.ElementWiseMap`). .. automethod:: __call__ .. attribute:: num_stages The number of substeps/stages per timestep. .. attribute:: expected_order The expected convergence order of *global* error, i.e. :math:`n` such that the global error is :math:`\\mathcal{O}(\\Delta t^n)`. .. attribute:: num_unknowns The number of unknown degrees of freedom which are evolved. 
""" num_stages = None expected_order = None num_copies = None def make_steps(self, MapKernel=ElementWiseMap, **kwargs): raise NotImplementedError def __init__(self, input, MapKernel=ElementWiseMap, **kwargs): single_stage = kwargs.pop("single_stage", True) from pystella import Sector if isinstance(input, Sector): self.rhs_dict = input.rhs_dict elif isinstance(input, list): self.rhs_dict = dict(i for s in input for i in s.rhs_dict.items()) elif isinstance(input, dict): self.rhs_dict = input if not single_stage: prepend_with = (self.num_copies,) else: prepend_with = None args = kwargs.pop("args", [...]) args = args + [lp.ValueArg("dt")] from pystella import get_field_args inferred_args = get_field_args(self.rhs_dict, prepend_with=prepend_with) from pystella.elementwise import append_new_args self.args = append_new_args(args, inferred_args) dt = kwargs.pop("dt", None) fixed_parameters = kwargs.pop("fixed_parameters", dict()) if dt is not None: fixed_parameters.update(dict(dt=dt)) self.num_unknowns = len(self.rhs_dict.keys()) self.steps = self.make_steps(**kwargs, fixed_parameters=fixed_parameters) def __call__(self, stage, queue=None, **kwargs): """ Calls substep/stage ``stage`` (:attr:`steps[stage]`) of the timestepper, i.e., :func:`pystella.ElementWiseMap.__call__` for the kernel for substep/stage ``stage``. :arg stage: The substep/stage of time timestepper to call. :returns: The :class:`pyopencl.Event` associated with the kernel invocation. """ evt, _ = self.steps[stage](queue, **kwargs) return evt class RungeKuttaStepper(Stepper): """ The base implementation of classical, explicit Runge-Kutta time steppers, which operate by storing and operating on multiple copies of each unknown array. Subclasses must provide an implementation of :meth:`step_statements` which returns a key-value pair implementing a specific substep of the particular timestepper. .. 
warning:: To minimize the required storage per unknown (i.e., number of temporaries), the implementation of most subclasses overwrite arrays that are being read as input to compute right-hand sides. This means that any non-local (stencil-type) operations must be precomputed and cached *globally* (unless otherwise noted). :raises ValueError: if the keys of :attr:`rhs_dict` are not :class:`~pystella.Field`\\ s (or :class:`pymbolic.primitives.Subscript`\\ s thereof). This is required for :meth:`make_steps` to be able to prepend unknown arrays' subscripts with the index corresponding to the temporary storage axis. """ def __init__(self, input, **kwargs): super().__init__(input, single_stage=False, **kwargs) def step_statements(self, stage, f, dt, rhs): raise NotImplementedError def make_steps(self, MapKernel=ElementWiseMap, **kwargs): rhs = var("rhs") dt = var("dt") q = var("q") fixed_parameters = kwargs.pop("fixed_parameters", dict()) rhs_statements = {rhs[i]: index_fields(value, prepend_with=(q,)) for i, value in enumerate(self.rhs_dict.values())} steps = [] for stage in range(self.num_stages): RK_dict = {} for i, f in enumerate(self.rhs_dict.keys()): # ensure that key is either a Field or a Subscript of a Field # so that index_fields can prepend the q index key_has_field = False if isinstance(f, Field): key_has_field = True elif isinstance(f, Subscript): if isinstance(f.aggregate, Field): key_has_field = True if not key_has_field: raise ValueError("rhs_dict keys must be Field instances") statements = self.step_statements(stage, f, dt, rhs[i]) for k, v in statements.items(): RK_dict[k] = v fixed_parameters.update(q=0 if stage == 0 else 1) options = lp.Options(enforce_variable_access_ordered="no_check") step = MapKernel(RK_dict, tmp_instructions=rhs_statements, args=self.args, **kwargs, options=options, fixed_parameters=fixed_parameters) steps.append(step) return steps class RungeKutta4(RungeKuttaStepper): """ The classical, four-stage, fourth-order Runge-Kutta 
method. Requires unknown arrays to have temporary storage axes of length three. """ num_stages = 4 expected_order = 4 num_copies = 3 def step_statements(self, stage, f, dt, rhs): fq = [index_fields(f, prepend_with=(q,)) for q in range(3)] if stage == 0: return {fq[1]: fq[0] + dt/2 * rhs, fq[2]: fq[0] + dt/6 * rhs} elif stage == 1: return {fq[1]: fq[0] + dt/2 * rhs, fq[2]: fq[2] + dt/3 * rhs} elif stage == 2: return {fq[1]: fq[0] + dt * rhs, fq[2]: fq[2] + dt/3 * rhs} elif stage == 3: return {fq[0]: fq[2] + dt/6 * rhs} class RungeKutta3Heun(RungeKuttaStepper): """ Heun's three-stage, third-order Runge-Kutta method. Requires unknown arrays to have temporary storage axes of length three. """ num_stages = 3 expected_order = 3 num_copies = 3 def step_statements(self, stage, f, dt, rhs): fq = [index_fields(f, prepend_with=(q,)) for q in range(3)] if stage == 0: return {fq[1]: fq[0] + dt/3 * rhs, fq[2]: fq[0] + dt/4 * rhs} elif stage == 1: return {fq[1]: fq[0] + dt*2/3 * rhs} elif stage == 2: return {fq[0]: fq[2] + dt*3/4 * rhs} class RungeKutta3Nystrom(RungeKuttaStepper): """ Nystrom's three-stage, third-order Runge-Kutta method. Requires unknown arrays to have temporary storage axes of length three. """ num_stages = 3 expected_order = 3 num_copies = 3 def step_statements(self, stage, f, dt, rhs): fq = [index_fields(f, prepend_with=(q,)) for q in range(3)] if stage == 0: return {fq[1]: fq[0] + dt*2/3 * rhs, fq[2]: fq[0] + dt*2/8 * rhs} elif stage == 1: return {fq[1]: fq[0] + dt*2/3 * rhs, fq[2]: fq[2] + dt*3/8 * rhs} elif stage == 2: return {fq[0]: fq[2] + dt*3/8 * rhs} class RungeKutta3Ralston(RungeKuttaStepper): """ Ralston's three-stage, third-order Runge-Kutta method. Requires unknown arrays to have temporary storage axes of length three. 
""" num_stages = 3 expected_order = 3 num_copies = 3 def step_statements(self, stage, f, dt, rhs): fq = [index_fields(f, prepend_with=(q,)) for q in range(3)] if stage == 0: return {fq[1]: fq[0] + dt/2 * rhs, fq[2]: fq[0] + dt*2/9 * rhs} elif stage == 1: return {fq[1]: fq[0] + dt*3/4 * rhs, fq[2]: fq[2] + dt*1/3 * rhs} elif stage == 2: return {fq[0]: fq[2] + dt*4/9 * rhs} class RungeKutta3SSP(RungeKuttaStepper): """ A three-stage, third-order strong-stability preserving Runge-Kutta method. Requires unknown arrays to have temporary storage axes of length two. """ num_stages = 3 expected_order = 3 num_copies = 2 def step_statements(self, stage, f,
from utils.graph_utils import *
from copy import deepcopy
import random


# Generate the mask based on the valences and adjacent matrix so far
# For a (node_in_focus, neighbor, edge_type) to be valid, neighbor's color < 2 and
# there is no edge so far between node_in_focus and neighbor and it satisfy the valence constraint
# and node_in_focus != neighbor
def generate_mask(valences, adj_mat, color, real_n_vertices, node_in_focus, check_overlap_edge, new_mol):
    """Return (edge_type_mask, edge_mask): the candidate (src, dst, type) triples
    and (src, dst) pairs that may legally be added from node_in_focus.

    valences: remaining valence per vertex; adj_mat: sparse adjacency built so
    far; color: BFS color per vertex (0 unseen, 1 queued, 2 done); new_mol is
    an RDKit RWMol mirroring the partial molecule, used to test ring overlap.
    """
    edge_type_mask=[]
    edge_mask=[]
    for neighbor in range(real_n_vertices):
        if neighbor != node_in_focus and color[neighbor] < 2 and \
                not check_adjacent_sparse(adj_mat, node_in_focus, neighbor)[0]:
            # at most 3 bond orders are supported (single/double/triple)
            min_valence = min(valences[node_in_focus], valences[neighbor], 3)
            # Check whether two cycles have more than two overlap edges here
            # the neighbor color = 1 and there are left valences and
            # adding that edge will not cause overlap edges.
            if check_overlap_edge and min_valence > 0 and color[neighbor] == 1:
                # attempt to add the edge
                new_mol.AddBond(int(node_in_focus), int(neighbor), number_to_bond[0])
                # Check whether there are two cycles having more than two overlap edges
                ssr = Chem.GetSymmSSSR(new_mol)
                overlap_flag = False
                for idx1 in range(len(ssr)):
                    for idx2 in range(idx1+1, len(ssr)):
                        if len(set(ssr[idx1]) & set(ssr[idx2])) > 2:
                            overlap_flag=True
                # remove that edge (the probe bond must not persist)
                new_mol.RemoveBond(int(node_in_focus), int(neighbor))
                if overlap_flag:
                    continue
            # one candidate per permitted bond order v in [0, min_valence)
            for v in range(min_valence):
                assert v < 3
                edge_type_mask.append((node_in_focus, neighbor, v))
            # there might be an edge between node in focus and neighbor
            if min_valence > 0:
                edge_mask.append((node_in_focus, neighbor))
    return edge_type_mask, edge_mask


# when a new edge is about to be added, we generate labels based on ground truth
# if an edge is in ground truth and has not been added to incremental adj yet, we label it as positive
def generate_label(ground_truth_graph, incremental_adj, node_in_focus, real_neighbor, real_n_vertices, params):
    """Return (edge_type_label, edge_label) positive training labels.

    With params["label_one_hot"] only the single ground-truth neighbor
    (real_neighbor) is labelled; otherwise every not-yet-added ground-truth
    edge from node_in_focus is labelled positive.
    """
    edge_type_label=[]
    edge_label=[]
    for neighbor in range(real_n_vertices):
        adjacent, edge_type = check_adjacent_sparse(ground_truth_graph, node_in_focus, neighbor)
        incre_adjacent, incre_edge_type = check_adjacent_sparse(incremental_adj, node_in_focus, neighbor)
        if not params["label_one_hot"] and adjacent and not incre_adjacent:
            assert edge_type < 3
            edge_type_label.append((node_in_focus, neighbor, edge_type))
            edge_label.append((node_in_focus, neighbor))
        elif params["label_one_hot"] and adjacent and not incre_adjacent and neighbor==real_neighbor:
            edge_type_label.append((node_in_focus, neighbor, edge_type))
            edge_label.append((node_in_focus, neighbor))
    return edge_type_label, edge_label


# add a incremental adj with one new edge
# NOTE(review): "genereate" is a typo for "generate"; kept as-is because
# callers elsewhere in this module use this exact name.
def genereate_incremental_adj(last_adj, node_in_focus, neighbor, edge_type):
    """Return a deep copy of last_adj with the undirected (node_in_focus,
    neighbor, edge_type) edge added in both directions."""
    # copy last incremental adj matrix
    new_adj= deepcopy(last_adj)
    # Add a new edge into it
    new_adj[node_in_focus].append((neighbor, edge_type))
    new_adj[neighbor].append((node_in_focus, edge_type))
    return new_adj


def update_one_step(overlapped_edge_features, distance_to_others,node_sequence, node_in_focus, neighbor, edge_type, edge_type_masks, valences, incremental_adj_mat, color, real_n_vertices, graph, edge_type_labels, local_stop, edge_masks, edge_labels, local_stop_label, params, check_overlap_edge, new_mol, up_to_date_adj_mat,keep_prob):
    """Record one generation transition: masks, labels, distances, overlap
    features and a snapshot of the adjacency.  Mutates the list arguments
    in place; returns None.  A transition may be randomly dropped when
    params["sample_transition"] is set (keep probability keep_prob)."""
    # check whether to keep this transition or not
    if params["sample_transition"] and random.random()> keep_prob:
        return
    # record the current node in focus
    node_sequence.append(node_in_focus)
    # generate mask based on current situation
    edge_type_mask, edge_mask=generate_mask(valences, up_to_date_adj_mat, color,real_n_vertices, node_in_focus, check_overlap_edge, new_mol)
    edge_type_masks.append(edge_type_mask)
    edge_masks.append(edge_mask)
    if not local_stop_label:
        # generate the label based on ground truth graph
        edge_type_label, edge_label=generate_label(graph, up_to_date_adj_mat, node_in_focus, neighbor,real_n_vertices, params)
        edge_type_labels.append(edge_type_label)
        edge_labels.append(edge_label)
    else:
        # a local-stop transition carries no positive edge labels
        edge_type_labels.append([])
        edge_labels.append([])
    # update local stop
    local_stop.append(local_stop_label)
    # Calculate distance using bfs from the current node to all other node
    distances = bfs_distance(node_in_focus, up_to_date_adj_mat)
    # clamp distances at params["truncate_distance"]
    distances = [(start, node, params["truncate_distance"]) if d > params["truncate_distance"] else (start, node, d) for start, node, d in distances]
    distance_to_others.append(distances)
    # Calculate the overlapped edge mask
    overlapped_edge_features.append(get_overlapped_edge_feature(edge_mask, color, new_mol))
    # update the incremental adj mat at this step
    incremental_adj_mat.append(deepcopy(up_to_date_adj_mat))


def construct_incremental_graph(dataset, edges, max_n_vertices, real_n_vertices, node_symbol, params, is_training_data, initial_idx=0): # FI changed
    """Replay a BFS over the ground-truth molecule and record, per step, the
    supervision signals for incremental graph generation.

    Returns the 9-tuple (incremental_adj_mat, distance_to_others,
    node_sequence, edge_type_masks, edge_type_labels, local_stop, edge_masks,
    edge_labels, overlapped_edge_features), all lists aligned per step.
    """
    # avoid calculating this if it is just for generating new molecules for speeding up
    if params["generation"] and is_training_data: # FI changed
        return [], [], [], [], [], [], [], [], []
    # avoid the initial index is larger than real_n_vertices:
    if initial_idx >= real_n_vertices:
        initial_idx=0
    # Maximum valences for each node
    valences=get_initial_valence([np.argmax(symbol) for symbol in node_symbol], dataset)
    # Add backward edges
    edges_bw=[(dst, edge_type, src) for src, edge_type, dst in edges]
    edges=edges+edges_bw
    # Construct a graph object using the edges
    graph=defaultdict(list)
    for src, edge_type, dst in edges:
        graph[src].append((dst, edge_type))
    # Breadth first search over the molecule
    # color 0: have not found 1: in the queue 2: searched already
    color = [0] * max_n_vertices
    color[initial_idx] = 1
    queue=deque([initial_idx])
    # create a adj matrix without any edges
    up_to_date_adj_mat=defaultdict(list)
    # record incremental adj mat
    incremental_adj_mat=[]
    # record the distance to other nodes at the moment
    distance_to_others=[]
    # soft constraint on overlapped edges
    overlapped_edge_features=[]
    # the exploration order of the nodes
    node_sequence=[]
    # edge type masks for nn predictions at each step
    edge_type_masks=[]
    # edge type labels for nn predictions at each step
    edge_type_labels=[]
    # edge masks for nn predictions at each step
    edge_masks=[]
    # edge labels for nn predictions at each step
    edge_labels=[]
    # local stop labels
    local_stop=[]
    # record the incremental molecule
    new_mol = Chem.MolFromSmiles('')
    new_mol = Chem.rdchem.RWMol(new_mol)
    # Add atoms
    add_atoms(new_mol, sample_node_symbol([node_symbol], [len(node_symbol)], dataset)[0], dataset)
    # calculate keep probability
    # NOTE(review): numerator equals (real_n_vertices + len(edges)/2), so this
    # reduces to 1/params["bfs_path_count"] — presumably intentional; confirm.
    sample_transition_count= real_n_vertices + len(edges)/2
    keep_prob= float(sample_transition_count)/((real_n_vertices + len(edges)/2) * params["bfs_path_count"]) # to form a binomial distribution
    while len(queue) > 0:
        node_in_focus=queue.popleft()
        current_adj_list=graph[node_in_focus]
        # sort (canonical order) it or shuffle (random order) it
        if not params["path_random_order"]:
            current_adj_list=sorted(current_adj_list)
        else:
            random.shuffle(current_adj_list)
        for neighbor, edge_type in current_adj_list:
            # Add this edge if the color of neighbor node is not 2
            if color[neighbor]<2:
                update_one_step(overlapped_edge_features, distance_to_others,node_sequence, node_in_focus, neighbor, edge_type, edge_type_masks, valences, incremental_adj_mat, color, real_n_vertices, graph, edge_type_labels, local_stop, edge_masks, edge_labels, False, params, params["check_overlap_edge"], new_mol, up_to_date_adj_mat,keep_prob)
                # Add the edge and obtain a new adj mat
                up_to_date_adj_mat=genereate_incremental_adj( up_to_date_adj_mat, node_in_focus, neighbor, edge_type)
                # suppose the edge is selected and update valences after adding the
                valences[node_in_focus]-=(edge_type + 1)
                valences[neighbor]-=(edge_type + 1)
                # update the incremental mol
                new_mol.AddBond(int(node_in_focus), int(neighbor), number_to_bond[edge_type])
                # Explore neighbor nodes
                if color[neighbor]==0:
                    queue.append(neighbor)
                    color[neighbor]=1
        # local stop here. We move on to another node for exploration or stop completely
        update_one_step(overlapped_edge_features, distance_to_others,node_sequence, node_in_focus, None, None, edge_type_masks, valences, incremental_adj_mat, color, real_n_vertices, graph, edge_type_labels, local_stop, edge_masks, edge_labels, True, params, params["check_overlap_edge"], new_mol, up_to_date_adj_mat,keep_prob)
        color[node_in_focus]=2
    return incremental_adj_mat,distance_to_others,node_sequence,edge_type_masks,edge_type_labels,local_stop, edge_masks, edge_labels, overlapped_edge_features


def construct_incremental_graph_preselected(dataset, edges, max_n_vertices, real_n_vertices, vertices_to_keep, exit_points, node_symbol, params, is_training_data, initial_idx=0, single_exit=False): # FI changed
    """Variant of construct_incremental_graph where the subgraph over
    vertices_to_keep is pre-built and generation starts from exit_points.
    (Definition continues past this chunk — body below is truncated.)"""
    # avoid calculating this if it is just for generating new molecules for speeding up
    if params["generation"] and is_training_data: # FI changed
        return [], [], [], [], [], [], [], [], []
    # avoid the initial index is larger than real_n_vertices:
    if initial_idx >= real_n_vertices:
        initial_idx=0
    # Maximum valences for each node
    valences=get_initial_valence([np.argmax(symbol) for symbol in node_symbol], dataset)
    # Construct a graph object using the edges
    graph=defaultdict(list)
    for src, edge_type, dst in edges:
        graph[src].append((dst, edge_type))
    # Breadth first search over the molecule
    # color 0: have not found 1: in the queue 2: searched already
    color = [0] * max_n_vertices
    #color[initial_idx] = 1
    #queue=deque([initial_idx])
    queue=deque([]) # Queue starts as empty. Initial idx only added if nothing else kept
    # create a adj matrix without any edges
    up_to_date_adj_mat=defaultdict(list)
    # record incremental adj mat
    incremental_adj_mat=[]
    # record the distance to other nodes at the moment
    distance_to_others=[]
    # soft constraint on overlapped edges
    overlapped_edge_features=[]
    # the exploration order of the nodes
    node_sequence=[]
    # edge type masks for nn predictions at each step
    edge_type_masks=[]
    # edge type labels for nn predictions at each step
    edge_type_labels=[]
    # edge masks for nn predictions at each step
    edge_masks=[]
    # edge labels for nn predictions at each step
    edge_labels=[]
    # local stop labels
    local_stop=[]
    # record the incremental molecule
    new_mol = Chem.MolFromSmiles('')
    new_mol = Chem.rdchem.RWMol(new_mol)
    # Add atoms
    add_atoms(new_mol, sample_node_symbol([node_symbol], [len(node_symbol)], dataset)[0], dataset)
    # calculate keep probability
    sample_transition_count= real_n_vertices + len(edges)/2
    keep_prob= float(sample_transition_count)/((real_n_vertices + len(edges)/2) * params["bfs_path_count"]) # to form a binomial distribution
    # FI - Add edges between vertices in vertices_to_keep and update starting points
    # FI - TODO
    edges_to_ignore = []
    for src, edge_type, dst in edges:
        if src in vertices_to_keep and dst in vertices_to_keep:
            # Add the edge and obtain a new adj mat
            up_to_date_adj_mat=genereate_incremental_adj( up_to_date_adj_mat, src, dst, edge_type)
            # update valences
            valences[src]-=(edge_type + 1)
            valences[dst]-=(edge_type + 1)
            # update the incremental mol
            new_mol.AddBond(int(src), int(dst), number_to_bond[edge_type])
            # add edge to ignore list
            edges_to_ignore.append((src, edge_type, dst))
            edges_to_ignore.append((dst, edge_type, src))
    # FI - If single exit, only add one of the exit vectors to the queue
    if single_exit:
        exit_to_use = [random.choice(exit_points)]
    else:
        exit_to_use = exit_points
    # FI - Add vertices in vertices_to_keep to queue and update
<filename>WORC/WORC.py
#!/usr/bin/env python

# Copyright 2016-2021 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import yaml
import fastr
import graphviz
import configparser
from pathlib import Path
from random import randint
import WORC.IOparser.file_io as io
from fastr.api import ResourceLimit
from WORC.tools.Slicer import Slicer
from WORC.tools.Elastix import Elastix
from WORC.tools.Evaluate import Evaluate
import WORC.addexceptions as WORCexceptions
import WORC.IOparser.config_WORC as config_io
from WORC.detectors.detectors import DebugDetector
from WORC.export.hyper_params_exporter import export_hyper_params_to_latex
from urllib.parse import urlparse
from urllib.request import url2pathname


class WORC(object):
    """Workflow for Optimal Radiomics Classification.

    A Workflow for Optimal Radiomics Classification (WORC) object that
    serves as a pipeline spawner and manager for optimizating radiomics
    studies. Depending on the attributes set, the object will spawn an
    appropriate pipeline and manage it.

    Note that many attributes are lists and can therefore contain multiple
    instances. For example, when providing two sequences per patient,
    the "images" list contains two items. The type of items in the lists
    is described below.

    All objects that serve as source for your network, i.e. refer to
    actual files to be used, should be formatted as fastr sources suited
    for one of the fastr plugings, see also
    http://fastr.readthedocs.io/en/stable/fastr.reference.html#ioplugin-reference
    The objects should be lists of these fastr sources or dictionaries with the
    sample ID's, e.g.

    images_train = [{'Patient001': vfs://input/CT001.nii.gz,
                     'Patient002': vfs://input/CT002.nii.gz},
                    {'Patient001': vfs://input/MR001.nii.gz,
                     'Patient002': vfs://input/MR002.nii.gz}]

    Attributes
    ------------------
        name: String, default 'WORC'
            name of the network.

        configs: list, required
            Configuration parameters, either ConfigParser objects
            created through the defaultconfig function or paths of config .ini
            files. (list, required)

        labels: list, required
            Paths to files containing patient labels (.txt files).

        network: automatically generated
            The FASTR network generated through the "build" function.

        images: list, optional
            Paths refering to the images used for Radiomics computation. Images
            should be of the ITK Image type.

        segmentations: list, optional
            Paths refering to the segmentations used for Radiomics computation.
            Segmentations should be of the ITK Image type.

        semantics: semantic features per image type (list, optional)

        masks: state which pixels of images are valid (list, optional)

        features: input Radiomics features for classification (list, optional)

        metadata: DICOM headers belonging to images (list, optional)

        Elastix_Para: parameter files for Elastix (list, optional)

        fastr_plugin: plugin to use for FASTR execution

        fastr_tempdir: temporary directory to use for FASTR execution

        additions: additional inputs for your network (dict, optional)

        source_data: data to use as sources for FASTR (dict)

        sink_data: data to use as sinks for FASTR (dict)

        CopyMetadata: Boolean, default True
            when using elastix, copy metadata from image to segmentation or not

    """

    def __init__(self, name='test'):
        """Initialize WORC object.

        Set the initial variables all to None, except for some defaults.

        Arguments:
            name: name of the network (string, optional)

        """
        self.name = 'WORC_' + name

        # Initialize several objects
        self.configs = list()
        self.fastrconfigs = list()

        self.images_train = list()
        self.segmentations_train = list()
        self.semantics_train = list()
        self.labels_train = list()
        self.masks_train = list()
        self.masks_normalize_train = list()
        self.features_train = list()
        self.metadata_train = list()

        self.images_test = list()
        self.segmentations_test = list()
        self.semantics_test = list()
        self.labels_test = list()
        self.masks_test = list()
        self.masks_normalize_test = list()
        self.features_test = list()
        self.metadata_test = list()

        self.Elastix_Para = list()
        self.label_names = 'Label1, Label2'
        self.fixedsplits = list()

        # Set some defaults, name
        self.fastr_plugin = 'LinearExecution'
        if name == '':
            # fall back to a random 5-digit name when none is given
            name = [randint(0, 9) for p in range(0, 5)]
        self.fastr_tmpdir = os.path.join(fastr.config.mounts['tmp'], self.name)

        self.additions = dict()
        self.CopyMetadata = True
        self.segmode = []
        self._add_evaluation = False
        self.TrainTest = False

        # Memory settings for all fastr nodes
        self.fastr_memory_parameters = dict()
        self.fastr_memory_parameters['FeatureCalculator'] = '14G'
        self.fastr_memory_parameters['Classification'] = '6G'
        self.fastr_memory_parameters['WORCCastConvert'] = '4G'
        self.fastr_memory_parameters['Preprocessing'] = '4G'
        self.fastr_memory_parameters['Elastix'] = '4G'
        self.fastr_memory_parameters['Transformix'] = '4G'
        self.fastr_memory_parameters['Segmentix'] = '6G'
        self.fastr_memory_parameters['ComBat'] = '12G'
        self.fastr_memory_parameters['PlotEstimator'] = '12G'

        if DebugDetector().do_detection():
            print(fastr.config)

    def defaultconfig(self):
        """Generate a configparser object holding all default configuration values.
        Returns:
            config: configparser configuration file

        """
        config = configparser.ConfigParser()
        # preserve the case of option names (configparser lowercases by default)
        config.optionxform = str

        # General configuration of WORC
        config['General'] = dict()
        config['General']['cross_validation'] = 'True'
        config['General']['Segmentix'] = 'True'
        config['General']['FeatureCalculators'] = '[predict/CalcFeatures:1.0, pyradiomics/Pyradiomics:1.0]'
        config['General']['Preprocessing'] = 'worc/PreProcess:1.0'
        config['General']['RegistrationNode'] = "elastix4.8/Elastix:4.8"
        config['General']['TransformationNode'] = "elastix4.8/Transformix:4.8"
        config['General']['Joblib_ncores'] = '1'
        config['General']['Joblib_backend'] = 'threading'
        config['General']['tempsave'] = 'False'
        config['General']['AssumeSameImageAndMaskMetadata'] = 'False'
        config['General']['ComBat'] = 'False'

        # Options for the object/patient labels that are used
        config['Labels'] = dict()
        config['Labels']['label_names'] = 'Label1, Label2'
        config['Labels']['modus'] = 'singlelabel'
        config['Labels']['url'] = 'WIP'
        config['Labels']['projectID'] = 'WIP'

        # Preprocessing
        config['Preprocessing'] = dict()
        config['Preprocessing']['CheckSpacing'] = 'False'
        config['Preprocessing']['Clipping'] = 'False'
        config['Preprocessing']['Clipping_Range'] = '-1000.0, 3000.0'
        config['Preprocessing']['Normalize'] = 'True'
        config['Preprocessing']['Normalize_ROI'] = 'Full'
        config['Preprocessing']['Method'] = 'z_score'
        config['Preprocessing']['ROIDetermine'] = 'Provided'
        config['Preprocessing']['ROIdilate'] = 'False'
        config['Preprocessing']['ROIdilateradius'] = '10'
        config['Preprocessing']['Resampling'] = 'False'
        config['Preprocessing']['Resampling_spacing'] = '1, 1, 1'
        config['Preprocessing']['BiasCorrection'] = 'False'
        config['Preprocessing']['BiasCorrection_Mask'] = 'False'
        config['Preprocessing']['CheckOrientation'] = 'False'
        config['Preprocessing']['OrientationPrimaryAxis'] = 'axial'

        # Segmentix
        config['Segmentix'] = dict()
        config['Segmentix']['mask'] = 'subtract'
        config['Segmentix']['segtype'] = 'None'
        config['Segmentix']['segradius'] = '5'
        config['Segmentix']['N_blobs'] = '1'
        config['Segmentix']['fillholes'] = 'True'
        config['Segmentix']['remove_small_objects'] = 'False'
        config['Segmentix']['min_object_size'] = '2'

        # PREDICT - Feature calculation
        # Determine which features are calculated
        config['ImageFeatures'] = dict()
        config['ImageFeatures']['shape'] = 'True'
        config['ImageFeatures']['histogram'] = 'True'
        config['ImageFeatures']['orientation'] = 'True'
        config['ImageFeatures']['texture_Gabor'] = 'True'
        config['ImageFeatures']['texture_LBP'] = 'True'
        config['ImageFeatures']['texture_GLCM'] = 'True'
        config['ImageFeatures']['texture_GLCMMS'] = 'True'
        config['ImageFeatures']['texture_GLRLM'] = 'False'
        config['ImageFeatures']['texture_GLSZM'] = 'False'
        config['ImageFeatures']['texture_NGTDM'] = 'False'
        config['ImageFeatures']['coliage'] = 'False'
        config['ImageFeatures']['vessel'] = 'True'
        config['ImageFeatures']['log'] = 'True'
        config['ImageFeatures']['phase'] = 'True'

        # Parameter settings for PREDICT feature calculation
        # Defines only naming of modalities
        config['ImageFeatures']['image_type'] = 'CT'

        # Define frequencies for gabor filter in pixels
        config['ImageFeatures']['gabor_frequencies'] = '0.05, 0.2, 0.5'

        # Gabor, GLCM angles in degrees and radians, respectively
        config['ImageFeatures']['gabor_angles'] = '0, 45, 90, 135'
        config['ImageFeatures']['GLCM_angles'] = '0, 0.79, 1.57, 2.36'

        # GLCM discretization levels, distances in pixels
        config['ImageFeatures']['GLCM_levels'] = '16'
        config['ImageFeatures']['GLCM_distances'] = '1, 3'

        # LBP radius, number of points in pixels
        config['ImageFeatures']['LBP_radius'] = '3, 8, 15'
        config['ImageFeatures']['LBP_npoints'] = '12, 24, 36'

        # Phase features minimal wavelength and number of scales
        config['ImageFeatures']['phase_minwavelength'] = '3'
        config['ImageFeatures']['phase_nscale'] = '5'

        # Log features sigma of Gaussian in pixels
        config['ImageFeatures']['log_sigma'] = '1, 5, 10'

        # Vessel features scale range, steps for the range
        config['ImageFeatures']['vessel_scale_range'] = '1, 10'
        config['ImageFeatures']['vessel_scale_step'] = '2'

        # Vessel features radius for erosion to determine boudnary
        config['ImageFeatures']['vessel_radius'] = '5'

        # Tags from which to extract features, and how to name them
        config['ImageFeatures']['dicom_feature_tags'] = '0010 1010, 0010 0040'
        config['ImageFeatures']['dicom_feature_labels'] = 'age, sex'

        # PyRadiomics - Feature calculation
        # Addition to the above, specifically for PyRadiomics
        # Mostly based on specific MR Settings: see https://github.com/Radiomics/pyradiomics/blob/master/examples/exampleSettings/exampleMR_NoResampling.yaml
        config['PyRadiomics'] = dict()
        config['PyRadiomics']['geometryTolerance'] = '0.0001'
        config['PyRadiomics']['normalize'] = 'False'
        config['PyRadiomics']['normalizeScale'] = '100'
        config['PyRadiomics']['resampledPixelSpacing'] = 'None'
        config['PyRadiomics']['interpolator'] = 'sitkBSpline'
        config['PyRadiomics']['preCrop'] = 'True'
        # BinWidth to sensitive for normalization, thus use binCount
        config['PyRadiomics']['binCount'] = config['ImageFeatures']['GLCM_levels']
        config['PyRadiomics']['binWidth'] = 'None'
        config['PyRadiomics']['force2D'] = 'False'
        # axial slices, for coronal slices, use dimension 1 and for sagittal, dimension 2.
        config['PyRadiomics']['force2Ddimension'] = '0'
        config['PyRadiomics']['voxelArrayShift'] = '300'
        config['PyRadiomics']['Original'] = 'True'
        config['PyRadiomics']['Wavelet'] = 'False'
        config['PyRadiomics']['LoG'] = 'False'

        # foreground label depends on whether Segmentix has relabelled the mask
        if config['General']['Segmentix'] == 'True':
            config['PyRadiomics']['label'] = '1'
        else:
            config['PyRadiomics']['label'] = '255'

        # Enabled PyRadiomics features
        config['PyRadiomics']['extract_firstorder'] = 'False'
        config['PyRadiomics']['extract_shape'] = 'True'
        config['PyRadiomics']['texture_GLCM'] = 'False'
        config['PyRadiomics']['texture_GLRLM'] = 'True'
        config['PyRadiomics']['texture_GLSZM'] = 'True'
        config['PyRadiomics']['texture_GLDM'] = 'True'
        config['PyRadiomics']['texture_NGTDM'] = 'True'

        # ComBat Feature Harmonization
        config['ComBat'] = dict()
        config['ComBat']['language'] = 'python'
        config['ComBat']['batch'] = 'Hospital'
        config['ComBat']['mod'] = '[]'
        config['ComBat']['par'] = '1'
        config['ComBat']['eb'] = '1'
        config['ComBat']['per_feature'] = '0'
        config['ComBat']['excluded_features'] = 'sf_, of_, semf_, pf_'
        config['ComBat']['matlab'] = 'C:\\Program Files\\MATLAB\\R2015b\\bin\\matlab.exe'

        # Feature OneHotEncoding
        config['OneHotEncoding'] = dict()
        config['OneHotEncoding']['Use'] = 'False'
        config['OneHotEncoding']['feature_labels_tofit'] = ''

        # Feature imputation
        config['Imputation'] = dict()
        config['Imputation']['use'] = 'True'
        config['Imputation']['strategy'] = 'mean, median, most_frequent, constant, knn'
        config['Imputation']['n_neighbors'] = '5, 5'

        # Feature scaling options
        config['FeatureScaling'] = dict()
        config['FeatureScaling']['scaling_method'] = 'robust_z_score'
        config['FeatureScaling']['skip_features'] = 'semf_, pf_'

        # Feature preprocessing before the whole HyperOptimization
        config['FeatPreProcess'] = dict()
        config['FeatPreProcess']['Use'] = 'False'
        config['FeatPreProcess']['Combine'] = 'False'
        config['FeatPreProcess']['Combine_method'] = 'mean'

        # Feature selection
        config['Featsel'] = dict()
        config['Featsel']['Variance'] = '1.0'
        config['Featsel']['GroupwiseSearch'] = 'True'
        config['Featsel']['SelectFromModel'] = '0.275'
        config['Featsel']['SelectFromModel_estimator'] = 'Lasso, LR, RF'
        config['Featsel']['SelectFromModel_lasso_alpha'] =
            ContainerBroker(fresh_db_path, account=acct, container=cont)
        check_sharding_db_files(broker)

        # force_db_file can be used to open db_path specifically
        forced_broker = ContainerBroker(db_path, account=acct, container=cont, force_db_file=True)
        self.assertEqual(forced_broker.db_file, db_path)
        self.assertEqual(forced_broker._db_file, db_path)

        def check_sharded_db_files(broker):
            # sharded: only the fresh db remains on disk
            self.assertEqual(broker.db_file, fresh_db_path)
            self.assertEqual(broker._db_file, db_path)
            self.assertFalse(os.path.exists(db_path))
            self.assertTrue(os.path.exists(fresh_db_path))
            self.assertEqual([fresh_db_path], broker.db_files)

        # Test the SHARDED state, this is when only fresh_db_file exists, so
        # obviously this should return the fresh_db_path
        os.unlink(db_path)
        broker.reload_db_files()
        check_sharded_db_files(broker)
        broker = ContainerBroker(db_path, account=acct, container=cont)
        check_sharded_db_files(broker)

    @with_tempdir
    def test_sharding_initiated_and_required(self, tempdir):
        """Sharding is initiated/required only once the broker has shard
        ranges and its own range enters SHARDING/SHRINKING/SHARDED."""
        db_path = os.path.join(
            tempdir, 'part', 'suffix', 'hash', '%s.db' % uuid4())
        broker = ContainerBroker(db_path, account='a', container='c')
        broker.initialize(Timestamp.now().internal, 0)
        # no shard ranges
        self.assertIs(False, broker.sharding_initiated())
        self.assertIs(False, broker.sharding_required())
        # only own shard range
        own_sr = broker.get_own_shard_range()
        for state in ShardRange.STATES:
            own_sr.update_state(state, state_timestamp=Timestamp.now())
            broker.merge_shard_ranges(own_sr)
            self.assertIs(False, broker.sharding_initiated())
            self.assertIs(False, broker.sharding_required())

        # shard ranges, still ACTIVE
        own_sr.update_state(ShardRange.ACTIVE, state_timestamp=Timestamp.now())
        broker.merge_shard_ranges(own_sr)
        broker.merge_shard_ranges(ShardRange('.shards_a/cc', Timestamp.now()))
        self.assertIs(False, broker.sharding_initiated())
        self.assertIs(False, broker.sharding_required())

        # shard ranges and SHARDING, SHRINKING or SHARDED
        broker.enable_sharding(Timestamp.now())
        self.assertTrue(broker.set_sharding_state())
        self.assertIs(True, broker.sharding_initiated())
        self.assertIs(True, broker.sharding_required())

        epoch = broker.db_epoch
        own_sr.update_state(ShardRange.SHRINKING, state_timestamp=Timestamp.now())
        own_sr.epoch = epoch
        broker.merge_shard_ranges(own_sr)
        self.assertIs(True, broker.sharding_initiated())
        self.assertIs(True, broker.sharding_required())

        own_sr.update_state(ShardRange.SHARDED)
        broker.merge_shard_ranges(own_sr)
        self.assertTrue(broker.set_sharded_state())
        self.assertIs(True, broker.sharding_initiated())
        # once fully sharded, no further sharding work is required
        self.assertIs(False, broker.sharding_required())

    @with_tempdir
    def test_put_object_multiple_encoded_timestamps_using_file(self, tempdir):
        # Test ContainerBroker.put_object with differing data, content-type
        # and metadata timestamps, using file db to ensure that the code paths
        # to write/read pending file are exercised.
        db_path = os.path.join(tempdir, 'container.db')
        broker = ContainerBroker(db_path, account='a', container='c')
        self._test_put_object_multiple_encoded_timestamps(broker)

    def _test_put_object_multiple_explicit_timestamps(self, broker):
        """Shared body: put_object with explicit ctype/meta timestamps; newer
        component timestamps win, older ones are ignored."""
        ts = make_timestamp_iter()
        broker.initialize(next(ts).internal, 0)
        t = [next(ts) for _ in range(11)]

        # Create initial object
        broker.put_object('obj_name', t[0].internal, 123,
                          'application/x-test',
                          '5af83e3196bf99f440f31f2e1a6c9afe',
                          ctype_timestamp=None,
                          meta_timestamp=None)
        self.assertEqual(1, len(broker.get_items_since(0, 100)))
        self._assert_db_row(broker, 'obj_name', t[0].internal, 123,
                            'application/x-test',
                            '5af83e3196bf99f440f31f2e1a6c9afe')

        # hash and size change with same data timestamp are ignored
        t_encoded = encode_timestamps(t[0], t[1], t[1])
        broker.put_object('obj_name', t[0].internal, 456,
                          'application/x-test-2',
                          '1234567890abcdeffedcba0987654321',
                          ctype_timestamp=t[1].internal,
                          meta_timestamp=t[1].internal)
        self.assertEqual(1, len(broker.get_items_since(0, 100)))
        self._assert_db_row(broker, 'obj_name', t_encoded, 123,
                            'application/x-test-2',
                            '5af83e3196bf99f440f31f2e1a6c9afe')

        # content-type change with same timestamp is ignored
        t_encoded = encode_timestamps(t[0], t[1], t[2])
        broker.put_object('obj_name', t[0].internal, 456,
                          'application/x-test-3',
                          '1234567890abcdeffedcba0987654321',
                          ctype_timestamp=t[1].internal,
                          meta_timestamp=t[2].internal)
        self.assertEqual(1, len(broker.get_items_since(0, 100)))
        self._assert_db_row(broker, 'obj_name', t_encoded, 123,
                            'application/x-test-2',
                            '5af83e3196bf99f440f31f2e1a6c9afe')

        # update with differing newer timestamps
        # NOTE(review): '<KEY>' looks like a redacted hash placeholder; the
        # later assertion expects 'abcdef1234567890abcdef1234567890' — confirm
        # against the upstream test before relying on this fixture.
        t_encoded = encode_timestamps(t[4], t[6], t[8])
        broker.put_object('obj_name', t[4].internal, 789,
                          'application/x-test-3',
                          '<KEY>',
                          ctype_timestamp=t[6].internal,
                          meta_timestamp=t[8].internal)
        self.assertEqual(1, len(broker.get_items_since(0, 100)))
        self._assert_db_row(broker, 'obj_name', t_encoded, 789,
                            'application/x-test-3',
                            '<KEY>')

        # update with differing older timestamps should be ignored
        broker.put_object('obj_name', t[3].internal, 9999,
                          'application/x-test-ignored',
                          'ignored_hash',
                          ctype_timestamp=t[5].internal,
                          meta_timestamp=t[7].internal)
        self.assertEqual(1, len(broker.get_items_since(0, 100)))
        self._assert_db_row(broker, 'obj_name', t_encoded, 789,
                            'application/x-test-3',
                            'abcdef1234567890abcdef1234567890')

        # content_type_timestamp == None defaults to data timestamp
        t_encoded = encode_timestamps(t[9], t[9], t[8])
        broker.put_object('obj_name', t[9].internal, 9999,
                          'application/x-test-new',
                          'new_hash',
                          ctype_timestamp=None,
                          meta_timestamp=t[7].internal)
        self.assertEqual(1, len(broker.get_items_since(0, 100)))
        self._assert_db_row(broker, 'obj_name', t_encoded, 9999,
                            'application/x-test-new',
                            'new_hash')

        # meta_timestamp == None defaults to data timestamp
        t_encoded = encode_timestamps(t[9], t[10], t[10])
        broker.put_object('obj_name', t[8].internal, 1111,
                          'application/x-test-newer',
                          'older_hash',
                          ctype_timestamp=t[10].internal,
                          meta_timestamp=None)
        self.assertEqual(1, len(broker.get_items_since(0, 100)))
        self._assert_db_row(broker, 'obj_name', t_encoded, 9999,
                            'application/x-test-newer',
                            'new_hash')

    def test_put_object_multiple_explicit_timestamps_using_memory(self):
        # Test ContainerBroker.put_object with differing data, content-type
        # and metadata timestamps passed as explicit args
        broker = ContainerBroker(':memory:', account='a', container='c')
        self._test_put_object_multiple_explicit_timestamps(broker)

    @with_tempdir
    def test_put_object_multiple_explicit_timestamps_using_file(self, tempdir):
        # Test ContainerBroker.put_object with differing data, content-type
        # and metadata timestamps passed as explicit args, using file db to
        # ensure that the code paths to write/read pending file are exercised.
        db_path = os.path.join(tempdir, 'container.db')
        broker = ContainerBroker(db_path, account='a', container='c')
        self._test_put_object_multiple_explicit_timestamps(broker)

    def test_last_modified_time(self):
        # Test container listing reports the most recent of data or metadata
        # timestamp as last-modified time
        ts = make_timestamp_iter()
        broker = ContainerBroker(':memory:', account='a', container='c')
        broker.initialize(next(ts).internal, 0)

        # simple 'single' timestamp case
        t0 = next(ts)
        broker.put_object('obj1', t0.internal, 0, 'text/plain', 'hash1')
        listing = broker.list_objects_iter(100, '', None, None, '')
        self.assertEqual(len(listing), 1)
        self.assertEqual(listing[0][0], 'obj1')
        self.assertEqual(listing[0][1], t0.internal)

        # content-type and metadata are updated at t1
        t1 = next(ts)
        t_encoded = encode_timestamps(t0, t1, t1)
        broker.put_object('obj1', t_encoded, 0, 'text/plain', 'hash1')
        listing = broker.list_objects_iter(100, '', None, None, '')
        self.assertEqual(len(listing), 1)
        self.assertEqual(listing[0][0], 'obj1')
        self.assertEqual(listing[0][1], t1.internal)

        # used later
        t2 = next(ts)

        # metadata is updated at t3
        t3 = next(ts)
        t_encoded = encode_timestamps(t0, t1, t3)
        broker.put_object('obj1', t_encoded, 0, 'text/plain', 'hash1')
        listing = broker.list_objects_iter(100, '', None, None, '')
        self.assertEqual(len(listing), 1)
        self.assertEqual(listing[0][0], 'obj1')
        self.assertEqual(listing[0][1], t3.internal)

        # all parts updated at t2, last-modified should remain at t3
        t_encoded = encode_timestamps(t2, t2, t2)
        broker.put_object('obj1', t_encoded, 0, 'text/plain', 'hash1')
        listing = broker.list_objects_iter(100, '', None, None, '')
        self.assertEqual(len(listing), 1)
        self.assertEqual(listing[0][0], 'obj1')
        self.assertEqual(listing[0][1], t3.internal)

        # all parts updated at t4, last-modified should be t4
        t4 = next(ts)
        t_encoded = encode_timestamps(t4, t4, t4)
        broker.put_object('obj1', t_encoded, 0, 'text/plain', 'hash1')
        listing = broker.list_objects_iter(100, '', None, None, '')
        self.assertEqual(len(listing), 1)
        self.assertEqual(listing[0][0], 'obj1')
        self.assertEqual(listing[0][1], t4.internal)

    @patch_policies
    def test_put_misplaced_object_does_not_effect_container_stats(self):
        policy = random.choice(list(POLICIES))
        ts = make_timestamp_iter()
        broker = ContainerBroker(':memory:', account='a', container='c')
        broker.initialize(next(ts).internal, policy.idx)
        # migration tests may not honor policy on initialize
        if isinstance(self, ContainerBrokerMigrationMixin):
            real_storage_policy_index = \
                broker.get_info()['storage_policy_index']
            policy = [p for p in POLICIES
                      if p.idx == real_storage_policy_index][0]
        broker.put_object('correct_o', next(ts).internal, 123, 'text/plain',
                          '5af83e3196bf99f440f31f2e1a6c9afe',
                          storage_policy_index=policy.idx)
        info = broker.get_info()
        self.assertEqual(1, info['object_count'])
        self.assertEqual(123, info['bytes_used'])
        # an object in a different (wrong) policy must not change the stats
        other_policy = random.choice([p for p in POLICIES if p is not policy])
        broker.put_object('wrong_o', next(ts).internal, 123, 'text/plain',
                          '5af83e3196bf99f440f31f2e1a6c9afe',
                          storage_policy_index=other_policy.idx)
        self.assertEqual(1, info['object_count'])
        self.assertEqual(123, info['bytes_used'])

    @patch_policies
    def test_has_multiple_policies(self):
        policy = random.choice(list(POLICIES))
        ts = make_timestamp_iter()
        broker = ContainerBroker(':memory:', account='a', container='c')
        broker.initialize(next(ts).internal, policy.idx)
        # migration tests may not honor policy on initialize
        if isinstance(self, ContainerBrokerMigrationMixin):
            real_storage_policy_index = \
                broker.get_info()['storage_policy_index']
            policy = [p for p in POLICIES
                      if p.idx == real_storage_policy_index][0]
        broker.put_object('correct_o', next(ts).internal, 123, 'text/plain',
                          '5af83e3196bf99f440f31f2e1a6c9afe',
                          storage_policy_index=policy.idx)
        self.assertFalse(broker.has_multiple_policies())
        other_policy = [p for p in POLICIES if p is not policy][0]
        broker.put_object('wrong_o', next(ts).internal, 123, 'text/plain',
                          '5af83e3196bf99f440f31f2e1a6c9afe',
                          storage_policy_index=other_policy.idx)
        self.assertTrue(broker.has_multiple_policies())

    @patch_policies
    def test_get_policy_info(self):
        policy = random.choice(list(POLICIES))
        ts = make_timestamp_iter()
        broker = ContainerBroker(':memory:', account='a', container='c')
        broker.initialize(next(ts).internal, policy.idx)
        # migration tests may not honor policy on initialize
        if isinstance(self, ContainerBrokerMigrationMixin):
            real_storage_policy_index = \
                broker.get_info()['storage_policy_index']
            policy = [p for p in POLICIES
                      if p.idx == real_storage_policy_index][0]
        policy_stats = broker.get_policy_stats()
        expected = {policy.idx: {'bytes_used': 0, 'object_count': 0}}
        self.assertEqual(policy_stats, expected)

        # add an object
        broker.put_object('correct_o', next(ts).internal, 123, 'text/plain',
                          '5af83e3196bf99f440f31f2e1a6c9afe',
                          storage_policy_index=policy.idx)
        policy_stats = broker.get_policy_stats()
        expected = {policy.idx: {'bytes_used': 123, 'object_count': 1}}
        self.assertEqual(policy_stats, expected)

        # add a misplaced object
        other_policy = random.choice([p for p in POLICIES if p is not policy])
        broker.put_object('wrong_o', next(ts).internal, 123, 'text/plain',
                          '5af83e3196bf99f440f31f2e1a6c9afe',
                          storage_policy_index=other_policy.idx)
        policy_stats = broker.get_policy_stats()
        expected = {
            policy.idx: {'bytes_used': 123, 'object_count': 1},
other_policy.idx: {'bytes_used': 123, 'object_count': 1}, } self.assertEqual(policy_stats, expected) @patch_policies def test_policy_stat_tracking(self): ts = make_timestamp_iter() broker = ContainerBroker(':memory:', account='a', container='c') # Note: in subclasses of this TestCase that inherit the # ContainerBrokerMigrationMixin, passing POLICIES.default.idx here has # no effect and broker.get_policy_stats() returns a dict with a single # entry mapping policy index 0 to the container stats broker.initialize(next(ts).internal, POLICIES.default.idx) stats = defaultdict(dict) def assert_empty_default_policy_stats(policy_stats): # if no objects were added for the default policy we still # expect an entry for the default policy in the returned info # because the database was initialized with that storage policy # - but it must be empty. default_stats = policy_stats[POLICIES.default.idx] expected = {'object_count': 0, 'bytes_used': 0} self.assertEqual(default_stats, expected) policy_stats = broker.get_policy_stats() assert_empty_default_policy_stats(policy_stats) iters = 100 for i in range(iters): policy_index = random.randint(0, iters * 0.1) name = 'object-%s' % random.randint(0, iters * 0.1) size = random.randint(0, iters) broker.put_object(name, next(ts).internal, size, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe', storage_policy_index=policy_index) # track the size of the latest timestamp put for each object # in each storage policy stats[policy_index][name] = size policy_stats = broker.get_policy_stats() if POLICIES.default.idx not in stats: # unlikely, but check empty default index still in policy stats assert_empty_default_policy_stats(policy_stats) policy_stats.pop(POLICIES.default.idx) self.assertEqual(len(policy_stats), len(stats)) for policy_index, stat in policy_stats.items(): self.assertEqual(stat['object_count'], len(stats[policy_index])) self.assertEqual(stat['bytes_used'], sum(stats[policy_index].values())) def 
test_initialize_container_broker_in_default(self): broker = ContainerBroker(':memory:', account='test1', container='test2') # initialize with no storage_policy_index argument broker.initialize(Timestamp(1).internal) info = broker.get_info() self.assertEqual(info['account'], 'test1') self.assertEqual(info['container'], 'test2') self.assertEqual(info['hash'], '00000000000000000000000000000000') self.assertEqual(info['put_timestamp'], Timestamp(1).internal) self.assertEqual(info['delete_timestamp'], '0') info = broker.get_info() self.assertEqual(info['object_count'], 0) self.assertEqual(info['bytes_used'], 0) policy_stats = broker.get_policy_stats() # Act as policy-0 self.assertTrue(0 in policy_stats) self.assertEqual(policy_stats[0]['bytes_used'], 0) self.assertEqual(policy_stats[0]['object_count'], 0) broker.put_object('o1', Timestamp.now().internal, 123, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe') info = broker.get_info() self.assertEqual(info['object_count'], 1) self.assertEqual(info['bytes_used'], 123) policy_stats = broker.get_policy_stats() self.assertTrue(0 in policy_stats) self.assertEqual(policy_stats[0]['object_count'], 1) self.assertEqual(policy_stats[0]['bytes_used'], 123) def test_get_info(self): # Test ContainerBroker.get_info broker = ContainerBroker(':memory:', account='test1', container='test2') broker.initialize(Timestamp('1').internal, 0) info = broker.get_info() self.assertEqual(info['account'], 'test1') self.assertEqual(info['container'], 'test2') self.assertEqual(info['hash'], '00000000000000000000000000000000') self.assertEqual(info['put_timestamp'], Timestamp(1).internal) self.assertEqual(info['delete_timestamp'], '0') if self.__class__ in (TestContainerBrokerBeforeMetadata, TestContainerBrokerBeforeXSync, TestContainerBrokerBeforeSPI, TestContainerBrokerBeforeShardRanges): self.assertEqual(info['status_changed_at'], '0') else: self.assertEqual(info['status_changed_at'], Timestamp(1).internal) info = broker.get_info() 
self.assertEqual(info['object_count'], 0) self.assertEqual(info['bytes_used'], 0) broker.put_object('o1', Timestamp.now().internal, 123, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe') info = broker.get_info() self.assertEqual(info['object_count'], 1) self.assertEqual(info['bytes_used'], 123) sleep(.00001) broker.put_object('o2', Timestamp.now().internal, 123, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe') info = broker.get_info() self.assertEqual(info['object_count'], 2) self.assertEqual(info['bytes_used'], 246) sleep(.00001) broker.put_object('o2', Timestamp.now().internal, 1000, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe') info = broker.get_info() self.assertEqual(info['object_count'], 2) self.assertEqual(info['bytes_used'],
<reponame>chrico7/data515_project """ Performs King County real estate analysis. Collects, cleans, aggregates, and visualizes King County real estate data based on user-defined zip code and time window of interest. Data on the parcels, buildings, and sales history is from the King County Assessor's website while data on currently active listings is from the Redfin API. Functions: get_county_data() get_redfin_data() organize_county_data() join_county_redfin() aggregate_by_zip_spacial() zipcode_choro() aggregate_by_date() trend_plot() plotly_by_date() zip_code_agg_plotly() view_redfin_data_by_agg() Examples: """ # Import packages import datetime import difflib import io from pathlib import Path import time import json import urllib.request import requests import geopandas as gpd import matplotlib.pyplot as plt import numpy as np import pandas as pd import plotly.express as px # Define paths home_path = Path.home() working_path = Path.cwd() / 'data515_project' data_path = working_path / 'data' kc_path = data_path / 'kc' redfin_path = data_path / 'redfin' examples_path = Path.cwd() / 'examples' output_path = working_path / 'output' def get_county_data(file_name, num_rows=None): """ Retrieves a single data-file from the King County Assessors webstie. Retrieves the single data-file from the King County Assessors webstie defined by file_name using the Pandas read_csv() function. Args: file_name(str): The name of the file to download. num_rows(int): The number of rows to return. Returns: A Pandas dataframe containing all columns of the data retreived from the King County Assessor's webstie and number of rows equal to num_rows (defaults to all). Raises: ValueError: If passed file_name is not a string. ValueError: If passed file_name is not valid. ValueError: If passed num_rows is not a positive integer. OSError: If a connection to the URL is unable to be established. 
""" # Initialize dataframe data_raw = pd.DataFrame() # Check inputs valid_names = ['Accessory', 'Apartment%20Complex', 'Change%20History', 'Change%20History%20Detail', 'Commercial%20Building', 'Condo%20Complex%20and%20Units', 'District%20Levy%20Reference', 'Environmental%20Restriction', 'Home%20Improvement%20Applications', 'Home%20Improvement%20Exemptions', 'Legal', 'Lookup', 'Notes', 'Parcel', 'Permit', 'Real%20Property%20Account', 'Real%20Property%20Appraisal%20History', 'Real%20Property%20Sales', 'Residential%20Building', 'Review%20History', 'Tax%20Data', 'Unit%20Breakdown', 'Vacant%20Lot', 'Value%20History'] if not isinstance(file_name, str): raise ValueError('Passed file_name must be of type string') file_name = file_name.replace(' ', '%20') if file_name not in valid_names: raise ValueError('The file name you\'ve entered is not valid. ' + 'Please check ' + 'https://info.kingcounty.gov/assessor/' + 'DataDownload/default.aspx for correct file name') if num_rows is not None: if not isinstance(num_rows, int) & (num_rows > 0): raise ValueError('Number or rows to return must be a positive' + f'integer not {num_rows}') # Define base URL url = f'https://aqua.kingcounty.gov/extranet/assessor/{file_name}.zip' # Read in the data try: data_raw = pd.read_csv(url, nrows=num_rows, low_memory=False) except OSError: # try three more times with delay for i in range(3): time.sleep(1) try: data_raw = pd.read_csv(url, nrows=num_rows, low_memory=False) except OSError: pass if data_raw.empty: raise OSError('King County Assessor\'s page could not be ' + 'reached. Please check that ' + 'https://info.kingcounty.gov/assessor/' + 'DataDownload/default.aspx is available') except UnicodeDecodeError: # change encoding to latin-1 in read_csv data_raw = pd.read_csv(url, nrows=num_rows, encoding='latin-1', low_memory=False) # Check result and return if data_raw.shape[0] == 0: raise RuntimeError('No data was returned. 
Please try again later.') return data_raw def get_redfin_data(): """ Retrieves active King County SFH Redfin listings. Retrieves active Redfin listings from either the Redfin API or a local file if the API fails. Results are limited to single family homes in King County. Returns: A Pandas dataframe containing all columns of the data retreived from the Redfin API or locally stored file. Raises: OSError: If a connection to the API URL is unable to be established. FileNotFoundError: If the local file is not found. """ # Define inner functions def get_from_api(): # Retreives Redfin data using the API # Define API URL all_king_url = (r'https://www.redfin.com/stingray/api/gis-csv?al=1&' + r'cluster_bounds=-123.04941%2046.84777%' + r'2C-121.01694%2046.84777%2C-121.01694%2047.92442%' + r'2C-123.04941%2047.92442%2C-123.04941%2046.84777&' + r'market=seattle&min_stories=1&num_homes=5000&' + r'ord=redfin-recommended-asc&page_number=1&' + r'region_id=118&region_type=5&sf=1,2,3,5,6,7&' + r'status=1&uipt=1,2,3,4,5,6&v=8') # Get API response url_data = requests.get(all_king_url).content # Check if API response has been blocked if "spam bot" in str(url_data): raise ValueError("Redfin api error") # If API response is not blocked redfin_dataframe = pd.read_csv(io.StringIO(url_data.decode('utf-8'))) if redfin_dataframe.empty: raise OSError('The Redfin API page could not be ' + 'reached. Please check that ' + 'https://redfin.com is available') return redfin_dataframe def get_from_file(): # Retreives Redfin data from local file file_path = redfin_path / "All_King_Redfin.csv" return pd.read_csv(file_path) # Retreive Redfin data try: return get_from_api() except ValueError: return get_from_file() def organize_county_data(df_sale, df_building, df_parcel, df_lookup, zip_code: list, start_year='2010', start_month='1', start_day='1', end_year='2020', end_month='1', end_day='1'): """ Cleans and organizes data retrieved from King County Assessors website. 
Renames columns consistently, filters data using default and customizable inputs, merges data to a single csv file. Args: df_sale(DataFrame): King County Assessor's sales data df_building(DataFrame): King County Assessor's buildings data df_parcel(DataFrame): King County Assessor's parcel data df_lookup(DataFrame): King County Assessor's lookup data zip_code(list): List of zip codes in the King County. start_year(str): Include property sale data from this year. start_month(str): Include property sale data from this month. start_day(str): Include property sale data from this day. end_year(str): Include property sale data to this year. end_month(str): Include property sale data to this month. end_day(str): Include property sale data to this day. Returns: A Pandas dataframe containing all the data retrieved from the King County Assessor's website, filtered and merged. Raises: ValueError: If passed zip code is not valid. ValueError: If passed start_year is before the first record. ValueError: If passed end_year is after the last record. ValueError: If start date is after end date based on passed values. """ #df_lookup_items = pd.read_csv('https://raw.githubusercontent.com/' + # 'chrico7/data515_project/' + # 'master/data/look_up_item.csv') #df_col_names = pd.read_csv('https://raw.githubusercontent.com/' + # 'chrico7/data515_project/' + # 'master/data/column_names.csv') df_lookup_items = pd.read_csv(data_path / 'look_up_item.csv') df_col_names = pd.read_csv(data_path / 'column_names.csv') df_sale.columns = (df_col_names[df_col_names['source'] == 'sale']. name.tolist()) df_building.columns = (df_col_names[df_col_names['source'] == 'building']. name.tolist()) df_parcel.columns = (df_col_names[df_col_names['source'] == 'parcel']. name.tolist()) df_lookup.columns = (df_col_names[df_col_names['source'] == 'lookup']. name.tolist()) df_lookup['Look Up Description'] = (df_lookup['Look Up Description']. 
str.strip()) df_sale = df_sale[df_sale['Major'] != ' '] df_sale = df_sale.astype({'Major': int, 'Minor': int}) # get valid zip codes in King County # kc_zip_codes = df_building['Zip code'].dropna().unique() # index = [] # for i in range(len(kc_zip_codes)): # if type(kc_zip_codes[i]) == float: # kc_zip_codes[i] = int(kc_zip_codes[i]) # kc_zip_codes[i] = str(kc_zip_codes[i]) # # if (kc_zip_codes[i][:2] != '98' or (len(kc_zip_codes[i]) != 5 and # len(kc_zip_codes[i]) != 10)): # index.append(i) # # valid_zip = np.delete(kc_zip_codes, index) # # for i in range(len(valid_zip)): # if len(valid_zip[i]) == 10: # valid_zip[i] = valid_zip[i][:5] # # print(zip_code) # check zip code(s) # for code in zip_code: # if code not in np.unique(valid_zip): # raise ValueError('The zip code ' + str(code) + # ' you\'ve entered is not in King County') # check dates df_sale['Document Date'] = pd.to_datetime(df_sale['Document Date']) start_date = start_year + '-' + start_month + '-' + start_day end_date = end_year + '-' + end_month + '-' + end_day begin_year = (df_sale.sort_values(['Document Date'], ascending=[True]) ['Document Date'].iloc[0].year) end_year = (df_sale.sort_values(['Document Date'], ascending=[True]) ['Document Date'].iloc[-1].year) if int(start_year) < int(begin_year): raise ValueError('There is no record before year' + str(begin_year)) if int(start_year) > int(end_year): raise ValueError('There is no record after year' + str(end_year)) if datetime.date(int(start_year), int(start_month), int(start_day)) > \ datetime.date(int(end_year), int(end_month), int(end_day)): raise ValueError('Start date is after end date') # clean up the data df_building['Zip code'] = pd.to_numeric(df_building['Zip code'], errors='coerce') df_building = df_building.dropna(subset=['Zip code']) df_building['Zip code'] = df_building['Zip code'].astype(int) df_building['Zip code'] = df_building['Zip code'].astype(str) # limit properties to only single family houses df_parcel_sf = 
df_parcel.loc[df_parcel['Property Type'] == 'R'] df_parcel_sf = df_parcel_sf.drop(columns=['Property Type']) df_sale_sf = df_sale.loc[df_sale['Property Type'] == 11] df_building_sf = df_building.loc[df_building['Number Living Units'] == 1] # filter by a start date and end date df_sale_sf_recent = df_sale_sf[df_sale_sf['Document Date'] >= start_date] df_sale_sf_recent = df_sale_sf_recent[df_sale_sf_recent['Document Date'] <= end_date] # filter by zip #print(zip_code) df_building_sf_zip = df_building_sf[df_building_sf['Zip code'].isin(zip_code)] #print(df_building_sf['Zip code'].value_counts()) #print(df_building_sf_zip['Zip code'].value_counts()) # combine data into a single frame new_df = pd.merge(df_building_sf_zip, df_parcel_sf, how='left', left_on=['Major', 'Minor'], right_on=['Major', 'Minor']) df_all = pd.merge(new_df, df_sale_sf_recent, how='left', left_on=['Major', 'Minor'], right_on=['Major', 'Minor']) # replace numerical codes in records to readable descriptions for col in df_all.columns: if col in df_lookup_items['Field Name'].tolist(): look_up_type = int(df_lookup_items.loc[df_lookup_items['Field Name'] == col]['Look Up']) look_up_items = df_lookup.loc[df_lookup['Look Up Type'] == look_up_type] description_list = [] for i in range(len(df_all[col])): num = df_all[col].iloc[i] description = (look_up_items.loc[look_up_items['Look Up Item'] == num]['Look Up Description']) if len(description) == 0: description_list.append('nan') else: description_list.append(description.values[0]) df_all[col] = description_list return df_all def join_county_redfin(kc_data, redfin_data): """ Joins King County and Redfin data frames based on address mapping. Joins the passed dataframes kc_data and redfin_data (representing King County and Redfin data respectively) using the pandas merge() function and address matching with the difflib get_close_matches() function. King County data must contain Major, Minor, Situs
the given STIX-2 Indicator to a VAST-compatible IoC and ingests it via a VAST matcher. @param vast_binary The vast binary command to use with PyVAST @param vast_endpoint The endpoint of a running vast node ('host:port') @param indicator The STIX-2 Indicator to query VAST for """ global logger, matcher_name vast_ioc = indicator_to_vast_matcher_ioc(indicator) if not vast_ioc: logger.error( f"Unable to convert STIX-2 Indicator to VAST compatible IoC. Is it a point IoC? {indicator}" ) return vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger) proc = ( await vast.matcher() .add(matcher_name, vast_ioc["value"], vast_ioc["reference"]) .exec() ) await proc.wait() logger.debug(f"Ingested indicator for VAST live matching: {indicator}") async def remove_vast_ioc(vast_binary: str, vast_endpoint: str, indicator: Indicator): """ Converts the given STIX-2 Indicator to a VAST-compatible IoC and removes it from the VAST matcher. @param vast_binary The vast binary command to use with PyVAST @param vast_endpoint The endpoint of a running vast node ('host:port') @param indicator The STIX-2 Indicator to remove from VAST """ global logger, matcher_name type_and_value = get_vast_type_and_value(indicator.pattern) if not type_and_value: logger.debug(f"Cannot remove IoC from VAST. Is it a point IoC? {indicator}") return None (vast_type, ioc_value) = type_and_value vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger) await vast.matcher().remove(matcher_name, ioc_value).exec() logger.debug(f"Removed indicator from VAST live matching: {indicator}") async def match_intel( vast_binary: str, vast_endpoint: str, indicator_queue: asyncio.Queue, sightings_queue: asyncio.Queue, live_match: bool, retro_match: bool, retro_match_max_events: int, retro_match_timeout: float, ): """ Reads from the indicator_queue and matches all IoCs, either via VAST's live-matching or retro-matching. 
@param vast_binary The vast binary command to use with PyVAST @param vast_endpoint The endpoint of a running vast node ('host:port') @param indicator_queue The queue to read new IoCs from @param sightings_queue The queue to put new sightings into @param live_match Boolean flag to use retro-matching @param retro_match Boolean flag to use live-matching @param retro_match_max_events Max amount of retro match results @param retro_match_timeout Interval after which to terminate the retro-query """ global logger, open_tasks while True: msg = await indicator_queue.get() try: indicator = parse(msg, allow_custom=True) except Exception as e: logger.warning(f"Failed to decode STIX-2 Indicator item {msg}: {e}") continue if type(indicator) is not Indicator: logger.warning( f"Ignoring unknown message type, expected STIX-2 Indicator: {type(indicator)}" ) continue if ( ThreatBusSTIX2Constants.X_THREATBUS_UPDATE.value in indicator and indicator.x_threatbus_update == Operation.REMOVE.value ): g_iocs_removed.inc() if live_match: asyncio.create_task( remove_vast_ioc(vast_binary, vast_endpoint, indicator) ) else: # add new Indicator to matcher / query Indicator retrospectively g_iocs_added.inc() if retro_match: g_retro_match_backlog.inc() asyncio.create_task( retro_match_vast( vast_binary, vast_endpoint, retro_match_max_events, retro_match_timeout, indicator, sightings_queue, ) ) if live_match: asyncio.create_task( ingest_vast_ioc(vast_binary, vast_endpoint, indicator) ) indicator_queue.task_done() async def live_match_vast( vast_binary: str, vast_endpoint: str, sightings_queue: asyncio.Queue ): """ Starts a VAST matcher. Enqueues all matches from VAST to the sightings_queue. 
@param vast_binary The VAST binary command to use with PyVAST @param vast_endpoint The endpoint of a running VAST node @param sightings_queue The queue to put new sightings into """ global logger, matcher_name vast = VAST(binary=vast_binary, endpoint=vast_endpoint, logger=logger) proc = await vast.matcher().attach().json(matcher_name).exec() # returncode is None as long as the process did not terminate yet while proc.returncode is None: data = await proc.stdout.readline() if not data: if not await vast.test_connection(): logger.error("Lost connection to VAST, cannot live-match") # TODO reconnect continue vast_sighting = data.decode("utf-8").rstrip() sighting = matcher_result_to_sighting(vast_sighting) if not sighting: logger.error(f"Cannot parse sighting-output from VAST: {vast_sighting}") continue g_live_matcher_sightings.inc() logger.info(f"Got a new sighting from VAST") await sightings_queue.put(sighting) stderr = await proc.stderr.read() if stderr: logger.error( "VAST matcher process exited with message: {}".format(stderr.decode()) ) logger.critical("Unexpected exit of VAST matcher process.") async def invoke_cmd_for_context( cmd: str, context: dict, ioc: str = "%ioc", matchtype: str = "%matchtype" ): """ Invoke a command as subprocess for the given context. The command string is treated as template string and occurences of "%ioc" are replaced with the actually matched IoC. Returns stdout from the invoked command. @param cmd The command, including flags, to invoke as subprocess. cmd is treated as template string and occurrences of '%ioc' are replaced with the actually matched IoC. 
@param context The context to forward as JSON @param ioc The value to replace '%ioc' with in the `cmd` string @param matchtype The value to replace '%matchtype' with in the `cmd` string """ if not ioc: ioc = "%ioc" if not matchtype: matchtype = "%matchtype" cmd = cmd.replace("%ioc", ioc) cmd = cmd.replace("%matchtype", matchtype) proc = await asyncio.create_subprocess_exec( *lexical_split(cmd), stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE, ) proc.stdin.write(json.dumps(context).encode()) await proc.stdin.drain() proc.stdin.close() stdout, stderr = await proc.communicate() if stderr: logger.error(f"Error while transforming sighting context: {stderr}") return stdout async def report_sightings( sub_endpoint: str, sightings_queue: asyncio.Queue, transform_cmd: str = None, sink: str = None, ): """ Starts a ZeroMQ publisher on the given endpoint and publishes sightings from the sightings_queue. @param sub_endpoint A host:port string to connect to via ZeroMQ @param sightings_queue The queue to receive sightings from @param transform_cmd The command to use to pipe sightings to. Treated as template string: occurrences of '%ioc' in the cmd string get replaced with the matched IoC. @param report_data If True, only report context data of the sighting instead of the whole thing. 
""" global logger if transform_cmd: logger.info( f"Using '{transform_cmd}' to transform every sighting's context before sending" ) if sink: logger.info(f"Forwarding sightings to sink '{sink}'") else: socket = zmq.Context().socket(zmq.PUB) socket.connect(f"tcp://{sub_endpoint}") topic = "stix2/sighting" logger.info(f"Forwarding sightings to Threat Bus at {sub_endpoint}/{topic}") while True: sighting = await sightings_queue.get() if type(sighting) is not Sighting: logger.warning( f"Ignoring unknown message type, expected Sighting: {type(sighting)}" ) continue if transform_cmd: sighting = await transform_context(sighting, transform_cmd) if sink: context = ( sighting.x_threatbus_sighting_context if ThreatBusSTIX2Constants.X_THREATBUS_SIGHTING_CONTEXT.value in sighting else None ) matchtype = ( sighting.x_threatbus_match_type if ThreatBusSTIX2Constants.X_THREATBUS_MATCH_TYPE.value in sighting else None ) if not context: logger.warn( f"Cannot report sighting context to custom sink because no context data is found in the sighting {sighting}" ) continue if sink.lower() == "stdout": print(json.dumps(context)) else: await invoke_cmd_for_context(sink, context, matchtype=matchtype) else: socket.send_string(f"{topic} {sighting.serialize()}") sightings_queue.task_done() logger.debug(f"Reported sighting: {sighting}") async def transform_context(sighting: Sighting, transform_cmd: str) -> Sighting: """ Transforms the context of a sighting using the command configured in `transform_context` @param sighting the sighting as it was reported by VAST @param transform_cmd The command to use to pipe sightings to. Treated as template string: occurrences of '%ioc' in the cmd string get replaced with the matched IoC. 
@return a copy of the original sighting with the x_threatbus_context field set and transformed accordingly """ context = ( sighting.x_threatbus_sighting_context if ThreatBusSTIX2Constants.X_THREATBUS_SIGHTING_CONTEXT.value in sighting else None ) if not context: logger.error( f"Cannot invoke `transform_context` command because no context data is found in the sighting {sighting}" ) return indicator = ( sighting.x_threatbus_indicator if ThreatBusSTIX2Constants.X_THREATBUS_INDICATOR.value in sighting else None ) if indicator: _, ioc_value = split_object_path_and_value(indicator.pattern) else: # try to find the indicator value instead ioc_value = ( sighting.x_threatbus_indicator_value if ThreatBusSTIX2Constants.X_THREATBUS_INDICATOR_VALUE.value in sighting else None ) if not ioc_value: logger.error( f"Cannot invoke `transform_context` command because no indicator value is found in the sighting {sighting}" ) return matchtype = ( sighting.x_threatbus_match_type if ThreatBusSTIX2Constants.X_THREATBUS_MATCH_TYPE.value in sighting else None ) transformed_context_raw = await invoke_cmd_for_context( transform_cmd, context, ioc=ioc_value, matchtype=matchtype ) try: transformed_context = json.loads(transformed_context_raw) # recreate the sighting with the new transformed context ser = json.loads(sighting.serialize()) ser[ ThreatBusSTIX2Constants.X_THREATBUS_SIGHTING_CONTEXT.value ] = transformed_context return parse(json.dumps(ser), allow_custom=True) except Exception as e: logger.error( f"Cannot parse transformed sighting context (expecting JSON): {transformed_context_raw}", e, ) def send_manage_message(endpoint: str, action: dict, timeout: int = 5): """ Sends a 'management' message, following the threatbus-zmq-app protocol to either subscribe or unsubscribe this application to/from Threat Bus. 
@param endpoint A host:port string to connect to via ZeroMQ @param action The message to send as JSON @param timeout The period after which the connection attempt is aborted """ context = zmq.Context() socket = context.socket(zmq.REQ) socket.setsockopt(zmq.LINGER, 0) socket.connect(f"tcp://{endpoint}") socket.send_json(action) poller = zmq.Poller() poller.register(socket, zmq.POLLIN) reply = None if poller.poll(timeout * 1000): reply = socket.recv_json() socket.close() context.term() return reply def reply_is_success(reply: dict): """ Predicate to check if `reply` is a dict and contains the key-value pair "status" = "success" @param reply A python dict @return True if the dict contains "status" = "success" """ return ( reply and type(reply) is dict and reply.get("status", None) and reply["status"] == "success" ) def subscribe(endpoint: str, topic: str, snapshot: int, timeout: int = 5): """ Subscribes this app
<filename>opac/tests/test_interface_menu.py # coding: utf-8 from flask import url_for, current_app from flask_babelex import lazy_gettext as __ from .base import BaseTestCase from . import utils class MenuTestCase(BaseTestCase): # Collection Menu def test_alpha_link_is_selected_for_list_alpha(self): """ Verficamos que o link do menú "Alfabética" tem o css: "selected" quando acessamos a view "collection_list_alpha" """ response = self.client.get(url_for('main.collection_list')) self.assertStatus(response, 200) self.assertTemplateUsed('collection/list_journal.html') expected_anchor = '<a href="/journals/#alpha" class="tab_link">\n Lista alfab\xe9tica de peri\xf3dicos\n </a>' self.assertIn(expected_anchor, response.data.decode('utf-8')) def test_theme_link_is_selected_for_list_theme(self): """ Verficamos que o link do menú "Temática" tem o css: "selected" quando acessamos a view "collection_list_theme" """ response = self.client.get(url_for('main.collection_list')) self.assertStatus(response, 200) self.assertTemplateUsed('collection/list_journal.html') expected_anchor = '<a href="/journals/#theme" class="tab_link">\n Lista temática de periódicos\n </a>' self.assertIn(expected_anchor, response.data.decode('utf-8')) # Hamburger Menu def test_links_in_hamburger_menu(self): """ no menú de hamurger, verificamos os links que apontam a views do opac """ with current_app.app_context(): collection = utils.makeOneCollection({'name': 'dummy collection'}) with self.client as c: response = c.get(url_for('main.index')) response_data = response.data.decode('utf-8') self.assertStatus(response, 200) expected_anchor1 = """<a href="%s">\n <strong>%s</strong>""" % (url_for('.index'), collection.name or __('NOME DA COLEÇÃO!!')) self.assertIn(expected_anchor1, response_data) expected_anchor2 = """<li>\n <a href="%s" class="tab_link">\n %s\n </a>\n </li>""" % (url_for('.collection_list') + '#alpha', __('Lista alfabética de periódicos')) self.assertIn(expected_anchor2, response_data) 
expected_anchor3 = """<li>\n <a href="%s" class="tab_link">\n %s\n </a>\n </li>""" % (url_for('.collection_list') + '#theme', __('Lista temática de periódicos')) self.assertIn(expected_anchor3, response_data) # expected_anchor4 = """<li>\n <a href="%s" class="tab_link">\n %s\n </a>\n </li>""" % (url_for('.collection_list') + '#publisher', __('Lista de periódicos por editoras')) # self.assertIn(expected_anchor4, response_data) expected_anchor5 = """<li>\n <a href="%s">\n %s\n </a>\n </li>""" % (current_app.config['URL_SEARCH'] + "?q=*&lang=pt&filter[in][]=" + current_app.config['OPAC_COLLECTION'], 'Busca') self.assertIn(expected_anchor5, response_data) expected_anchor6 = """<li>\n <a target="_blank" href="%s/?collection=%s">\n %s\n </a>\n </li>\n <li>""" % (current_app.config['METRICS_URL'], current_app.config['OPAC_COLLECTION'], __('M\xe9tricas')) self.assertIn(expected_anchor6, response_data) expected_anchor7 = """<a href="%s" class="onlineSubmission">\n <span class="glyphBtn infoMenu"></span>\n %s %s\n </a>""" % (url_for('.about_collection'), __('Sobre o SciELO'), collection.name) self.assertIn(expected_anchor7, response_data) expected_anchor8 = """<li>\n <a href="/collection/about/">\n %s\n </a>\n </li>""" % __('Contatos') self.assertIn(expected_anchor8, response_data) expected_anchor9 = """<a href="#">\n <strong>SciELO.org - %s</strong>\n </a>""" % __('Rede SciELO') self.assertIn(expected_anchor9, response_data) # rede/scielo org expected_anchor10 = """<li>\n <a href="http://www.scielo.org/php/index.php">\n %s\n </a>\n </li>""" % __('Coleções nacionais e temáticas') self.assertIn(expected_anchor10, response_data) expected_anchor11 = """<li>\n <a href="http://www.scielo.org/applications/scielo-org/php/secondLevel.php?xml=secondLevelForAlphabeticList&xsl=secondLevelForAlphabeticList">\n %s\n </a>\n </li>""" % __('Lista alfabética de periódicos') self.assertIn(expected_anchor11, response_data) expected_anchor12 = """<li>\n <a 
href="http://www.scielo.org/applications/scielo-org/php/secondLevel.php?xml=secondLevelForSubjectByLetter&xsl=secondLevelForSubjectByLetter">\n %s\n </a>\n </li>""" % __('Lista de periódicos por assunto') self.assertIn(expected_anchor12, response_data) expected_anchor13 = """<li>\n <a href="%s">\n %s\n </a>\n </li>""" % (current_app.config['URL_SEARCH'], 'Busca') self.assertIn(expected_anchor13, response_data) expected_anchor14 = """<li>\n <a target="_blank" href="%s/?collection=%s">\n %s\n </a>\n </li>""" % (current_app.config['METRICS_URL'], current_app.config['OPAC_COLLECTION'], 'Métricas') self.assertIn(expected_anchor14, response_data) expected_anchor15 = """<li>\n <a href="http://www.scielo.org/php/level.php?lang=pt&component=56&item=9">\n %s\n </a>\n </li>""" % __('Acesso OAI e RSS') self.assertIn(expected_anchor15, response_data) expected_anchor16 = """<li>\n <a href="http://www.scielo.org/php/level.php?lang=pt&component=56&item=8">\n %s\n </a>\n </li>""" % __('Sobre a Rede SciELO') self.assertIn(expected_anchor16, response_data) expected_anchor17 = """<li>\n <a href="#">\n %s\n </a>\n </li>""" % __('Contatos') self.assertIn(expected_anchor17, response_data) # expected_anchor18 = u"""<li>\n <a href="#"><strong>%s</strong></a>\n </li>""" % __(u'Portal do Autor') # self.assertIn(expected_anchor18, response_data) def test_blog_link_in_hamburger_menu(self): """ Verificamos que o link para o blog em perspectiva fique apontando ao link certo considerando o idioma da sessão """ with current_app.app_context(): utils.makeOneCollection({'name': 'dummy collection'}) with self.client as c: # idioma em 'pt_br' response = c.get( url_for('main.set_locale', lang_code='pt_BR'), headers={'Referer': '/'}, follow_redirects=True) self.assertStatus(response, 200) expected_anchor = '<a href="http://blog.scielo.org/">' self.assertIn(expected_anchor, response.data.decode('utf-8')) # idioma em 'en' response = c.get( url_for('main.set_locale', lang_code='en'), headers={'Referer': 
'/'}, follow_redirects=True) self.assertStatus(response, 200) expected_anchor = '<a href="http://blog.scielo.org/en/">' self.assertIn(expected_anchor, response.data.decode('utf-8')) # idioma em 'es' response = c.get( url_for('main.set_locale', lang_code='es'), headers={'Referer': '/'}, follow_redirects=True) self.assertStatus(response, 200) expected_anchor = '<a href="http://blog.scielo.org/es/">' self.assertIn(expected_anchor, response.data.decode('utf-8')) # Journal Menu def test_journal_detail_menu(self): """ Teste para verificar se os botões estão ``anterior``, ``atual``, ``próximo`` estão disponíveis no ``journal/detail.html`` """ journal = utils.makeOneJournal() with current_app.app_context(): # Criando uma coleção para termos o objeto ``g`` na interface utils.makeOneCollection() utils.makeOneIssue({ 'journal': journal, 'year': '2016', 'volume': '1', 'number': '1', 'order': '1', }) issue2 = utils.makeOneIssue({ 'journal': journal, 'year': '2016', 'volume': '1', 'number': '2', 'order': '2', }) issue3 = utils.makeOneIssue({ 'journal': journal, 'year': '2016', 'volume': '1', 'number': '3', 'order': '3', }) response = self.client .get( url_for('main.journal_detail', url_seg=journal.url_segment)) self.assertStatus(response, 200) self.assertTemplateUsed('journal/detail.html') expect_btn_anterior = '<a href="%s" class="btn group">' % url_for( '.issue_toc', url_seg=journal.url_segment, url_seg_issue=issue2.url_segment) # número anterior expect_btn_atual = '<a href="%s" class="btn group ">' % url_for( '.issue_toc', url_seg=journal.url_segment, url_seg_issue=issue3.url_segment) # número atual expect_btn_proximo = '<a href="#" class="btn group disabled">' # número seguinte expected_btns = [expect_btn_anterior, expect_btn_atual, expect_btn_proximo] # Verificar se todos os btns do menu estão presentes no HTML da resposta for btn in expected_btns: self.assertIn(btn, response.data.decode('utf-8')) def test_journal_detail_menu_without_issues(self): """ Teste para verificar 
se os botões estão ``anterior``, ``atual``, ``próximo`` estão disponíveis no ``jorunal/detail.html`` quando o periódico não tem número. """ journal = utils.makeOneJournal() with current_app.app_context(): # Criando uma coleção para termos o objeto ``g`` na interface utils.makeOneCollection() response = self.client.get(url_for('main.journal_detail', url_seg=journal.url_segment)) self.assertStatus(response, 200) self.assertTemplateUsed('journal/detail.html') expect_btn_anterior = '<a href="#" class="btn group disabled">' # número seguinte expect_btn_atual = '<a href="#" class="btn group disabled">' # número atual expect_btn_proximo = '<a href="#" class="btn group disabled">' # número anterior expected_btns = [expect_btn_anterior, expect_btn_atual, expect_btn_proximo] # Verificar se todos os btns do menu estão presentes no HTML da resposta response_data = response.data.decode('utf-8') for btn in expected_btns: self.assertIn(btn, response_data) def test_journal_detail_menu_with_one_issue(self): """ Teste para verificar se os botões estão ``anterior``, ``atual``, ``próximo`` estão disponíveis no ``jorunal/detail.html`` quando o periódico tem um número o botão ``próximo`` e ``anterior`` deve vir desabilitados. 
""" journal = utils.makeOneJournal() with current_app.app_context(): # Criando uma coleção para termos o objeto ``g`` na interface utils.makeOneCollection() issue = utils.makeOneIssue({ 'journal': journal, 'year': '2016', 'volume': '1', 'number': '1', 'order': '1', }) response = self.client.get(url_for('main.journal_detail', url_seg=journal.url_segment)) self.assertStatus(response, 200) self.assertTemplateUsed('journal/detail.html') expect_btn_anterior = '<a href="#" class="btn group disabled">' # número anterior expect_btn_atual = '<a href="%s" class="btn group ">' % url_for( '.issue_toc', url_seg=journal.url_segment, url_seg_issue=issue.url_segment) # número atual expect_btn_proximo = '<a href="#" class="btn group disabled">' # número seguinte expected_btns = [expect_btn_anterior, expect_btn_atual, expect_btn_proximo] # Verificar se todos os btns do menu estão presentes no HTML da resposta response_data = response.data.decode('utf-8') for btn in expected_btns: self.assertIn(btn, response_data) def test_journal_detail_menu_access_issue_toc_on_any_issue(self): """ Teste para verificar se os botões estão ``anterior``, ``atual``, ``próximo`` estão disponíveis no ``jorunal/detail.html``, quando acessamos qualquer número. 
""" with current_app.app_context(): # Criando uma coleção para termos o objeto ``g`` na interface utils.makeOneCollection() journal = utils.makeOneJournal() issue1 = utils.makeOneIssue({'journal': journal, 'year': '2016', 'volume': '1', 'number': '1', 'order': '1', }) issue2 = utils.makeOneIssue({'journal': journal, 'year': '2016', 'volume': '1', 'number': '2', 'order': '2', }) issue3 = utils.makeOneIssue({'journal': journal, 'year': '2016', 'volume': '1', 'number': '3', 'order': '3', }) issue_toc_url = url_for( 'main.issue_toc', url_seg=journal.url_segment, url_seg_issue=issue2.url_segment) response = self.client .get(issue_toc_url) self.assertStatus(response, 200) self.assertTemplateUsed('issue/toc.html') expect_btn_anterior = '<a href="%s" class="btn group">' % url_for( '.issue_toc', url_seg=journal.url_segment, url_seg_issue=issue1.url_segment) # número anterior expect_btn_atual = '<a href="%s" class="btn group ">' % url_for( '.issue_toc', url_seg=journal.url_segment, url_seg_issue=issue3.url_segment) # número atual expect_btn_proximo = '<a href="%s" class="btn group">' % url_for( '.issue_toc', url_seg=journal.url_segment, url_seg_issue=issue3.url_segment) # número seguinte expected_btns = [expect_btn_anterior, expect_btn_atual, expect_btn_proximo] # Verificar se todos os btns do menu estão presentes no HTML da resposta response_data = response.data.decode('utf-8') for btn in expected_btns: self.assertIn(btn, response_data) def test_journal_detail_menu_access_issue_toc_lastest_issue(self): """ Teste para verificar se os botões estão ``anterior``, ``atual``, ``próximo`` estão disponíveis no ``jorunal/detail.html``, quando acessamos o número mais recente. 
""" journal = utils.makeOneJournal() with current_app.app_context(): # Criando uma coleção para termos o objeto ``g`` na interface utils.makeOneCollection() utils.makeOneIssue({ 'journal': journal, 'year': '2016', 'volume': '1', 'number': '1', 'order': '1', }) issue2 = utils.makeOneIssue({ 'journal': journal, 'year': '2016', 'volume': '1', 'number': '2', 'order': '2' }) issue3 = utils.makeOneIssue({ 'journal': journal, 'year': '2016', 'volume': '1', 'number': '3', 'order': '3' }) issue_toc_url = url_for( 'main.issue_toc', url_seg=journal.url_segment, url_seg_issue=issue3.url_segment) response = self.client.get(issue_toc_url) self.assertStatus(response, 200) self.assertTemplateUsed('issue/toc.html') expect_btn_anterior = '<a href="%s" class="btn group">' % url_for( '.issue_toc', url_seg=journal.url_segment, url_seg_issue=issue2.url_segment) # número anterior expect_btn_atual = '<a href="%s" class="btn group selected ">' % url_for( '.issue_toc', url_seg=journal.url_segment, url_seg_issue=issue3.url_segment) # número atual expect_btn_proximo = '<a href="#" class="btn group disabled">' # número seguinte expected_btns = [expect_btn_anterior, expect_btn_atual, expect_btn_proximo] # Verificar se todos os btns do menu estão presentes no HTML da resposta for btn in expected_btns: self.assertIn(btn, response.data.decode('utf-8')) def test_journal_detail_menu_access_issue_toc_oldest_issue(self): """ Teste para verificar se os botões estão ``anterior``, ``atual``, ``próximo`` estão disponíveis no ``jorunal/detail.html``, quando acessamos o número mais antigo. """ journal = utils.makeOneJournal() with current_app.app_context(): # Criando uma coleção para termos o objeto ``g`` na interface utils.makeOneCollection() issue1 = utils.makeOneIssue({'journal': journal, 'year': '2016', 'volume': '1', 'number': '1', 'order': '1', }) issue2 = utils.makeOneIssue({'journal':
<gh_stars>10-100 # Copyright 2021 san kim # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import copy import numpy as np from ke_t5 import pipe as seq_pipe _DEFAULT_SPAN_TAGS = ['O', 'B', 'I'] def _collapse_consecutive_spaces(text): return re.sub(r'\s+', ' ', text) def _string_join(lst, sep=' '): # Join on space, but collapse consecutive spaces. out = sep.join(lst) return _collapse_consecutive_spaces(out) def _pad_punctuation_general(text): # Pad everything except for: underscores (_), whitespace (\s), # numbers (\p{N}), letters (\p{L}) and accent characters (\p{M}). text = re.sub(r'([^_\s\w])', r' \1 ', text) # Collapse consecutive whitespace into one space. 
text = _collapse_consecutive_spaces(text) return text @seq_pipe.map_over_dataset def base_preproc_for_classification( x, benchmark_name, input_keys, label_names=None, no_label_idx=0, with_feature_key=True, sep=' '): strs_to_join = [] for key in input_keys: if with_feature_key: strs_to_join.append('{}:'.format(key)) strs_to_join.append(x[key]) ex = {} if label_names is not None: # put the name of benchmark if the model is generative strs_to_join.insert(0, benchmark_name) ex['targets'] = label_names[x['label']] if x['label'] >= 0 else '<unk>' else: ex['targets'] = x['label'] if x['label'] >= 0 else no_label_idx joined = sep.join(strs_to_join) ex['inputs'] = joined return ex @seq_pipe.map_over_dataset def base_preproc_for_regression( x, benchmark_name, input_keys, is_string_tgt=True, is_classification=False, with_feature_key=True, sep=' '): strs_to_join = [] for key in input_keys: if with_feature_key: strs_to_join.append('{}:'.format(key)) strs_to_join.append(x[key]) ex = {} if is_string_tgt: # put the name of benchmark if the model is generative strs_to_join.insert(0, benchmark_name) ex['targets'] = "{:.1f}".format(x['labels']['label']) else: if is_classification: ex['targets'] = x['labels']['binary-label'] else: ex['targets'] = x['labels']['real-label'] joined = sep.join(strs_to_join) ex['inputs'] = joined ex['id'] = x['id'] return ex @seq_pipe.map_over_dataset def re_preproc_for_classification( x, benchmark_name, label_names=None, no_label_idx=0, with_feature_key=True, sep=' '): # mark span using start index of the entity def _mark_span(text, span_str, span_idx, mark): pattern_tmpl = r'^((?:[\S\s]){N})(W)' pattern_tmpl = pattern_tmpl.replace('N', str(span_idx)) pattern = pattern_tmpl.replace('W', span_str) return re.sub(pattern, r'\1{0}\2{0}'.format(mark), text) # '*' for subejct entity '#' for object entity. 
text = x["sentence"] text = _mark_span(text, x['subject_entity']['word'], x['subject_entity']['start_idx'], '*') # Compensate for 2 added "words" added in previous step. span2_index = x['object_entity']['start_idx'] + 2 * \ (1 if x['subject_entity']['start_idx'] < x['object_entity']['start_idx'] else 0) text = _mark_span(text, x['object_entity']['word'], span2_index, '#') strs_to_join = [] if with_feature_key: strs_to_join.append('{}:'.format('text')) strs_to_join.append(text) ex = {} if label_names is not None: # put the name of benchmark if the model is generative strs_to_join.insert(0, benchmark_name) ex['targets'] = label_names[x['label']] if x['label'] >= 0 else '<unk>' else: ex['targets'] = x['label'] if x['label'] >= 0 else no_label_idx joined = sep.join(strs_to_join) ex['inputs'] = joined ex['id'] = x['id'] return ex @seq_pipe.map_over_dataset def base_preproc_for_conditional_generation( x, prefix, input_keys, with_feature_key=True, sep=' '): strs_to_join = [] for key in input_keys: if with_feature_key: strs_to_join.append('{}:'.format(key)) strs_to_join.append(x[key]) ex = {} strs_to_join.insert(0, prefix) joined = sep.join(strs_to_join) ex['inputs'] = joined return ex @seq_pipe.map_over_dataset def preprocess_quad(x, benchmark_name, include_context=True, impossible_answer_text='impossible', pad_punct=True): a = x['answers']['text'] q = x['question'] c = x['context'] if pad_punct: a = [_pad_punctuation_general(txt) for txt in a] q = _pad_punctuation_general(q) c = _pad_punctuation_general(c) strs_to_join = [] if include_context: strs_to_join.extend(['question:', q, 'context:', c]) else: strs_to_join.extend(['trivia question:', q]) strs_to_join.insert(0, benchmark_name) inputs = _string_join(strs_to_join) if 'is_impossible' in x: if x['is_impossible']: label = impossible_answer_text else: label = a[0] else: label = a[0] return { 'inputs': inputs, 'targets': label, 'id': x['id'], 'context': c, 'question': q, 'answers': a } _KLUE_NER_TAG=[ "B-DT", "I-DT", 
"B-LC", "I-LC", "B-OG", "I-OG", "B-PS", "I-PS", "B-QT", "I-QT", "B-TI", "I-TI", "O", ] def create_ner_example(chrs, tags): text = ''.join(chrs) ne_seq = [] start_idx = 0 tag_stack = [] chr_stack = [] for t_idx, tag in enumerate(tags): if tag.startswith('B'): if len(chr_stack) > 0: form = ''.join(chr_stack) ne_seq.append({'form': form, 'begin': start_idx, 'end': start_idx+len(form), 'label': tag_stack[0]}) chr_stack.clear() tag_stack.clear() start_idx = t_idx tag_stack.append(tag.split('-')[-1]) chr_stack.append(chrs[t_idx]) elif tag.startswith('I'): chr_stack.append(chrs[t_idx]) else: if len(chr_stack) > 0: form = ''.join(chr_stack) ne_seq.append({'form': form, 'begin': start_idx, 'end': start_idx+len(form), 'label': tag_stack[0]}) chr_stack.clear() tag_stack.clear() return text, ne_seq @seq_pipe.map_over_dataset def klue_ne_example_fmt(x): chrs = x["tokens"] tags = map(lambda x: _KLUE_NER_TAG[x], x["ner_tags"]) text, ne_seq = create_ner_example(chrs, tags) ret = { "NE": ne_seq, "text": text, } return ret @seq_pipe.map_over_dataset def tokenize_and_preproc_iob24klue(x, output_features, tags=None, iob2_tags=None, tag_label='NE', input_key='inputs', info4klue=True): ret = {} inputs = x[input_key] tokenizer = output_features[input_key].tokenizer ret[f'{input_key}_pretokenized'] = inputs input_hf = tokenizer(inputs) input_ids = input_hf.input_ids if info4klue: ret['klue_metric'] = {} ret['klue_metric']['char_to_token'] = [ input_hf.char_to_token(pos) for pos in range(len(inputs))] ret[f'{input_key}'] = input_ids #ret['char_to_token'] = {k:v for k, v in enumerate(char_to_token)} #ret[f'{input_key}_tokens'] = [tokenizer._convert_id_to_token(x) for x in input_ids] if tags and iob2_tags: outside_label = iob2_tags.index('O') tag_labels = x[tag_label] labels = np.ones_like(input_ids, dtype=np.int32) * outside_label if info4klue: ret['klue_metric']['char_tag'] = np.ones_like( ret['klue_metric']['char_to_token'], dtype=np.int32) * outside_label for tgl in tag_labels: label_txt 
= tgl['label'] if label_txt != 'O': if info4klue: for idx, pos_idx in enumerate(list(range(tgl['begin'], tgl['end']))): if idx == 0: ret['klue_metric']['char_tag'][pos_idx] = iob2_tags.index( 'B-'+label_txt) else: ret['klue_metric']['char_tag'][pos_idx] = iob2_tags.index( 'I-'+label_txt) pos_list = [input_hf.char_to_token( pos) for pos in range(tgl['begin'], tgl['end'])] #pos_list = copy.deepcopy(char_to_token[begin:end]) # there is None position in the case consecutive white spaces. pos_list = [x for x in pos_list if x is not None] token_set = set(pos_list) token_set_order = sorted(list(token_set)) for iter_idx, tk_idx in enumerate(token_set_order): if iter_idx == 0: labels[tk_idx] = iob2_tags.index('B-'+label_txt) else: labels[tk_idx] = iob2_tags.index('I-'+label_txt) ret['targets'] = labels return ret @seq_pipe.map_over_dataset def tokenize_and_preproc_iob2(x, output_features, tags=None, iob2_tags=None, tag_label='NE', input_key='inputs', info4klue=True): ret = {} inputs = x[input_key] tokenizer = output_features[input_key].tokenizer ret[f'{input_key}_pretokenized'] = inputs input_hf = tokenizer(inputs) input_ids = input_hf.input_ids if info4klue: ret['klue_metric'] = {} ret['klue_metric']['char_to_token'] = [ input_hf.char_to_token(pos) for pos in range(len(inputs))] ret[f'{input_key}'] = input_ids #ret['char_to_token'] = {k:v for k, v in enumerate(char_to_token)} #ret[f'{input_key}_tokens'] = [tokenizer._convert_id_to_token(x) for x in input_ids] if tags and iob2_tags: outside_label = iob2_tags.index('O') tag_labels = x[tag_label] labels = np.ones_like(input_ids, dtype=np.int32) * outside_label if info4klue: ret['klue_metric']['char_tag'] = np.ones_like( ret['klue_metric']['char_to_token'], dtype=np.int32) * outside_label for begin, end, label in zip(tag_labels['begin'], tag_labels['end'], tag_labels['label']): label_txt = tags[label] if label_txt != 'O': if info4klue: for idx, pos_idx in enumerate(list(range(begin, end))): if idx == 0: 
ret['klue_metric']['char_tag'][pos_idx] = iob2_tags.index( 'B-'+label_txt) else: ret['klue_metric']['char_tag'][pos_idx] = iob2_tags.index( 'I-'+label_txt) pos_list = [input_hf.char_to_token( pos) for pos in range(begin, end)] #pos_list = copy.deepcopy(char_to_token[begin:end]) # there is None position in the case consecutive white spaces. pos_list = [x for x in pos_list if x is not None] token_set = set(pos_list) token_set_order = sorted(list(token_set)) for iter_idx, tk_idx in enumerate(token_set_order): if iter_idx == 0: labels[tk_idx] = iob2_tags.index('B-'+label_txt) else: labels[tk_idx] = iob2_tags.index('I-'+label_txt) ret['targets'] = labels return ret @seq_pipe.map_over_dataset def tokenize_re_with_tk_idx(x, output_features, input_key='inputs'): ret = {} inputs = x[input_key] tokenizer = output_features[input_key].tokenizer ret[f'{input_key}_pretokenized'] = inputs input_hf = tokenizer(inputs) input_ids = input_hf.input_ids subject_entity = x['subject_entity'] object_entity = x['object_entity'] subject_tk_idx = [ input_hf.char_to_token(x) for x in range( subject_entity['start_idx'], subject_entity['end_idx'] ) ] subject_tk_idx = [x for x in subject_tk_idx if x is not None] subject_tk_idx = sorted(set(subject_tk_idx)) subject_start = subject_tk_idx[0] subject_end = subject_tk_idx[-1] object_tk_idx = [ input_hf.char_to_token(x) for x in range( object_entity['start_idx'], object_entity['end_idx'] ) ] object_tk_idx = [x for x in object_tk_idx if x is not None] object_tk_idx = sorted(set(object_tk_idx)) object_start = object_tk_idx[0] object_end = object_tk_idx[-1] ret['entity_token_idx'] = np.array([[subject_start, subject_end], [object_start, object_end]]) ret['inputs'] = input_ids return ret @seq_pipe.map_over_dataset def re_preproc_for_classification_with_idx( x, benchmark_name, label_names=None, no_label_idx=0, with_feature_key=True, sep=' '): # mark span using start index of the entity def _mark_span(text, span_str, span_idx, mark): pattern_tmpl = 
r'^((?:[\S\s]){N})(W)' pattern_tmpl = pattern_tmpl.replace('N', str(span_idx)) pattern = pattern_tmpl.replace('W', span_str) return re.sub(pattern, r'\1{0}\2{0}'.format(mark), text) # '*' for subejct entity '#' for object entity. text = x["sentence"] text = _mark_span(text, x['subject_entity']['word'], x['subject_entity']['start_idx'], '*') sbj_st, sbj_end, sbj_form = x['subject_entity']['start_idx'], x['subject_entity']['end_idx'], x['subject_entity']['word'] obj_st, obj_end, obj_form = x['object_entity']['start_idx'], x['object_entity']['end_idx'], x['object_entity']['word'] sbj_end += 2 obj_end += 2 if sbj_st < obj_st: obj_st += 2 obj_end += 2 else: sbj_st += 2 sbj_end += 2 # Compensate for 2 added "words" added in previous step. span2_index = x['object_entity']['start_idx'] + 2 * (1 if x['subject_entity']['start_idx'] < x['object_entity']['start_idx'] else 0) text = _mark_span(text, x['object_entity']['word'], span2_index, '#') strs_to_join = [] if with_feature_key: strs_to_join.append('{}:'.format('text')) strs_to_join.append(text) ex = {} if label_names is not None: # put the name of benchmark if the model is generative strs_to_join.insert(0, benchmark_name) ex['targets'] = label_names[x['label']] if x['label'] >= 0 else '<unk>' else: ex['targets'] = x['label'] if x['label'] >= 0 else no_label_idx
# -*- coding: utf-8 -*-
import re


def File_NetOut(f, netno, name, net, total):
    """Write one net's pin list to *f* (8 entries per line) plus a summary line.

    f      -- writable text file object
    netno  -- 1-based sequence number of the net
    name   -- net name
    net    -- list of pin strings, e.g. 'R015(2)'
    total  -- running total of pins written so far (including this net)
    """
    for i, w in enumerate(net):
        if i > 0:
            f.write(', ')
        # Break the line after every 8th entry (never before the first entry).
        if (i // 8) > 0 and (i % 8) == 0:
            f.write('\n')
        f.write(w)
    f.write('\n')
    # NOTE: renamed from `str` — the original shadowed the builtin.
    summary = "##### NET No.{0:6d}, Name:{1:10s}, Parts:{2:6d}, Total:{3:8d} #####\n".format(netno, name, len(net), total)
    f.write(summary)


#************************************************************************************************************************************************
#/TEC    R015(2) C411(2) C308(1) T14(1);
#/S5V    R328(2) C325(2) J3(22) C326(2) R325(2) R323(2) R011(1) C006(2),
#        Q001(2);
#/A5VL   C328(1) C306(2) U301(106) U301(41) U301(70) U301(55) U301(128) C302(2),
#        U301(18) C310(1) C021(1) U301(29) C309(1) U301(107) U301(99) U301(144),
#        U301(113) U301(145) C305(1) U301(81) U301(114) U301(92) C301(1) U301(3),
#        SHORT7(2);
#/RFI    C332(2) U309(2) U301(91);
#
# FORMAT : Calay
def Calay_Read(rfname, wfname='NET.TXT'):
    """Parse a Calay-format netlist file.

    Every input line is echoed to *wfname*, and each completed net is dumped
    there via File_NetOut.  A net's first line starts with its name; a
    trailing ',' continues the net on the next line and a trailing ';' ends it.

    Returns (netname, netlist, rfname): the list of net names, the matching
    list of sorted pin lists, and the input file name.
    """
    netname = []
    netlist = []
    net = []
    name = ''
    n = 0   # number of lines already consumed for the current net
    t = 0   # running total of pins over all nets
    # Use context managers so both files are closed even on error
    # (the original leaked the input file handle).
    with open(wfname, 'w') as f, open(rfname, 'r') as src:
        for line in src:
            f.write(line)
            line = line.replace('\n', '')   # strip newline
            line = line.replace('\r', '')   # strip carriage return
            words = re.split(" +", line)    # split the line on runs of spaces
            if n == 0:
                # First line of a net: remember the net name.
                name = words[0].strip()
            if len(words) > 0:              # when the line has content
                w = words[-1]
                if w == '':
                    net_done = 1
                elif w[-1] == ';':
                    # ';' terminates the net: drop it and store the pins.
                    words[-1] = w[:-1]
                    for w in words[1:]:
                        w = w.strip()
                        if len(w) > 0:
                            net.append(w)
                    net_done = 1
                elif w[-1] == ',':
                    # ',' means the net continues on the following line.
                    words[-1] = w[:-1]
                    for w in words[1:]:
                        w = w.strip()
                        if len(w) > 0:
                            net.append(w)
                    net_done = 0
                else:
                    net_done = 1
            else:
                net_done = 1
            if net_done == 1:
                if name != '' and len(net) > 0:
                    net.sort()              # keep each net's pins sorted
                    netname.append(name)
                    netlist.append(net)
                    t += len(net)
                    File_NetOut(f, len(netname), name, net, t)
                net = []
                name = ''
                n = 0
            else:
                n += 1
    return (netname, netlist, rfname)
#net1 = Calay_Read('01_calay.net', 'NET1.TXT')
#print(net1[0])
#print(net1[1]) def Calay_Write(netlist, wfname = 'calay.net') : f = open(wfname, 'w') name_n = len(netlist[0]); name_n -= 1 for i, (name, net) in enumerate(zip(netlist[0], netlist[1])) : net_n = len(net); net_n -= 1 str = '{0:20s}\t'.format(name) f.write(str) for j, w in enumerate(net) : str = '{0:s}'.format(w) f.write(str) if j >= net_n : f.write(";\n") elif (j % 8) == 7 : f.write(",\n \t") else : f.write(" ") f.close() #Calay_Write(net, 'calay.net') #************************************************************************************************************************************************ #N220027: U303(21),R371(1),C108(1),R373(1); #N220028: R373(2),U302(26); #N220029: U303(20),R370(1); #SA5V: R011(2),U401(8),C121(2),U310(5),C412(1),R320(2); #A5V: C110(2),U309(5),C324(2),U302(6),C315(1),R012(2), # C005(1),R003(1),L001(1),Q001(3); #S5V: R328(2),C325(2),C326(2),J3(22),R011(1),Q001(2),C006(2), # R323(2),R325(2); # # FORMAT : CR-3000, CR-5000PWS, CR-8000(CCF) def CCF_Read(rfname, wfname = 'NET.TXT') : f = open(wfname, 'w') netname = []; netlist = []; net = [] name = ''; n = 0; t = 0 for line in open(rfname, 'r'): f.write(line) line = line.replace('\n', '') #改行削除 line = line.replace('\r', '') #改行削除 #print(line) if n == 0 : #ネット名前を記憶 words = line.split(':', 1) name = words[0].strip() if len(words) > 1 : words = (words[1].strip()).split(',') else : words = [] else : words = (line.strip()).split(',') #print(words) if len(words) > 0 : #ネットが存在する時 w = words[-1].strip() if w == '' : #最後が''の時 del words[-1] #''削除 for w in words : #ネットを記憶 net.append(w.strip()) EOF = 0 elif w[-1] == ';' : #最後の文字が';'の時 words[-1] = w[:-1] #';'削除の上、再登録 for w in words : #ネットを記憶 w = w.strip() if len(w) > 0 : net.append(w) EOF = 1 #print(name) #print(net) #print() else : EOF = 1 else : EOF = 1 if EOF == 1 : if name != '' and len(net) > 0 : net.sort() #ネットリストの並べ替え netname.append(name) netlist.append(net) t += len(net) File_NetOut(f, len(netname), name, net, t) net = []; name = '' n = 0 else : n += 
1 f.close() return (netname, netlist, rfname) #net2 = CCF_Read('02_ccf.ccf', 'NET2.TXT') #print(net2[0]) #print(net2[1]) def CCF_Write(netlist, wfname = 'ccf.net') : f = open(wfname, 'w') name_n = len(netlist[0]); name_n -= 1 for i, (name, net) in enumerate(zip(netlist[0], netlist[1])) : net_n = len(net); net_n -= 1 str = '{0:20s}'.format(name + ":") f.write(str) for j, w in enumerate(net) : str = '{0:s}'.format(w) f.write(str) if j >= net_n : f.write(";\n") elif (j % 8) == 7 : f.write(",\n ") else : f.write(",") f.close() #CCF_Write(net, 'ccf.net') #************************************************************************************************************************************************ #1273 [RA21-7,T80-1,U201-97] #1274 [RA21-6,T81-1,U201-94] #A3.3V [C006-2,C302-2,C306-2,C340-2,C341-2,C342-2,R011-1, # T23-1,U301-72,U301-76,U301-85,U301-97,U301-102, # U301-104] #A5V [C005-1,C021-1,C110-2,C315-1,C324-2,C325-2,C327-1, # C328-1,C413-1,L002-1,Q001-3,R003-2,R328-2,T82-1,U302-6, # U302-37,U309-5,U318-5] # # FORMAT : DK-Σ def DKS_Read(rfname, wfname = 'NET.TXT') : f = open(wfname, 'w') netname = []; netlist = []; net = [] name = ''; n = 0; t = 0 for line in open(rfname, 'r'): f.write(line) line = line.replace('\n', '') #改行削除 line = line.replace('\r', '') #改行削除 #print(line) if n == 0 : #ネット名前を記憶 words = line.split('[', 1) name = words[0].strip() if len(words) > 1 : words = (words[1].strip()).split(',') else : words = [] else : words = (line.strip()).split(',') #print(words) if len(words) > 0 : #ネットが存在する時 w = words[-1].strip() if w == '' : #最後が''の時 del words[-1] #''削除 for w in words : #ネットを記憶 w = w.strip() if len(w) > 0 : w = w.replace('-', '(', 1) + ')' net.append(w) EOF = 0 elif w[-1] == ']' : #最後の文字が']'の時 words[-1] = w[:-1] #']'削除の上、再登録 for w in words : #ネットを記憶 w = w.strip() if len(w) > 0 : w = w.replace('-', '(', 1) + ')' net.append(w) EOF = 1 #print(name) #print(net) #print() else : EOF = 1 else : EOF = 1 if EOF == 1 : if name != '' and len(net) > 0 : net.sort() 
#ネットリストの並べ替え netname.append(name) netlist.append(net) t += len(net) File_NetOut(f, len(netname), name, net, t) net = []; name = '' n = 0 else : n += 1 f.close() return (netname, netlist, rfname) #net = DKS_Read('03_dks.net', 'NET.TXT') #print(net[0]) #print(net[1]) def DKS_Write(netlist, wfname = 'dks.net') : f = open(wfname, 'w') name_n = len(netlist[0]); name_n -= 1 for i, (name, net) in enumerate(zip(netlist[0], netlist[1])) : net_n = len(net); net_n -= 1 str = '{0:20s}['.format(name) f.write(str) for j, w in enumerate(net) : str = '{0:s}'.format((w.replace('(', '-', 1)).rstrip(')') ) f.write(str) if j >= net_n : f.write("]\n") elif (j % 8) == 7 : f.write(",\n ") else : f.write(",") f.close() #DKS_Write(net, 'dks.net') #************************************************************************************************************************************************ #$NET #DCIN ; U101-2, U102-2, U102-3, U103-2, C104-2, C105-2, C106-1, # C108-1, C109-1, C113-1, D105-4, FL101-1, FL101-4, P101-1, # R240-1, R252-2 #VINUPS ; D105-1, SHORT2-1 # ; U1-2, U2-1, C1-1, C10-1, C11-2, R11-2 # ; U1-3, C9-2, R5-2 # ; U1-5, R3-1, R4-2 #VOUTUPS ; D105-3, FL103-2, R217-2, SHORT3-1 #$END #$THM #$VDD1 ; U4-14, C106-1, C108-1, C113-1, C16-1, FL101-1, FL101-4, #$ J101-44B, J101-59A, J3-1, SHORT1-2, SHORT2-1, SHORT3-1 #$END # # FORMAT : MM-2 def MM2_Read(rfname, wfname = 'NET.TXT') : f = open(wfname, 'w') netname = []; netlist = []; net = [] name = ''; n = 0; t = 0 for line in open(rfname, 'r'): f.write(line) line = line.replace('\n', '') #改行削除 line = line.replace('\r', '') #改行削除 #print(line) if n == 0 : #ネット名前を記憶 words = line.split(';', 1) name = words[0].strip() if name == '' : #ネット名前が無い時に'@NET'で始まる名前を自動生成 name = "@NET{0}".format(len(netname) + 1) if len(words) > 1 : words = (words[1].strip()).split(',') else : words = [] else : #前のネットの続き if line[0] == '$' : #'$'で始まる場合、'$'を削除 line = line[1:] words = (line.strip()).split(',') #print(words) if len(words) > 0 : #ネットが存在する時 w = words[-1] if 
w == '' : #最後が','の時 del words[-1] #''削除 for w in words : #ネットを記憶 w = w.strip() if len(w) > 0 : w = w.replace('-', '(', 1) + ')' net.append(w) EOF = 0 else : for w in words : #ネットを記憶 w = w.strip() if len(w) > 0 : w = w.replace('-', '(', 1) + ')' net.append(w) EOF = 1 #print(name) #print(net) #print() else : EOF = 1 if EOF == 1 : if name != '' and len(net) > 0 : net.sort() #ネットリストの並べ替え netname.append(name) netlist.append(net) t += len(net) File_NetOut(f, len(netname), name, net, t) net = []; name = '' n = 0 else : n += 1 f.close() return (netname, netlist, rfname) #net = MM2_Read('04_mm2.net', 'NET.TXT') #print(net[0]) #print(net[1]) def MM2_Write(netlist, wfname = 'mm2.net') : f = open(wfname, 'w') name_n = len(netlist[0]); name_n -= 1 f.write("$NET\n") for i, (name, net) in enumerate(zip(netlist[0], netlist[1])) : net_n = len(net); net_n -= 1 str = '{0:20s}; '.format(name) f.write(str) for j, w in enumerate(net) : str = '{0:s}'.format((w.replace('(', '-', 1)).rstrip(')')
# ---------------------------------------------------------------------------
# NOTE(review): everything below is machine-generated PyXB binding code
# (PyXB 1.2.3, source schema wsdl20.xsd per the Location paths).  It uses
# Python 2 syntax (0L long literals, u'' strings) and must be regenerated
# with pyxbgen rather than edited by hand.
# ---------------------------------------------------------------------------

# Tail of _BuildAutomaton_ (its beginning precedes this chunk): closes the
# single-state automaton for DocumentedType.
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
DocumentedType._Automaton = _BuildAutomaton_()


# Content model for ExtensibleDocumentedType: zero or more
# wsdl:documentation elements (one state with a counted self-loop).
def _BuildAutomaton_2 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_2
    del _BuildAutomaton_2
    import pyxb.utils.fac as fac

    counters = set()
    cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 37, 6))
    counters.add(cc_0)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(ExtensibleDocumentedType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 37, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
ExtensibleDocumentedType._Automaton = _BuildAutomaton_2()


# Register the child elements declared for wsdl:description in wsdl20.xsd.
DescriptionType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'import'), ImportType, scope=DescriptionType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 105, 2)))

DescriptionType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'include'), IncludeType, scope=DescriptionType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 118, 2)))

DescriptionType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'types'), TypesType, scope=DescriptionType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 130, 2)))

DescriptionType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'interface'), InterfaceType, scope=DescriptionType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 142, 2)))

DescriptionType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'binding'), BindingType, scope=DescriptionType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 250, 2)))

DescriptionType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'service'), ServiceType, scope=DescriptionType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 316, 2)))


# Content model for DescriptionType: documentation* followed by any mix of
# import/include/types/interface/binding/service or non-wsdl extension
# elements (lax wildcard); st_1..st_7 are fully interconnected.
def _BuildAutomaton_3 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_3
    del _BuildAutomaton_3
    import pyxb.utils.fac as fac

    counters = set()
    cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 37, 6))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 90, 8))
    counters.add(cc_1)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(DescriptionType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 37, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(DescriptionType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'import')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 91, 10))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(DescriptionType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'include')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 92, 10))
    st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(DescriptionType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'types')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 93, 10))
    st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(DescriptionType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'interface')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 94, 10))
    st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_4)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(DescriptionType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'binding')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 95, 7))
    st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_5)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(DescriptionType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'service')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 96, 7))
    st_6 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_6)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, u'http://www.w3.org/ns/wsdl')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 97, 10))
    st_7 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_7)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_7, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_7, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_7, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_2._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_7, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_3._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_7, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_4._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_7, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_5._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_7, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_6._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_4, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_5, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_6, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_7, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_7._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
DescriptionType._Automaton = _BuildAutomaton_3()


# Content model for ImportType: documentation* then non-wsdl extension
# elements (strict wildcard)*.
def _BuildAutomaton_4 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_4
    del _BuildAutomaton_4
    import pyxb.utils.fac as fac

    counters = set()
    cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 37, 6))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 110, 7))
    counters.add(cc_1)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(ImportType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 37, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_strict, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, u'http://www.w3.org/ns/wsdl')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 110, 7))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_1._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
ImportType._Automaton = _BuildAutomaton_4()


# Content model for IncludeType: same shape as ImportType's automaton.
def _BuildAutomaton_5 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_5
    del _BuildAutomaton_5
    import pyxb.utils.fac as fac

    counters = set()
    cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 37, 6))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 123, 7))
    counters.add(cc_1)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(IncludeType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 37, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_strict, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, u'http://www.w3.org/ns/wsdl')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 123, 7))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_1._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
IncludeType._Automaton = _BuildAutomaton_5()


# Content model for TypesType: same shape as ImportType's automaton.
def _BuildAutomaton_6 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_6
    del _BuildAutomaton_6
    import pyxb.utils.fac as fac

    counters = set()
    cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 37, 6))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 135, 7))
    counters.add(cc_1)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(TypesType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 37, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_strict, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, u'http://www.w3.org/ns/wsdl')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 135, 7))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_1._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
TypesType._Automaton = _BuildAutomaton_6()


# Register the child elements declared for wsdl:interface.
InterfaceType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'operation'), InterfaceOperationType, scope=InterfaceType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 156, 10)))

InterfaceType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'fault'), InterfaceFaultType, scope=InterfaceType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 157, 10)))


# Content model for InterfaceType: documentation* then any mix of
# operation/fault or non-wsdl extension elements (lax wildcard).
def _BuildAutomaton_7 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_7
    del _BuildAutomaton_7
    import pyxb.utils.fac as fac

    counters = set()
    cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 37, 6))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 155, 8))
    counters.add(cc_1)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(InterfaceType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 37, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(InterfaceType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'operation')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 156, 10))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(InterfaceType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'fault')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 157, 10))
    st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, u'http://www.w3.org/ns/wsdl')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 158, 10))
    st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_2._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_3._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
InterfaceType._Automaton = _BuildAutomaton_7()


# Register the child elements declared for wsdl:interface/operation.
InterfaceOperationType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'input'), MessageRefType, scope=InterfaceOperationType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 179, 10)))

InterfaceOperationType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'output'), MessageRefType, scope=InterfaceOperationType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 180, 10)))

InterfaceOperationType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'infault'), MessageRefFaultType, scope=InterfaceOperationType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 181, 10)))

InterfaceOperationType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'outfault'), MessageRefFaultType, scope=InterfaceOperationType, location=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 182, 10)))


# Content model for InterfaceOperationType.
def _BuildAutomaton_8 ():
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_8
    del _BuildAutomaton_8
    import pyxb.utils.fac as fac

    counters = set()
    cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 37, 6))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 178, 8))
    counters.add(cc_1)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(InterfaceOperationType._UseForTag(pyxb.namespace.ExpandedName(Namespace, u'documentation')), pyxb.utils.utility.Location('/tmp/pyxbdist.FvaD3zy/PyXB-1.2.3/pyxb/bundles/wssplat/schemas/wsdl20.xsd', 37, 6))
    # NOTE(review): the file is corrupted/truncated here, mid-statement
    # (the `st_0 = fac.State(...)` assignment and the rest of this generated
    # function are missing); unrelated file content follows.  Regenerate the
    # bindings with pyxbgen to restore it.
    st_0
from __future__ import division import numpy as np from numpy import pi, sqrt, exp, power, log, log10 import os import constants as ct import particle as pt import tools as tl ############################## # Preparing SKA configurations ############################## def initialize(): """This routine is supposed to be run only once, \ i.e. when the module is loaded, therefore\ the I/O is not optimized for speed concerns. """ SKA_conf = {} # # -------------- for exper in ['low', 'mid']: # if exper == "low": # path = local_path + "/data/SKA1-low_accumu.csv" # elif exper == "mid": # path = local_path + "/data/SKA1-mid_accumu.csv" # data_raw = np.loadtxt(path, delimiter=',') # radius = data_raw[:, 0] # fraction = data_raw[:, 1] # bins_radius = np.logspace(1, 5, 20) # bin it # hist_radius = np.interp(np.log10(bins_radius), np.log10( # radius), fraction, left=0) # sample at the bin edges # if exper == "low": # # compute the x-y coordinates of all units # x_arr, y_arr = get_telescope_coordinate( # fraction*ct._SKALow_number_of_stations_, radius, SKA=exper) # # save it # SKA_conf['low radius'] = (data_raw, x_arr, y_arr, bins_radius, # hist_radius) # elif exper == "mid": # x_arr, y_arr = get_telescope_coordinate( # fraction*ct._SKA1Mid_number_of_dishes_, radius, SKA=exper) # SKA_conf['mid radius'] = (data_raw, x_arr, y_arr, bins_radius, # hist_radius) # get coordinates if exper == "low": SKA_conf['low0'] = np.loadtxt( local_path + "/data/SKA1_config_low0.csv", delimiter=',') SKA_conf['low1'] = np.loadtxt( local_path + "/data/SKA1_config_low1.csv", delimiter=',') SKA_conf['low2'] = np.loadtxt( local_path + "/data/SKA1_config_low2_6clusters.csv", delimiter=',') # update clusters, it's 6 stations per cluster new_arr = [] for xy in (SKA_conf['low2']): for j in range(2): for k in range(3): x = xy[0] + j*50 y = xy[1] + (k-1)*50 new_arr.append([x, y]) new_arr = np.array(new_arr) SKA_conf['low2'] = new_arr # combine them SKA_conf['low_coord'] = np.concatenate( (SKA_conf['low0'], 
SKA_conf['low1'], SKA_conf['low2'])) x_arr = SKA_conf['low_coord'][:, 0] y_arr = SKA_conf['low_coord'][:, 1] elif exper == "mid": SKA_conf['mid0_MeerKAT'] = np.loadtxt( local_path + "/data/SKA1_config_mid0_MK.csv", delimiter=',') SKA_conf['mid0_SKA'] = np.loadtxt( local_path + "/data/SKA1_config_mid0_SKA.csv", delimiter=',') SKA_conf['mid1_MeerKAT'] = np.loadtxt( local_path + "/data/SKA1_config_mid1_MK.csv", delimiter=',') SKA_conf['mid1_SKA'] = np.loadtxt( local_path + "/data/SKA1_config_mid1_SKA.csv", delimiter=',') SKA_conf['mid2_SKA'] = np.loadtxt( local_path + "/data/SKA1_config_mid2_SKA.csv", delimiter=',') # combine them SKA_conf['mid_coord'] = np.concatenate( (SKA_conf['mid0_MeerKAT'], SKA_conf['mid0_SKA'], SKA_conf['mid1_MeerKAT'], SKA_conf['mid1_SKA'], SKA_conf['mid2_SKA'])) # convert km to m SKA_conf['mid_coord'][:, 0] = SKA_conf['mid_coord'][:, 0]*1.e3 SKA_conf['mid_coord'][:, 1] = SKA_conf['mid_coord'][:, 1]*1.e3 x_arr = SKA_conf['mid_coord'][:, 0] y_arr = SKA_conf['mid_coord'][:, 1] # get baseline distribution baseline_arr = get_baseline(x_arr, y_arr) hist_baseline, bins_baseline = np.histogram( baseline_arr, bins=np.logspace(1, 5, 20000)) # correcting the over-counting of baseline pair hist_baseline = hist_baseline/2. 
hist_baseline_cumsum = np.cumsum(hist_baseline) # save it if exper == "low": SKA_conf['low baseline'] = ( baseline_arr, hist_baseline, bins_baseline, hist_baseline_cumsum) elif exper == "mid": SKA_conf['mid baseline'] = ( baseline_arr, hist_baseline, bins_baseline, hist_baseline_cumsum) # about effective area if exper == "low": path = local_path + "/data/SKA1-low_Aeff_over_Tsys.txt" data_raw = np.loadtxt(path) # low is given in MHz, convert to GHz data_raw[:, 0] = data_raw[:, 0] * 1.e-3 SKA_conf['low A/T'] = data_raw elif exper == "mid": path = local_path + "/data/SKA1-mid_Aeff_over_Tsys.txt" data_raw = np.loadtxt(path) SKA_conf['mid A/T'] = data_raw SKA_conf['A/T'] = np.concatenate((SKA_conf['low A/T'], SKA_conf['mid A/T'])) # computing efficiency # make a nu grid Nsteps = 2001 nulow = np.logspace(log10(ct._nu_min_ska_low_), log10( ct._nu_max_ska_low_), Nsteps//2)[1:] # ... and SKA mid... numid = np.logspace(log10(ct._nu_min_ska_mid_), log10( ct._nu_max_ska_mid_), Nsteps - Nsteps//2)[1:] Aeff_over_Tsys = SKA_conf['A/T'] # Mid nu_arr = numid Aeff_over_Tsys_arr = np.interp( nu_arr, Aeff_over_Tsys[:, 0], Aeff_over_Tsys[:, 2]) Tsys_arr = T_sys_mid(nu_arr) eta_arr = Aeff_over_Tsys_arr * Tsys_arr / ct._area_ska_mid_ SKA_conf['eta mid'] = (nu_arr, eta_arr) # Low nu_arr = nulow Aeff_over_Tsys_arr = np.interp( nu_arr, Aeff_over_Tsys[:, 0], Aeff_over_Tsys[:, 2]) Tsys_arr = T_sys_low(nu_arr) eta_arr = Aeff_over_Tsys_arr * Tsys_arr / ct._area_ska_low_ SKA_conf['eta low'] = (nu_arr, eta_arr) # combined storage nu_arr = np.concatenate((SKA_conf['eta low'][0], SKA_conf['eta mid'][0])) eta_arr = np.concatenate((SKA_conf['eta low'][1], SKA_conf['eta mid'][1])) SKA_conf['eta'] = (nu_arr, eta_arr) return SKA_conf ################ # SKA properties ################ def SKA_get_active_baseline(length, exper_mode): """Get the active number of baselines in the interferometry mode :param length: critical baseline below which the signal can be resolved :param exper_mode: "SKA low" or "SKA 
mid" :returns: number of baselines that sees the signal """ length_arr, is_scalar = tl.treat_as_arr(length) if exper_mode == "SKA low": (baseline_arr, hist_baseline, bins_baseline, hist_baseline_cumsum) = SKA_conf['low baseline'] if exper_mode == "SKA mid": (baseline_arr, hist_baseline, bins_baseline, hist_baseline_cumsum) = SKA_conf['mid baseline'] res = np.interp(np.log(length_arr), np.log(bins_baseline[:-1]), hist_baseline_cumsum, left=ct._zero_) if exper_mode == "SKA low": res[length_arr < ct._SKALow_station_diameter_] = ct._zero_ if exper_mode == "SKA mid": res[length_arr < ct._SKA1Mid_dish_diameter_] = ct._zero_ if is_scalar: res = np.squeeze(res) return res def SKA_exper_nu(nu): """ Returns the SKA experiment mode (low/mid) sensitive to the given frequency nu [GHz]. Parameters ---------- nu : frequency [GHz] """ if (nu < ct._nu_min_ska_low_): # frequency below SKA low lower threshold exper_mode = None # just a placeholder, won't matter elif (nu <= ct._nu_max_ska_low_): # frequency within SKA low range exper_mode = 'SKA low' elif (nu <= ct._nu_max_ska_mid_): # frequency within SKA mid range exper_mode = 'SKA mid' else: # frequency above SKA mid upper threshold exper_mode = None # just a placeholder, won't matter return exper_mode def SKA_specs(nu, exper_mode, correlation_mode=None, theta_sig=None): """ Returns the SKA specifications for the given experiment mode and frequency [GHz]: area [m^2], window, receiver noise brightness temperature [K], efficiency, solid angle resolution [sr], number_of_dishes, and number_of_measurements. Parameters ---------- nu : frequency [GHz] exper_mode : mode in which the experiment is working correlation_mode: whether to run in interferometry mode or single dish mode. Default None is meant to raise error if not assigned explicitly. theta_sig: the signal size we want to observe [radian] """ if exper_mode == None: area, window, Tr, eta, Omega_res, number_of_dishes, number_of_measurements = 0., 0., 0., 0., 1.e-100, 0., 0. 
# set to zero so it will raise error if not treated elif exper_mode == 'SKA low' and correlation_mode == "single dish": area = ct._area_ska_low_ window = np.heaviside(nu - ct._nu_min_ska_low_, 1.) * \ np.heaviside(ct._nu_max_ska_low_ - nu, 1.) # Tr = ct._Tr_ska_low_ # DEPRECATED Tr = Trec_low(nu) eta = eta_nu(nu) # finding resolution: wavelength = pt.lambda_from_nu(nu)/100. # wavelength [m] # angular size of pixel resolution [rad] # assuming this is the aperture angle and not the radial angle theta_res = (1.22*wavelength) / \ ct._SKALow_station_diameter_ # /sqrt(eta) Omega_res = ct.angle_to_solid_angle( theta_res) # solid angle of resolution [sr] number_of_dishes = ct._SKALow_number_of_stations_ number_of_measurements = number_of_dishes # Omega_max = np.inf # being sloppy here but we never reach FOV elif exper_mode == 'SKA low' and correlation_mode == "interferometry": window = np.heaviside(nu - ct._nu_min_ska_low_, 1.) * \ np.heaviside(ct._nu_max_ska_low_ - nu, 1.) # Tr = ct._Tr_ska_low_ # DEPRECATED Tr = Trec_low(nu) eta = eta_nu(nu) # get the required baseline length for nu wavelength = pt.lambda_from_nu(nu) / 100. # wavelength [m] critical_baseline_length = ( 1.22*wavelength) / (theta_sig)\ * ct._SKA_factor_lose_signal_ # fudge factor for when invisible # get the active number of baselines active_number_of_baselines = SKA_get_active_baseline( critical_baseline_length, exper_mode='SKA low') # taking the resolution to be exactly the signal size # penalty is taken care of through active_number_of_baselines theta_res = theta_sig Omega_res = ct.angle_to_solid_angle( theta_res) # solid angle of resolution [sr] # for interferometry mode noise has 1/sqrt(number of active baselines) number_of_measurements = active_number_of_baselines # NOTE: N.B.: this reception area is the total area, and is correct only assuming all dishes/stations contribute # which is NOT true for large signal angular size. 
The code needs to be updated to include the fact that # only active dishes/stations/telescopes are contributing. Thus, for large signal angular sizes, # the individual values of the S and N CANNOT BE TRUSTED. # However, since S and N scale the same with reception area, S/N cancels out # in the end only the number of measurements (baselines) matter. # Therefore, our S/N CAN INDEED be trusted. area = ct._area_ska_low_ number_of_dishes = ct._SKALow_number_of_stations_ elif exper_mode == 'SKA mid' and correlation_mode == "single dish": area = ct._area_ska_mid_ window = np.heaviside(nu - ct._nu_min_ska_mid_, 0.) * \ np.heaviside(ct._nu_max_ska_mid_ - nu, 1.) # Tr = ct._Tr_ska_mid_ # DEPRECATED, AND INCONSISTENT Tr = Trec_mid(nu) eta = eta_nu(nu) # finding resolution: wavelength = pt.lambda_from_nu(nu)/100. # wavelength [m] # angular size of pixel resolution [rad] # assuming this is the aperture angle and not the radial angle # theta_res = (1.22*wavelength)/sqrt(eta*4.*area/pi) theta_res = (1.22*wavelength)/ct._SKA1Mid_dish_diameter_ # /sqrt(eta) Omega_res = ct.angle_to_solid_angle( theta_res) # solid angle
#!/usr/bin/env python from __future__ import print_function, division import os import tarfile import pandas as pd import numpy as np import gzip import shutil import itertools import multiprocessing as mp import astropy.units as u from astropy.table import Table from astropy.coordinates import SkyCoord from astropy.io import ascii, fits from astropy import utils, io from astroquery.vizier import Vizier from astroquery.irsa import Irsa from astroquery.vsa import Vsa from astroquery.ukidss import Ukidss from astroquery.sdss import SDSS from dl import queryClient as qc try: from urllib2 import urlopen #python2 from httplib import IncompleteRead from urllib2 import HTTPError except ImportError: from urllib.request import urlopen #python3 from urllib.error import HTTPError from http.client import IncompleteRead #SIA from pyvo.dal import sia import pyvo from qso_toolbox import utils as ut from qso_toolbox import vlass_quicklook # ------------------------------------------------------------------------------ # Supported surveys, data releases, bands # ------------------------------------------------------------------------------ astroquery_dict = { 'tmass': {'service': 'irsa', 'catalog': 'fp_psc', 'ra': 'ra', 'dec': 'dec', 'mag_name': 'TMASS_J', 'mag': 'j_m', 'distance': 'dist', 'data_release': None}, 'nomad': {'service': 'vizier', 'catalog': 'NOMAD', 'ra': 'RAJ2000', 'dec': 'DECJ2000', 'mag_name': 'R', 'mag': 'Rmag', 'distance': 'distance', 'data_release': None}, 'vhsdr6': {'service': 'vsa', 'catalog': 'VHS', 'ra': 'ra', 'dec': 'dec', 'data_release': 'VHSDR6', 'mag_name': 'VHS_J', 'mag': 'jAperMag3', 'distance': 'distance'}, # new, needs to be tested! 
'vikingdr5': {'service': 'vsa', 'catalog': 'VIKING', 'ra': 'ra', 'dec': 'dec', 'data_release': 'VIKINGDR5', 'mag_name': 'VHS_J', 'mag': 'jAperMag3', 'distance': 'distance'} # , # 'sdss': {'service': 'sdss', 'catalog': 'VIKING', # 'ra': 'ra', 'dec': 'dec', # 'data_release': 'VIKINGDR5', 'mag_name': 'VHS_J', # 'mag': 'jAperMag3', 'distance': 'distance'} } datalab_offset_dict = {'des_dr1.main': {'ra': 'ra', 'dec': 'dec', 'mag': 'mag_auto_z', 'mag_name': 'mag_auto_z'}} # To add more surveys from the VISTA Science Archive, this dictionary can be # expanded: vsa_info_dict = {'vhsdr6': ('VHS', 'VHSDR6', 'tilestack'), # new, needs to be tested 'vikingdr5': ('VIKING', 'VIKINGDR5', 'tilestack')} # Surveys as serviced by VSA, append list if necessary (see VSA dictionary # above) vsa_survey_list = ['vhsdr6', 'vikingdr5'] # all surveys that directly allow to download fits files unzipped_download_list = ['desdr1', 'desdr2', 'ps1', 'vhsdr6', 'vikingdr5', '2MASS', 'DSS2', 'skymapper', 'ukidss'] # ------------------------------------------------------------------------------ # Input table manipulation # ------------------------------------------------------------------------------ # copied from http://docs.astropy.org/en/stable/_modules/astropy/io/fits/column.html # L: Logical (Boolean) # B: Unsigned Byte # I: 16-bit Integer # J: 32-bit Integer # K: 64-bit Integer # E: Single-precision Floating Point # D: Double-precision Floating Point # C: Single-precision Complex # M: Double-precision Complex # A: Character fits_to_numpy = {'L': 'i1', 'B': 'u1', 'I': 'i2', 'J': 'i4', 'K': 'i8', 'E': 'f4', 'D': 'f8', 'C': 'c8', 'M': 'c16', 'A': 'a'} def fits_to_hdf(filename): """ Convert fits data table to hdf5 data table. 
:param filename: :return: """ hdu = fits.open(filename) filename = os.path.splitext(filename)[0] df = pd.DataFrame() format_list = ['D', 'J'] dtype_dict = {} # Cycle through all columns in the fits file for idx, column in enumerate(hdu[1].data.columns): # Check whether the column is in a multi-column format if len(column.format) > 1 and column.format[-1] in format_list: n_columns = int(column.format[:-1]) # unWISE specific solution if column.name[:6] == 'unwise' and n_columns == 2: passbands = ['w1', 'w2'] for jdx, passband in enumerate(passbands): new_column_name = column.name + '_' + passband print(new_column_name) df[new_column_name] = hdu[1].data[column.name][:, jdx] numpy_type = fits_to_numpy[column.format[-1]] dtype_dict.update({new_column_name: numpy_type}) # SOLUTIONS FOR OTHER SURVEYS MAY BE APPENDED HERE # else for single columns else: print(column.name) df[column.name] = hdu[1].data[column.name] numpy_type = fits_to_numpy[column.format[-1]] dtype_dict.update({column.name: numpy_type}) # update the dtype for the DataFrame print(dtype_dict) # df = df.astype(dtype_dict) df.to_hdf(filename+'.hdf5', 'data', format='table') def check_if_table_is_pandas_dataframe(table): """ Check whether the supplied table is a pandas Dataframe and convert to it if necessary. This function also returns the original file type. Current file types implemented include: - astropy tables - fits record arrays :param table: object :return: pd.DataFrame, string """ if type(table) == pd.DataFrame: return table, 'pandas_dataframe' elif type(table) == Table: return table.to_pandas(), 'astropy_table' elif type(table) == fits.fitsrec.FITS_rec: return Table(table).to_pandas(), 'fits_rec' def convert_table_to_format(table, format): """ Convert a pandas Dataframe back to an original format. 
Conversions to the following file types are possible: -astropy table :param table: pd.DataFrame :param format: string :return: object """ if format == 'astropy_table': return Table.from_pandas(table) elif format == 'fits_rec': print('Warning: You entered a fits record array. However, this code ' 'does not support this data type. Your table is returned as an' 'astropy table!') return Table.from_pandas(table) else: return table def convert_urltable_to_pandas(data, sep=',', header=0, skip_header=1, skip_footer=1, linesep='\n'): """ :param data: :param sep: :param header: :param skip_header: :param skip_footer: :param linesep: :return: """ data_string = data.read().decode('utf-8').split(linesep) if data_string[0] == 'no rows found': return None else: df = pd.DataFrame(columns=data_string[header].split(sep)) for dat in data_string[skip_header:-skip_footer]: df = df.append(pd.Series(dat.split(sep), index=data_string[0].split(sep)), ignore_index=True) return df # ------------------------------------------------------------------------------ # Download catalog data / Offset star queries # ------------------------------------------------------------------------------ def query_region_astroquery(ra, dec, radius, service, catalog, data_release=None): """ Returns the catalog data of sources within a given radius of a defined position using astroquery. :param ra: float Right ascension :param dec: float Declination :param radius: float Region search radius in arcseconds :param service: string Astroquery class used to query the catalog of choice :param catalog: string Catalog to query :param data_release: If needed by astroquery the specified data release (e.g. 
needed for VSA) :return: pandas.core.frame.DataFrame Returns the dataframe with the returned matches """ target_coord = SkyCoord(ra=ra, dec=dec, unit=(u.deg, u.deg), frame='icrs') if service == 'vizier': result = Vizier.query_region(target_coord, radius=radius * u.arcsecond, catalog=catalog, spatial='Cone') result = result[0] elif service == 'irsa': result = Irsa.query_region(target_coord, radius=radius * u.arcsecond, catalog=catalog, spatial='Cone') elif service == 'vsa': result = Vsa.query_region(target_coord, radius=radius * u.arcsecond, programme_id=catalog, database=data_release) # elif service == 'sdss': # result = SDSS.query_region(target_coord, radius=radius * u.arcsecond, # programme_id=catalog, database=data_release) else: raise KeyError('Astroquery class not recognized. Implemented classes ' 'are: Vizier, Irsa, VSA') return result.to_pandas() def get_astroquery_offset(target_name, target_ra, target_dec, radius, catalog, quality_query=None, n=3, verbosity=0): """Return the n nearest offset stars specified by the quality criteria around a given target using astroquery. :param target_name: string Identifier for the target :param target_ra: float Target right ascension :param target_dec: Target Declination :param radius: float Maximum search radius in arcseconds :param catalog: string Catalog (and data release) to retrieve the offset star data from. See astroquery_dict for implemented catalogs. :param quality_query: string A string written in pandas query syntax to apply quality criteria on potential offset stars around the target. :param n: int Number of offset stars to retrieve. (Maximum: n=5) :param verbosity: Verbosity > 0 will print verbose statements during the execution. :return: pandas.core.frame.DataFrame Returns the dataframe with the retrieved offset stars for the given target. 
""" service = astroquery_dict[catalog]['service'] cat = astroquery_dict[catalog]['catalog'] ra = astroquery_dict[catalog]['ra'] dec = astroquery_dict[catalog]['dec'] mag = astroquery_dict[catalog]['mag'] mag_name = astroquery_dict[catalog]['mag_name'] distance = astroquery_dict[catalog]['distance'] dr = astroquery_dict[catalog]['data_release'] df = query_region_astroquery(target_ra, target_dec, radius, service, cat, dr).copy() if quality_query is not None: df.query(quality_query, inplace=True) if df.shape[0] > 0: # Sort DataFrame by match distance df.sort_values(distance, ascending=True, inplace=True) # Keep only the first three entries offset_df = df[:n] # Build the offset DataFrame offset_df.loc[:, 'target_name'] = target_name offset_df.loc[:, 'target_ra'] = target_ra offset_df.loc[:, 'target_dec'] = target_dec offset_df.loc[:, 'offset_ra'] = df[ra] offset_df.loc[:, 'offset_dec'] = df[dec] for jdx, idx in enumerate(offset_df.index): abc_dict = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4:'E'} letter = abc_dict[jdx] offset_df.loc[idx, 'offset_name'] = target_name + '_offset_' + \ letter offset_df.loc[ idx, 'offset_shortname'] = target_name + '_offset_' + letter offset_df.loc[:, mag_name] = df[mag] # GET THIS INTO A SEPARATE FUNCTION target_coords = SkyCoord(ra=target_ra, dec=target_dec, unit=(u.deg, u.deg), frame='icrs') offset_coords = SkyCoord(ra=offset_df.offset_ra.values, dec=offset_df.offset_dec, unit=(u.deg, u.deg), frame='icrs') # Calculate position angles and separations (East of North) pos_angles = offset_coords.position_angle(target_coords).to(u.deg) separations = offset_coords.separation(target_coords).to(u.arcsecond) dra, ddec = offset_coords.spherical_offsets_to(target_coords) # UNTIL HERE if verbosity > 1: print('Offset delta ra: {}'.format(dra)) print('Offset delta dec: {}'.format(ddec)) print('Offset separation: {}'.format(separations)) print('Offset position angle: {}'.format(pos_angles)) offset_df.loc[:, 'separation'] = separations.value offset_df.loc[:, 
'pos_angle'] = pos_angles.value offset_df.loc[:, 'dra_offset'] = dra.to(u.arcsecond).value offset_df.loc[:, 'ddec_offset'] = ddec.to(u.arcsecond).value return offset_df[ ['target_name', 'target_ra', 'target_dec', 'offset_name', 'offset_shortname', 'offset_ra', 'offset_dec', mag, 'separation', 'pos_angle', 'dra_offset', 'ddec_offset']] else: print("Offset star for {} not found.".format(target_name)) return pd.DataFrame() def get_offset_stars_astroquery(df, target_name_column, target_ra_column, target_dec_column, radius, catalog='tmass', n=3, quality_query=None, verbosity=0): """Get offset stars for all targets in the input DataFrame using astroquery. :param df: pandas.core.frame.DataFrame Dataframe with targets to retrieve offset stars for :param target_name_column: string Name of the target identifier column :param target_ra_column: string Right ascension column name :param target_dec_column: string Declination column name :param radius: float Maximum search radius in arcseconds :param catalog: string Catalog (and data release) to retrieve the offset star data from. See astroquery_dict for implemented catalogs. :param n: int Number of offset stars to retrieve. (Maximum: n=5) :param quality_query:
#!/bin/bash/python #========================================== # QUANTIFICATION OF TURNOVER ALONG GRADIENTS # By <NAME> # Version 08-June-2018 # Built under python 2.7.11 # April 10th, 2017 #========================================== ##### TO DELETE#### # Not checked yet: # changed taxaTable and metadata to taxaTablePWD and metadataPWD # added minCountTable and minCountBin # added minCountOTU filtering in loading of table # added manual sigthresh # got rid of tree # fixed the way ubiq was calculated; fixed math but also added in a requirement that all ubiq must have half of observations as NON zeros. # changed flow of inter blooms; was impossibly to reach before. Now, it is assessed with other bloom types # changed order of saving typeoutput; was extraneous before. # added bestfitTies file which tells you about warnings # added levene's test to test difference in variances; also introduced new critpVar** need to assess side effects # Maybe get rid of boundaries.txt in future because it seemed extraneous; will have to edit plotting script # Got rid of checkValue because it didn't work as it should; not sure how it should work # Got rid of suppress printing bins because it didn't seem to be necessary; might as well print everything. # changed thresh test to <= for typeTaxa so that you can set to '0' if you want it to be TRULY absent # tried using man whitney U test for means comparisons instead, since the data is very non-parametric. Seems to have more power this way; less things are classified as "noclass" # sometimes manwhitney U test is too sensitive, esp during zero-inflated groups. 
So changed the requirements so that difference of means must be sig AND one of them must be greater than thresh # Changed OTUTabletext.txt into the "after" OTU table, not before.Delete original text one # Edited loading of OTU table so you *could* use non-qiime formats # 11aug: added minSamplePres to get rid of OTUs that are exclusively in X number of samples (default at LEAST 3) ###### # This script takes an OTU table (output from QIIME) and mappingfile with salinity (QIIME-compatible) and produces the following text outputs: # 1. modelBoundaries_type.txt (Rows are unique OTUs; headers are 'OTU ID', 'taxonomy', 'type', 'typeSimple', 'X', 'Y', 'sigAB', 'sigBC', 'sigAC', 'boundaries', 'boundariestwo', 'bloom' ) # 2. boundaries.txt (a list of all boundaries) # 3. taxa_abundances_across_salinity.txt (A table: Rows are taxa and headers are salinity; shows abundance across salinity (averaged for replicate salinities)) # 4. types_across_salinity[_all/condensed].txt (A table: Rows are 'types' and headers are salinity; shows abundance of each type across salinity # 5. OTUTablebyType.txt (OTU Table where rows are 'type' and columns are OTU IDs; meant count data) # 6. taxaIDLegend.txt (List of all unique OTUs and their assigned taxonomy) # 7. gradient.txt (List of names of gradient) # 8. LOG.txt (Printed output of settings for that run) # What QTAG does is: # For each OTU in taxasummaries, finds a best-fit 3-piece model where each piece is the mean relative abundance for that region # - iterates through all X and Y (where X < Y, |X-Y| > diffXY, X > minVal and Y < maxVal) # Then, QTAG decides whether it is a Low,Inter, or High specialist (or none of them) # It does this by comparing the mean relative abundance of three groups (groupA, groupB, groupC), # The three groups are separated by boundaries X and Y. # Then, it uses Welch's T-test to see if there are significant differences in the means between groupA, groupB, and groupC # eg. 
# if groupA > groupC (significant), then it is classified as Low.
# It uses X and Y to calculate the places were each OTU seems to be
# 'turning over' (ie, changing in abundance significantly)

# REQUIREMENTS:
# Imports:
#   numpy
#   math
#   scipy stats
#   argparse
#   os
#   subprocess
#   time sleep
#   sys
# Also:
#   macqiime
# Optional dependencies:
#   R script for plotting

#==========================================

import math
import numpy  # For standard deviation
# Required for BINNING and BINNING SALINITY sections
from scipy import stats  # For welch's t-test
# Required TYPETAXA sections
# from graphics import *
import argparse
import os
import subprocess
from time import sleep
import sys
import copy  # to copy lists and dictionaries

#==========================================
# BEGIN
#==========================================

# FUNCTIONS TO LOAD FILES


def makeTaxaSummaries(taxaTablePWD, metadataPWD):
    # Input is file path for OTU table and metadata
    # Output is
    #  (a) Dictionary where keys are OTU IDs and content is two lists.
    #      (taxaTable)
    #      The first list contains the relative abundance of that OTU across
    #      salinity
    #      The second list contains the corresponding salinities in the first
    #      list
    #  (b) Dictionary where keys are OTU IDs and content is taxonomy (taxaIDs)
    #      Taxonomies are taken from the observation metadata from the OTU
    #      table
    global metadata_name  # header name in the metadata file of the values to be assessed
    global minCountOTUinSample
    global minCountTable
    global minSamplePres
    print("Making and loading taxa table and metadata table...")
    if '.biom' in taxaTablePWD:
        # This is done in BASH
        os.system('biom convert -i ' + taxaTablePWD
                  + ' --to-tsv --header-key taxonomy --table-type="OTU table"'
                  + ' -o ./OTUTableText_temp.txt')
        # NOTE(review): mode changed from 'rU' (removed in Python 3.11) to
        # 'r'; the table is documented below as Unix(LF) format, so the
        # universal-newline behavior is unchanged.
        taxaOpen = open('./OTUTableText_temp.txt', 'r')
    else:
        taxaOpen = open(taxaTablePWD, 'r')
    taxaOpenTemp = []
    for i in taxaOpen:  # Read each line and concatenate into single file
        taxaOpenTemp += [i]
    # A single string; OTU table should be Unix(LF) format (\n at ends)
    taxaOpen.close()
    if '.biom' in taxaTablePWD:
        os.system('rm ./OTUTableText_temp.txt')
    tempList = []
    for j in taxaOpenTemp:  # Each line split by "\n"
        tempLine = j.strip()
        # Make a list of lists; each smaller list is abundance data for each
        # OTU
        tempList += [tempLine.split('\t')]
    while '#OTU ID' not in tempList[0]:
        del tempList[0]  # Deletes first row until the first row is #OTU ID
    # Sort information in relavent piles
    taxaIDs = {}
    taxaCountsTemp = {}
    first = True
    # tempList is a list of lists; smaller list is OTUID + abundance data
    for y in tempList:
        if y[0] == '#OTU ID':  # The first line should be #OTU ID
            # So we will take the site names from the first row
            sites = y[1:len(y)-1]
        else:  # Every other row is abundance data
            taxaIDs[y[0]] = y[len(y)-1]  # Make dictionary of taxaIDs for later
            # Make file of 'total' counts for each OTU
            for x in range(len(y)):
                # If it's not the first element (which is the OTU ID) or last
                # element (which is the taxonomy), then add to list
                if (x != 0) and (x != (len(y)-1)):
                    # First is made 'equal', but everything else is just
                    # appended.
                    if first:
                        taxaCountsTemp[str(x)] = float(y[x])
                    else:
                        taxaCountsTemp[str(x)] += float(y[x])
            first = False
    # Output of this loop is taxaCountsTemp (a dictionary of abundance data),
    # taxaIDs, and sites
    headers = sites[:]
    taxaTable = {}
    for i in range(len(tempList)):
        # Make empty list of lists for each OTU ID
        taxaTable[tempList[i][0]] = [[]]
        if tempList[i][0] == '#OTU ID':
            pass
        else:
            for j in range(1, len(tempList[i])-1):
                # Sum of all abundances to make relative abundances
                # sumAbund = int(taxaCountsTemp[str(j)])
                value = float(tempList[i][j])
                if value < float(minCountOTUinSample):
                    value = 0.0
                # Save values as relative abundances instead of absolute ones
                taxaTable[tempList[i][0]][0].append(value)
    # Now get rid of low abundance in table
    taxaTableFilt = deleteLowAbund(taxaTable, minCountTable, minSamplePres)
    # Get total counts
    totalCounts = [0 for i in range(len(sites))]
    for i in range(len(sites)):
        for taxa in taxaTableFilt:
            totalCounts[i] += taxaTableFilt[taxa][0][i]
    # Convert to relative abundance
    taxaTableFinal = copy.deepcopy(taxaTableFilt)
    for OTU in taxaTableFilt.keys():
        tempOTUlist = [0 for i in range(len(sites))]
        for i in range(len(sites)):
            tempOTUlist[i] = float(taxaTableFilt[OTU][0][i]) / \
                float(totalCounts[i])
        taxaTableFinal[OTU][0] = tempOTUlist
    metadataOpen = open(metadataPWD, 'r')
    metadataOpenTemp = []
    for i in metadataOpen:
        metadataOpenTemp += [i]
    metadataOpen.close()
    tempMeta = []
    for j in metadataOpenTemp:
        tempLine = j.strip()
        tempMeta += [tempLine.split('\t')]
    positionSal = tempMeta[0].index(metadata_name)
    metadata = []
    for line in tempMeta:
        metadata.append([line[0], line[positionSal]])
    # Now, change key names so they're not longer sites; they're gradient
    # values
    for site in metadata:
        sites = [site[1] if x == site[0] else x for x in sites]
    # Make proper format, but with site names instead of numbers
    for taxa in taxaTableFinal:
        taxaTableFinal[taxa].append(sites)
    # Make abundance values integer as well
    for x in taxaTableFinal:
        taxaTableFinal[x] = [[float(strings) for strings in keys]
                             for keys in taxaTableFinal[x]]
    printOTUTable(taxaTableFilt, headers, taxaIDs)
    return taxaTableFinal, taxaIDs


#==========================================
# Delete certain taxa based on total abundance; must be sufficiently abundant
# in order to work


def deleteLowAbund(taxaTable, minCountTable, minSamplePres):
    # Change to absolute threshold, or get rid of entirely.
    # Get rid of show ups in < 3 samples
    newTaxaSummary = {}
    for taxa in taxaTable:
        nonzeroCount = 0
        presCount = 0
        for i in taxaTable[taxa][0]:
            nonzeroCount += i
            if i > 0:
                presCount += 1
        if int(nonzeroCount) >= int(minCountTable) and \
                int(presCount) >= int(minSamplePres):
            newTaxaSummary[taxa] = taxaTable[taxa]
    return newTaxaSummary


#==========================================
# Msc functions


def average(listValues):
    # shortcut for 'average'; finds average for list of numbers
    # returns None for an empty list
    if len(listValues) == 0:
        return None
    else:
        average = float(sum(listValues))/len(listValues)
        return average


def is_numeric(s):
    # Is it a number
    try:
        float(s)
        return True
    except ValueError:
        return False


def printOTUTable(taxaTableFilt, headers, taxaIDs):
    OTUtabletoPrint = open('OTUTableText.txt', 'w')
    toPrint = '#OTUID'
    for head in headers:
        # NOTE(review): this function is truncated at this point in this
        # chunk of the file — the loop body was lost in the paste; restore
        # from the original QTAG source.
        toPrint
terminate_parser = subparsers.add_parser('stop-ad', description=terminate_commanddesc, help=terminate_commanddesc) terminate_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None, help=('id of target workspace deployment' ' (can be unique prefix)')) terminate_parser.add_argument('-f', '--force', action='store_true', default=False, help=('if there is an active instance, then' ' stop it without waiting'), dest='force_terminate') help_message_purge = ('if the server indicates that an instance is active,' ' but there is not one or it is otherwise in a' ' non-recoverable state, then mark it remotely as' ' terminated and attempt local clean-up; this' ' command is a last resort. First, try `hardshare' ' terminate` without --purge.') terminate_parser.add_argument('--purge', action='store_true', default=False, help=help_message_purge, dest='purge_supposed_instance') argv_parsed = argparser.parse_args(argv) if argv_parsed.print_version or argv_parsed.command == 'version': from . import __version__ as hardshare_pkg_version print(hardshare_pkg_version) return 0 elif argv_parsed.command is None or argv_parsed.command == 'help': if hasattr(argv_parsed, 'help_target_command') and argv_parsed.help_target_command is not None: if argv_parsed.help_target_command == 'config': config_parser.print_help() elif argv_parsed.help_target_command == 'rules': rules_parser.print_help() elif argv_parsed.help_target_command == 'register': register_parser.print_help() elif argv_parsed.help_target_command == 'check': check_parser.print_help() elif argv_parsed.help_target_command == 'dissolve': dissolve_parser.print_help() elif argv_parsed.help_target_command == 'status': status_parser.print_help() elif argv_parsed.help_target_command == 'attach-camera': attach_camera_parser.print_help() elif argv_parsed.help_target_command == 'stop-cameras': stop_cameras_parser.print_help() elif argv_parsed.help_target_command == 'addon-cmdsh': addon_cmdsh_parser.print_help() elif 
argv_parsed.help_target_command == 'addon-vnc': addon_vnc_parser.print_help() elif argv_parsed.help_target_command == 'addon-mistyproxy': addon_mistyproxy_parser.print_help() elif argv_parsed.help_target_command == 'ad': advertise_parser.print_help() elif argv_parsed.help_target_command == 'stop-ad': terminate_parser.print_help() else: argparser.print_help() else: argparser.print_help() return 0 if argv_parsed.verbose: pkglogger.setLevel(logging.DEBUG) if argv_parsed.output_format is not None: output_format = argv_parsed.output_format.lower() if output_format not in ['yaml', 'json']: print('output format unrecognized: {}'.format(argv_parsed.output_format)) return 1 else: output_format = None try: ac = HSAPIClient() except: ac = None if argv_parsed.command == 'status': try: config = get_local_config() except: print('error loading configuration data. does it exist?') return 1 if argv_parsed.id_prefix is None: if len(config['wdeployments']) == 0: findings = [WorkspaceInstance.inspect_instance()] else: findings = [] for wd in config['wdeployments']: findings.append(WorkspaceInstance.inspect_instance(wdeployment=wd)) else: findings = [] for m in find_wd(config, argv_parsed.id_prefix, one_or_none=False): findings.append(WorkspaceInstance.inspect_instance(wdeployment=config['wdeployments'][m])) if output_format == 'json': print(json.dumps(findings)) else: # output_format == 'yaml' print(yaml.dump(findings, default_flow_style=False)) elif argv_parsed.command == 'attach-camera': config, indices, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc wdeployments = [config['wdeployments'][jj]['id'] for jj in indices] local_keys = list_local_keys() if len(local_keys) < 1: print('No valid keys available. 
Check: `hardshare config -l`') return 1 with open(local_keys[0], 'rt') as fp: tok = fp.read().strip() if argv_parsed.attach_camera_res: width, height = [int(x) for x in argv_parsed.attach_camera_res.split(',')] if width < 1 or height < 1: print('Width, height must be positive') return 1 else: width, height = None, None if argv_parsed.attach_camera_crop_config: crop = json.loads(argv_parsed.attach_camera_crop_config) else: crop = None if argv_parsed.become_daemon: if os.fork() != 0: return 0 os.close(0) os.close(1) os.close(2) try: camera_main(wdeployments, tok=tok, dev=argv_parsed.camera, width=width, height=height, crop=crop) except ConnectionError: if not argv_parsed.become_daemon: print('ERROR: failed to reach server. Are you connected to the Internet?') return 1 elif argv_parsed.command == 'stop-cameras': local_keys = list_local_keys() if len(local_keys) < 1: print('No valid keys available. Check: `hardshare config -l`') return 1 with open(local_keys[0], 'rt') as fp: tok = fp.read().strip() try: stop_cameras(tok, allcam=argv_parsed.all_cameras) except ConnectionError: print('ERROR: failed to reach server. Are you connected to the Internet?') return 1 elif argv_parsed.command == 'addon-cmdsh': if ac is None: print('cannot register without initial local configuration.' ' (try `hardshare config --create`)') return 1 config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc wdeployment_id = config['wdeployments'][index]['id'] local_keys = list_local_keys() if len(local_keys) < 1: print('No valid keys available. 
Check: `hardshare config -l`') return 1 with open(local_keys[0], 'rt') as fp: tok = fp.read().strip() try: if argv_parsed.add_addon_cmdsh: add_cmdsh(wdeployment_id, tok) elif argv_parsed.rm_addon_cmdsh: rm_cmdsh(wdeployment_id, tok) else: print('Use `hardshare addon-cmdsh` with a switch.') print('To get a help message, enter\n\n hardshare help addon-cmdsh') return 1 except ValueError as err: print('ERROR: {}'.format(err)) return 1 elif argv_parsed.command == 'addon-vnc': if ac is None: print('cannot register without initial local configuration.' ' (try `hardshare config --create`)') return 1 config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc wdeployment_id = config['wdeployments'][index]['id'] local_keys = list_local_keys() if len(local_keys) < 1: print('No valid keys available. Check: `hardshare config -l`') return 1 with open(local_keys[0], 'rt') as fp: tok = fp.read().strip() try: if argv_parsed.add_addon_vnc: add_vnc(wdeployment_id, tok) elif argv_parsed.rm_addon_vnc: rm_vnc(wdeployment_id, tok) else: print('Use `hardshare addon-vnc` with a switch.') print('To get a help message, enter\n\n hardshare help addon-vnc') return 1 except ValueError as err: print('ERROR: {}'.format(err)) return 1 elif argv_parsed.command == 'addon-mistyproxy': if ac is None: print('cannot register without initial local configuration.' ' (try `hardshare config --create`)') return 1 config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc wdeployment_id = config['wdeployments'][index]['id'] local_keys = list_local_keys() if len(local_keys) < 1: print('No valid keys available. 
Check: `hardshare config -l`') return 1 with open(local_keys[0], 'rt') as fp: tok = fp.read().strip() try: if argv_parsed.add_addon_mistyproxy: if argv_parsed.targetaddr is None: print('--ip is required with --add') return 1 add_mistyproxy(wdeployment_id, tok, argv_parsed.targetaddr) elif argv_parsed.rm_addon_mistyproxy: rm_mistyproxy(wdeployment_id, tok) else: print('Use `hardshare addon-mistyproxy` with a switch.') print('To get a help message, enter\n\n hardshare help addon-mistyproxy') return 1 except ValueError as err: print('ERROR: {}'.format(err)) return 1 elif argv_parsed.command == 'ad': if ac is None: print('cannot register without initial local configuration.' ' (try `hardshare config --create`)') return 1 config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc if 'ssh_key' not in config or config['ssh_key'] is None: print('WARNING: local configuration does not declare SSH key.\n' 'Instances with connection type sshtun cannot launch.') pkglogger.removeHandler(loghandler) if argv_parsed.become_daemon: if os.fork() != 0: return 0 os.close(0) os.close(1) os.close(2) else: pkglogger.addHandler(logging.StreamHandler()) logfname = 'hardshare_client.{}.log'.format(config['wdeployments'][index]['id']) loghandler = logging.FileHandler(filename=logfname, mode='a', delay=True) loghandler.setLevel(logging.DEBUG) loghandler.setFormatter(logging.Formatter('%(name)s.%(funcName)s (%(levelname)s) (pid: {});' ' %(asctime)s ; %(message)s' .format(os.getpid()))) pkglogger.addHandler(loghandler) return ac.run_sync(config['wdeployments'][index]['id']) elif argv_parsed.command == 'stop-ad': config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc if argv_parsed.purge_supposed_instance: cprovider = config['wdeployments'][index]['cprovider'] if cprovider == 'proxy': print('--purge not supported for cprovider `proxy`') return 1 elif cprovider not in ['docker', 'podman']: print('unknown cprovider: {}'.format(cprovider)) 
return 1 findings = WorkspaceInstance.inspect_instance(wdeployment=config['wdeployments'][index]) if 'container' in findings: try: subprocess.check_call([cprovider, 'rm', '-f', findings['container']['name']], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) except: print('failed to stop container `{}`'.format(findings['container']['name'])) return 1 return 0 else: print('failed to detect local instance') return 1 else: if ac is None: print('cannot terminate without valid API client') return 1 try: ac.terminate(config['wdeployments'][index]['id']) except FileNotFoundError: print('ERROR: cannot reach daemon. Does it exist? (Try `hardshare status`)') return 1 return 0 elif argv_parsed.command == 'register': if ac is None: print('cannot register without initial local configuration.' ' (try `hardshare config --create`)') return 1 try: print(ac.register_new(at_most_one=argv_parsed.register_at_most_one)) except HSError as err: print('ERROR: {}'.format(err)) return 1 except ConnectionError: print('ERROR: failed to reach server. Are you connected to the Internet?') return 1 elif argv_parsed.command == 'rules': if ac is None: print('no local configuration found. (try `hardshare config -h`)') return 1 if argv_parsed.id_prefix is None: wdid = None else: try: wdid = str(uuid.UUID(argv_parsed.id_prefix)) except: config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: print('The given ID does not appear to be valid.') return 1 wdid = config['wdeployments'][index]['id'] if argv_parsed.list_rules: try: res = ac.get_access_rules(wdid) except Exception as err: print('{}'.format(err)) return 1 if 'err' in res: if res['err'] == 'wrong authorization token': print('wrong API token. 
Did it expire?') else: print(res['err']) return 1 res['comments'] = [ 'Access is denied unless a rule explicitly permits it.', ] if output_format == 'json': print(json.dumps(res)) else: # output_format == 'yaml' print(yaml.dump(res, default_flow_style=False)) elif argv_parsed.drop_all_rules or argv_parsed.add_rule_permit_me: try: if argv_parsed.drop_all_rules: ac.drop_access_rules(wdid) elif argv_parsed.add_rule_permit_me: ac.add_access_rule(wdid) except Exception as err: print('{}'.format(err)) return 1 elif argv_parsed.add_rule_permit_all: ui_input = None while ui_input not in ('y', 'yes'): print('Do you want to permit access by anyone? [y/N] ', end='') ui_input = input().lower() if ui_input in ('n', 'no', ''): return 1 try: ac.add_access_rule(wdid, to_user='*') except Exception as err: print('{}'.format(err)) return 1 else: print('Use `hardshare rules` with a switch. For example, `hardshare rules -l`') print('or to get a help message, enter\n\n hardshare help rules') return 1 elif argv_parsed.command == 'check': if ac is None: print('no local configuration found. (try `hardshare config -h`)') return 1 try: res = ac.check_registration(argv_parsed.id_prefix) except: print('Error occurred while contacting remote server ' 'at {}'.format(ac.base_uri)) return 1 if 'err' in res: if res['err'] == 'not found': print('not found: workspace deployment with id prefix {}' .format(res['id_prefix'])) elif res['err'] == 'wrong authorization token': print('wrong API token. Did it expire?') else: print(res['err']) return 1 else: print('summary of workspace deployment {}'.format(res['id'])) print('\tcreated: {}'.format(res['date_created'])) print('\torigin (address) of registration:
"""
Discord Pybot Support
~~~~~~~~~~~~~~~~~

Support File with stuff

:copyright: (c) 2021-2021 M2rsho
:license: MIT, see LICENSE for more details.
"""

# Support-only module: refuse direct execution.
if __name__ == "__main__":
    print("Huh?")
    # BUG FIX: `exit` was referenced but never called, so execution simply
    # fell through and the module kept running. Raise SystemExit to stop.
    raise SystemExit(0)

from datetime import datetime
from colorama import *
import yaml
from random import choice
from PIL import Image, ImageFont, ImageDraw
from gtts import gTTS
import requests
from io import BytesIO
import os
from pathlib import Path
from werkzeug.utils import secure_filename

# Timestamp captured once at import; names this session's log file.
time = datetime.utcnow()
startup_date = f"{time.day}_{time.month}_{time.year}-{time.hour:02d}-{time.minute:02d}.{time.second:02d}.{time.microsecond:03d}"
startup_timestamp = time.timestamp()

# Directory containing this file, with Windows separators normalised.
path = f"{__file__}".replace("\\", "/")
path = path.replace("/support.py", "")

# BUG FIX: the config file handle was previously opened and never closed;
# a context manager closes it deterministically.
# NOTE(review): config.yaml is a local, trusted file, so FullLoader is kept;
# switch to yaml.safe_load if the config ever comes from users.
with open(f"{path}/config.yaml") as _config_file:
    config = yaml.load(_config_file, Loader=yaml.FullLoader)
prefix = config.get("prefix")
cooldown = config.get("cooldown")

with open(f"{path}/data/alts.txt") as file:
    alts = file.readlines()


async def getAlt():
    """Return one random line from data/alts.txt (loaded once at import)."""
    return choice(alts)


# NOTE: file logging is ENABLED when the config `debug` flag is falsy —
# the inverted name is kept for compatibility with existing callers of log().
debug = not config.get("debug")


def log(date, type, arg1, arg2):
    """Print a colourised console log line and optionally append it to the
    per-session log file.

    :param date: datetime whose time-of-day is shown in the log line
    :param type: level string; "COMMAND" gets special console formatting and
        is recorded as "INFO" in the log file
    :param arg1: source/author of the event
    :param arg2: message or command text
    """
    time = f"{date.hour:02d}:{date.minute:02d}:{date.second:02d}"
    if type == "COMMAND":
        print(
            f"""{Back.BLACK}{Fore.LIGHTYELLOW_EX}{time}{Style.RESET_ALL} [{Fore.LIGHTGREEN_EX}INFO{Style.RESET_ALL}] {Fore.LIGHTYELLOW_EX}{arg1}{Style.RESET_ALL}: {Fore.LIGHTGREEN_EX}Invoked Command{Style.RESET_ALL}: '{Fore.LIGHTWHITE_EX}{Back.LIGHTBLACK_EX}{arg2}{Style.RESET_ALL}'{Style.RESET_ALL}""")
    else:
        print(
            f"""{Back.BLACK}{Fore.LIGHTYELLOW_EX}{time}{Style.RESET_ALL} [{Fore.LIGHTGREEN_EX}{type}{Style.RESET_ALL}] {Fore.LIGHTYELLOW_EX}{arg1}{Style.RESET_ALL}: {Fore.LIGHTGREEN_EX}{arg2}{Style.RESET_ALL}""")
    if debug:
        with open(f"{path}/logs/{startup_date}.log", "a+") as file:
            type = type if type != "COMMAND" else "INFO"
            file.write(f"{time} [{type}] {arg1}: {arg2}\n")


class colours:
    # Embed colour palette used across the bot.
    default = 0x7842ff
    red = 0xff7777
    green = 0x77dd77
    yellow = 0xeb9226


class processing:
    """Image/audio generation helpers; all results are saved under data/temp/."""

    async def GENERATE_CAN(name, text, bottom_text=""):
        """Render `text` and `bottom_text` onto the can template and save it
        as data/temp/<name>.png.

        Returns the saved path, or False when either text exceeds 90 chars.
        """
        if len(text) > 90 or len(bottom_text) > 90:
            return False

        def add_n(text, after: int):
            # Naive hard wrap: insert '\n' every `after` characters.
            x = ""
            for i, letter in enumerate(text):
                if i % after == 0:
                    x += '\n'
                x += letter
            x = x[1:]
            return x

        text = add_n(text, 20)
        bottom_text = add_n(bottom_text, 30)
        W, H = (582, 975)
        font_ = ImageFont.truetype(
            f"{path}/data/resources/fonts/NotoSansJP-Medium.otf", 50)
        font__ = ImageFont.truetype(
            f"{path}/data/resources/fonts/NotoSansJP-Medium.otf", 30)
        img = Image.open(f"{path}/data/resources/templates/can_template.png")
        draw = ImageDraw.Draw(img)
        # NOTE(review): textsize() and Image.ANTIALIAS are removed in
        # Pillow >= 10; pin Pillow < 10 or migrate to textbbox()/LANCZOS.
        w, h = draw.textsize(text, font=font_)
        w2, h2 = draw.textsize(bottom_text, font=font__)
        draw.text(((W-w)/2, 300-(h/2)), text, (255, 255, 0), font=font_)
        draw.text(((W-w2)/2, 700-(h2/2)), bottom_text, (0, 0, 0), font=font__)
        img.save(f"{path}/data/temp/{name}.png")
        return(f"{path}/data/temp/{name}.png")

    async def tts(txt, languag):
        """Synthesise `txt` with gTTS in language `languag`; returns the path
        of the saved mp3 (named after the current UTC timestamp)."""
        date = str(datetime.utcnow()).replace(":", "-")
        speech = gTTS(text=u'{}'.format(txt), lang=languag, slow=False)
        speech.save(f"{path}/data/temp/{date}.mp3")
        return(f"{path}/data/temp/{date}.mp3")

    async def overlay(background_url, foreground, user_id):
        """Download `background_url`, resize it to 1024x1024, and paste the
        local `foreground` image on top; returns the saved png path."""
        response = requests.get(background_url)
        background = Image.open(BytesIO(response.content)).resize((1024, 1024), Image.ANTIALIAS).convert("RGBA")
        foreground = Image.open(foreground).resize((1024, 1024), Image.ANTIALIAS).convert("RGBA")
        background.paste(foreground, (0, 0), foreground)
        background.save(f"{path}/data/temp/{user_id}.png")
        return(f"{path}/data/temp/{user_id}.png")

    async def overlay_position(background_url, foreground, xy, xsys, user_id, image_size):
        """Compose the downloaded background (resized to `xsys`, pasted at
        `xy`) under the local `foreground` on a transparent canvas of
        `image_size`; returns the saved png path."""
        img = Image.new('RGBA', image_size, (255, 0, 0, 0))
        response = requests.get(background_url)
        background = Image.open(BytesIO(response.content)).resize(xsys, Image.ANTIALIAS).convert("RGBA")
        foreground = Image.open(foreground).convert("RGBA")
        img.paste(background, xy, background)
        img.paste(foreground, (0, 0), foreground)
        img.save(f"{path}/data/temp/{user_id}.png")
        return(f"{path}/data/temp/{user_id}.png")

    async def generate_social_credit(value, user_id):
        """Render `value` onto the social-credit template that matches its
        range and save it as data/temp/<user_id>.png; returns that path.

        Deduplicated from four copy-pasted branches: only the template file
        and the font size (35 for positive scores, 50 otherwise) vary.
        """
        if value > 0:
            template, font_size = "10.jpg", 35
        elif -15 < value <= 0:
            template, font_size = "-15.jpg", 50
        elif -30 <= value <= -15:
            template, font_size = "-30.jpg", 50
        else:
            template, font_size = "-100.jpg", 50
        value = str(value)
        img = Image.open(f"{path}/data/resources/templates/socialcredit/{template}").resize((287, 175), Image.ANTIALIAS).convert("RGBA")
        font = ImageFont.truetype(f"{path}/data/resources/fonts/NotoSansJP-Medium.otf", font_size)
        draw = ImageDraw.Draw(img)
        w, h = draw.textsize(value, font=font)
        draw.text(((220-w)/2, 50-(h/2)), value, (255, 255, 255), font=font)
        img.save(f"{path}/data/temp/{user_id}.png")
        return(f"{path}/data/temp/{user_id}.png")


# Kept here (not at top) to preserve the original module layout: the
# `check` class defined immediately below this block uses Session and json.
from requests import Session
import json
sfa_url = 'https://api.mojang.com/user/security/challenges' class check(): def __init__(self, loginpassword): self.result = self.check_alt(loginpassword) def secure_check(self, token): session = Session() headers = {'Pragma': 'no-cache', "Authorization": f"Bearer {token}"} z = session.get(url=sfa_url, headers=headers).text return z == '[]' def check_alt(self, loginpassword): session = Session() alt = loginpassword.split(":", 1) jsonheaders = {"Content-Type": "application/json", 'Pragma': 'no-cache'} email = str(alt[0]).replace("\n", "") password = str(alt[1]).replace("\n", "") payload = ({ "agent": { "name": "Minecraft", "version": 1 }, "username": f"{email}", "password": f"{password}", "requestUser": True }) bad = 'Invalid credentials' answer = session.post(url="https://authserver.mojang.com/authenticate", json=payload, headers=jsonheaders, timeout=10000) if ( bad in answer.text or 'Client sent too many requests too fast.' in answer.text ): return json.loads(answer.text)["errorMessage"] ajson = answer.json() username = ajson['availableProfiles'][0]['name'] token = ajson['accessToken'] uuid = ajson['availableProfiles'][0]["id"] securec = self.secure_check(token) return f''' Original Combo: `{loginpassword}` Username: `{username}` UUID: `{uuid}` Email: `{email}` Password: <PASSWORD>}` Sfa: `{securec}` ''' import sqlite3 class database: def __init__(self, path): self.con = sqlite3.connect(path) self.cur = self.con.cursor() self.cur.execute('''CREATE TABLE IF NOT EXISTS users (id integer, username text, balance integer, banned integer, admin integer, reason text, banned_by text, date text, duration integer, socialCredit integer)''') self.cur.execute('''CREATE TABLE IF NOT EXISTS guilds (id integer, name text, language text, prefix text)''') async def getUser(self, user): u = self.cur.execute(f'''SELECT * FROM users WHERE id=?''', (user.id, )).fetchone() if u is None: self.cur.execute(f'INSERT INTO users VALUES (?, ?, 10000, 0, 0, "None", "None", "None", 0, 1000)', 
(user.id, str(user), )) self.con.commit() return self.cur.execute(f'''SELECT * FROM users WHERE id=?''', (user.id, )).fetchone() def getUserSync(self, user): u = self.cur.execute(f'''SELECT * FROM users WHERE id=?''', (user.id, )).fetchone() if u is None: self.cur.execute(f'INSERT INTO users VALUES (?, ?, 10000, 0, 0, "None", "None", "None", 0, 1000)', (user.id, str(user), )) self.con.commit() return self.cur.execute(f'''SELECT * FROM users WHERE id=?''', (user.id, )).fetchone() async def getAllUsers(self): return self.cur.execute(f'''SELECT * FROM users''').fetchall() def getAllUsers_sync(self): return self.cur.execute(f'''SELECT * FROM users''').fetchall() async def setBalance(self, user, balance: int): await self.getUser(user) self.cur.execute(f'''UPDATE users SET balance=? WHERE id=?''', (balance, user.id)) self.con.commit() async def addBalance(self, user, balance: int): balance = await self.getBalance(user) + balance self.cur.execute(f'''UPDATE users SET balance=? WHERE id=?''', (balance, user.id)) self.con.commit() async def addEveryoneBalance(self, balance: int): self.cur.execute(f'''UPDATE users SET balance=balance+?''', (balance, )) self.con.commit() async def setEveryoneBalance(self, balance: int): self.cur.execute(f'''UPDATE users SET balance=?''', (balance, )) self.con.commit() async def removebalance(self, user, balance: int): balance = await self.getBalance(user) - balance self.cur.execute(f'''UPDATE users SET balance=? WHERE id=?''', (balance, user.id)) self.con.commit() async def getBalance(self, user): balance = (await self.getUser(user))[2] return balance async def banUser(self, user , reason, date, author): await self.getUser(user) self.cur.execute(f'''UPDATE users SET banned=1, reason=?, date=?, banned_by=? 
WHERE id=?''', (str(reason), str(date), str(author), user.id)) self.con.commit() async def unbanUser(self, user): await self.getUser(user) self.cur.execute(f'''UPDATE users SET banned=0, reason="None", date="None", banned_by="None" WHERE id=?''', (user.id,)) self.con.commit() async def opUser(self, user): await self.getUser(user) self.cur.execute(f'''UPDATE users SET admin=1 WHERE id=?''', (user.id,)) self.con.commit() async def deopUser(self, user): await self.getUser(user) self.cur.execute(f'''UPDATE users SET admin=0 WHERE id=?''', (user.id,)) self.con.commit() async def getBanned(self): u = list(self.cur.execute(f'''SELECT id FROM users WHERE banned="1"''').fetchall()) banned = [list(item) for item in u] return banned async def getOps(self): u = list(self.cur.execute(f'''SELECT id FROM users WHERE admin="1"''').fetchall()) ops = [list(item) for item in u] return ops async def getGuild(self, guild): u = self.cur.execute(f'''SELECT * FROM guilds WHERE id=?''', (guild.id, )).fetchone() if u is None: self.cur.execute(f'INSERT INTO guilds VALUES (?, ?, "en.json", "!")', (guild.id, str(guild), )) self.con.commit() return self.cur.execute(f'''SELECT * FROM guilds WHERE id=?''', (guild.id, )).fetchone() def getGuildSync(self, guild): u = self.cur.execute(f'''SELECT * FROM guilds WHERE id=?''', (guild.id, )).fetchone() if u is None: self.cur.execute(f'INSERT INTO guilds VALUES (?, ?, "en.json", "!")', (guild.id, str(guild), )) self.con.commit() return self.cur.execute(f'''SELECT * FROM guilds WHERE id=?''', (guild.id, )).fetchone() def getLanguage(self, guild): try: guild = self.getGuildSync(guild) return guild[2] except: return 'en.json' def getPrefix(self, guild): try: guild = self.getGuildSync(guild) return guild[3] except: return '!' def setPrefix(self, guild, prefix): self.getGuildSync(guild) self.cur.execute(f'''UPDATE guilds SET prefix=? 
WHERE id=?''', (prefix, guild.id)) self.con.commit() return self.getGuildSync(guild)[3] async def setLanguage(self, guild, language): await self.getGuild(guild) self.cur.execute(f'''UPDATE guilds SET language=? WHERE id=?''', (language, guild.id)) self.con.commit() async def setSocialCredit(self, user, credit: int): if credit < 0: credit = 0 await self.getUser(user) self.cur.execute(f'''UPDATE users SET socialCredit=? WHERE id=?''', (credit, user.id)) self.con.commit() return credit async def addSocialCredit(self, user, credit: int): if await self.getSocialCredit(user) + credit <= 0: self.cur.execute(f'''UPDATE users SET socialCredit=0 WHERE id=?''', (user.id, )) self.con.commit() elif await self.getSocialCredit(user) + credit >= 3000: self.cur.execute(f'''UPDATE users SET socialCredit=3000 WHERE id=?''', (user.id, )) self.con.commit() else: self.cur.execute(f'''UPDATE users SET socialCredit=socialCredit+? WHERE id=?''', (credit, user.id, )) self.con.commit() async def addEveryoneSocialCredit(self, credit: int): if credit < 0: credit = 0 self.cur.execute(f'''UPDATE users SET socialCredit=socialCredit+credit''', (credit, )) self.con.commit() return credit async def setEveryoneSocialCredit(self, credit: int): if credit < 0: credit = 0 self.cur.execute(f'''UPDATE users SET socialCredit=?''', (credit, )) self.con.commit() return credit async def removeSocialCredit(self, user, credit: int): if credit < 0: credit = 0 await self.getUser(user) self.cur.execute(f'''UPDATE users SET socialCredit=socialCredit-? 
WHERE id=?''', (credit, user.id)) self.con.commit() return credit async def getSocialCredit(self, user): credit = (await self.getUser(user))[9] return credit def getSocialCreditSync(self, user): credit = (self.getUserSync(user))[9] return credit globalData = database(path=f"{path}/data/database.db") languages = [item for item in os.listdir(f"{path}/data/languages/")] def convertToBitcoin(amount, currency): data = requests.get("http://api.bitcoincharts.com/v1/weighted_prices.json") bitcoins = data.json() converted = amount / float(bitcoins[currency]["24h"]) return converted def getPrefix(client, ctx): try: if config.get("debug") is True: return ["b!", f'<@{client.user.id}> ', f'<@!{client.user.id}> '] return [globalData.getPrefix(ctx.guild), f'<@{client.user.id}>
return pulumi.get(self, "swagger_uri") @property @pulumi.getter(name="aadAuthEnabled") def aad_auth_enabled(self) -> Optional[bool]: """ Whether or not AAD authentication is enabled. """ return pulumi.get(self, "aad_auth_enabled") @property @pulumi.getter(name="appInsightsEnabled") def app_insights_enabled(self) -> Optional[bool]: """ Whether or not Application Insights is enabled. """ return pulumi.get(self, "app_insights_enabled") @property @pulumi.getter(name="authEnabled") def auth_enabled(self) -> Optional[bool]: """ Whether or not authentication is enabled. """ return pulumi.get(self, "auth_enabled") @property @pulumi.getter(name="autoScaler") def auto_scaler(self) -> Optional['outputs.AKSServiceResponseResponseAutoScaler']: """ The auto scaler properties. """ return pulumi.get(self, "auto_scaler") @property @pulumi.getter(name="computeName") def compute_name(self) -> Optional[str]: """ The name of the compute resource. """ return pulumi.get(self, "compute_name") @property @pulumi.getter(name="containerResourceRequirements") def container_resource_requirements(self) -> Optional['outputs.ContainerResourceRequirementsResponse']: """ The container resource requirements. """ return pulumi.get(self, "container_resource_requirements") @property @pulumi.getter(name="dataCollection") def data_collection(self) -> Optional['outputs.AKSServiceResponseResponseDataCollection']: """ Details of the data collection options specified. """ return pulumi.get(self, "data_collection") @property @pulumi.getter(name="deploymentType") def deployment_type(self) -> Optional[str]: """ The deployment type for the service. """ return pulumi.get(self, "deployment_type") @property @pulumi.getter def description(self) -> Optional[str]: """ The service description. 
""" return pulumi.get(self, "description") @property @pulumi.getter(name="environmentImageRequest") def environment_image_request(self) -> Optional['outputs.AKSServiceResponseResponseEnvironmentImageRequest']: """ The Environment, models and assets used for inferencing. """ return pulumi.get(self, "environment_image_request") @property @pulumi.getter(name="isDefault") def is_default(self) -> Optional[bool]: """ Is this the default variant. """ return pulumi.get(self, "is_default") @property @pulumi.getter(name="kvTags") def kv_tags(self) -> Optional[Mapping[str, str]]: """ The service tag dictionary. Tags are mutable. """ return pulumi.get(self, "kv_tags") @property @pulumi.getter(name="livenessProbeRequirements") def liveness_probe_requirements(self) -> Optional['outputs.AKSServiceResponseResponseLivenessProbeRequirements']: """ The liveness probe requirements. """ return pulumi.get(self, "liveness_probe_requirements") @property @pulumi.getter(name="maxConcurrentRequestsPerContainer") def max_concurrent_requests_per_container(self) -> Optional[int]: """ The maximum number of concurrent requests per container. """ return pulumi.get(self, "max_concurrent_requests_per_container") @property @pulumi.getter(name="maxQueueWaitMs") def max_queue_wait_ms(self) -> Optional[int]: """ Maximum time a request will wait in the queue (in milliseconds). After this time, the service will return 503 (Service Unavailable) """ return pulumi.get(self, "max_queue_wait_ms") @property @pulumi.getter def models(self) -> Optional[Sequence['outputs.ModelResponse']]: """ The list of models. """ return pulumi.get(self, "models") @property @pulumi.getter def namespace(self) -> Optional[str]: """ The Kubernetes namespace of the deployment. """ return pulumi.get(self, "namespace") @property @pulumi.getter(name="numReplicas") def num_replicas(self) -> Optional[int]: """ The number of replicas on the cluster. 
""" return pulumi.get(self, "num_replicas") @property @pulumi.getter def properties(self) -> Optional[Mapping[str, str]]: """ The service property dictionary. Properties are immutable. """ return pulumi.get(self, "properties") @property @pulumi.getter(name="scoringTimeoutMs") def scoring_timeout_ms(self) -> Optional[int]: """ The scoring timeout in milliseconds. """ return pulumi.get(self, "scoring_timeout_ms") @property @pulumi.getter(name="trafficPercentile") def traffic_percentile(self) -> Optional[float]: """ The amount of traffic variant receives. """ return pulumi.get(self, "traffic_percentile") @property @pulumi.getter def type(self) -> Optional[str]: """ The type of the variant. """ return pulumi.get(self, "type") @pulumi.output_type class AKSServiceResponseResponseAutoScaler(dict): """ The auto scaler properties. """ @staticmethod def __key_warning(key: str): suggest = None if key == "autoscaleEnabled": suggest = "autoscale_enabled" elif key == "maxReplicas": suggest = "max_replicas" elif key == "minReplicas": suggest = "min_replicas" elif key == "refreshPeriodInSeconds": suggest = "refresh_period_in_seconds" elif key == "targetUtilization": suggest = "target_utilization" if suggest: pulumi.log.warn(f"Key '{key}' not found in AKSServiceResponseResponseAutoScaler. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: AKSServiceResponseResponseAutoScaler.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: AKSServiceResponseResponseAutoScaler.__key_warning(key) return super().get(key, default) def __init__(__self__, *, autoscale_enabled: Optional[bool] = None, max_replicas: Optional[int] = None, min_replicas: Optional[int] = None, refresh_period_in_seconds: Optional[int] = None, target_utilization: Optional[int] = None): """ The auto scaler properties. :param bool autoscale_enabled: Option to enable/disable auto scaling. 
:param int max_replicas: The maximum number of replicas in the cluster. :param int min_replicas: The minimum number of replicas to scale down to. :param int refresh_period_in_seconds: The amount of seconds to wait between auto scale updates. :param int target_utilization: The target utilization percentage to use for determining whether to scale the cluster. """ if autoscale_enabled is not None: pulumi.set(__self__, "autoscale_enabled", autoscale_enabled) if max_replicas is not None: pulumi.set(__self__, "max_replicas", max_replicas) if min_replicas is not None: pulumi.set(__self__, "min_replicas", min_replicas) if refresh_period_in_seconds is not None: pulumi.set(__self__, "refresh_period_in_seconds", refresh_period_in_seconds) if target_utilization is not None: pulumi.set(__self__, "target_utilization", target_utilization) @property @pulumi.getter(name="autoscaleEnabled") def autoscale_enabled(self) -> Optional[bool]: """ Option to enable/disable auto scaling. """ return pulumi.get(self, "autoscale_enabled") @property @pulumi.getter(name="maxReplicas") def max_replicas(self) -> Optional[int]: """ The maximum number of replicas in the cluster. """ return pulumi.get(self, "max_replicas") @property @pulumi.getter(name="minReplicas") def min_replicas(self) -> Optional[int]: """ The minimum number of replicas to scale down to. """ return pulumi.get(self, "min_replicas") @property @pulumi.getter(name="refreshPeriodInSeconds") def refresh_period_in_seconds(self) -> Optional[int]: """ The amount of seconds to wait between auto scale updates. """ return pulumi.get(self, "refresh_period_in_seconds") @property @pulumi.getter(name="targetUtilization") def target_utilization(self) -> Optional[int]: """ The target utilization percentage to use for determining whether to scale the cluster. """ return pulumi.get(self, "target_utilization") @pulumi.output_type class AKSServiceResponseResponseDataCollection(dict): """ Details of the data collection options specified. 
""" @staticmethod def __key_warning(key: str): suggest = None if key == "eventHubEnabled": suggest = "event_hub_enabled" elif key == "storageEnabled": suggest = "storage_enabled" if suggest: pulumi.log.warn(f"Key '{key}' not found in AKSServiceResponseResponseDataCollection. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: AKSServiceResponseResponseDataCollection.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: AKSServiceResponseResponseDataCollection.__key_warning(key) return super().get(key, default) def __init__(__self__, *, event_hub_enabled: Optional[bool] = None, storage_enabled: Optional[bool] = None): """ Details of the data collection options specified. :param bool event_hub_enabled: Option for enabling/disabling Event Hub. :param bool storage_enabled: Option for enabling/disabling storage. """ if event_hub_enabled is not None: pulumi.set(__self__, "event_hub_enabled", event_hub_enabled) if storage_enabled is not None: pulumi.set(__self__, "storage_enabled", storage_enabled) @property @pulumi.getter(name="eventHubEnabled") def event_hub_enabled(self) -> Optional[bool]: """ Option for enabling/disabling Event Hub. """ return pulumi.get(self, "event_hub_enabled") @property @pulumi.getter(name="storageEnabled") def storage_enabled(self) -> Optional[bool]: """ Option for enabling/disabling storage. """ return pulumi.get(self, "storage_enabled") @pulumi.output_type class AKSServiceResponseResponseDeploymentStatus(dict): """ The deployment status. """ @staticmethod def __key_warning(key: str): suggest = None if key == "availableReplicas": suggest = "available_replicas" elif key == "desiredReplicas": suggest = "desired_replicas" elif key == "updatedReplicas": suggest = "updated_replicas" if suggest: pulumi.log.warn(f"Key '{key}' not found in AKSServiceResponseResponseDeploymentStatus. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: AKSServiceResponseResponseDeploymentStatus.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: AKSServiceResponseResponseDeploymentStatus.__key_warning(key) return super().get(key, default) def __init__(__self__, *, available_replicas: Optional[int] = None, desired_replicas: Optional[int] = None, error: Optional['outputs.AKSReplicaStatusResponseError'] = None, updated_replicas: Optional[int] = None): """ The deployment status. :param int available_replicas: The number of available replicas. :param int desired_replicas: The desired number of replicas. :param 'AKSReplicaStatusResponseError' error: The error details. :param int updated_replicas: The number of updated replicas. """ if available_replicas is not None: pulumi.set(__self__, "available_replicas", available_replicas) if desired_replicas is not None: pulumi.set(__self__, "desired_replicas", desired_replicas) if error is not None: pulumi.set(__self__, "error", error) if updated_replicas is not None: pulumi.set(__self__, "updated_replicas", updated_replicas) @property @pulumi.getter(name="availableReplicas") def available_replicas(self) -> Optional[int]: """ The number of available replicas. """ return pulumi.get(self, "available_replicas") @property @pulumi.getter(name="desiredReplicas") def desired_replicas(self) -> Optional[int]: """ The desired number of replicas. """ return pulumi.get(self, "desired_replicas") @property @pulumi.getter def error(self) -> Optional['outputs.AKSReplicaStatusResponseError']: """ The error details. """ return pulumi.get(self, "error") @property @pulumi.getter(name="updatedReplicas") def updated_replicas(self) -> Optional[int]: """ The number of updated replicas. 
""" return pulumi.get(self, "updated_replicas") @pulumi.output_type class AKSServiceResponseResponseEnvironmentImageRequest(dict): """ The Environment, models and assets used for inferencing. """ @staticmethod def __key_warning(key: str): suggest = None if key == "driverProgram": suggest = "driver_program" elif key == "environmentReference": suggest = "environment_reference" elif key == "modelIds": suggest = "model_ids" if suggest: pulumi.log.warn(f"Key '{key}' not found in AKSServiceResponseResponseEnvironmentImageRequest. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: AKSServiceResponseResponseEnvironmentImageRequest.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: AKSServiceResponseResponseEnvironmentImageRequest.__key_warning(key) return super().get(key, default) def __init__(__self__, *, assets: Optional[Sequence['outputs.ImageAssetResponse']] = None, driver_program: Optional[str] = None, environment: Optional['outputs.EnvironmentImageResponseResponseEnvironment'] = None, environment_reference: Optional['outputs.EnvironmentImageResponseResponseEnvironmentReference'] = None, model_ids: Optional[Sequence[str]] = None, models: Optional[Sequence['outputs.ModelResponse']] = None): """ The Environment, models and assets used for inferencing. :param Sequence['ImageAssetResponse'] assets: The list of assets. :param str driver_program: The name of the driver file. :param 'EnvironmentImageResponseResponseEnvironment' environment: The details of the AZURE ML environment. :param 'EnvironmentImageResponseResponseEnvironmentReference' environment_reference: The unique identifying details of the AZURE ML environment. :param Sequence[str] model_ids: The list of model Ids. :param Sequence['ModelResponse'] models: The list of models. 
""" if assets is not None: pulumi.set(__self__, "assets", assets) if driver_program is not None: pulumi.set(__self__, "driver_program", driver_program) if environment is not None: pulumi.set(__self__, "environment", environment) if environment_reference is not None: pulumi.set(__self__, "environment_reference", environment_reference)
"""distutils.fancy_getopt Wrapper around the standard getopt module that provides the following additional features: * short oraz long options are tied together * options have help strings, so fancy_getopt could potentially create a complete usage summary * options set attributes of a dalejed-in object """ zaimportuj sys, string, re zaimportuj getopt z distutils.errors zaimportuj * # Much like command_re w distutils.core, this jest close to but nie quite # the same jako a Python NAME -- except, w the spirit of most GNU # utilities, we use '-' w place of '_'. (The spirit of LISP lives on!) # The similarities to NAME are again nie a coincidence... longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)' longopt_re = re.compile(r'^%s$' % longopt_pat) # For recognizing "negative alias" options, eg. "quiet=!verbose" neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat)) # This jest used to translate long options to legitimate Python identifiers # (dla use jako attributes of some object). longopt_xlate = str.maketrans('-', '_') klasa FancyGetopt: """Wrapper around the standard 'getopt()' module that provides some handy extra functionality: * short oraz long options are tied together * options have help strings, oraz help text can be assembled z them * options set attributes of a dalejed-in object * boolean options can have "negative aliases" -- eg. if --quiet jest the "negative alias" of --verbose, then "--quiet" on the command line sets 'verbose' to false """ def __init__(self, option_table=Nic): # The option table jest (currently) a list of tuples. The # tuples may have 3 albo four values: # (long_option, short_option, help_string [, repeatable]) # jeżeli an option takes an argument, its long_option should have '=' # appended; short_option should just be a single character, no ':' # w any case. If a long_option doesn't have a corresponding # short_option, short_option should be Nic. All option tuples # must have long options. 
self.option_table = option_table # 'option_index' maps long option names to entries w the option # table (ie. those 3-tuples). self.option_index = {} jeżeli self.option_table: self._build_index() # 'alias' records (duh) alias options; {'foo': 'bar'} means # --foo jest an alias dla --bar self.alias = {} # 'negative_alias' keeps track of options that are the boolean # opposite of some other option self.negative_alias = {} # These keep track of the information w the option table. We # don't actually populate these structures until we're ready to # parse the command-line, since the 'option_table' dalejed w here # isn't necessarily the final word. self.short_opts = [] self.long_opts = [] self.short2long = {} self.attr_name = {} self.takes_arg = {} # And 'option_order' jest filled up w 'getopt()'; it records the # original order of options (and their values) on the command-line, # but expands short options, converts aliases, etc. self.option_order = [] def _build_index(self): self.option_index.clear() dla option w self.option_table: self.option_index[option[0]] = option def set_option_table(self, option_table): self.option_table = option_table self._build_index() def add_option(self, long_option, short_option=Nic, help_string=Nic): jeżeli long_option w self.option_index: podnieś DistutilsGetoptError( "option conflict: already an option '%s'" % long_option) inaczej: option = (long_option, short_option, help_string) self.option_table.append(option) self.option_index[long_option] = option def has_option(self, long_option): """Return true jeżeli the option table dla this parser has an option przy long name 'long_option'.""" zwróć long_option w self.option_index def get_attr_name(self, long_option): """Translate long option name 'long_option' to the form it has jako an attribute of some object: ie., translate hyphens to underscores.""" zwróć long_option.translate(longopt_xlate) def _check_alias_dict(self, aliases, what): assert isinstance(aliases, dict) dla (alias, opt) w 
aliases.items(): jeżeli alias nie w self.option_index: podnieś DistutilsGetoptError(("invalid %s '%s': " "option '%s' nie defined") % (what, alias, alias)) jeżeli opt nie w self.option_index: podnieś DistutilsGetoptError(("invalid %s '%s': " "aliased option '%s' nie defined") % (what, alias, opt)) def set_aliases(self, alias): """Set the aliases dla this option parser.""" self._check_alias_dict(alias, "alias") self.alias = alias def set_negative_aliases(self, negative_alias): """Set the negative aliases dla this option parser. 'negative_alias' should be a dictionary mapping option names to option names, both the key oraz value must already be defined w the option table.""" self._check_alias_dict(negative_alias, "negative alias") self.negative_alias = negative_alias def _grok_option_table(self): """Populate the various data structures that keep tabs on the option table. Called by 'getopt()' before it can do anything worthwhile. """ self.long_opts = [] self.short_opts = [] self.short2long.clear() self.repeat = {} dla option w self.option_table: jeżeli len(option) == 3: long, short, help = option repeat = 0 albo_inaczej len(option) == 4: long, short, help, repeat = option inaczej: # the option table jest part of the code, so simply # assert that it jest correct podnieś ValueError("invalid option tuple: %r" % (option,)) # Type- oraz value-check the option names jeżeli nie isinstance(long, str) albo len(long) < 2: podnieś DistutilsGetoptError(("invalid long option '%s': " "must be a string of length >= 2") % long) jeżeli (nie ((short jest Nic) albo (isinstance(short, str) oraz len(short) == 1))): podnieś DistutilsGetoptError("invalid short option '%s': " "must a single character albo Nic" % short) self.repeat[long] = repeat self.long_opts.append(long) jeżeli long[-1] == '=': # option takes an argument? jeżeli short: short = short + ':' long = long[0:-1] self.takes_arg[long] = 1 inaczej: # Is option jest a "negative alias" dla some other option (eg. 
# "quiet" == "!verbose")? alias_to = self.negative_alias.get(long) jeżeli alias_to jest nie Nic: jeżeli self.takes_arg[alias_to]: podnieś DistutilsGetoptError( "invalid negative alias '%s': " "aliased option '%s' takes a value" % (long, alias_to)) self.long_opts[-1] = long # XXX redundant?! self.takes_arg[long] = 0 # If this jest an alias option, make sure its "takes arg" flag jest # the same jako the option it's aliased to. alias_to = self.alias.get(long) jeżeli alias_to jest nie Nic: jeżeli self.takes_arg[long] != self.takes_arg[alias_to]: podnieś DistutilsGetoptError( "invalid alias '%s': inconsistent przy " "aliased option '%s' (one of them takes a value, " "the other doesn't" % (long, alias_to)) # Now enforce some bondage on the long option name, so we can # later translate it to an attribute name on some object. Have # to do this a bit late to make sure we've removed any trailing # '='. jeżeli nie longopt_re.match(long): podnieś DistutilsGetoptError( "invalid long option name '%s' " "(must be letters, numbers, hyphens only" % long) self.attr_name[long] = self.get_attr_name(long) jeżeli short: self.short_opts.append(short) self.short2long[short[0]] = long def getopt(self, args=Nic, object=Nic): """Parse command-line options w args. Store jako attributes on object. If 'args' jest Nic albo nie supplied, uses 'sys.argv[1:]'. If 'object' jest Nic albo nie supplied, creates a new OptionDummy object, stores option values there, oraz returns a tuple (args, object). If 'object' jest supplied, it jest modified w place oraz 'getopt()' just returns 'args'; w both cases, the returned 'args' jest a modified copy of the dalejed-in 'args' list, which jest left untouched. 
""" jeżeli args jest Nic: args = sys.argv[1:] jeżeli object jest Nic: object = OptionDummy() created_object = Prawda inaczej: created_object = Nieprawda self._grok_option_table() short_opts = ' '.join(self.short_opts) spróbuj: opts, args = getopt.getopt(args, short_opts, self.long_opts) wyjąwszy getopt.error jako msg: podnieś DistutilsArgError(msg) dla opt, val w opts: jeżeli len(opt) == 2 oraz opt[0] == '-': # it's a short option opt = self.short2long[opt[1]] inaczej: assert len(opt) > 2 oraz opt[:2] == '--' opt = opt[2:] alias = self.alias.get(opt) jeżeli alias: opt = alias jeżeli nie self.takes_arg[opt]: # boolean option? assert val == '', "boolean option can't have value" alias = self.negative_alias.get(opt) jeżeli alias: opt = alias val = 0 inaczej: val = 1 attr = self.attr_name[opt] # The only repeating option at the moment jest 'verbose'. # It has a negative option -q quiet, which should set verbose = 0. jeżeli val oraz self.repeat.get(attr) jest nie Nic: val = getattr(object, attr, 0) + 1 setattr(object, attr, val) self.option_order.append((opt, val)) # dla opts jeżeli created_object: zwróć args, object inaczej: zwróć args def get_option_order(self): """Returns the list of (option, value) tuples processed by the previous run of 'getopt()'. Raises RuntimeError if 'getopt()' hasn't been called yet. """ jeżeli self.option_order jest Nic: podnieś RuntimeError("'getopt()' hasn't been called yet") inaczej: zwróć self.option_order def generate_help(self, header=Nic): """Generate help text (a list of strings, one per suggested line of output) z
<reponame>seawander/nmf_imaging from NonnegMFPy import nmf import numpy as np import os from astropy.io import fits def columnize(data, mask = None): """ Columnize an image or an image cube, excluding the masked out pixels Inputs: data: (n * height * width) or (height * width) mask: height * width Output: columnized: (n_pixel * n) where n_pixel is the number of unmasked pixels """ if len(data.shape) == 2: #indicating we are flattending an image rather than a cube. if mask is None: mask = np.ones(data.shape) mask[mask < 0.9] = 0 mask[mask != 0] = 1 #clean the mask mask_flt = mask.flatten() data_flt = data.flatten() columnized = np.zeros((int(np.prod(data.shape)-np.prod(mask.shape)+np.nansum(mask)), 1)) columnized[:, 0] = data_flt[mask_flt == 1] return columnized elif len(data.shape) == 3: #indicating we are vectorizing an image cube if mask is None: mask = np.ones(data.shape[1:]) mask[mask < 0.9] = 0 mask[mask != 0] = 1 #clean the mask mask_flt = mask.flatten() columnized = np.zeros((int(np.prod(data.shape[1:])-np.prod(mask.shape)+np.nansum(mask)), data.shape[0])) for i in range(data.shape[0]): data_flt = data[i].flatten() columnized[:, i] = data_flt[mask_flt == 1] return columnized def decolumnize(data, mask): """Decolumize either the components or the modelling result. i.e., to an image! 
    data: NMF components or modelling result
    mask: must be given to restore the proper shape
    """
    mask_flatten = mask.flatten()
    if (len(data.shape) == 1) or (data.shape[1] == 1): # single column to decolumnize
        # flatten() returns a copy, so writing into mask_flatten does not touch
        # the caller's mask; data values are scattered into the unmasked (== 1)
        # positions, masked positions keep their 0 from the mask itself.
        mask_flatten[np.where(mask_flatten == 1)] = data.flatten()
        return mask_flatten.reshape(mask.shape)
    else: # several columns to decolumnize
        result = np.zeros((data.shape[1], mask.shape[0], mask.shape[1]))
        for i in range(data.shape[1]):
            # Start each output image from a fresh copy of the flattened mask
            # and scatter column i of the data into the unmasked pixels.
            results_flatten = np.copy(mask_flatten)
            results_flatten[np.where(mask_flatten == 1)] = data[:, i]
            result[i] = results_flatten.reshape(mask.shape)
        return result


def NMFcomponents(ref, ref_err = None, mask = None, n_components = None, maxiters = 1e3, oneByOne = False, path_save = None):
    """ref and ref_err should be (n * height * width) where n is the number of references. Mask is the region we are interested in.
    if mask is a 3D array (binary, 0 and 1), then you can mask out different regions in the ref.
    if path_save is provided, then the code will start from there.
""" if ref_err is None: ref_err = np.sqrt(ref) if mask is None: mask = np.ones(ref.shape[1:]) if (n_components is None) or (n_components > ref.shape[0]): n_components = ref.shape[0] mask[mask < 0.9] = 0 mask[mask != 0] = 1 ref[ref < 0] = 0 ref_err[ref <= 0] = np.nanpercentile(ref_err, 95)*10 #Setting the err of <= 0 pixels to be max error to reduce their impact if len(mask.shape) == 2: ref[np.isnan(ref)] = 0 ref[~np.isfinite(ref)] = 0 ref_err[ref <= 0] = np.nanpercentile(ref_err, 95)*10 #handling bad values in 2D mask case ref_columnized = columnize(ref, mask = mask) ref_err_columnized = columnize(ref_err, mask = mask) elif len(mask.shape) == 3: # ADI data imputation case, or the case where some regions must be masked out mask[ref <= 0] = 0 mask[np.isnan(ref)] = 0 mask[~np.isfinite(ref)] = 0 # handling bad values in 3D mask case mask_mark = np.nansum(mask, axis = 0) # This new mask is used to identify the regions that are masked out in all refs mask_mark[mask_mark != 0] = 1 # 1 means that there is coverage in at least one of the refs ref_columnized = columnize(ref, mask = mask_mark) ref_err_columnized = columnize(ref_err, mask = mask_mark) mask_columnized = np.array(columnize(mask, mask = mask_mark), dtype = bool) components_column = 0 if not oneByOne: print("Building components NOT one by one... If you want the one-by-one method (suggested), please set oneByOne = True.") if len(mask.shape) == 2: g_img = nmf.NMF(ref_columnized, V=1.0/ref_err_columnized**2, n_components=n_components) chi2, time_used = g_img.SolveNMF(maxiters=maxiters) components_column = g_img.W/np.sqrt(np.nansum(g_img.W**2, axis = 0)) #normalize the components components = decolumnize(components_column, mask = mask) elif len(mask.shape) == 3: # different missing data at different references. 
g_img = nmf.NMF(ref_columnized, V=1.0/ref_err_columnized**2, M = mask_columnized, n_components=n_components) chi2, time_used = g_img.SolveNMF(maxiters=maxiters) components_column = g_img.W/np.sqrt(np.nansum(g_img.W**2, axis = 0)) #normalize the components components = decolumnize(components_column, mask = mask_mark) # ignore the regions that are commonly masked out in all refs #The above line is changed on 2021-11-12 PST to speed up calculation for i in range(components.shape[0]): components[i][np.where(mask_mark == 0)] = np.nan components = (components.T/np.sqrt(np.nansum(components**2, axis = (1, 2))).T).T else: print("Building components one by one...") if len(mask.shape) == 2: if path_save is None: for i in range(n_components): print("\t" + str(i+1) + " of " + str(n_components)) n = i + 1 if (i == 0): g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, n_components= n) else: W_ini = np.random.rand(ref_columnized.shape[0], n) W_ini[:, :(n-1)] = np.copy(g_img.W) W_ini = np.array(W_ini, order = 'F') #Fortran ordering, column elements contiguous in memory. H_ini = np.random.rand(n, ref_columnized.shape[1]) H_ini[:(n-1), :] = np.copy(g_img.H) H_ini = np.array(H_ini, order = 'C') #C ordering, row elements contiguous in memory. 
g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, W = W_ini, H = H_ini, n_components= n) chi2 = g_img.SolveNMF(maxiters=maxiters) components_column = g_img.W/np.sqrt(np.nansum(g_img.W**2, axis = 0)) #normalize the components else: print('\t path_save provided, you might want to load data and continue previous component calculation') print('\t\t loading from ' + path_save + '_comp.fits for components.') if not os.path.exists(path_save + '_comp.fits'): print('\t\t ' + path_save + '_comp.fits does not exist, calculating from scratch.') for i in range(n_components): print("\t" + str(i+1) + " of " + str(n_components)) n = i + 1 if (i == 0): g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, n_components= n) else: W_ini = np.random.rand(ref_columnized.shape[0], n) W_ini[:, :(n-1)] = np.copy(g_img.W) W_ini = np.array(W_ini, order = 'F') #Fortran ordering, column elements contiguous in memory. H_ini = np.random.rand(n, ref_columnized.shape[1]) H_ini[:(n-1), :] = np.copy(g_img.H) H_ini = np.array(H_ini, order = 'C') #C ordering, row elements contiguous in memory. g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, W = W_ini, H = H_ini, n_components= n) chi2 = g_img.SolveNMF(maxiters=maxiters) print('\t\t\t Calculation for ' + str(n) + ' components done, overwriting raw 2D component matrix at ' + path_save + '_comp.fits') fits.writeto(path_save + '_comp.fits', g_img.W, overwrite = True) print('\t\t\t Calculation for ' + str(n) + ' components done, overwriting raw 2D coefficient matrix at ' + path_save + '_coef.fits') fits.writeto(path_save + '_coef.fits', g_img.H, overwrite = True) components_column = g_img.W/np.sqrt(np.nansum(g_img.W**2, axis = 0)) #normalize the components else: W_assign = fits.getdata(path_save + '_comp.fits') H_assign = fits.getdata(path_save + '_coef.fits') if W_assign.shape[1] >= n_components: print('You have already had ' + str(W_assign.shape[1]) + ' components while asking for ' + str(n_components) + '. 
Returning to your input.') components_column = W_assign/np.sqrt(np.nansum(W_assign**2, axis = 0)) components = decolumnize(components_column, mask = mask) else: print('You are asking for ' + str(n_components) + ' components. Building the rest based on the ' + str(W_assign.shape[1]) + ' provided.') for i in range(W_assign.shape[1], n_components): print("\t" + str(i+1) + " of " + str(n_components)) n = i + 1 if (i == W_assign.shape[1]): W_ini = np.random.rand(ref_columnized.shape[0], n) W_ini[:, :(n-1)] = np.copy(W_assign) W_ini = np.array(W_ini, order = 'F') #Fortran ordering, column elements contiguous in memory. H_ini = np.random.rand(n, ref_columnized.shape[1]) H_ini[:(n-1), :] = np.copy(H_assign) H_ini = np.array(H_ini, order = 'C') #C ordering, row elements contiguous in memory. g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, W = W_ini, H = H_ini, n_components= n) else: W_ini = np.random.rand(ref_columnized.shape[0], n) W_ini[:, :(n-1)] = np.copy(g_img.W) W_ini = np.array(W_ini, order = 'F') #Fortran ordering, column elements contiguous in memory. H_ini = np.random.rand(n, ref_columnized.shape[1]) H_ini[:(n-1), :] = np.copy(g_img.H) H_ini = np.array(H_ini, order = 'C') #C ordering, row elements contiguous in memory. g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, W = W_ini, H = H_ini, n_components= n) chi2 = g_img.SolveNMF(maxiters=maxiters) print('\t\t\t Calculation for ' + str(n) + ' components done, overwriting raw 2D component matrix at ' + path_save + '_comp.fits') fits.writeto(path_save + '_comp.fits', g_img.W, overwrite = True) print('\t\t\t Calculation for ' + str(n) + ' components done, overwriting raw 2D coefficient matrix at ' + path_save + '_coef.fits') fits.writeto(path_save + '_coef.fits', g_img.H, overwrite = True) components_column = g_img.W/np.sqrt(np.nansum(g_img.W**2, axis = 0)) #normalize the components components = decolumnize(components_column, mask = mask) elif len(mask.shape) == 3: #
if board.arBoard[tcord] == EMPTY: if (color == BLACK and 2 * 8 <= tcord < 3 * 8) or ( color == WHITE and 5 * 8 <= tcord < 6 * 8 ): flag = ENPASSANT else: raise ParsingError( san, _("pawn capture without target piece is invalid"), board.asFen(), ) else: if not notat[-2:] in cordDic: raise ParsingError( san, _("the end cord (%s) is incorrect") % notat[-2:], board.asFen() ) tcord = cordDic[notat[-2:]] notat = notat[:-2] # In suicide promoting to king is valid, so # more than 1 king per side can exist ! if ( board.variant != SUICIDECHESS and board.variant != GIVEAWAYCHESS and piece == KING ): return newMove(board.kings[color], tcord, flag) # If there is any extra location info, like in the move Bexd1 or Nh3f4 we # want to know frank = None ffile = None if notat and notat[0] in reprRank: frank = int(notat[0]) - 1 notat = notat[1:] if notat and notat[0] in reprFile: ffile = ord(notat[0]) - ord("a") notat = notat[1:] if notat and notat[0] in reprRank: frank = int(notat[0]) - 1 notat = notat[1:] # we know all we want return newMove(frank * 8 + ffile, tcord, flag) if piece == PAWN: if (ffile is not None) and ffile != FILE(tcord): # capture if color == WHITE: fcord = tcord - 7 if ffile > FILE(tcord) else tcord - 9 else: fcord = tcord + 7 if ffile < FILE(tcord) else tcord + 9 else: if color == WHITE: pawns = board.boards[WHITE][PAWN] # In horde white pawns on first rank may move two squares also if ( board.variant == HORDECHESS and RANK(tcord) == 2 and not (pawns & fileBits[FILE(tcord)] & rankBits[1]) ): fcord = tcord - 16 else: fcord = ( tcord - 16 if RANK(tcord) == 3 and not (pawns & fileBits[FILE(tcord)] & rankBits[2]) else tcord - 8 ) else: pawns = board.boards[BLACK][PAWN] fcord = ( tcord + 16 if RANK(tcord) == 4 and not (pawns & fileBits[FILE(tcord)] & rankBits[5]) else tcord + 8 ) if board.variant == SITTUYINCHESS and flag == QUEEN_PROMOTION: if pawns & fileBits[FILE(tcord)] & rankBits[RANK(tcord)]: # in place promotion return newMove(tcord, tcord, flag) else: # queen 
move promotion (fcord have to be the closest cord of promotion zone)
                    fcord = sittuyin_promotion_fcord(board, tcord)
                    return newMove(fcord, tcord, flag)
            return newMove(fcord, tcord, flag)
    else:
        if board.pieceCount[color][piece] == 1:
            # we have only one piece of this kind, so:
            fcord = firstBit(board.boards[color][piece])
            return newMove(fcord, tcord, flag)
        else:
            # We find all pieces who could have done it. (If san was legal, there should
            # never be more than one)
            moves = genPieceMoves(board, piece, tcord)
            if len(moves) == 1:
                return moves.pop()
            else:
                for move in moves:
                    f = FCORD(move)
                    # Discard candidates that contradict the disambiguation
                    # rank/file supplied in the SAN string.
                    if frank is not None and frank != RANK(f):
                        continue
                    if ffile is not None and ffile != FILE(f):
                        continue
                    # A candidate leaving our own king in check cannot be the
                    # legal move the SAN string meant.
                    board_clone = board.clone()
                    board_clone.applyMove(move)
                    if board_clone.opIsChecked():
                        continue
                    return move

    errstring = "no %s is able to move to %s" % (reprPiece[piece], reprCord[tcord])
    raise ParsingError(san, errstring, board.asFen())


################################################################################
# toLan                                                                        #
################################################################################

def toLAN(board, move, localRepr=False):
    """Return a Long/Expanded Algebraic Notation string of a move.

    board should be prior to the move.
    localRepr -- use the localized piece letters instead of the English ones.
    """
    fcord = FCORD(move)
    tcord = TCORD(move)
    flag = FLAG(move)
    # For drops the "from" field encodes the dropped piece, not a square.
    fpiece = fcord if flag == DROP else board.arBoard[fcord]

    s = ""
    # Pawns carry no piece letter (unless being dropped).
    if fpiece != PAWN or flag == DROP:
        # Pick the piece-letter table for the active chess variant.
        if board.variant in (CAMBODIANCHESS, MAKRUKCHESS):
            s = reprSignMakruk[fpiece]
        elif board.variant == SITTUYINCHESS:
            s = reprSignSittuyin[fpiece]
        elif localRepr:
            s = localReprSign[fpiece]
        else:
            s = reprSign[fpiece]

    if flag == DROP:
        s += "@"
    else:
        s += reprCord[FCORD(move)]
        # "-" for a quiet move, "x" for a capture.
        if board.arBoard[tcord] == EMPTY:
            s += "-"
        else:
            s += "x"

    s += reprCord[tcord]

    if flag in PROMOTIONS:
        s += "=" + reprSign[PROMOTE_PIECE(flag)]

    return s


################################################################################
# parseLan                                                                     #
################################################################################

def parseLAN(board, lan):
    """Parse a Long/Expanded Algebraic Notation string into a move."""
    # To parse LAN pawn moves like "e2-e4" as SAN moves, we have to remove a few
    # fields
    if len(lan) == 5:
        if "x" in lan:
            # e4xd5 -> exd5
            return parseSAN(board, lan[0] + lan[3:])
        else:
            # e2-e4 -> e4
            return parseSAN(board, lan[3:])

    # We want to use the SAN parser for LAN moves like "Nb1-c3" or "Rd3xd7"
    # The san parser should be able to handle most stuff, as long as we remove
    # the slash
    if not lan.upper().startswith("O-O") and not lan.startswith("--"):
        lan = lan.replace("-", "")
    return parseSAN(board, lan)


################################################################################
# toAN                                                                         #
################################################################################

def toAN(board, move, short=False, castleNotation=CASTLE_SAN):
    """ Returns a Algebraic Notation string of a move
        board should be prior to the move
        short -- returns the short variant, e.g. f7f8q rather than f7f8=Q """
    fcord = (move >> 6) & 63
    tcord = move & 63
    flag = move >> 12

    if flag in (KING_CASTLE, QUEEN_CASTLE):
        if castleNotation == CASTLE_SAN:
            return flag == KING_CASTLE and "O-O" or "O-O-O"
        elif castleNotation == CASTLE_KR:
            # King-takes-rook style: target square is the castling rook's cord.
            rooks = board.ini_rooks[board.color]
            tcord = rooks[flag == KING_CASTLE and 1 or 0]
        # No treatment needed for CASTLE_KK

    if flag == DROP:
        # For drops, fcord encodes the piece being dropped.
        if board.variant == SITTUYINCHESS:
            s = "%s@%s" % (reprSignSittuyin[fcord], reprCord[tcord])
        else:
            s = "%s@%s" % (reprSign[fcord], reprCord[tcord])
    else:
        s = reprCord[fcord] + reprCord[tcord]

    if flag in PROMOTIONS:
        # Variant-specific promoted-piece letter; lowercase in the short form.
        if short:
            if board.variant in (CAMBODIANCHESS, MAKRUKCHESS):
                s += reprSignMakruk[PROMOTE_PIECE(flag)].lower()
            elif board.variant == SITTUYINCHESS:
                s += reprSignSittuyin[PROMOTE_PIECE(flag)].lower()
            else:
                s += reprSign[PROMOTE_PIECE(flag)].lower()
        else:
            if board.variant in (CAMBODIANCHESS, MAKRUKCHESS):
                s += "=" + reprSignMakruk[PROMOTE_PIECE(flag)]
            elif board.variant == SITTUYINCHESS:
                s += "=" + reprSignSittuyin[PROMOTE_PIECE(flag)]
            else:
                s += "=" + reprSign[PROMOTE_PIECE(flag)]

    return s


################################################################################
# parseAN                                                                      #
################################################################################

def parseAN(board, an):
    """Parse an Algebraic Notation string (e.g. e2e4, f7f8q, f7f8=Q, N@c3)."""
    if not 4 <= len(an) <= 6:
        raise ParsingError(an, "the move must be 4 or 6 chars long", board.asFen())

    if "@" in an:
        # Drop move: piece letter, "@", target square.
        tcord = cordDic[an[-2:]]
        if an[0].islower():
            # Sjeng-ism
            piece = chr2Sign[an[0]]
        else:
            piece = chrU2Sign[an[0]]
        return newMove(piece, tcord, DROP)

    try:
        fcord = cordDic[an[:2]]
        tcord = cordDic[an[2:4]]
    except KeyError as e:
        raise ParsingError(an, "the cord (%s) is incorrect" % e.args[0], board.asFen())

    flag = NORMAL_MOVE

    # Validate the promotion letter; in suicide/giveaway a king promotion is
    # also legal.
    if len(an) > 4 and not an[-1] in "QRBNMSFqrbnmsf":
        if (
            (board.variant != SUICIDECHESS and board.variant != GIVEAWAYCHESS)
            or (board.variant == SUICIDECHESS or board.variant == GIVEAWAYCHESS)
            and not an[-1] in "Kk"
        ):
            raise ParsingError(an, "invalid promoted piece", board.asFen())

    if len(an) == 5:
        # The a7a8q variant
        flag = chr2Sign[an[4].lower()] + 2
    elif len(an) == 6:
        # The a7a8=q variant
        flag = chr2Sign[an[5].lower()] + 2
    elif board.arBoard[fcord] == KING:
        # A two-square king move is castling.
        if fcord - tcord == 2:
            flag = QUEEN_CASTLE
            if board.variant == FISCHERRANDOMCHESS:
                tcord = board.ini_rooks[board.color][0]
        elif fcord - tcord == -2:
            flag = KING_CASTLE
            if board.variant == FISCHERRANDOMCHESS:
                tcord = board.ini_rooks[board.color][1]
        elif board.arBoard[tcord] == ROOK:
            # King "captures" own rook: king-takes-rook castling encoding.
            color = board.color
            friends = board.friends[color]
            if bitPosArray[tcord] & friends:
                if board.ini_rooks[color][0] == tcord:
                    flag = QUEEN_CASTLE
                else:
                    flag = KING_CASTLE
            else:
                flag = NORMAL_MOVE
    elif (
        board.arBoard[fcord] == PAWN
        and board.arBoard[tcord] == EMPTY
        and FILE(fcord) != FILE(tcord)
        and RANK(fcord) != RANK(tcord)
    ):
        # Diagonal pawn move onto an empty square: en passant.
        flag = ENPASSANT
    elif board.arBoard[fcord] == PAWN:
        if an[3] in "18" and board.variant != SITTUYINCHESS:
            # Bare pawn move to the last rank defaults to a queen promotion.
            flag = QUEEN_PROMOTION

    return newMove(fcord, tcord, flag)


################################################################################
# toFAN                                                                        #
################################################################################

# Translation tables from SAN piece letters (and check markers) to the
# figurine (Unicode chess glyph) characters, one table per side.
san2WhiteFanDic = {
    ord("K"): FAN_PIECES[WHITE][KING],
    ord("Q"): FAN_PIECES[WHITE][QUEEN],
    ord("M"): FAN_PIECES[WHITE][QUEEN],
    ord("F"): FAN_PIECES[WHITE][QUEEN],
    ord("R"): FAN_PIECES[WHITE][ROOK],
    ord("B"): FAN_PIECES[WHITE][BISHOP],
    ord("S"): FAN_PIECES[WHITE][BISHOP],
    ord("N"): FAN_PIECES[WHITE][KNIGHT],
    ord("P"): FAN_PIECES[WHITE][PAWN],
    ord("+"): "†",
    ord("#"): "‡",
}

san2BlackFanDic = {
    ord("K"): FAN_PIECES[BLACK][KING],
    ord("Q"): FAN_PIECES[BLACK][QUEEN],
    ord("M"): FAN_PIECES[BLACK][QUEEN],
    ord("F"): FAN_PIECES[BLACK][QUEEN],
    ord("R"): FAN_PIECES[BLACK][ROOK],
    ord("B"): FAN_PIECES[BLACK][BISHOP],
    ord("S"): FAN_PIECES[BLACK][BISHOP],
    ord("N"): FAN_PIECES[BLACK][KNIGHT],
    ord("P"): FAN_PIECES[BLACK][PAWN],
    ord("+"): "†",
    ord("#"): "‡",
}


def toFAN(board, move):
    """Return a Figurine Algebraic Notation string of a move."""
    san = toSAN(board, move)
    # NOTE(review): always translates with the white figurine set; the black
    # dictionary is defined but unused here -- confirm this is intended.
    return san.translate(san2WhiteFanDic)


################################################################################
# parseFAN                                                                     #
################################################################################

# Inverse table: figurine glyph (either color) back to its SAN letter.
fan2SanDic = {}
for k, v in san2WhiteFanDic.items():
    fan2SanDic[ord(v)] = chr(k)
for k, v in san2BlackFanDic.items():
    fan2SanDic[ord(v)] = chr(k)


def parseFAN(board, fan):
    """Parse a Figurine Algebraic Notation string by reducing it to SAN."""
    san = fan.translate(fan2SanDic)
    return parseSAN(board, san)


################################################################################
# toPolyglot                                                                   #
################################################################################

def toPolyglot(board, move):
    """Return a 16-bit Polyglot-format move.

    board should be prior to the move. Low 12 bits are from/to squares;
    promotions store the promoted piece in the top bits, and castling is
    encoded as king-takes-own-rook.
    """
    pg = move & 4095
    if FLAG(move) in PROMOTIONS:
        pg |= (PROMOTE_PIECE(FLAG(move)) - 1) << 12
    elif FLAG(move) == QUEEN_CASTLE:
        # Replace the target square with the queenside rook's square.
        pg = (pg & 4032) | board.ini_rooks[board.color][0]
    elif FLAG(move) == KING_CASTLE:
        pg = (pg & 4032) |
# pylint: disable=all
# pylint: disable=missing-docstring
# flake8: noqa
"""Unit tests for the ABY3 three-party MPC protocol of tf_encrypted.

Each test builds a small TF1 graph with secret-shared tensors and checks the
revealed plaintext result against the equivalent NumPy computation.
"""
import os
import tempfile
import unittest

import numpy as np
import tensorflow as tf

import tf_encrypted as tfe
from tf_encrypted.protocol.aby3 import ABY3
from tf_encrypted.protocol.aby3 import ARITHMETIC
from tf_encrypted.protocol.aby3 import BOOLEAN


class TestABY3(unittest.TestCase):
    def test_add_private_private(self):
        tf.reset_default_graph()
        prot = ABY3()
        tfe.set_protocol(prot)

        def provide_input():
            return tf.ones(shape=(2, 2)) * 1.3

        # define inputs
        x = tfe.define_private_variable(tf.ones(shape=(2, 2)))
        y = tfe.define_private_input("input-provider", provide_input)

        # define computation
        z = x + y

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z.reveal())
            # Should be [[2.3, 2.3], [2.3, 2.3]]
            expected = np.array([[2.3, 2.3], [2.3, 2.3]])
            np.testing.assert_allclose(result, expected, rtol=0.0, atol=0.01)

    def test_add_private_public(self):
        tf.reset_default_graph()
        prot = ABY3()
        tfe.set_protocol(prot)

        # define inputs
        x = tfe.define_private_variable(tf.ones(shape=(2, 2)))
        y = tfe.define_constant(np.array([[0.6, 0.7], [0.8, 0.9]]))

        # define computation: exercises both private+public and public+private
        z = x + y
        z = y + z

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z.reveal())
            expected = np.array([[2.2, 2.4], [2.6, 2.8]])
            np.testing.assert_allclose(result, expected, rtol=0.0, atol=0.01)

    def test_sub_private_private(self):
        tf.reset_default_graph()
        prot = ABY3()
        tfe.set_protocol(prot)

        def provide_input():
            return tf.ones(shape=(2, 2)) * 1.3

        x = tfe.define_private_variable(tf.ones(shape=(2, 2)))
        y = tfe.define_private_input("input-provider", provide_input)

        z = x - y

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z.reveal())
            expected = np.array([[-0.3, -0.3], [-0.3, -0.3]])
            np.testing.assert_allclose(result, expected, rtol=0.0, atol=0.01)

    def test_sub_private_public(self):
        tf.reset_default_graph()
        prot = ABY3()
        tfe.set_protocol(prot)

        # define inputs
        x = tfe.define_private_variable(tf.ones(shape=(2, 2)))
        y = tfe.define_constant(np.array([[0.6, 0.7], [0.8, 0.9]]))

        # define computation: both operand orders
        z1 = x - y
        z2 = y - x

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z1.reveal())
            z1_exp = np.array([[0.4, 0.3], [0.2, 0.1]])
            np.testing.assert_allclose(result, z1_exp, rtol=0.0, atol=0.01)
            result = sess.run(z2.reveal())
            z2_exp = np.array([[-0.4, -0.3], [-0.2, -0.1]])
            np.testing.assert_allclose(result, z2_exp, rtol=0.0, atol=0.01)

    def test_neg(self):
        tf.reset_default_graph()
        prot = ABY3()
        tfe.set_protocol(prot)

        # define inputs: one private, one public
        x = tfe.define_private_variable(np.array([[0.6, -0.7], [-0.8, 0.9]]))
        y = tfe.define_constant(np.array([[0.6, -0.7], [-0.8, 0.9]]))

        # define computation
        z1 = -x
        z2 = -y

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z1.reveal())
            z1_exp = np.array([[-0.6, 0.7], [0.8, -0.9]])
            np.testing.assert_allclose(result, z1_exp, rtol=0.0, atol=0.01)
            # public tensor: no reveal needed
            result = sess.run(z2)
            z2_exp = np.array([[-0.6, 0.7], [0.8, -0.9]])
            np.testing.assert_allclose(result, z2_exp, rtol=0.0, atol=0.01)

    def test_mul_private_public(self):
        tf.reset_default_graph()
        prot = ABY3()
        tfe.set_protocol(prot)

        # define inputs
        x = tfe.define_private_variable(tf.ones(shape=(2, 2)) * 2)
        y = tfe.define_constant(np.array([[0.6, 0.7], [0.8, 0.9]]))
        w = tfe.define_constant(np.array([[2, 2], [2, 2]]))

        # define computation
        z1 = y * x  # mul_public_private
        z2 = z1 * w  # mul_private_public

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z2.reveal())
            np.testing.assert_allclose(
                result, np.array([[2.4, 2.8], [3.2, 3.6]]), rtol=0.0, atol=0.01
            )

    def test_mul_private_private(self):
        tf.reset_default_graph()
        prot = ABY3()
        tfe.set_protocol(prot)

        def provide_input():
            # normal TensorFlow operations can be run locally
            # as part of defining a private input, in this
            # case on the machine of the input provider
            return tf.ones(shape=(2, 2)) * 1.3

        # define inputs
        x = tfe.define_private_variable(tf.ones(shape=(2, 2)) * 2)
        y = tfe.define_private_input("input-provider", provide_input)

        # define computation
        z = y * x

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z.reveal())
            np.testing.assert_allclose(
                result, np.array([[2.6, 2.6], [2.6, 2.6]]), rtol=0.0, atol=0.01
            )

    def test_matmul_public_private(self):
        tf.reset_default_graph()
        prot = ABY3()
        tfe.set_protocol(prot)

        def provide_input():
            # normal TensorFlow operations can be run locally
            # as part of defining a private input, in this
            # case on the machine of the input provider
            return tf.constant(np.array([[1.1, 1.2], [1.3, 1.4], [1.5, 1.6]]))

        # define inputs
        x = tfe.define_private_variable(tf.ones(shape=(2, 2)))
        y = tfe.define_public_input("input-provider", provide_input)
        v = tfe.define_constant(np.ones((2, 2)))

        # define computation
        w = y.matmul(x)  # matmul_public_private
        z = w.matmul(v)  # matmul_private_public

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(w.reveal())
            np.testing.assert_allclose(
                result,
                np.array([[2.3, 2.3], [2.7, 2.7], [3.1, 3.1]]),
                rtol=0.0,
                atol=0.01,
            )
            result = sess.run(z.reveal())
            np.testing.assert_allclose(
                result,
                np.array([[4.6, 4.6], [5.4, 5.4], [6.2, 6.2]]),
                rtol=0.0,
                atol=0.01,
            )

    def test_matmul_private_private(self):
        tf.reset_default_graph()
        prot = ABY3()
        tfe.set_protocol(prot)

        # 2-D matrix mult
        x = tfe.define_private_variable(tf.constant([[1, 2, 3], [4, 5, 6]]))
        y = tfe.define_private_variable(tf.constant([[7, 8], [9, 10], [11, 12]]))
        z = tfe.matmul(x, y)

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z.reveal())
            np.testing.assert_allclose(
                result, np.array([[58, 64], [139, 154]]), rtol=0.0, atol=0.01
            )

    def test_3d_matmul_private(self):
        tf.reset_default_graph()
        prot = ABY3()
        tfe.set_protocol(prot)

        # 3-D (batched) matrix mult
        x = tfe.define_private_variable(tf.constant(np.arange(1, 13), shape=[2, 2, 3]))
        y = tfe.define_private_variable(tf.constant(np.arange(13, 25), shape=[2, 3, 2]))
        z = tfe.matmul(x, y)

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z.reveal())
            np.testing.assert_allclose(
                result,
                np.array([[[94, 100], [229, 244]], [[508, 532], [697, 730]]]),
                rtol=0.0,
                atol=0.01,
            )

    def test_boolean_sharing(self):
        tf.reset_default_graph()
        prot = ABY3()
        tfe.set_protocol(prot)

        x = tfe.define_private_variable(
            tf.constant([[1, 2, 3], [4, 5, 6]]), share_type=BOOLEAN
        )
        y = tfe.define_private_variable(
            tf.constant([[7, 8, 9], [10, 11, 12]]), share_type=BOOLEAN
        )

        z1 = tfe.B_xor(x, y)
        z2 = tfe.B_and(x, y)

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z1.reveal())
            np.testing.assert_allclose(
                result, np.array([[6, 10, 10], [14, 14, 10]]), rtol=0.0, atol=0.01
            )
            result = sess.run(z2.reveal())
            np.testing.assert_allclose(
                result, np.array([[1, 0, 1], [0, 1, 4]]), rtol=0.0, atol=0.01
            )

    def test_not_private(self):
        tf.reset_default_graph()
        prot = ABY3()
        tfe.set_protocol(prot)

        x = tfe.define_private_variable(
            tf.constant([[1, 2, 3], [4, 5, 6]]), share_type=BOOLEAN, apply_scaling=False
        )
        y = tfe.define_private_variable(
            tf.constant([[1, 0, 0], [0, 1, 0]]),
            apply_scaling=False,
            share_type=BOOLEAN,
            factory=prot.bool_factory,
        )

        z1 = ~x
        z2 = ~y

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result: bitwise NOT on the int64 backing is -(v + 1)
            result = sess.run(z1.reveal())
            np.testing.assert_allclose(
                result, np.array([[-2, -3, -4], [-5, -6, -7]]), rtol=0.0, atol=0.01
            )
            # bool factory: logical complement
            result = sess.run(z2.reveal())
            np.testing.assert_allclose(
                result, np.array([[0, 1, 1], [1, 0, 1]]), rtol=0.0, atol=0.01
            )

    def test_native_ppa_sklansky(self):
        # Pure-Python reference for the Sklansky parallel-prefix adder used
        # by the protocol's boolean circuits.
        from math import log2
        from random import randint

        n = 10
        while n > 0:
            n = n - 1
            x = randint(1, 2 ** 31)
            y = randint(1, 2 ** 31)

            keep_masks = [
                0x5555555555555555,
                0x3333333333333333,
                0x0F0F0F0F0F0F0F0F,
                0x00FF00FF00FF00FF,
                0x0000FFFF0000FFFF,
                0x00000000FFFFFFFF,
            ]  # yapf: disable
            copy_masks = [
                0x5555555555555555,
                0x2222222222222222,
                0x0808080808080808,
                0x0080008000800080,
                0x0000800000008000,
                0x0000000080000000,
            ]  # yapf: disable

            G = x & y
            P = x ^ y
            k = 64
            for i in range(int(log2(k))):
                c_mask = copy_masks[i]
                k_mask = keep_masks[i]

                # Copy the selected bit to 2^i positions:
                # For example, when i=2, the 4-th bit is copied to the
                # (5, 6, 7, 8)-th bits
                G1 = (G & c_mask) << 1
                P1 = (P & c_mask) << 1
                for j in range(i):
                    G1 = (G1 << (2 ** j)) ^ G1
                    P1 = (P1 << (2 ** j)) ^ P1

                # Two-round impl. using algo. specified in the slides that
                # assume using OR gate is free, but in fact, here using OR
                # gate cost one round.
                # The PPA operator 'o' is defined as:
                # (G, P) o (G1, P1) = (G + P*G1, P*P1), where '+' is OR,
                # '*' is AND
                # G1 and P1 are 0 for those positions that we do not copy the
                # selected bit to. Hence for those positions, the result is:
                # (G, P) = (G, P) o (0, 0) = (G, 0).
                # In order to keep (G, P) for these positions so that they can
                # be used in the future, we need to let (G1, P1) = (G, P) for
                # these positions, because (G, P) o (G, P) = (G, P)
                #
                # G1 = G1 ^ (G & k_mask)
                # P1 = P1 ^ (P & k_mask)
                # G = G | (P & G1)
                # P = P & P1

                # One-round impl. by modifying the PPA operator 'o' as:
                # (G, P) o (G1, P1) = (G ^ (P*G1), P*P1), where '^' is XOR,
                # '*' is AND
                # This is a valid definition: when calculating the carry bit
                # c_i = g_i + p_i * c_{i-1}, the OR '+' can actually be
                # replaced with XOR '^' because we know g_i and p_i will NOT
                # take '1' at the same time.
                # And this PPA operator 'o' is also associative. BUT, it is
                # NOT idempotent, hence (G, P) o (G, P) != (G, P).
                # NOTE(review): the source file is truncated at this point,
                # mid-comment ("... we can do (G, P) o (0, P) ="); the
                # remainder of this test method is missing from the residue
                # and must be recovered from the tf_encrypted repository.
"""Tests for the skia-pathops Python bindings (Path, pens, OpBuilder)."""
from pathops import (
    Path,
    PathPen,
    OpenPathError,
    OpBuilder,
    PathOp,
    PathVerb,
    FillType,
    bits2float,
    float2bits,
    ArcSize,
    Direction,
    simplify,
)

import pytest


class PathTest(object):
    def test_init(self):
        path = Path()
        assert isinstance(path, Path)

    def test_getPen(self):
        path = Path()
        pen = path.getPen()
        assert isinstance(pen, PathPen)
        # each call returns a fresh pen object
        assert id(pen) != id(path.getPen())

    def test_eq_operator(self):
        path1 = Path()
        path2 = Path()
        assert path1 == path2
        path1.moveTo(0, 0)
        assert path1 != path2
        path2.moveTo(0, 0)
        assert path1 == path2
        # fill type participates in equality
        path1.fillType = FillType.EVEN_ODD
        assert path1 != path2

    def test_copy(self):
        path1 = Path()
        path2 = Path(path1)
        assert path1 == path2

    def test_draw(self):
        path = Path()
        pen = path.getPen()
        pen.moveTo((0, 0))
        pen.lineTo((1.0, 2.0))
        pen.curveTo((3.5, 4), (5, 6), (7, 8))
        pen.qCurveTo((9, 10), (11, 12))
        pen.closePath()

        # replaying a path onto another pen must reproduce it exactly
        path2 = Path()
        path.draw(path2.getPen())
        assert path == path2

    def test_allow_open_contour(self):
        path = Path()
        pen = path.getPen()
        pen.moveTo((0, 0))
        # pen.endPath() is implicit here
        pen.moveTo((1, 0))
        pen.lineTo((1, 1))
        pen.curveTo((2, 2), (3, 3), (4, 4))
        pen.endPath()

        assert list(path.segments) == [
            ('moveTo', ((0.0, 0.0),)),
            ('endPath', ()),
            ('moveTo', ((1.0, 0.0),)),
            ('lineTo', ((1.0, 1.0),)),
            ('curveTo', ((2.0, 2.0), (3.0, 3.0), (4.0, 4.0))),
            ('endPath', ()),
        ]

    def test_raise_open_contour_error(self):
        path = Path()
        pen = path.getPen(allow_open_paths=False)
        pen.moveTo((0, 0))
        with pytest.raises(OpenPathError):
            pen.endPath()

    def test_decompose_join_quadratic_segments(self):
        path = Path()
        pen = path.getPen()
        pen.moveTo((0, 0))
        pen.qCurveTo((1, 1), (2, 2), (3, 3))
        pen.closePath()

        items = list(path)
        assert len(items) == 4
        # the TrueType quadratic spline with N off-curves is stored internally
        # as N atomic quadratic Bezier segments
        assert items[1][0] == PathVerb.QUAD
        assert items[1][1] == ((1.0, 1.0), (1.5, 1.5))
        assert items[2][0] == PathVerb.QUAD
        assert items[2][1] == ((2.0, 2.0), (3.0, 3.0))

        # when drawn back onto a SegmentPen, the implicit on-curves are omitted
        assert list(path.segments) == [
            ('moveTo', ((0.0, 0.0),)),
            ('qCurveTo', ((1.0, 1.0), (2.0, 2.0), (3.0, 3.0))),
            ('closePath', ())]

    def test_last_implicit_lineTo(self):
        # https://github.com/fonttools/skia-pathops/issues/6
        path = Path()
        pen = path.getPen()
        pen.moveTo((100, 100))
        pen.lineTo((100, 200))
        pen.closePath()
        assert list(path.segments) == [
            ('moveTo', ((100.0, 100.0),)),
            ('lineTo', ((100.0, 200.0),)),
            # ('lineTo', ((100.0, 100.0),)),
            ('closePath', ())]

    def test_transform(self):
        path = Path()
        path.moveTo(125, 376)
        path.cubicTo(181, 376, 218, 339, 218, 290)
        path.cubicTo(218, 225, 179, 206, 125, 206)
        path.close()

        # t = Transform().rotate(radians(-45)).translate(-100, 0)
        matrix = (0.707107, -0.707107, 0.707107, 0.707107, -70.7107, 70.7107)
        result = path.transform(*matrix)

        # expected coordinates are pinned bit-exactly via their float bits
        expected = Path()
        expected.moveTo(
            bits2float(0x438dc663),  # 283.55
            bits2float(0x437831ce),  # 248.195
        )
        expected.cubicTo(
            bits2float(0x43a192ee),  # 323.148
            bits2float(0x435098b8),  # 208.597
            bits2float(0x43a192ee),  # 323.148
            bits2float(0x431c454a),  # 156.271
            bits2float(0x43903ff5),  # 288.5
            bits2float(0x42f33ead),  # 121.622
        )
        expected.cubicTo(
            bits2float(0x437289a8),  # 242.538
            bits2float(0x42975227),  # 75.6605
            bits2float(0x43498688),  # 201.526
            bits2float(0x42b39aee),  # 89.8026
            bits2float(0x4323577c),  # 163.342
            bits2float(0x42fff906),  # 127.986
        )
        expected.close()

        result.dump(as_hex=True)

        assert result == expected

    def test_pen_addComponent_missing_required_glyphSet(self):
        path = Path()
        pen = path.getPen()
        with pytest.raises(TypeError, match="Missing required glyphSet"):
            pen.addComponent("a", (1, 0, 0, 1, 0, 0))

    def test_pen_addComponent_decomposed_from_glyphSet(self):
        a = Path()
        a.moveTo(0, 0)
        a.lineTo(1, 0)
        a.lineTo(1, 1)
        a.lineTo(0, 1)
        a.close()
        glyphSet = {"a": a}

        b = Path()
        pen = b.getPen(glyphSet=glyphSet)
        pen.addComponent("a", (2, 0, 0, 2, 10, 10))
        glyphSet["b"] = b

        assert list(b) == [
            (PathVerb.MOVE, ((10, 10),)),
            (PathVerb.LINE, ((12, 10),)),
            (PathVerb.LINE, ((12, 12),)),
            (PathVerb.LINE, ((10, 12),)),
            (PathVerb.CLOSE, ()),
        ]

        # components may themselves contain components; all are decomposed
        c = Path()
        pen = c.getPen(glyphSet=glyphSet)
        pen.addComponent("a", (1, 0, 0, 1, 2, 2))
        pen.addComponent("b", (1, 0, 0, 1, -10, -10))
        glyphSet["c"] = c

        assert list(c) == [
            (PathVerb.MOVE, ((2, 2),)),
            (PathVerb.LINE, ((3, 2),)),
            (PathVerb.LINE, ((3, 3),)),
            (PathVerb.LINE, ((2, 3),)),
            (PathVerb.CLOSE, ()),
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.LINE, ((2, 0),)),
            (PathVerb.LINE, ((2, 2),)),
            (PathVerb.LINE, ((0, 2),)),
            (PathVerb.CLOSE, ()),
        ]


class OpBuilderTest(object):
    def test_init(self):
        builder = OpBuilder()

    def test_add(self):
        path = Path()
        pen = path.getPen()
        pen.moveTo((5, -225))
        pen.lineTo((-225, 7425))
        pen.lineTo((7425, 7425))
        pen.lineTo((7425, -225))
        pen.lineTo((-225, -225))
        pen.closePath()

        builder = OpBuilder()
        builder.add(path, PathOp.UNION)

    def test_resolve(self):
        path1 = Path()
        pen1 = path1.getPen()
        pen1.moveTo((5, -225))
        pen1.lineTo((-225, 7425))
        pen1.lineTo((7425, 7425))
        pen1.lineTo((7425, -225))
        pen1.lineTo((-225, -225))
        pen1.closePath()

        path2 = Path()
        pen2 = path2.getPen()
        pen2.moveTo((5940, 2790))
        pen2.lineTo((5940, 2160))
        pen2.lineTo((5970, 1980))
        pen2.lineTo((5688, 773669888))
        pen2.lineTo((5688, 2160))
        pen2.lineTo((5688, 2430))
        pen2.lineTo((5400, 4590))
        pen2.lineTo((5220, 4590))
        pen2.lineTo((5220, 4920))
        pen2.curveTo((5182.22900390625, 4948.328125),
                     (5160, 4992.78662109375),
                     (5160, 5040.00048828125))
        pen2.lineTo((5940, 2790))
        pen2.closePath()

        builder = OpBuilder(fix_winding=False, keep_starting_points=False)
        builder.add(path1, PathOp.UNION)
        builder.add(path2, PathOp.UNION)
        result = builder.resolve()

        assert list(result.segments) == [
            ("moveTo", ((5316.0, 4590.0),)),
            ("lineTo", ((5220.0, 4590.0),)),
            ("lineTo", ((5220.0, 4866.92333984375),)),
            ("lineTo", ((5316.0, 4590.0),)),
            ("closePath", ()),
            ("moveTo", ((5192.18701171875, 4947.15283203125),)),
            (
                "curveTo",
                (
                    (5171.5654296875, 4973.322265625),
                    (5160.0, 5005.9443359375),
                    (5160.0, 5040.00048828125),
                ),
            ),
            ("lineTo", ((5192.18701171875, 4947.15283203125),)),
            ("closePath", ()),
            ("moveTo", ((5688.0, 7425.0),)),
            ("lineTo", ((-225.0, 7425.0),)),
            ("lineTo", ((5.0, -225.0),)),
            ("lineTo", ((7425.0, -225.0),)),
            ("lineTo", ((7425.0, 7425.0),)),
            ("lineTo", ((5688.0, 7425.0),)),
            ("closePath", ()),
        ]


# Pairs of (contour, expected reversed contour) used by the contour-reversal
# tests: closed/open contours, lines, cubics, quads, degenerate cases.
TEST_DATA = [
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.LINE, ((1, 1),)),
            (PathVerb.LINE, ((2, 2),)),
            (PathVerb.LINE, ((3, 3),)),
            (PathVerb.CLOSE, ()),
        ],
        [
            (PathVerb.MOVE, ((3, 3),)),
            (PathVerb.LINE, ((2, 2),)),
            (PathVerb.LINE, ((1, 1),)),
            (PathVerb.LINE, ((0, 0),)),
            (PathVerb.CLOSE, ())
        ]
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.LINE, ((1, 1),)),
            (PathVerb.LINE, ((2, 2),)),
            (PathVerb.LINE, ((0, 0),)),
            (PathVerb.CLOSE, ()),
        ],
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.LINE, ((2, 2),)),
            (PathVerb.LINE, ((1, 1),)),
            (PathVerb.LINE, ((0, 0),)),
            (PathVerb.CLOSE, ())
        ]
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.LINE, ((0, 0),)),
            (PathVerb.LINE, ((1, 1),)),
            (PathVerb.LINE, ((2, 2),)),
            (PathVerb.CLOSE, ()),
        ],
        [
            (PathVerb.MOVE, ((2, 2),)),
            (PathVerb.LINE, ((1, 1),)),
            (PathVerb.LINE, ((0, 0),)),
            (PathVerb.LINE, ((0, 0),)),
            (PathVerb.CLOSE, ()),
        ]
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.LINE, ((1, 1),)),
            (PathVerb.CLOSE, ()),
        ],
        [
            (PathVerb.MOVE, ((1, 1),)),
            (PathVerb.LINE, ((0, 0),)),
            (PathVerb.CLOSE, ()),
        ]
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.CUBIC, ((1, 1), (2, 2), (3, 3))),
            (PathVerb.CUBIC, ((4, 4), (5, 5), (0, 0))),
            (PathVerb.CLOSE, ()),
        ],
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.CUBIC, ((5, 5), (4, 4), (3, 3))),
            (PathVerb.CUBIC, ((2, 2), (1, 1), (0, 0))),
            (PathVerb.CLOSE, ()),
        ]
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.CUBIC, ((1, 1), (2, 2), (3, 3))),
            (PathVerb.CUBIC, ((4, 4), (5, 5), (6, 6))),
            (PathVerb.CLOSE, ()),
        ],
        [
            (PathVerb.MOVE, ((6, 6),)),
            (PathVerb.CUBIC, ((5, 5), (4, 4), (3, 3))),
            (PathVerb.CUBIC, ((2, 2), (1, 1), (0, 0))),
            (PathVerb.CLOSE, ()),
        ]
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.LINE, ((1, 1),)),
            (PathVerb.CUBIC, ((2, 2), (3, 3), (4, 4))),
            (PathVerb.CUBIC, ((5, 5), (6, 6), (7, 7))),
            (PathVerb.CLOSE, ()),
        ],
        [
            (PathVerb.MOVE, ((7, 7),)),
            (PathVerb.CUBIC, ((6, 6), (5, 5), (4, 4))),
            (PathVerb.CUBIC, ((3, 3), (2, 2), (1, 1))),
            (PathVerb.LINE, ((0, 0),)),
            (PathVerb.CLOSE, ()),
        ]
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.QUAD, ((1, 1), (2.5, 2.5))),
            (PathVerb.QUAD, ((3, 3), (0, 0))),
            (PathVerb.CLOSE, ()),
        ],
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.QUAD, ((3, 3), (2.5, 2.5))),
            (PathVerb.QUAD, ((1, 1), (0, 0))),
            (PathVerb.CLOSE, ()),
        ]
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.QUAD, ((1, 1), (2.5, 2.5))),
            (PathVerb.QUAD, ((3, 3), (4, 4))),
            (PathVerb.CLOSE, ()),
        ],
        [
            (PathVerb.MOVE, ((4, 4),)),
            (PathVerb.QUAD, ((3, 3), (2.5, 2.5))),
            (PathVerb.QUAD, ((1, 1), (0, 0))),
            (PathVerb.CLOSE, ()),
        ]
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.LINE, ((1, 1),)),
            (PathVerb.QUAD, ((2, 2), (3, 3))),
            (PathVerb.CLOSE, ()),
        ],
        [
            (PathVerb.MOVE, ((3, 3),)),
            (PathVerb.QUAD, ((2, 2), (1, 1))),
            (PathVerb.LINE, ((0, 0),)),
            (PathVerb.CLOSE, ()),
        ]
    ),
    (
        [],
        []
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
        ],
        [
            (PathVerb.MOVE, ((0, 0),)),
        ],
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.CLOSE, ()),
        ],
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.CLOSE, ()),
        ],
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.LINE, ((1, 1),)),
        ],
        [
            (PathVerb.MOVE, ((1, 1),)),
            (PathVerb.LINE, ((0, 0),)),
        ]
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.CUBIC, ((1, 1), (2, 2), (3, 3))),
        ],
        [
            (PathVerb.MOVE, ((3, 3),)),
            (PathVerb.CUBIC, ((2, 2), (1, 1), (0, 0))),
        ]
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.CUBIC, ((1, 1), (2, 2), (3, 3))),
            (PathVerb.LINE, ((4, 4),)),
        ],
        [
            (PathVerb.MOVE, ((4, 4),)),
            (PathVerb.LINE, ((3, 3),)),
            (PathVerb.CUBIC, ((2, 2), (1, 1), (0, 0))),
        ]
    ),
    (
        [
            (PathVerb.MOVE, ((0, 0),)),
            (PathVerb.LINE, ((1, 1),)),
            (PathVerb.CUBIC, ((2, 2), (3, 3), (4, 4))),
        ],
        [
            (PathVerb.MOVE, ((4, 4),)),
            (PathVerb.CUBIC, ((3, 3), (2, 2), (1, 1))),
            (PathVerb.LINE, ((0, 0),)),
        ]
    ),
    # NOTE(review): the source file is truncated here, in the middle of one
    # more test case (from https://github.com/googlei18n/cu2qu/issues/51
    # #issue-179370514, a contour starting at (848, 348) with a duplicate
    # lineTo after the moveTo). Its expected-output half is missing from the
    # residue; recover the full entry from the skia-pathops repository.
]
+ (8*mckin**6)/mbkin**6 - mckin**8/mbkin**8 - (12*mckin**4*np.log(mckin**2/mbkin**2))/mbkin**4))/(27*mckin) - (640*np.pi**2*(1 - (8*mckin**2)/mbkin**2 + (8*mckin**6)/mbkin**6 - mckin**8/mbkin**8 - (12*mckin**4*np.log(mckin**2/mbkin**2))/mbkin**4))/ 27 - (131072*(1 - mckin/mbkin)**5*(np.log(2) + np.log(1 - mckin/mbkin)))/ 405 + (131072*mbkin*(1 - mckin/mbkin)**5*(np.log(2) + np.log(1 - mckin/mbkin)))/(405*mckin) - (131072*mckin*(1 - mckin/mbkin)**5*(np.log(2) + np.log(1 - mckin/mbkin)))/ (405*mbkin) + (131072*mckin**2*(1 - mckin/mbkin)**5* (np.log(2) + np.log(1 - mckin/mbkin)))/(405*mbkin**2) + (65536*(1 - mckin/mbkin)**6*(np.log(2) + np.log(1 - mckin/mbkin)))/243 - (65536*mbkin*(1 - mckin/mbkin)**6*(np.log(2) + np.log(1 - mckin/mbkin)))/ (405*mckin) + (131072*mckin*(1 - mckin/mbkin)**6* (np.log(2) + np.log(1 - mckin/mbkin)))/(1215*mbkin) - (262144*mckin**2*(1 - mckin/mbkin)**6*(np.log(2) + np.log(1 - mckin/mbkin)))/ (1215*mbkin**2) - (131072*(1 - mckin/mbkin)**7* (np.log(2) + np.log(1 - mckin/mbkin)))/1701 + (131072*mbkin*(1 - mckin/mbkin)**7*(np.log(2) + np.log(1 - mckin/mbkin)))/ (8505*mckin) + (131072*mckin*(1 - mckin/mbkin)**7* (np.log(2) + np.log(1 - mckin/mbkin)))/(8505*mbkin) + (131072*mckin**2*(1 - mckin/mbkin)**7*(np.log(2) + np.log(1 - mckin/mbkin)))/ (2835*mbkin**2) - (65536*(1 - mckin/mbkin)**8* (np.log(2) + np.log(1 - mckin/mbkin)))/2835 + (32768*mbkin*(1 - mckin/mbkin)**8*(np.log(2) + np.log(1 - mckin/mbkin)))/ (945*mckin) - (16384*mckin*(1 - mckin/mbkin)**8* (np.log(2) + np.log(1 - mckin/mbkin)))/(405*mbkin) + (16384*mckin**2*(1 - mckin/mbkin)**8*(np.log(2) + np.log(1 - mckin/mbkin)))/ (567*mbkin**2) - (139264*(1 - mckin/mbkin)**9* (np.log(2) + np.log(1 - mckin/mbkin)))/5103 + (8192*mbkin*(1 - mckin/mbkin)**9*(np.log(2) + np.log(1 - mckin/mbkin)))/ (243*mckin) - (188416*mckin*(1 - mckin/mbkin)**9* (np.log(2) + np.log(1 - mckin/mbkin)))/(5103*mbkin) + (155648*mckin**2*(1 - mckin/mbkin)**9*(np.log(2) + np.log(1 - mckin/mbkin)))/ (5103*mbkin**2) - 
(782336*(1 - mckin/mbkin)**10* (np.log(2) + np.log(1 - mckin/mbkin)))/25515 + (937984*mbkin*(1 - mckin/mbkin)**10*(np.log(2) + np.log(1 - mckin/mbkin)))/ (25515*mckin) - (1015808*mckin*(1 - mckin/mbkin)**10* (np.log(2) + np.log(1 - mckin/mbkin)))/(25515*mbkin) + (8192*mckin**2*(1 - mckin/mbkin)**10*(np.log(2) + np.log(1 - mckin/mbkin)))/ (243*mbkin**2) - (151552*(1 - mckin/mbkin)**11* (np.log(2) + np.log(1 - mckin/mbkin)))/4455 + (536576*mbkin*(1 - mckin/mbkin)**11*(np.log(2) + np.log(1 - mckin/mbkin)))/ (13365*mckin) - (192512*mckin*(1 - mckin/mbkin)**11* (np.log(2) + np.log(1 - mckin/mbkin)))/(4455*mbkin) + (45056*mckin**2*(1 - mckin/mbkin)**11*(np.log(2) + np.log(1 - mckin/mbkin)))/ (1215*mbkin**2) - (1497088*(1 - mckin/mbkin)**12* (np.log(2) + np.log(1 - mckin/mbkin)))/40095 + (581632*mbkin*(1 - mckin/mbkin)**12*(np.log(2) + np.log(1 - mckin/mbkin)))/ (13365*mckin) - (373760*mckin*(1 - mckin/mbkin)**12* (np.log(2) + np.log(1 - mckin/mbkin)))/(8019*mbkin) + (1620992*mckin**2*(1 - mckin/mbkin)**12*(np.log(2) + np.log(1 - mckin/mbkin)))/(40095*mbkin**2) - (21173248*(1 - mckin/mbkin)**13*(np.log(2) + np.log(1 - mckin/mbkin)))/ 521235 + (24415232*mbkin*(1 - mckin/mbkin)**13* (np.log(2) + np.log(1 - mckin/mbkin)))/(521235*mckin) - (26036224*mckin*(1 - mckin/mbkin)**13*(np.log(2) + np.log(1 - mckin/mbkin)))/(521235*mbkin) + (1519616*mckin**2*(1 - mckin/mbkin)**13*(np.log(2) + np.log(1 - mckin/mbkin)))/(34749*mbkin**2) - (395264*(1 - mckin/mbkin)**14*(np.log(2) + np.log(1 - mckin/mbkin)))/9009 + (12191744*mbkin*(1 - mckin/mbkin)**14*(np.log(2) + np.log(1 - mckin/mbkin)))/(243243*mckin) - (4317184*mckin*(1 - mckin/mbkin)**14*(np.log(2) + np.log(1 - mckin/mbkin)))/ (81081*mbkin) + (11431936*mckin**2*(1 - mckin/mbkin)**14* (np.log(2) + np.log(1 - mckin/mbkin)))/(243243*mbkin**2) - (171870208*(1 - mckin/mbkin)**15*(np.log(2) + np.log(1 - mckin/mbkin)))/ 3648645 + (4327424*mbkin*(1 - mckin/mbkin)**15* (np.log(2) + np.log(1 - mckin/mbkin)))/(81081*mckin) - (29452288*mckin*(1 
- mckin/mbkin)**15*(np.log(2) + np.log(1 - mckin/mbkin)))/(521235*mbkin) + (183302144*mckin**2*(1 - mckin/mbkin)**15*(np.log(2) + np.log(1 - mckin/mbkin)))/(3648645*mbkin**2) - (14123008*(1 - mckin/mbkin)**16*(np.log(2) + np.log(1 - mckin/mbkin)))/ 280665 + (29501696*mbkin*(1 - mckin/mbkin)**16* (np.log(2) + np.log(1 - mckin/mbkin)))/(521235*mckin) - (19815296*mckin*(1 - mckin/mbkin)**16*(np.log(2) + np.log(1 - mckin/mbkin)))/(331695*mbkin) + (21672832*mckin**2*(1 - mckin/mbkin)**16*(np.log(2) + np.log(1 - mckin/mbkin)))/(405405*mbkin**2) - (368870272*(1 - mckin/mbkin)**17*(np.log(2) + np.log(1 - mckin/mbkin)))/ 6891885 + (12491392*mbkin*(1 - mckin/mbkin)**17* (np.log(2) + np.log(1 - mckin/mbkin)))/(208845*mckin) - (433888768*mckin*(1 - mckin/mbkin)**17*(np.log(2) + np.log(1 - mckin/mbkin)))/(6891885*mbkin) + (55791872*mckin**2*(1 - mckin/mbkin)**17*(np.log(2) + np.log(1 - mckin/mbkin)))/(984555*mbkin**2) - (502561024*(1 - mckin/mbkin)**18*(np.log(2) + np.log(1 - mckin/mbkin)))/ 8860995 + (10948096*mbkin*(1 - mckin/mbkin)**18* (np.log(2) + np.log(1 - mckin/mbkin)))/(173745*mckin) - (45096064*mckin*(1 - mckin/mbkin)**18*(np.log(2) + np.log(1 - mckin/mbkin)))/(681615*mbkin) + (9644672*mckin**2*(1 - mckin/mbkin)**18*(np.log(2) + np.log(1 - mckin/mbkin)))/(161109*mbkin**2) + (9644672*(1 - mckin/mbkin)**19*(np.log(2) + np.log(1 - mckin/mbkin)))/ 3061071 + (9644672*mbkin*(1 - mckin/mbkin)**19* (np.log(2) + np.log(1 - mckin/mbkin)))/(3061071*mckin) - (19289344*mckin*(1 - mckin/mbkin)**19*(np.log(2) + np.log(1 - mckin/mbkin)))/(3061071*mbkin) + (327680*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/1701 - (65536*mbkin*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(1701*mckin) - (65536*mckin*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(243*mbkin) - (65536*mckin**2*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/ (243*mbkin**2) + (327680*mckin**3*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(243*mbkin**3) - (458752*mckin**4*(1 + 7*np.log(2) + 7*np.log(1 - 
mckin/mbkin)))/ (243*mbkin**4) + (327680*mckin**5*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(243*mbkin**5) - (851968*mckin**6*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/ (1701*mbkin**6) + (131072*mckin**7*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(1701*mbkin**7) - (65536*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/567 + (32768*mbkin*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/(1701*mckin) + (131072*mckin*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/(567*mbkin) - (65536*mckin**3*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/ (81*mbkin**3) + (131072*mckin**4*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/(81*mbkin**4) - (131072*mckin**5*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/ (81*mbkin**5) + (524288*mckin**6*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/(567*mbkin**6) - (163840*mckin**7*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/ (567*mbkin**7) + (65536*mckin**8*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/(1701*mbkin**8) + (16384*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/729 - (16384*mbkin*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/(5103*mckin) - (32768*mckin*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/(567*mbkin) + (65536*mckin**2*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/ (1701*mbkin**2) + (32768*mckin**3*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/(243*mbkin**3) - (32768*mckin**4*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/ (81*mbkin**4) + (131072*mckin**5*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/(243*mbkin**5) - (720896*mckin**6*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/ (1701*mbkin**6) + (16384*mckin**7*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/(81*mbkin**7) - (278528*mckin**8*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/ (5103*mbkin**8) + (32768*mckin**9*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/(5103*mbkin**9) + (65536*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/5103 - (8192*mbkin*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/ (5103*mckin) - (204800*mckin*(1 + 
10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/(5103*mbkin) + (81920*mckin**2*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/ (1701*mbkin**2) + (81920*mckin**3*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/(1701*mbkin**3) - (65536*mckin**4*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/ (243*mbkin**4) + (114688*mckin**5*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/(243*mbkin**5) - (819200*mckin**6*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/ (1701*mbkin**6) + (532480*mckin**7*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/(1701*mbkin**7) - (655360*mckin**8*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/ (5103*mbkin**8) + (155648*mckin**9*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/(5103*mbkin**9) - (16384*mckin**10*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/ (5103*mbkin**10) + (77824*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/ 6237 - (77824*mbkin*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/ (56133*mckin) - (77824*mckin*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/(1701*mbkin) + (389120*mckin**2*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/ (5103*mbkin**2) - (155648*mckin**4*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/(567*mbkin**4) + (155648*mckin**5*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/ (243*mbkin**5) - (155648*mckin**6*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/(189*mbkin**6) + (389120*mckin**7*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/ (567*mbkin**7) - (1945600*mckin**8*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/(5103*mbkin**8) + (77824*mckin**9*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/ (567*mbkin**9) - (77824*mckin**10*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/(2673*mbkin**10) + (155648*mckin**11*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/ (56133*mbkin**11) + (102400*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/8019 - (10240*mbkin*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/ (8019*mckin) - (143360*mckin*(1 + 12*np.log(2) + 12*np.log(1 - 
mckin/mbkin)))/(2673*mbkin) + (81920*mckin**2*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/ (729*mbkin**2) - (51200*mckin**3*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/(729*mbkin**3) - (20480*mckin**4*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/ (81*mbkin**4) + (204800*mckin**5*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/(243*mbkin**5) - (327680*mckin**6*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/ (243*mbkin**6) + (112640*mckin**7*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/(81*mbkin**7) - (716800*mckin**8*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/ (729*mbkin**8) + (348160*mckin**9*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/(729*mbkin**9) - (409600*mckin**10*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/ (2673*mbkin**10) + (235520*mckin**11*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/(8019*mbkin**11) - (20480*mckin**12*(1 + 12*np.log(2) + 12*np.log(1 - mckin/mbkin)))/ (8019*mbkin**12) + (123904*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/9477 - (11264*mbkin*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/ (9477*mckin) - (45056*mckin*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/(729*mbkin) + (112640*mckin**2*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/ (729*mbkin**2) - (123904*mckin**3*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/(729*mbkin**3) - (123904*mckin**4*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/ (729*mbkin**4) + (247808*mckin**5*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/(243*mbkin**5) - (495616*mckin**6*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/ (243*mbkin**6) + (619520*mckin**7*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/(243*mbkin**7) - (1610752*mckin**8*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/ (729*mbkin**8) + (991232*mckin**9*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/(729*mbkin**9) - (428032*mckin**10*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/ (729*mbkin**10) + (123904*mckin**11*(1 + 13*np.log(2) + 13*np.log(1 - 
mckin/mbkin)))/(729*mbkin**11) - (281600*mckin**12*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/ (9477*mbkin**12) + (22528*mckin**13*(1 + 13*np.log(2) + 13*np.log(1 - mckin/mbkin)))/(9477*mbkin**13) + (3241984*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/243243 - (810496*mbkin*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/ (729729*mckin) - (810496*mckin*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/(11583*mbkin) + (1620992*mckin**2*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/ (8019*mbkin**2) - (810496*mckin**3*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/(2673*mbkin**3) + (810496*mckin**5*(1 + 14*np.log(2) + 14*np.log(1 - mckin/mbkin)))/ (729*mbkin**5) - (1620992*mckin**6*(1 + 14*np.log(2)
target_ip, starting_port, ending_port): #Default ports file reading port_read_file = open('default_port/default_ports.txt', 'r') some_read_port = port_read_file.read().split() port_read_file.close() #Default ports banner reading banner_read_file = open('default_port/default_port_version.txt','r') some_read_banner = banner_read_file.read().split() banner_read_file.close() search_prefix = target_ip.find('/') if search_prefix != -1: #interrupt signal check try: try: #Create open port list result_open_port = [] #create open port on ip list result_open_ip = [] #port scan choose for b_port in range(starting_port, ending_port): #ports scan on target subnet for dst_ip in ipcalc.Network(target_ip): sock_b = socket.socket(socket.AF_INET, socket.SOCK_STREAM) result_b = sock_b.connect_ex((str(dst_ip), b_port)) if 0 == result_b: result_open_port.append(str(b_port)) result_open_ip.append(str(dst_ip)) if not result_open_port: print(Fore.GREEN+" "*20+"************** RESULT **************") print(Fore.RED+" "*14+"[-] NO FOUND OPEN PORT ON " +str(target_ip)+ " TARGET SUBNET [-]") else: #create match list match_list_b = [] print(Fore.GREEN+" "*20+"*************** RESULT ***************") #open port banner match for open_port in result_open_port: #check list if str(open_port) in some_read_port: match_list_b.append(open_port) for s_match in range(0, len(match_list_b)): if str(match_list_b[s_match]) in some_read_port: for r_p, r_b in zip(some_read_port, some_read_banner): if str(match_list_b[s_match]) in str(r_p): print(Fore.BLUE+" "*19+str(result_open_ip[s_match])+"==>"+" [+] {} {} PORT IS OPEN [+]".format(match_list_b[s_match], r_b)) else: print(Fore.BLUE+" "*19+str(result_open_ip[s_match])+"==>"+" {} UNKNOW PORT IS OPEN [+]".format(match_list_b[s_match])) except: print(Fore.RED+" "*20+"[-]THERE IS A PROBLEM ON SCAN [-]") sys.exit(0) except KeyboardInterrupt: #success exit sys.exit(0) else: #Create open port list sing_open_port = [] #interrupt signal try: #check process #try: #start 
and end port beetween scan loop for sing_port in range(starting_port, ending_port): sing_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #scan on target ip result_sing = sing_sock.connect_ex((target_ip, sing_port)) if 0 == result_sing: sing_open_port.append(sing_port) if not sing_open_port: print(Fore.RED+" "*15+"[-] NO OPEN PORT FOUND IN THE SCAN RESULT [-]") else: #create single match list sing_match_list = [] print(Fore.GREEN+" "*20+"*************** RESULT ***************") for s_open in sing_open_port: if str(s_open) in some_read_port: sing_match_list.append(s_open) for sing_match in range(0, len(sing_match_list)): if str(sing_match_list[sing_match]) in some_read_port: for s_p, s_b in zip(some_read_port, some_read_banner): if str(sing_match_list[sing_match]) in str(s_p): print(Fore.BLUE+" "*19+str(target_ip)+"==>"+"[+] {} {} PORT IS OPEN [+]".format(sing_match_list[sing_match], s_b)) else: print(Fore.BLUE+" "*19+str(target_ip)+"==>"+"[+] {} UNKNOW PORT IS OPEN [+]".format(sing_match_list[sing_match])) #except: # print(Fore.RED+" "*20+"[-]THERE IS A PROBLEM ON SCAN [-]") # sys.exit(0) except KeyboardInterrupt: sys.exit(0) #Ack scan on firewall class FIREWALL_SCAN_OPTIONS: #Default ports reading port_file = open('default_port/default_tcp_ports.txt', 'r') read_ports = port_file.read().split() port_file.close() #Default banners reading banner_file = open('default_port/default_port_version.txt', 'r') read_banner = banner_file.read().split() banner_file.close() #ack scan function def ack_scan_option(self, target_ip, port): #Create random source port for tcp packet src_port = RandShort() #control process try: print(Fore.GREEN+" "*20+"*************** RESULT ***************") #create ack packet ack_packet = sr1(IP(dst=target_ip)/TCP(dport=port, flags="A"),verbose=0, timeout=10) if (str(type(ack_packet)) == "<type 'NoneType'>"): print(Fore.BLUE+" "*18+str(target_ip)+" "+str(port)+" ==>"+" [*] FILTERED FROM FIREWALL [*]") elif (ack_packet.haslayer(TCP)): if 
(ack_packet.getlayer(TCP).flags == 0x4): print(Fore.BLUE+" "*18+str(target_ip)+" "+str(port)+" ==>"+" [*] UNFILTERED OR NO FIREWALL [*]") elif (ack_packet.haslayer(ICMP)): if(int(ack_packet.getlayer(ICMP).type) == 3 and int(ack_packet.getlayer(ICMP).code in[1,2,3,9,10,13])): print(Fore.BLUE+" "*18+str(target_ip)+" "+str(port)+" ==>"+" [*] FILTERED FROM FIREWALL [*]") except: print(Fore.RED+" "*20+"[!] A TCP PROBLEM OCCURED [!]") sys.exit(0) def ack_scan_option_default(self, target_ip): #Create source port src_port = RandShort() #Create int value ports list int_ports = [] #Create filtered port list filtered_port = [] #Create unfiltered port list unfiltered_port = [] #Create could be filtered list c_filtered_port = [] int_ports = list(map(int, self.read_ports)) try: ## print(Fore.GREEN+" "*20+"*************** RESULT ***************") for dst_port in int_ports: try: ack_packet = sr1(IP(dst=target_ip)/TCP(dport=dst_port, flags="A"), verbose=0, timeout=10) if (str(type(ack_packet)) == "<type 'NoneType'>"): filtered_port.append(str(dst_port)) elif (ack_packet.haslayer(TCP)): if(ack_packet.getlayer(TCP).flags == 0x4): unfiltered_port.append(str(dst_port)) elif(ack_packet.haslayer(ICMP)): if(int(ack_packet.getlayer(ICMP).type) == 3 and int(ack_packet.getlayer(ICMP).code in[1,2,3,9,10,13])): c_filtered_port.append(str(dst_port)) except: pass if filtered_port != None: for filter_port in filtered_port: print(Fore.BLUE+" "*18+str(target_ip)+" "+str(filter_port)+" ==>"+" [*]FILTERED FROM FIREWALL [*]") print('') if unfiltered_port != None: for unfilter_port in unfiltered_port: print(Fore.BLUE+" "*18+str(target_ip)+" "+str(unfilter_port)+" ==>"+" [*]UNFILTERED OR NO FIREWALL[*]") if c_filtered_port != None: for c_filtered in c_filtered_port: print(Fore.BLUE+" "*18+str(target_ip)+" "+str(c_filtered)+" ==>"+" [*] FILTERED FROM FIREWALL [*]") except: print(Fore.RED+" "*20+"[!] 
A TCP PROBLEM OCCURED [!]") sys.exit(0) def null_scan_option(self, target_ip, port): #Create source port src_port = RandShort() #check process print(Fore.GREEN+" "*20+"*************** RESULT ***************") try: Tcp_null_packet = sr1(IP(dst=target_ip)/TCP(dport=port,flags=""), verbose=0, timeout=10) if (str(type(Tcp_null_packet)) == "<type 'NoneType'>"): print(Fore.BLUE+" "*18+str(target_ip)+" "+str(port)+" ==>"+"[*] PORT IS OPEN|FILTERED [*]") elif(Tcp_null_packet.haslayer(TCP)): if(Tcp_null_packet.getlayer(TCP).flags == 0x4): print(Fore.BLUE+" "*18+str(target_ip)+" "+str(port)+" ==>"+"[*] PORT IS CLOSED [*]") elif(Tcp_null_packet.haslayer(ICMP)): if(int(Tcp_null_packet.getlayer(ICMP).type)==3 and int(Tcp_null_packet.getlayer(ICMP).code) in [1,2,3,9,10,13]): print(Fore.BLUE+" "*18+str(target_ip)+" "+str(port)+" ==>"+"[*] PORT IS FILTERED [*]") except: print(Fore.RED+" "*20+"[!] A TCP PROBLEM OCCURED [!]") sys.exit(0) def null_scan_option_default(self, target_ip): #Create source port src_port = RandShort() #Create int value port list int_ports_default = [] #Create filtered port list open_or_filtered = [] #Create close port list close_ports = [] #Create filtered ports filtered_ports = [] int_ports_default = list(map(int, self.read_ports)) try: ## print(Fore.GREEN+" "*20+"*************** RESULT ***************") for dst_port in int_ports_default: #check process Tcp_null_packet = sr1(IP(dst=target_ip)/TCP(dport=dst_port, flags=""), verbose=0, timeout=10) try: if (str(type(Tcp_null_packet)) == "<type 'NoneType'>"): open_or_filtered.append(str(dst_port)) elif(Tcp_null_packet.haslayer(TCP)): if(Tcp_null_packet.getlayer(TCP).flags == 0x4): close_ports.append(str(dst_port)) elif(Tcp_null_packet.haslayer(ICMP)): if(int(Tcp_null_packet.getlayer(ICMP).type)==3 and int(Tcp_null_packet.getlayer(ICMP).code) in [1,2,3,9,10,13]): filtered_ports.append(str(dst_port)) except: pass if open_or_filtered != None: for filter_op in open_or_filtered: print(Fore.BLUE+" 
"*18+str(target_ip)+" "+str(filter_op)+" ==>"+"[*]PORT IS OPEN|FILTERED [*]") if close_ports != None: for close_port in close_ports: print(Fore.BLUE+" "*18+str(target_ip)+" "+str(filter_op)+" ==>"+"[*]PORT IS CLOSED [*]") if filtered_ports != None: for filter_port in filtered_ports: print(Fore.BLUE+" "*18+str(target_ip)+" "+str(filter_port)+" ==>"+"[*]PORT IS FILTERED [*]") except: print(Fore.RED+" "*20+"[!] A TCP PROBLEM OCCURED [!]") sys.exit(0) def xmas_scan_option(self, target_ip, port): #Create source port src_port = RandShort() print(Fore.GREEN+" "*20+"*************** RESULT ***************") try: #create tcp packet(send and receive) Tcp_xmas_packet = sr1(IP(dst=target_ip)/TCP(dport=port, flags="FPU"),verbose=0, timeout=10) if (str(type(Tcp_xmas_packet)) == "<type 'NoneType'>"): print(Fore.BLUE+" "*18+str(target_ip)+" "+str(port)+" ==>"+"[*]PORT IS OPEN|FILTERED [*]") elif(Tcp_xmas_packet.getlayer(TCP).flags == 0x14): print(Fore.BLUE+" "*18+str(target_ip)+" "+str(port)+" ==>"+"[*]PORT IS CLOSED [*]") elif(Tcp_xmas_packet.haslayer(ICMP)): if(int(Tcp_xmas_packet.getlayer(ICMP).type) == 3 and int(Tcp_xmas_packet.getlayer(ICMP).code) in [1,2,3,9,10,13]): print(Fore.BLUE+" "*18+str(target_ip)+" "+str(port)+" ==>"+"[*]PORT IS FILTERED [*]") except: print(Fore.RED+" "*20+"[!] 
A TCP PROBLEM OCCURED [!]") sys.exit(0) def xmas_scan_option_default(self, target_ip): #Create source port src_port = RandShort() #Create open or filtered list open_or_filtered = [] #Create close port close_ports = [] #Create filtered_ports filtered_ports = [] int_ports_default = list(map(int, self.read_ports)) try: ## print(Fore.GREEN+" "*20+"*************** RESULT ***************") try: for dst_port in int_ports_default: Tcp_xmas_packet = sr1(IP(dst=target_ip)/TCP(dport=dst_port, flags="FPU"), verbose=0, timeout=10) if (str(type(Tcp_xmas_packet)) == "<type 'NoneType'>"): open_or_filtered.append(str(dst_port)) elif(Tcp_xmas_packet.getlayer(TCP).flags == 0x14): close_ports.append(str(dst_port)) elif(Tcp_xmas_packet.haslayer(ICMP)): if(int(Tcp_xmas_packet.getlayer(ICMP).type) == 3 and int(Tcp_xmas_packet.getlayer(ICMP).code) in [1,2,3,9,10,13]): filtered_ports.append(str(dst_port)) except: pass if open_or_filtered != None: for filter_op in open_or_filtered: print(Fore.BLUE+" "*18+str(target_ip)+" "+str(filter_op)+" ==>"+"[*] PORT IS OPEN|FILTERED [*]") print('') else: if close_ports != None: for close_port in close_ports: print(Fore.BLUE+" "*18+str(target_ip)+" "+str(close_port)+" ==>"+"[*] PORT IS CLOSED [*]") print('') if filtered_ports != None: for filter_port in filtered_ports: print(Fore.BLUE+" "*18+str(target_ip)+" "+str(filter_port)+" ==>"+"[*] PORT IS FILTERED [*]") except: print(Fore.RED+" "*20+"[!] 
A TCP PROBLEM OCCURED [!]") sys.exit(0) def fin_scan_option(self, target_ip, port): #Create source port src_port = RandShort() print(Fore.GREEN+" "*20+"*************** RESULT ***************") try: Tcp_fin_packet = sr1(IP(dst=target_ip)/TCP(dport=port,flags="F"),verbose=0, timeout=10) if(str(type(Tcp_fin_packet)) == "<type 'NoneType'>"): print(Fore.BLUE+" "*18+str(target_ip)+" "+str(port)+" ==>"+"[*]PORT IS OPEN|FILTERED [*]") elif(Tcp_fin_packet.haslayer(TCP)): if(Tcp_fin_packet.getlayer(TCP).flags == 0x4): print(Fore.BLUE+" "*18+str(target_ip)+" "+str(port)+" ==>"+"[*]PORT IS CLOSED [*]") elif(Tcp_fin_packet.haslayer(ICMP)): if(int(Tcp_fin_packet.getlayer(ICMP).type) == 3 and int(Tcp_fin_packet.getlayer(ICMP).code) in [1,2,3,9,10,13]): print(Fore.BLUE+" "*18+str(target_ip)+" "+str(port)+" ==>"+"[*]PORT IS FILTERED [*]") else: print(Fore.BLUE+" "*18+str(target_ip)+" "+str(port)+" ==>"+" [*] PORT IS CLOSED OR NOT RESPONSE [*]") except: print(Fore.RED+" "*20+"[!] A TCP PROBLEM OCCURED [!]") sys.exit(0) def fin_scan_option_default(self, target_ip): #Create source port src_port = RandShort() #Create open or filtered list open_or_filtered = [] #Create close port list close_ports = [] #Create filtered port list filtered_ports = [] #Create int ports list int_ports_default = [] int_ports_default = list(map(int, self.read_ports)) try: print(Fore.GREEN+" "*20+"*************** RESULT ***************") for dst_port in int_ports_default: try: Tcp_fin_packet = sr1(IP(dst=target_ip)/TCP(dport=dst_port,flags="F"),verbose=0, timeout=10) if(str(type(Tcp_fin_packet)) == "<type 'NoneType'>"): open_or_filtered.append(str(dst_port)) elif(Tcp_fin_packet.haslayer(TCP)): if(Tcp_fin_packet.getlayer(TCP).flags == 0x4): close_ports.append(str(dst_port)) elif(Tcp_fin_packet.haslayer(ICMP)): if(int(Tcp_fin_packet.getlayer(ICMP).type) == 3 and int(Tcp_fin_packet.getlayer(ICMP).code) in [1,2,3,9,10,13]): filtered_ports.append(str(dst_port)) except: pass if open_or_filtered != None: for filter_op 
in open_or_filtered: print(Fore.BLUE+" "*18+str(target_ip)+" "+str(filter_op)+" ==>"+"[*] PORT IS OPEN|FILTERED [*]") if close_ports != None: for close_port in close_ports: print(Fore.BLUE+" "*18+str(target_ip)+" "+str(close_port)+" ==>"+"[*] PORT IS CLOSED [*]") if filtered_ports != None: for filter_port in filtered_ports: print(Fore.BLUE+" "*18+str(target_ip)+" "+str(filter_port)+" ==>"+"[*] PORT IS FILTERED [*]") except: print(Fore.RED+" "*20+"[!] A TCP PROBLEM OCCURED [!]") sys.exit(0) def connect_scan_option(self, target_ip, port): #Create source port src_port = RandShort() print(Fore.GREEN+" "*20+"*************** RESULT ***************") try: Tcp_packet = sr1(IP(dst=target_ip)/TCP(sport=src_port, dport=port), verbose=0, timeout=10) if(Tcp_packet.getlayer(TCP).flags == 0x12): #create
# <gh_stars>1-10
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

import worker
import lock
import logging
import logging.config
import rpc
import optparse
import scheduler
import warnings
import configuration
import task
import parameter
import re
import argparse
import sys
import os
from task import Register


def setup_interface_logging(conf_file=None):
    """Configure the 'luigi-interface' logger exactly once per process.

    A flag stored on the function object guards against repeat configuration.
    With no config file, a DEBUG-level stream handler is attached; otherwise
    the file is handed to logging.config.fileConfig.
    """
    if getattr(setup_interface_logging, "has_run", False):
        return  # already configured in this process

    if conf_file is None:
        interface_logger = logging.getLogger('luigi-interface')
        interface_logger.setLevel(logging.DEBUG)

        handler = logging.StreamHandler()
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
        interface_logger.addHandler(handler)
    else:
        logging.config.fileConfig(conf_file, disable_existing_loggers=False)

    setup_interface_logging.has_run = True


def get_config():
    """Deprecated shim: forwards to luigi.configuration.get_config()."""
    warnings.warn('Use luigi.configuration.get_config() instead')
    return configuration.get_config()


class EnvironmentParamsContainer(task.Task):
    """Tracks the global 'environment' settings via the luigi parameter machinery.

    Instantiating this class yields an object carrying every environment
    variable as an attribute.  (Arguably a bit of a hack, per the original.)
    """

    local_scheduler = parameter.BooleanParameter(
        is_global=True, default=False,
        description='Use local scheduling')
    scheduler_host = parameter.Parameter(
        is_global=True, default=None,
        description='Hostname of machine running remote scheduler')
    scheduler_port = parameter.IntParameter(
        is_global=True, default=None,
        description='Port of remote scheduler api process')
    lock = parameter.BooleanParameter(
        is_global=True, default=False,
        description='(Deprecated, replaced by no_lock)'
        'Do not run if similar process is already running')
    lock_size = parameter.IntParameter(
        is_global=True, default=1,
        description="Maximum number of workers running the same command")
    no_lock = parameter.BooleanParameter(
        is_global=True, default=False,
        description='Ignore if similar process is already running')
    lock_pid_dir = parameter.Parameter(
        is_global=True, default='/var/tmp/luigi',
        description='Directory to store the pid file')
    workers = parameter.IntParameter(
        is_global=True, default=1,
        description='Maximum number of parallel tasks to run')
    logging_conf_file = parameter.Parameter(
        is_global=True, default=None,
        description='Configuration file for logging')
    # consumed by DynamicArgParseInterface for dynamic module loading
    module = parameter.Parameter(
        is_global=True, default=None,
        description='Used for dynamic loading of modules')

    @classmethod
    def apply_config_defaults(cls):
        """Seed selected parameter defaults from the [core] config section."""
        cls.scheduler_host.set_default(
            configuration.get_config().get(
                'core', 'default-scheduler-host', 'localhost'))
        cls.scheduler_port.set_default(
            configuration.get_config().get(
                'core', 'default-scheduler-port', 8082))
        cls.logging_conf_file.set_default(
            configuration.get_config().get(
                'core', 'logging_conf_file', None))

    @classmethod
    def env_params(cls, override_defaults):
        """Return an instance reflecting config defaults plus overrides."""
        cls.apply_config_defaults()
        # Any global parameter named in override_defaults wins over the config.
        for param_name, param_obj in cls.get_global_params():
            if param_name in override_defaults:
                param_obj.set_default(override_defaults[param_name])
        return cls()
# instantiate an object with the global params set on it
# (comment above continues a line cut at the chunk boundary)


def expose(cls):
    """Deprecated no-op: every task is auto-exposed now."""
    warnings.warn('expose is no longer used, everything is autoexposed', DeprecationWarning)
    return cls


def expose_main(cls):
    """Deprecated no-op: use luigi.run(..., main_task_cls=cls) instead."""
    warnings.warn('expose_main is no longer supported, use luigi.run(..., main_task_cls=cls) instead', DeprecationWarning)
    return cls


def reset():
    """Deprecated no-op."""
    warnings.warn('reset is no longer supported')


class WorkerSchedulerFactory(object):
    """Creates the scheduler and worker objects used by Interface.run."""

    def create_local_scheduler(self):
        return scheduler.CentralPlannerScheduler()

    def create_remote_scheduler(self, host, port):
        return rpc.RemoteScheduler(host=host, port=port)

    def create_worker(self, scheduler, worker_processes):
        return worker.Worker(
            scheduler=scheduler, worker_processes=worker_processes)


class Interface(object):
    """Base class for the command-line interfaces; subclasses implement parse()."""

    def parse(self):
        raise NotImplementedError

    @staticmethod
    def run(tasks, worker_scheduler_factory=None, override_defaults=None):
        """Configure logging/locking, schedule *tasks*, and run a worker.

        override_defaults maps global parameter names to values overriding the
        configured defaults.  Exits the process if the run lock is held.
        """
        # FIX: mutable default argument ({}) replaced with a None sentinel.
        if override_defaults is None:
            override_defaults = {}
        if worker_scheduler_factory is None:
            worker_scheduler_factory = WorkerSchedulerFactory()
        env_params = EnvironmentParamsContainer.env_params(override_defaults)

        # search for logging configuration path first on the command line, then
        # in the application config file
        logging_conf = env_params.logging_conf_file
        if logging_conf is not None and not os.path.exists(logging_conf):
            raise Exception(
                "Error: Unable to locate specified logging configuration file!"
            )

        if not configuration.get_config().getboolean(
                'core', 'no_configure_logging', False):
            setup_interface_logging(logging_conf)

        if env_params.lock:
            warnings.warn(
                "The --lock flag is deprecated and will be removed."
                "Locking is now the default behavior."
                "Use --no-lock to override to not use lock",
                DeprecationWarning
            )

        # Locking is the default; bail out if another identical process runs.
        if (not env_params.no_lock and
                not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size))):
            sys.exit(1)

        if env_params.local_scheduler:
            sch = worker_scheduler_factory.create_local_scheduler()
        else:
            sch = worker_scheduler_factory.create_remote_scheduler(
                host=env_params.scheduler_host,
                port=env_params.scheduler_port)

        w = worker_scheduler_factory.create_worker(
            scheduler=sch, worker_processes=env_params.workers)

        for t in tasks:
            w.add(t)
        logger = logging.getLogger('luigi-interface')
        logger.info('Done scheduling tasks')
        w.run()
        w.stop()


class ErrorWrappedArgumentParser(argparse.ArgumentParser):
    ''' Wraps ArgumentParser's error message to suggested similar tasks
    '''

    # Simple unweighted Levenshtein distance
    def _editdistance(self, a, b):
        r0 = range(0, len(b) + 1)
        r1 = [0] * (len(b) + 1)
        for i in range(0, len(a)):
            r1[0] = i + 1
            for j in range(0, len(b)):
                # FIX: compare characters with == rather than "is" — identity
                # of one-char strings is a CPython interning accident.
                c = 0 if a[i] == b[j] else 1
                r1[j + 1] = min(r1[j] + 1, r0[j + 1] + 1, r0[j] + c)
            r0 = r1[:]
        return r1[len(b)]

    def error(self, message):
        # FIX: raw string for the regex (avoids relying on '\w' passing through
        # as a literal backslash escape in a normal string).
        result = re.match(r"argument .+: invalid choice: '(\w+)'.+", message)
        if result:
            arg = result.group(1)
            # Rank all registered task names by edit distance to the bad arg.
            weightedTasks = [(self._editdistance(arg, task), task)
                             for task in Register.get_reg().keys()]
            orderedTasks = sorted(weightedTasks, key=lambda pair: pair[0])
            candidates = [task for (dist, task) in orderedTasks
                          if dist <= 5 and dist < len(task)]
            displaystring = ""
            if candidates:
                displaystring = "No task %s. Did you mean:\n%s" % (arg, '\n'.join(candidates))
            else:
                displaystring = "No task %s." % arg
            super(ErrorWrappedArgumentParser, self).error(displaystring)
        else:
            super(ErrorWrappedArgumentParser, self).error(message)


class ArgParseInterface(Interface):
    ''' Takes the task as the command, with parameters specific to it
    '''

    @classmethod
    def add_parameter(cls, parser, param_name, param, prefix=None):
        """Register one luigi parameter as an argparse --option."""
        description = []
        if prefix:
            description.append('%s.%s' % (prefix, param_name))
        else:
            description.append(param_name)
        if param.description:
            description.append(param.description)
        if param.has_default:
            description.append(" [default: %s]" % (param.default,))

        if param.is_list:
            action = "append"
        elif param.is_boolean:
            action = "store_true"
        else:
            action = "store"
        parser.add_argument('--' + param_name.replace('_', '-'),
                            help=' '.join(description),
                            default=None,
                            action=action)

    @classmethod
    def add_task_parameters(cls, parser, task_cls):
        for param_name, param in task_cls.get_nonglobal_params():
            cls.add_parameter(parser, param_name, param, task_cls.task_family)

    @classmethod
    def add_global_parameters(cls, parser):
        for param_name, param in Register.get_global_params():
            cls.add_parameter(parser, param_name, param)

    def parse_task(self, cmdline_args=None, main_task_cls=None):
        """Parse cmdline_args and return a one-element list with the task."""
        parser = ErrorWrappedArgumentParser()
        self.add_global_parameters(parser)

        if main_task_cls:
            self.add_task_parameters(parser, main_task_cls)
        else:
            orderedtasks = '{%s}' % ','.join(sorted(Register.get_reg().keys()))
            subparsers = parser.add_subparsers(dest='command', metavar=orderedtasks)

            # FIX: dict.iteritems() is Python-2-only; .items() is equivalent there.
            for name, cls in Register.get_reg().items():
                subparser = subparsers.add_parser(name)
                if cls == Register.AMBIGUOUS_CLASS:
                    continue
                self.add_task_parameters(subparser, cls)

                # Add global params here as well so that we can support both:
                # test.py --global-param xyz Test --n 42
                # test.py Test --n 42 --global-param xyz
                self.add_global_parameters(subparser)

        args = parser.parse_args(args=cmdline_args)
        params = vars(args)  # convert to a str -> str hash

        if main_task_cls:
            task_cls = main_task_cls
        else:
            task_cls = Register.get_reg()[args.command]
            if task_cls == Register.AMBIGUOUS_CLASS:
                # FIX: typo "ambigiuous" -> "ambiguous" (matches OptParseInterface)
                raise Exception('%s is ambiguous' % args.command)

        # Notice that this is not side effect free because it might set global params
        task = task_cls.from_input(params, Register.get_global_params())
        return [task]

    def parse(self, cmdline_args=None, main_task_cls=None):
        return self.parse_task(cmdline_args, main_task_cls)


class DynamicArgParseInterface(ArgParseInterface):
    ''' Uses --module as a way to load modules dynamically

    Usage:
    python whatever.py --module foo_module FooTask --blah xyz --x 123

    This will dynamically import foo_module and then try to create FooTask
    from this.
    '''

    def parse(self, cmdline_args=None, main_task_cls=None):
        parser = ErrorWrappedArgumentParser()
        self.add_global_parameters(parser)

        args, unknown = parser.parse_known_args(args=cmdline_args)
        module = args.module
        __import__(module)  # side effect: registers the module's task classes

        return self.parse_task(cmdline_args, main_task_cls)


class PassThroughOptionParser(optparse.OptionParser):
    ''' An unknown option pass-through implementation of OptionParser.

    When unknown arguments are encountered, bundle with largs and try again,
    until rargs is depleted.

    sys.exit(status) will still be called if a known argument is passed
    incorrectly (e.g. missing arguments or bad argument types, etc.)
    '''

    def _process_args(self, largs, rargs, values):
        while rargs:
            try:
                optparse.OptionParser._process_args(self, largs, rargs, values)
            # FIX: Python-2-only "except X, e" syntax modernized to "as e"
            # (valid on Python 2.6+ and required on Python 3).
            except (optparse.BadOptionError, optparse.AmbiguousOptionError) as e:
                largs.append(e.opt_str)


class OptParseInterface(Interface):
    ''' Supported for legacy reasons where it's necessary to interact with an
    existing parser.

    Takes the task using --task. All parameters to all possible tasks will be
    defined globally in a big unordered soup.
    '''

    def __init__(self, existing_optparse):
        self.__existing_optparse = existing_optparse

    def parse(self, cmdline_args=None, main_task_cls=None):
        """Parse cmdline_args with optparse and return the selected task."""
        global_params = list(Register.get_global_params())

        parser = PassThroughOptionParser()
        tasks_str = '/'.join(sorted([name for name in Register.get_reg()]))

        def add_task_option(p):
            # --task selects the task; with main_task_cls it becomes the default
            if main_task_cls:
                p.add_option('--task', help='Task to run (' + tasks_str + ') [default: %default]',
                             default=main_task_cls.task_family)
            else:
                p.add_option('--task', help='Task to run (%s)' % tasks_str)

        def _add_parameter(parser, param_name, param):
            description = [param_name]
            if param.description:
                description.append(param.description)
            if param.has_default:
                description.append(" [default: %s]" % (param.default,))
            if param.is_list:
                action = "append"
            elif param.is_boolean:
                action = "store_true"
            else:
                action = "store"
            parser.add_option('--' + param_name.replace('_', '-'),
                              help=' '.join(description),
                              default=None,
                              action=action)

        for param_name, param in global_params:
            _add_parameter(parser, param_name, param)
        add_task_option(parser)
        options, args = parser.parse_args(args=cmdline_args)

        task_cls_name = options.task
        if self.__existing_optparse:
            parser = self.__existing_optparse
        else:
            parser = optparse.OptionParser()
        add_task_option(parser)

        if task_cls_name not in Register.get_reg():
            raise Exception('Error: %s is not a valid tasks (must be %s)' %
                            (task_cls_name, tasks_str))

        # Register all parameters as a big mess
        task_cls = Register.get_reg()[task_cls_name]
        if task_cls == Register.AMBIGUOUS_CLASS:
            raise Exception('%s is ambiguous' % task_cls_name)

        params = task_cls.get_nonglobal_params()
        for param_name, param in global_params:
            _add_parameter(parser, param_name, param)
        for param_name, param in params:
            _add_parameter(parser, param_name, param)

        # Parse and run
        options, args = parser.parse_args(args=cmdline_args)

        params = {}
        # FIX: dict.iteritems() is Python-2-only; .items() behaves the same there.
        for k, v in vars(options).items():
            if k != 'task':
                params[k] = v

        task = task_cls.from_input(params, global_params)
        return [task]


class LuigiConfigParser(configuration.LuigiConfigParser):
    # NOTE(review): this class is cut off at the chunk boundary — the original
    # docstring begins "Deprecated class, use" and its remainder (and any body)
    # is not visible here; confirm against the full source.
    '''Deprecated class, use'''
[[1, 2, 3, 5], [1, 2, 3, 4]], [[1, 3], [1, 2]], [[1, 2, 3, 5], [5, 3, 1, 2]]) outputs = ([2], [2], [0], [0], [4]) question_list.append(Question(readme, inputs, outputs, index=[0, 2], id='wrong position')) readme = 'Given a list of unique numbers, write a function to return ' \ 'the position \nindex of the smallest number. For example, if ' \ 'the given list is [3, 1, 5, 0, \n2, 6], then the returned index ' \ 'is 3, because the smallest number 0 appears \nat position 3.' inputs = ([[0, 1, 3, 2, 5]], [[3, 2, 5, 1, 4, 0]], [[1, 2, 0, 3, 5]], [[2, 1, 5, 6, 7, 3]], [[3, 2, 1, 5, 7]]) outputs = ([0], [5], [2], [1], [2]) question_list.append(Question(readme, inputs, outputs, index=[0, 1, 2], level='medium', id='minimum number index')) readme = 'Given a list of numbers as the first input argument, and an ' \ 'integer k as \nthe second argument, write a function that ' \ 'returns a list of the k smallest \nnumbers of the list. ' \ 'Items in the returned list is ranked from the smallest \nto ' \ 'the largest. For example, if the list is [0, 1, 2, 3, 2, 5], ' \ 'and the \ninteger is k=3, then the returned list is [0, 1, 2]. ' \ 'If k is larger than \nthe length of the list, then return the '\ 'whole list.' inputs = ([[0, 1, 2, 3, 2, 5], 3], [[0, 1, 2, 3, 2, 5], 1], [[0, 1, 2, 3, 2, 5], 8], [[8, 7, 6, 5, 5, 4, 3, 3, 2], 5], [[8, 7, 6, 5, 5, 4, 3, 3, 2], 3], [[1], 1], [[1], 5], [[1, 2], 0], [[], 2]) outputs = ([[0, 1, 2]], [[0]], [[0, 1, 2, 2, 3, 5]], [[2, 3, 3, 4, 5]], [[2, 3, 3]], [[1]], [[1]], [[]], [[]]) question_list.append(Question(readme, inputs, outputs, index=[0, 1, 2], level='medium', id='k smallest')) readme = 'Write a function to find the longest common prefix string ' \ 'amongst a list of \nstrings. For example, the input argument ' \ 'is ["flower", "flow", "flight"], \nthe output is "fl". If ' \ 'there is no common prefix among the input strings, \nthe ' \ 'output is an empty string.' 
inputs = ([['flower', 'flow', 'flight']], [['coldplay', 'cold storage', 'cold', 'cold war']], [['dog', 'racecar', 'car']], [['flower', 'flow', 'flight', 'inflow']]) outputs = (['fl'], ['cold'], [''], ['']) question_list.append(Question(readme, inputs, outputs, level='hard', index=[0, 3], id='common prefix')) readme = 'Write a function to find the Nth number in the Fibonacci ' \ 'Sequence, In the \nFibonacci Sequence, the next number is the ' \ 'sum of the two numbers before it, \ni.e. 0, 1, 1, 2, 3, 5, 8, ' \ '13, 21, 34, ... For example, if N=0, then the \noutput is 0; if ' \ 'N=1 or N=2, then the output is 1; if N=3, then the output is \n' \ '2; if N=4, then the output is 3.' inputs = ([0], [1], [2], [3], [4], [15], [20], [50], [80], [100], [200]) outputs = ([0], [1], [1], [2], [3], [610], [6765], [12586269025], [23416728348467685], [354224848179261915075], [280571172992510140037611932413038677189525]) question_list.append(Question(readme, inputs, outputs, level='medium', index=[4, 6, 9], id='fibonacci')) readme = 'Create a function to transform the time data as a string of ' \ '"XX:XX:XXam" or \n"XX:XX:XXpm" to the number of seconds counted ' \ 'from 12:00am. For example, the \ninput of "10:35:29am" gives an ' \ 'output of 38129; the input of 06:21:33pm gives \nan output of ' \ '66093.' inputs = (['10:35:29am'], ['06:21:33pm'], ['03:11:12am'], ['09:45:01pm']) outputs = ([38129], [66093], [11472], [78301]) question_list.append(Question(readme, inputs, outputs, index=[0, 1], id='Time conversiont')) readme = 'Reverse the digits of an integer. For example, if the input ' \ 'is 123, then the \noutput is 321; if the input is -456, then ' \ 'the output is -654.' 
# --- "reverse digits" test data (readme assigned just above this fragment) ---
inputs = ([123], [-456], [392], [-14567], [-2], [1])
outputs = ([321], [-654], [293], [-76541], [-2], [1])
question_list.append(Question(readme, inputs, outputs, index=[0, 1],
                              id='reverse digits'))

# --- remove duplicates, preserving first-seen order ---
readme = ('Create a function to remove all duplicates in a list, so that '
          'the output is a \nlist containing the unique values of the '
          'original list. For example, if the \ninput list is [2, 3, 5, '
          '2, 3, 4, 7], then the output must be [2, 3, 5, 4, 7].')
inputs = ([[2, 3, 5, 2, 3, 4, 7]],
          [[3.5, '4', '4', True, False, False]],
          [[1.2, 3.5, 3.5, 2.4, 3.5, True, False, 0.01, 3.5, False]])
outputs = ([[2, 3, 5, 4, 7]],
           [[3.5, '4', True, False]],
           [[1.2, 3.5, 2.4, True, False, 0.01]])
question_list.append(Question(readme, inputs, outputs, index=[0, 1],
                              id='unique', compset=True))

# --- longest word in a string (first on ties) ---
readme = ('Create a function with the input argument to be a string. The '
          'output is the \nlongest word in the string. For example, the '
          'input is a string "NUS Business \nSchool is a magical place", '
          'then the output is the string "Business". If\nthere are two or '
          'more words with the same maximum length, then return the \n'
          'first one.')
inputs = (['NUS Business School is a magical place'],
          ["Take a sad song and make it better"],
          ['We are the champions my friend'],
          ['Manners maketh man'],
          ['All models are wrong but some are useful'],
          ['Simple is better than complex'])
outputs = (['Business'], ['better'], ['champions'], ['Manners'],
           ['models'], ['complex'])
question_list.append(Question(readme, inputs, outputs, level='medium',
                              index=[0, 5], id='longest word'))

# --- length of the last word in a string ---
readme = ('Create a function to return the length of the last word in a '
          'string. For \nexample, if the input is "NUS Business School is '
          'a magical place", then the \noutput is 5, the length of the '
          'last word "place". ')
inputs = (['NUS Business School is a magical place'],
          ["Take a sad song and make it better"],
          ['We are the champions my friend'],
          ['Manners maketh man'],
          ['All models are wrong but some are useful'],
          ['Simple is better than complex'])
outputs = ([5], [6], [6], [3], [6], [7])
question_list.append(Question(readme, inputs, outputs, index=[0, 1],
                              id='last word'))

# --- numeric scores to letter grades ---
readme = ('Create a function to convert a list of scores to grades. '
          'Grade "A" accounts \nfor scores no lower than 90; grade "B" '
          'accounts for scores between 80 to 89; \nand grade "C" accounts '
          'for scores between 70 and 79; scores lower than 70 \nare '
          'recorded as "D". For example, if the input is [85.5, 92, 45, '
          '74, 79], \nthen the output is ["B", "A", "D", "C", "C"]. ')
inputs = ([[85.5, 92, 45, 74, 79]],
          [[25, 26, 55, 70, 80, 99]],
          [[100, 95, 85]])
outputs = ([['B', 'A', 'D', 'C', 'C']],
           [['D', 'D', 'D', 'C', 'B', 'A']],
           [['A', 'A', 'B']])
question_list.append(Question(readme, inputs, outputs, index=[0], id='grade'))

# --- proportion of matching answers between two bubble cards ---
readme = ('Create a function to grade bubble cards. The first input '
          'argument is list of \nanswers to be graded, and the second '
          'input argument is a list of correct \nanswers. The function '
          'compares these two lists, and returns the proportion \nof '
          'answers to be correct. For example, if the inputs are '
          '["A", "C", "B", "D", \n"A", "D"] and ["A", "B", "C", "D", "D", '
          '"D"], then the output is 0.5 because \nhalf of the values '
          'are the same in these two lists.')
inputs = ([["A", "C", "B", "D", "A", "D"], ["A", "B", "C", "D", "D", "D"]],
          [["A", "B", "C", "D", "D", "D"], ["A", "B", "C", "D", "D", "D"]],
          [["C", "B", "B", "C", "B", "D"], ["C", "A", "B", "D", "B", "D"]],
          [["C", "B", "A"], ["A", "C", "B"]])
outputs = ([3/6], [6/6], [4/6], [0])
question_list.append(Question(readme, inputs, outputs, index=[0, 1],
                              id='bubble card'))

# NOTE(review): the source chunk is truncated mid-statement here. The original
# continued with the next question's readme, beginning:
#   readme = 'For a sequence of numbers in a list, create a new list ' \
#            'containing the \nsquares of all non-negative numbers, and cubes...
# The remainder (and its inputs/outputs/append) is not recoverable from this
# chunk — restore it from the upstream file.
% self.map['usUnits']['field_name'] raise WeeImportFieldError(_msg) # we have a value but is it valid if _raw_units in unit_nicknames: # it is valid so use it _units = _raw_units else: # the units value is not valid so raise an error _msg = "Invalid unit system '%s'(0x%02x) mapped from data source. " \ "Check data source or field mapping." % (_raw_units, _raw_units) raise weewx.UnitError(_msg) # interval if 'field_name' in self.map['interval']: # We have a map for interval so try to get the raw data. If # its not there then raise an error. try: _tfield = _row[self.map['interval']['field_name']] except KeyError: _msg = "Field '%s' not found in source data." % self.map['interval']['field_name'] raise WeeImportFieldError(_msg) # now process the raw interval data if _tfield is not None and _tfield != '': try: interval = int(_tfield) except ValueError: _msg = "Invalid '%s' field. Cannot convert '%s' to " \ "an integer." % (self.map['interval']['field_name'], _tfield) raise ValueError(_msg) else: # if it happens to be None then raise an error _msg = "Invalid value '%s' for mapped field '%s' at " \ "timestamp '%s'." % (_tfield, self.map['interval']['field_name'], timestamp_to_string(_rec['dateTime'])) raise ValueError(_msg) else: # we have no mapping so try to calculate it interval = self.getInterval(_last_ts, _rec['dateTime']) _rec['interval'] = interval # now step through the rest of the fields in our map and process # the fields that don't require special processing for _field in self.map: # skip those that have had special processing if _field in MINIMUM_MAP: continue # process everything else else: # is our mapped field in the record if self.map[_field]['field_name'] in _row: # Yes it is. 
Try to get a value for the obs but if we # can't catch the error try: _temp = float(_row[self.map[_field]['field_name']].strip()) except AttributeError: # the data has no strip() attribute so chances are # it's a number already if isinstance(_row[self.map[_field]['field_name']], numbers.Number): _temp = _row[self.map[_field]['field_name']] elif _row[self.map[_field]['field_name']] is None: _temp = None else: # it's not a string and its not a number so raise an error _msg = "%s: cannot convert '%s' to float at " \ "timestamp '%s'." % (_field, _row[self.map[_field]['field_name']], timestamp_to_string(_rec['dateTime'])) raise ValueError(_msg) except TypeError: # perhaps we have a None, so return None for our field _temp = None except ValueError: # most likely have non-numeric, non-None data # if this is a csv import and we are mapping to a # direction field perhaps we have a string # representation of a cardinal, intercardinal or # secondary intercardinal direction that we can # convert to degrees # set a flag to indicate whether we matched the data to a value matched = False if hasattr(self, 'wind_dir_map') and self.map[_field]['units'] == 'degree_compass': # we have a csv import and we are mapping to a # direction field # first strip any whitespace and hyphens from # the data _stripped = re.sub(r'[\s-]+', '', _row[self.map[_field]['field_name']]) # try to use the data as the key in a dict # mapping directions to degrees, if there is no # match we will have None returned dir_degrees = self.wind_dir_map.get(_stripped.upper()) # if we have a non-None value use it if dir_degrees is not None: _temp = dir_degrees # we have a match so set our flag matched = True # if we did not get a match perhaps we can ignore # the invalid data, that will depend on the # ignore_invalid_data property if not matched and self.ignore_invalid_data: # we ignore the invalid data so set our result # to None _temp = None # set our matched flag matched = True # if we did not find a match raise the 
error if not matched: _msg = "%s: cannot convert '%s' to float at " \ "timestamp '%s'." % (_field, _row[self.map[_field]['field_name']], timestamp_to_string(_rec['dateTime'])) raise ValueError(_msg) # some fields need some special processing # rain - if our imported 'rain' field is cumulative # (self.rain == 'cumulative') then we need to calculate # the discrete rainfall for this archive period if _field == "rain" and self.rain == "cumulative": _rain = self.getRain(_last_rain, _temp) _last_rain = _temp _temp = _rain # wind - check any wind direction fields are within our # bounds and convert to 0 to 360 range if _field == "windDir" or _field == "windGustDir": if _temp is not None and (self.wind_dir[0] <= _temp <= self.wind_dir[1]): # normalise to 0 to 360 _temp %= 360 else: # outside our bounds so set to None _temp = None # UV - if there was no UV sensor used to create the # imported data then we need to set the imported value # to None if _field == 'UV' and not self.UV_sensor: _temp = None # solar radiation - if there was no solar radiation # sensor used to create the imported data then we need # to set the imported value to None if _field == 'radiation' and not self.solar_sensor: _temp = None # check and ignore if required temperature and humidity # values of 255.0 and greater if self.ignore_extr_th \ and self.map[_field]['units'] in ['degree_C', 'degree_F', 'percent'] \ and _temp >= 255.0: _temp = None # if no mapped field for a unit system we have to do # field by field unit conversions if _units is None: _temp_vt = ValueTuple(_temp, self.map[_field]['units'], weewx.units.obs_group_dict[_field]) _conv_vt = convertStd(_temp_vt, unit_sys) _rec[_field] = _conv_vt.value else: # we do have a mapped field for a unit system so # save the field in our record and continue, any # unit conversion will be done in bulk later _rec[_field] = _temp else: # No it's not. 
Set the field in our output to None _rec[_field] = None # now warn the user about this field if we have not # already done so if self.map[_field]['field_name'] not in _warned: _msg = "Warning: Import field '%s' is mapped to WeeWX " \ "field '%s' but the" % (self.map[_field]['field_name'], _field) if not self.suppress: print(_msg) log.info(_msg) _msg = " import field '%s' could not be found " \ "in one or more records." % self.map[_field]['field_name'] if not self.suppress: print(_msg) log.info(_msg) _msg = " WeeWX field '%s' will be set to 'None' in these records." % _field if not self.suppress: print(_msg) log.info(_msg) # make sure we do this warning once only _warned.append(self.map[_field]['field_name']) # if we have a mapped field for a unit system with a valid value, # then all we need do is set 'usUnits', bulk conversion is taken # care of by saveToArchive() if _units is not None: # we have a mapped field for a unit system with a valid value _rec['usUnits'] = _units else: # no mapped field for unit system but we have already converted # any necessary fields on a field by field basis so all we need # do is set 'usUnits', any bulk conversion will be taken care of # by saveToArchive() _rec['usUnits'] = unit_sys # If interval is being derived from record timestamps our first # record will have an interval of None. In this case we wait until # we have the second record and then we use the interval between # records 1 and 2 as the interval for record 1. 
if len(_records) == 1 and _records[0]['interval'] is None: _records[0]['interval'] = _rec['interval'] _last_ts = _rec['dateTime'] # this record is done, add it to our list of records to return _records.append(_rec) # If we have more than 1 unique value for interval in our records it # could be a sign of missing data and impact the integrity of our data, # so do the check and see if the user wants to continue if len(_records) > 0: # if we have any records to return do the unique interval check # before we return the records _start_interval = _records[0]['interval'] _diff_interval = False for _rec in _records: if _rec['interval'] != _start_interval: _diff_interval = True break
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# Vulkan CTS
# ----------
#
# Copyright (c) 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------

import os
import re
import sys
import copy
from itertools import chain
from collections import OrderedDict

sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "..", "scripts"))

from build.common import DEQP_DIR
from khr_util.format import indentLines, writeInlFile

# Input Vulkan header template and output directory for generated .inl files.
VULKAN_H = os.path.join(os.path.dirname(__file__), "src", "vulkan.h.in")
VULKAN_DIR = os.path.join(os.path.dirname(__file__), "..", "framework", "vulkan")

INL_HEADER = """\
/* WARNING: This is auto-generated file. Do not modify, since changes will
 * be lost! Modify the generating script instead.
 */\
"""

# (name, C++ type) pairs for Vulkan API constants emitted into the framework.
DEFINITIONS = [
    ("VK_API_VERSION_1_0",               "deUint32"),
    ("VK_API_VERSION_1_1",               "deUint32"),
    ("VK_MAX_PHYSICAL_DEVICE_NAME_SIZE", "size_t"),
    ("VK_MAX_EXTENSION_NAME_SIZE",       "size_t"),
    ("VK_MAX_DRIVER_NAME_SIZE_KHR",      "size_t"),
    ("VK_MAX_DRIVER_INFO_SIZE_KHR",      "size_t"),
    ("VK_UUID_SIZE",                     "size_t"),
    ("VK_LUID_SIZE",                     "size_t"),
    ("VK_MAX_MEMORY_TYPES",              "size_t"),
    ("VK_MAX_MEMORY_HEAPS",              "size_t"),
    ("VK_MAX_DESCRIPTION_SIZE",          "size_t"),
    ("VK_MAX_DEVICE_GROUP_SIZE",         "size_t"),
    ("VK_ATTACHMENT_UNUSED",             "deUint32"),
    ("VK_SUBPASS_EXTERNAL",              "deUint32"),
    ("VK_QUEUE_FAMILY_IGNORED",          "deUint32"),
    ("VK_QUEUE_FAMILY_EXTERNAL",         "deUint32"),
    ("VK_REMAINING_MIP_LEVELS",          "deUint32"),
    ("VK_REMAINING_ARRAY_LAYERS",        "deUint32"),
    ("VK_WHOLE_SIZE",                    "vk::VkDeviceSize"),
    ("VK_TRUE",                          "vk::VkBool32"),
    ("VK_FALSE",                         "vk::VkBool32"),
]

# Each entry: (platform type token sequence, replacement token sequence in the
# pt namespace, compatible C type).
PLATFORM_TYPES = [
    # VK_KHR_xlib_surface
    (["Display", "*"],                       ["XlibDisplayPtr"],             "void*"),
    (["Window"],                             ["XlibWindow"],                 "deUintptr"),
    (["VisualID"],                           ["XlibVisualID"],               "deUint32"),
    # VK_KHR_xcb_surface
    (["xcb_connection_t", "*"],              ["XcbConnectionPtr"],           "void*"),
    (["xcb_window_t"],                       ["XcbWindow"],                  "deUintptr"),
    (["xcb_visualid_t"],                     ["XcbVisualid"],                "deUint32"),
    # VK_KHR_wayland_surface
    (["struct", "wl_display", "*"],          ["WaylandDisplayPtr"],          "void*"),
    (["struct", "wl_surface", "*"],          ["WaylandSurfacePtr"],          "void*"),
    # VK_KHR_mir_surface
    (["MirConnection", "*"],                 ["MirConnectionPtr"],           "void*"),
    (["MirSurface", "*"],                    ["MirSurfacePtr"],              "void*"),
    # VK_KHR_android_surface
    (["ANativeWindow", "*"],                 ["AndroidNativeWindowPtr"],     "void*"),
    # VK_KHR_win32_surface
    (["HINSTANCE"],                          ["Win32InstanceHandle"],        "void*"),
    (["HWND"],                               ["Win32WindowHandle"],          "void*"),
    (["HANDLE"],                             ["Win32Handle"],                "void*"),
    (["const", "SECURITY_ATTRIBUTES", "*"],  ["Win32SecurityAttributesPtr"], "const void*"),
    (["AHardwareBuffer", "*"],               ["AndroidHardwareBufferPtr"],   "void*"),
    # VK_EXT_acquire_xlib_display
    (["RROutput"],                           ["RROutput"],                   "void*")
]

PLATFORM_TYPE_NAMESPACE = "pt"

# Textual C-type substitutions applied to every parsed variable type.
TYPE_SUBSTITUTIONS = [
    ("uint8_t",  "deUint8"),
    ("uint16_t", "deUint16"),
    ("uint32_t", "deUint32"),
    ("uint64_t", "deUint64"),
    ("int8_t",   "deInt8"),
    ("int16_t",  "deInt16"),
    ("int32_t",  "deInt32"),
    ("int64_t",  "deInt64"),
    ("bool32_t", "deUint32"),
    ("size_t",   "deUintptr"),

    # Platform-specific
    ("DWORD",    "deUint32"),
    ("HANDLE*",  PLATFORM_TYPE_NAMESPACE + "::" + "Win32Handle*"),
    ("LPCWSTR",  "char*"),
]

EXTENSION_POSTFIXES = ["KHR", "EXT", "NV", "NVX", "KHX", "NN", "MVK"]
EXTENSION_POSTFIXES_STANDARD = ["KHR"]

def prefixName (prefix, name):
    """Convert a CamelCase Vulkan name (minus its 'Vk' prefix) to an
    UPPER_SNAKE_CASE identifier with the given prefix, applying the special
    spellings the Vulkan headers use (WIN32, YCBCR, D3D12, ...)."""
    name = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', name[2:])
    name = re.sub(r'([a-zA-Z])([0-9])', r'\1_\2', name)
    name = name.upper()
    name = name.replace("YCB_CR_", "YCBCR_")
    name = name.replace("WIN_32_", "WIN32_")
    name = name.replace("8_BIT_", "8BIT_")
    name = name.replace("16_BIT_", "16BIT_")
    name = name.replace("INT_64_", "INT64_")
    name = name.replace("D_3_D_12_", "D3D12_")
    name = name.replace("IOSSURFACE_", "IOS_SURFACE_")
    name = name.replace("MAC_OS", "MACOS_")
    name = name.replace("TEXTURE_LOD", "TEXTURE_LOD_")
    name = name.replace("VIEWPORT_W", "VIEWPORT_W_")
    name = name.replace("_IDPROPERTIES", "_ID_PROPERTIES")
    name = name.replace("PHYSICAL_DEVICE_FLOAT_16_INT_8_FEATURES", "PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES")
    name = name.replace("_PCIBUS_", "_PCI_BUS_")
    return prefix + name

class Version:
    """A (major, minor, patch) Vulkan API version."""
    def __init__ (self, versionTuple):
        self.major = versionTuple[0]
        self.minor = versionTuple[1]
        self.patch = versionTuple[2]

    def getInHex (self):
        """Return the VK_API_VERSION_* macro for standard versions, otherwise
        the packed 22/10/12-bit hex literal."""
        if self.major == 1 and self.minor == 0 and self.patch == 0:
            return "VK_API_VERSION_1_0"
        elif self.major == 1 and self.minor == 1 and self.patch == 0:
            return "VK_API_VERSION_1_1"
        else:
            hex = (self.major << 22) | (self.minor << 12) | self.patch
            return '0x%Xu' % (hex)

    def isStandardVersion (self):
        # Only 1.0.0 and 1.1.0 are "standard" for this generator.
        if self.patch != 0:
            return False
        if self.major != 1:
            return False
        if self.minor != 1 and self.minor != 0:
            return False
        return True

    def getBestRepresentation (self):
        if self.isStandardVersion():
            return self.getInHex()
        return self.getDefineName()

    def getDefineName (self):
        return 'VERSION_%d_%d_%d' % (self.major, self.minor, self.patch)

    def __hash__ (self):
        return (self.major << 22) | (self.minor << 12) | self.patch

    def __eq__ (self, other):
        return self.major == other.major and self.minor == other.minor and self.patch == other.patch

    def __str__ (self):
        return self.getBestRepresentation()

class Handle:
    """A dispatchable or non-dispatchable Vulkan handle type."""
    TYPE_DISP    = 0
    TYPE_NONDISP = 1

    def __init__ (self, type, name):
        self.type    = type
        self.name    = name
        self.alias   = None
        self.isAlias = False

    def getHandleType (self):
        return prefixName("HANDLE_TYPE_", self.name)

    def checkAliasValidity (self):
        # Handles carry no payload to validate against their alias.
        pass

    def __repr__ (self):
        return '%s (%s, %s)' % (self.name, self.alias, self.isAlias)

class Definition:
    """A named constant definition (name = value, with a C++ type)."""
    def __init__ (self, type, name, value):
        self.type    = type
        self.name    = name
        self.value   = value
        self.alias   = None
        self.isAlias = False

    def __repr__ (self):
        return '%s = %s (%s)' % (self.name, self.value, self.type)

class Enum:
    """A parsed C enum: name plus (valueName, valueExpr) pairs."""
    def __init__ (self, name, values):
        self.name    = name
        self.values  = values
        self.alias   = None
        self.isAlias = False

    def checkAliasValidity (self):
        """Verify this enum and its alias (if any) agree value-for-value."""
        if self.alias != None:
            if len(self.values) != len(self.alias.values):
                raise Exception("%s has different number of flags than its alias %s." % (self.name, self.alias.name))
            for index, value in enumerate(self.values):
                aliasVal = self.alias.values[index]
                # Names may differ only by an extension postfix, values must match.
                if value[1] != aliasVal[1] or not (value[0].startswith(aliasVal[0]) or aliasVal[0].startswith(value[0])):
                    raise Exception("Flag %s of %s has different value than %s of %s." % (self.alias.values[index], self.alias.name, value, self.name))

    def __repr__ (self):
        return '%s (%s) %s' % (self.name, self.alias, self.values)

class Bitfield:
    """A parsed Vulkan flag-bits type: name plus (flagName, flagValue) pairs."""
    def __init__ (self, name, values):
        self.name    = name
        self.values  = values
        self.alias   = None
        self.isAlias = False

    def checkAliasValidity (self):
        """Verify this bitfield and its alias (if any) agree flag-for-flag."""
        if self.alias != None:
            if len(self.values) != len(self.alias.values):
                raise Exception("%s has different number of flags than its alias %s." % (self.name, self.alias.name))
            for index, value in enumerate(self.values):
                aliasVal = self.alias.values[index]
                if value[1] != aliasVal[1] or not (value[0].startswith(aliasVal[0]) or aliasVal[0].startswith(value[0])):
                    raise Exception("Flag %s of %s has different value than %s of %s." % (self.alias.values[index], self.alias.name, value, self.name))

    def __repr__ (self):
        return '%s (%s)' % (self.name, self.alias)

class Variable:
    """A typed variable/struct member/argument, with C-type normalisation and
    platform-type substitution applied to its type token list."""
    def __init__ (self, type, name, arraySize):
        # Split '*'/'&' off into their own tokens, then normalise C types.
        type = type.replace('*',' *').replace('&',' &')
        for src, dst in TYPE_SUBSTITUTIONS:
            type = type.replace(src, dst)
        self.type = type.split(' ')
        for platformType, substitute, compat in PLATFORM_TYPES:
            range = self.contains(self.type, platformType)
            if range != None:
                self.type = self.type[:range[0]]+[PLATFORM_TYPE_NAMESPACE + '::' + substitute[0]] + substitute[1:] + self.type[range[1]:]
                break
        self.name      = name
        self.arraySize = arraySize

    def contains (self, big, small):
        """Return (start, end) of the first occurrence of token list `small`
        inside `big`, or None."""
        # NOTE(review): xrange is Python 2 only — confirm the target interpreter
        # before running this script under Python 3.
        for i in xrange(len(big)-len(small)+1):
            for j in xrange(len(small)):
                if big[i+j] != small[j]:
                    break
            else:
                return i, i+len(small)
        return None

    def getType (self):
        return ' '.join(self.type).replace(' *','*').replace(' &','&')

    def getAsString (self, separator):
        return '%s%s%s%s' % (self.getType(), separator, self.name, self.arraySize)

    def __repr__ (self):
        return '<%s> <%s> <%s>' % (self.type, self.name, self.arraySize)

    def __eq__ (self, other):
        if len(self.type) != len(other.type):
            return False
        for index, type in enumerate(self.type):
            if "*" == type or "&" == type or "const" == type or "volatile" == type:
                if type != other.type[index]:
                    return False
            # Type names may differ by a standard extension postfix (e.g. KHR).
            elif type != other.type[index] and \
                type not in map(lambda ext: other.type[index] + ext, EXTENSION_POSTFIXES_STANDARD) and \
                other.type[index] not in map(lambda ext: type + ext, EXTENSION_POSTFIXES_STANDARD):
                return False
        return self.arraySize == other.arraySize

    def __ne__ (self, other):
        return not self == other

class CompositeType:
    """A parsed C struct or union with its member Variables."""
    CLASS_STRUCT = 0
    CLASS_UNION  = 1

    def __init__ (self, typeClass, name, members):
        self.typeClass = typeClass
        self.name      = name
        self.members   = members
        self.alias     = None
        self.isAlias   = False

    def getClassName (self):
        names = {CompositeType.CLASS_STRUCT: 'struct', CompositeType.CLASS_UNION: 'union'}
        return names[self.typeClass]

    def checkAliasValidity (self):
        if self.alias != None:
            if len(self.members) != len(self.alias.members):
                raise Exception("%s has different number of members than its alias %s." % (self.name, self.alias.name))
            # NOTE(review): per-member comparison was deliberately disabled in
            # the original source (the loop body broke on the first iteration);
            # kept as-is to preserve behavior.
            for index, member in enumerate(self.members ):
                break
                #if member != self.alias.members[index]:
                    #raise Exception("Member %s of %s is different than core member %s in %s." % (self.alias.members[index], self.alias.name, member, self.name))

    def __repr__ (self):
        return '%s (%s)' % (self.name, self.alias)

class Function:
    """A parsed Vulkan entry point, classified by its dispatch level."""
    TYPE_PLATFORM = 0 # Not bound to anything
    TYPE_INSTANCE = 1 # Bound to VkInstance
    TYPE_DEVICE   = 2 # Bound to VkDevice

    def __init__ (self, name, returnType, arguments, apiVersion = None):
        self.name       = name
        self.returnType = returnType
        self.arguments  = arguments
        self.alias      = None
        self.isAlias    = False
        self.apiVersion = apiVersion

    def getType (self):
        """Classify by the first argument's type (special case for
        vkGetInstanceProcAddr, which is a platform function)."""
        if self.name == "vkGetInstanceProcAddr":
            return Function.TYPE_PLATFORM
        assert len(self.arguments) > 0
        firstArgType = self.arguments[0].getType()
        if firstArgType in ["VkInstance", "VkPhysicalDevice"]:
            return Function.TYPE_INSTANCE
        elif firstArgType in ["VkDevice", "VkCommandBuffer", "VkQueue"]:
            return Function.TYPE_DEVICE
        else:
            return Function.TYPE_PLATFORM

    def checkAliasValidity (self):
        """Verify this function's signature matches its alias (if any)."""
        if self.alias != None:
            if len(self.arguments) != len(self.alias.arguments):
                raise Exception("%s has different number of arguments than its alias %s." % (self.name, self.alias.name))
            if self.returnType != self.alias.returnType or not (self.returnType.startswith(self.alias.returnType) or self.alias.returnType.startswith(self.returnType)):
                raise Exception("%s has different return value's type than its alias %s." % (self.name, self.alias.name))
            for index, argument in enumerate(self.arguments):
                if argument != self.alias.arguments[index]:
                    raise Exception("argument %s: \"%s\" of %s is different than \"%s\" of %s." % (index, self.alias.arguments[index].getAsString(' '), self.alias.name, argument.getAsString(' '), self.name))

    def __repr__ (self):
        return '%s (%s)' % (self.name, self.alias)

class Extension:
    """Everything declared by one Vulkan extension."""
    def __init__ (self, name, handles, enums, bitfields, compositeTypes, functions, definitions, additionalDefinitions, versionInCore):
        self.name           = name
        self.definitions    = definitions
        self.additionalDefs = additionalDefinitions
        self.handles        = handles
        self.enums          = enums
        self.bitfields      = bitfields
        self.compositeTypes = compositeTypes
        self.functions      = functions
        self.versionInCore  = versionInCore

    def __repr__ (self):
        # BUGFIX: the format string previously had only seven %s placeholders
        # for eight arguments, so repr() always raised TypeError ("not all
        # arguments converted during string formatting"). Added the CORE
        # placeholder for versionInCore.
        return 'EXT:\n%s ->\nENUMS:\n%s\nCOMPOS:\n%s\nFUNCS:\n%s\nBITF:\n%s\nHAND:\n%s\nDEFS:\n%s\nCORE:\n%s\n' % (self.name, self.enums, self.compositeTypes, self.functions, self.bitfields, self.handles, self.definitions, self.versionInCore)

class API:
    """The whole parsed API: core declarations plus all extensions."""
    def __init__ (self, definitions, handles, enums, bitfields, compositeTypes, functions, extensions):
        self.definitions    = definitions
        self.handles        = handles
        self.enums          = enums
        self.bitfields      = bitfields
        self.compositeTypes = compositeTypes
        self.functions      = functions # \note contains extension functions as well
        self.extensions     = extensions

def readFile (filename):
    with open(filename, 'rb') as f:
        return f.read()

IDENT_PTRN = r'[a-zA-Z_][a-zA-Z0-9_]*'
TYPE_PTRN  = r'[a-zA-Z_][a-zA-Z0-9_ \t*&]*'

def fixupEnumValues (values):
    """Drop the auto-generated *_BEGIN_RANGE / *_END_RANGE enum entries."""
    fixed = []
    for name, value in values:
        if "_BEGIN_RANGE" in name or "_END_RANGE" in name:
            continue
        fixed.append((name, value))
    return fixed

def getInterfaceName (function):
    """vkFooBar -> fooBar."""
    assert function.name[:2] == "vk"
    return function.name[2].lower() + function.name[3:]

def getFunctionTypeName (function):
    """vkFooBar -> FooBarFunc."""
    assert function.name[:2] == "vk"
    return function.name[2:] + "Func"

def endsWith (str, postfix):
    return str[-len(postfix):] == postfix

def splitNameExtPostfix (name):
    """Split a name into (base, extension postfix); postfix is '' if none."""
    knownExtPostfixes = EXTENSION_POSTFIXES
    for postfix in knownExtPostfixes:
        if endsWith(name, postfix):
            return (name[:-len(postfix)], postfix)
    return (name, "")

def getBitEnumNameForBitfield (bitfieldName):
    """VkFooFlagsKHR -> VkFooFlagBitsKHR."""
    bitfieldName, postfix = splitNameExtPostfix(bitfieldName)
    assert bitfieldName[-1] == "s"
    return bitfieldName[:-1] + "Bits" + postfix

def getBitfieldNameForBitEnum (bitEnumName):
    """VkFooFlagBitsKHR -> VkFooFlagsKHR."""
    bitEnumName, postfix = splitNameExtPostfix(bitEnumName)
    assert bitEnumName[-4:] == "Bits"
    return bitEnumName[:-4] + "s" + postfix

def parsePreprocDefinedValue (src, name):
    """Return the value of '#define name value' in src; raise if missing."""
    value = parsePreprocDefinedValueOptional(src, name)
    if value is None:
        raise Exception("No such definition: %s" % name)
    return value

def parsePreprocDefinedValueOptional (src, name):
    """Return the value of '#define name value' in src, or None if absent."""
    definition = re.search(r'#\s*define\s+' + name + r'\s+([^\n]+)\n', src)
    if definition is None:
        return None
    value = definition.group(1).strip()
    if value == "UINT32_MAX":
        value = "(~0u)"
    return value

def parseEnum (name, src):
    keyValuePtrn = '(' + IDENT_PTRN + r')\s*=\s*([^\s,}]+)\s*[,}]'
    matches      = re.findall(keyValuePtrn, src)
    return Enum(name, fixupEnumValues(matches))

# \note Parses raw enums, some are mapped to bitfields later
def parseEnums (src):
    matches = re.findall(r'typedef enum(\s*' + IDENT_PTRN + r')?\s*{([^}]*)}\s*(' + IDENT_PTRN + r')\s*;', src)
    enums   = []
    for enumname, contents, typename in matches:
        enums.append(parseEnum(typename, contents))
    return enums

def parseCompositeType (type, name, src):
    """Parse the member list of a struct/union body into a CompositeType."""
    typeNamePtrn = r'(' + TYPE_PTRN + r')(\s+' + IDENT_PTRN + r')((\[[^\]]+\])*)\s*;'
    matches      = re.findall(typeNamePtrn, src)
    members      = [Variable(t.strip(), n.strip(), a.strip()) for t, n, a, _ in matches]
    return CompositeType(type, name, members)
def parseCompositeTypes (src):
    """Parse every 'typedef struct/union {...} Name;' in src into
    CompositeType objects."""
    classByKeyword = {
        'struct': CompositeType.CLASS_STRUCT,
        'union':  CompositeType.CLASS_UNION,
    }
    pattern = r'typedef (struct|union)(\s*' + IDENT_PTRN + r')?\s*{([^}]*)}\s*(' + IDENT_PTRN + r')\s*;'
    return [parseCompositeType(classByKeyword[keyword], typename, body)
            for keyword, _tag, body, typename in re.findall(pattern, src)]

def parseHandles (src):
    """Parse VK_DEFINE_HANDLE / VK_DEFINE_NON_DISPATCHABLE_HANDLE declarations
    into Handle objects."""
    kindByMacro = {'': Handle.TYPE_DISP, '_NON_DISPATCHABLE': Handle.TYPE_NONDISP}
    pattern = r'VK_DEFINE(_NON_DISPATCHABLE|)_HANDLE\((' + IDENT_PTRN + r')\)[ \t]*[\n\r]'
    return [Handle(kindByMacro[kind], name) for kind, name in re.findall(pattern, src)]

def parseArgList (src):
    """Parse a comma-separated C argument list into Variable objects."""
    argPtrn = r'(' + TYPE_PTRN + r')(\s+' + IDENT_PTRN + r')((\[[^\]]+\])*)\s*'
    parsed  = []
    for rawArg in src.split(','):
        match = re.search(argPtrn, rawArg)
        parsed.append(Variable(match.group(1).strip(), match.group(2).strip(), match.group(3)))
    return parsed

def removeTypeExtPostfix (name):
    """Strip a standard extension postfix (e.g. KHR) from name; None when the
    name carries no such postfix."""
    for postfix in EXTENSION_POSTFIXES_STANDARD:
        if endsWith(name, postfix):
            return name[0:-len(postfix)]
    return None

def populateAliases (objects):
    """Link each extension-postfixed object to its core counterpart (the core
    object gains .alias, the postfixed one is flagged isAlias), then validate
    every alias pair."""
    byName = {}
    for obj in objects:
        byName[obj.name] = obj
    for obj in objects:
        coreName = removeTypeExtPostfix(obj.name)
        if coreName != None and coreName in byName:
            byName[coreName].alias = obj
            obj.isAlias            = True
    for obj in objects:
        obj.checkAliasValidity()

def populateAliasesWithTypedefs (objects, src):
    """For each object with exactly one 'typedef <name> <other>;' in src,
    append a deep-copied alias object under the typedef'd name."""
    byName = {}
    # NOTE(review): appending to `objects` while iterating it means freshly
    # added aliases are themselves scanned for typedefs — preserved from the
    # original; confirm this is intentional before restructuring.
    for obj in objects:
        byName[obj.name] = obj
        typedefPtrn = r'\s*typedef\s+' + obj.name + r'\s+([^;]+)'
        found       = re.findall(typedefPtrn, src)
        if len(found) == 1:
            aliasObj         = copy.deepcopy(obj)
            aliasObj.name    = found[0]
            obj.alias        = aliasObj
            aliasObj.isAlias = True
            objects.append(aliasObj)

# NOTE(review): the source chunk is truncated here — the next function's
# definition (only its 'def' keyword was visible) is lost; restore it from the
# upstream file.
# reponame: venaturum/piso
import numpy as np
import pandas as pd
import pytest

import piso
import piso.intervalarray as piso_intervalarray
from piso import register_accessors

register_accessors()


def get_accessor_method(self, function):
    """Return the accessor-bound equivalent of a piso.intervalarray function."""
    return {
        piso_intervalarray.union: self.piso.union,
        piso_intervalarray.intersection: self.piso.intersection,
        piso_intervalarray.symmetric_difference: self.piso.symmetric_difference,
        piso_intervalarray.isdisjoint: self.piso.isdisjoint,
        piso_intervalarray.issuperset: self.piso.issuperset,
        piso_intervalarray.issubset: self.piso.issubset,
        piso_intervalarray.coverage: self.piso.coverage,
        piso_intervalarray.complement: self.piso.complement,
        piso_intervalarray.contains: self.piso.contains,
        piso_intervalarray.split: self.piso.split,
        piso_intervalarray.bridge: self.piso.bridge,
    }[function]


def get_package_method(function):
    """Return the top-level piso equivalent of a piso.intervalarray function."""
    return {
        piso_intervalarray.union: piso.union,
        piso_intervalarray.intersection: piso.intersection,
        piso_intervalarray.symmetric_difference: piso.symmetric_difference,
        # NOTE(review): maps to the intervalarray function, not piso.isdisjoint,
        # unlike every other entry — confirm this asymmetry is intentional.
        piso_intervalarray.isdisjoint: piso_intervalarray.isdisjoint,
        piso_intervalarray.issuperset: piso.issuperset,
        piso_intervalarray.issubset: piso.issubset,
        piso_intervalarray.coverage: piso.coverage,
        piso_intervalarray.complement: piso.complement,
        piso_intervalarray.contains: piso.contains,
        piso_intervalarray.split: piso.split,
        piso_intervalarray.bridge: piso.bridge,
    }[function]


def perform_op(*args, method, function, **kwargs):
    """Dispatch `function` through one of three call paths.

    method is "supplied" (call the function directly), "accessor" (call via
    the registered .piso accessor on the first argument), or "package" (call
    the top-level piso equivalent).
    """
    if method == "accessor":
        self, *args = args
        return get_accessor_method(self, function)(*args, **kwargs)
    elif method == "package":
        return get_package_method(function)(*args, **kwargs)
    else:
        return function(*args, **kwargs)


def make_ia1(interval_index, closed):
    # Overlapping + disjoint fixture used by most tests.
    arr = pd.arrays.IntervalArray.from_tuples(
        [(0, 4), (2, 5), (3, 6), (7, 8), (8, 9), (10, 12)],
        closed=closed,
    )
    return pd.IntervalIndex(arr) if interval_index else arr


def make_ia2(interval_index, closed):
    # Three mutually-overlapping intervals (common overlap is (3, 4)).
    arr = pd.arrays.IntervalArray.from_tuples(
        [(0, 4), (2, 5), (3, 6)],
        closed=closed,
    )
    return pd.IntervalIndex(arr) if interval_index else arr


def make_ia3(interval_index, closed):
    # Two disjoint intervals.
    arr = pd.arrays.IntervalArray.from_tuples(
        [(3, 4), (8, 11)],
        closed=closed,
    )
    return pd.IntervalIndex(arr) if interval_index else arr


def make_ia4(interval_index, closed):
    arr = pd.arrays.IntervalArray.from_tuples(
        [(1, 4), (2, 5), (3, 6)],
        closed=closed,
    )
    return pd.IntervalIndex(arr) if interval_index else arr


def make_ia_from_tuples(interval_index, tuples, closed):
    klass = pd.IntervalIndex if interval_index else pd.arrays.IntervalArray
    return klass.from_tuples(tuples, closed=closed)


def assert_interval_array_equal(interval_array, expected, interval_index):
    if interval_index:
        interval_array = interval_array.values
    pd._testing.assert_interval_array_equal(interval_array, expected, exact=False)


def _result_is_index(interval_index, return_type):
    """Whether the operation result should be a pd.IntervalIndex: with
    return_type == "infer" it follows the input, otherwise the requested type."""
    if return_type == "infer":
        return interval_index
    return return_type == pd.IntervalIndex


@pytest.mark.parametrize("closed", ["left", "right"])
@pytest.mark.parametrize("interval_index", [True, False])
@pytest.mark.parametrize("return_type", ["infer", pd.arrays.IntervalArray, pd.IntervalIndex])
@pytest.mark.parametrize("method", ["supplied", "accessor", "package"])
def test_union(closed, interval_index, return_type, method):
    interval_array = make_ia1(interval_index, closed)
    result = perform_op(
        interval_array,
        return_type=return_type,
        method=method,
        function=piso_intervalarray.union,
    )
    expected = pd.arrays.IntervalArray.from_tuples(
        [(0, 6), (7, 9), (10, 12)],
        closed=closed,
    )
    assert_interval_array_equal(
        result, expected, _result_is_index(interval_index, return_type)
    )


@pytest.mark.parametrize("closed", ["left", "right"])
@pytest.mark.parametrize("interval_index", [True, False])
@pytest.mark.parametrize("return_type", ["infer", pd.arrays.IntervalArray, pd.IntervalIndex])
@pytest.mark.parametrize("method", ["supplied", "accessor", "package"])
def test_intersection_overlaps_all_empty_result(
    closed, interval_index, return_type, method
):
    interval_array = make_ia1(interval_index, closed)
    result = perform_op(
        interval_array,
        return_type=return_type,
        method=method,
        function=piso_intervalarray.intersection,
    )
    expected = pd.arrays.IntervalArray([], closed=closed)
    assert_interval_array_equal(
        result, expected, _result_is_index(interval_index, return_type)
    )


@pytest.mark.parametrize("closed", ["left", "right"])
@pytest.mark.parametrize("interval_index", [True, False])
@pytest.mark.parametrize("return_type", ["infer", pd.arrays.IntervalArray, pd.IntervalIndex])
@pytest.mark.parametrize("method", ["supplied", "accessor", "package"])
def test_intersection_overlaps_all_nonempty_result(
    closed, interval_index, return_type, method
):
    interval_array = make_ia2(interval_index, closed=closed)
    result = perform_op(
        interval_array,
        return_type=return_type,
        method=method,
        function=piso_intervalarray.intersection,
    )
    expected = pd.arrays.IntervalArray.from_tuples([(3, 4)], closed=closed)
    assert_interval_array_equal(
        result, expected, _result_is_index(interval_index, return_type)
    )


@pytest.mark.parametrize("closed", ["left", "right"])
@pytest.mark.parametrize("interval_index", [True, False])
@pytest.mark.parametrize("return_type", ["infer", pd.arrays.IntervalArray, pd.IntervalIndex])
@pytest.mark.parametrize("method", ["supplied", "accessor", "package"])
def test_intersection_overlaps_2(closed, interval_index, return_type, method):
    interval_array = make_ia1(interval_index, closed)
    result = perform_op(
        interval_array,
        min_overlaps=2,
        return_type=return_type,
        method=method,
        function=piso_intervalarray.intersection,
    )
    expected = pd.arrays.IntervalArray.from_tuples([(2, 5)], closed=closed)
    assert_interval_array_equal(
        result, expected, _result_is_index(interval_index, return_type)
    )


@pytest.mark.parametrize("closed", ["left", "right"])
@pytest.mark.parametrize("interval_index", [True, False])
@pytest.mark.parametrize("return_type", ["infer", pd.arrays.IntervalArray, pd.IntervalIndex])
@pytest.mark.parametrize("method", ["supplied", "accessor", "package"])
def test_intersection_overlaps_3(closed, interval_index, return_type, method):
    interval_array = make_ia1(interval_index, closed)
    result = perform_op(
        interval_array,
        min_overlaps=3,
        return_type=return_type,
        method=method,
        function=piso_intervalarray.intersection,
    )
    expected = pd.arrays.IntervalArray.from_tuples([(3, 4)], closed=closed)
    assert_interval_array_equal(
        result, expected, _result_is_index(interval_index, return_type)
    )


@pytest.mark.parametrize("closed", ["left", "right"])
@pytest.mark.parametrize("interval_index", [True, False])
@pytest.mark.parametrize("return_type", ["infer", pd.arrays.IntervalArray, pd.IntervalIndex])
@pytest.mark.parametrize("method", ["supplied", "accessor", "package"])
def test_symmetric_difference(closed, interval_index, return_type, method):
    interval_array = make_ia1(interval_index, closed)
    result = perform_op(
        interval_array,
        return_type=return_type,
        method=method,
        function=piso_intervalarray.symmetric_difference,
    )
    expected = pd.arrays.IntervalArray.from_tuples(
        [(0, 2), (5, 6), (7, 9), (10, 12)],
        closed=closed,
    )
    assert_interval_array_equal(
        result, expected, _result_is_index(interval_index, return_type)
    )


@pytest.mark.parametrize("closed", ["left", "right"])
@pytest.mark.parametrize("interval_index", [True, False])
@pytest.mark.parametrize("return_type", ["infer", pd.arrays.IntervalArray, pd.IntervalIndex])
@pytest.mark.parametrize("method", ["supplied", "accessor", "package"])
def test_symmetric_difference_min_overlaps_3(
    closed, interval_index, return_type, method
):
    interval_array = make_ia1(interval_index, closed)
    result = perform_op(
        interval_array,
        min_overlaps=3,
        return_type=return_type,
        method=method,
        function=piso_intervalarray.symmetric_difference,
    )
    expected = pd.arrays.IntervalArray.from_tuples(
        [(0, 3), (4, 6), (7, 9), (10, 12)],
        closed=closed,
    )
    assert_interval_array_equal(
        result, expected, _result_is_index(interval_index, return_type)
    )


@pytest.mark.parametrize("closed", ["left", "right"])
@pytest.mark.parametrize("interval_index", [True, False])
@pytest.mark.parametrize("return_type", ["infer", pd.arrays.IntervalArray, pd.IntervalIndex])
@pytest.mark.parametrize("method", ["supplied", "accessor", "package"])
def test_symmetric_difference_min_overlaps_all_1(
    closed, interval_index, return_type, method
):
    interval_array = make_ia1(interval_index, closed)
    result = perform_op(
        interval_array,
        min_overlaps="all",
        return_type=return_type,
        method=method,
        function=piso_intervalarray.symmetric_difference,
    )
    expected = pd.arrays.IntervalArray.from_tuples(
        [(0, 6), (7, 9), (10, 12)],
        closed=closed,
    )
    assert_interval_array_equal(
        result, expected, _result_is_index(interval_index, return_type)
    )


# NOTE(review): this chunk of the file ends mid-function below — the expected
# value and assertion continue beyond the visible source.
@pytest.mark.parametrize("closed", ["left", "right"])
@pytest.mark.parametrize("interval_index", [True, False])
@pytest.mark.parametrize("return_type", ["infer", pd.arrays.IntervalArray, pd.IntervalIndex])
@pytest.mark.parametrize("method", ["supplied", "accessor", "package"])
def test_symmetric_difference_min_overlaps_all_2(
    closed, interval_index, return_type, method
):
    interval_array = make_ia2(interval_index, closed)
    result = perform_op(
        interval_array,
        min_overlaps="all",
        return_type=return_type,
        method=method,
        function=piso_intervalarray.symmetric_difference,
    )
expected = pd.arrays.IntervalArray.from_tuples( [(0, 3), (4, 6)], closed=closed, ) interval_index = ( interval_index if return_type == "infer" else (return_type == pd.IntervalIndex) ) assert_interval_array_equal( result, expected, interval_index, ) def map_to_dates(obj, date_type): if date_type is None: return obj def make_date(x): ts = pd.to_datetime(x, unit="d", origin="2021-09-30") if date_type == "numpy": return ts.to_numpy() if date_type == "datetime": return ts.to_pydatetime() if date_type == "timedelta": return ts - pd.Timestamp("2021-10-1") return ts if isinstance(obj, (pd.IntervalIndex, pd.arrays.IntervalArray)): return obj.from_arrays( obj.left.map(make_date), obj.right.map(make_date), obj.closed, ) elif isinstance(obj, list): return [make_date(x) for x in obj] @pytest.mark.parametrize( "interval_index", [True, False], ) @pytest.mark.parametrize( "tuples, expected", [ ([], True), ([(1, 2), (2, 3)], True), ([(1, 2), (3, 4)], True), ([(1, 3), (2, 4)], False), ([(1, 4), (2, 3)], False), ([(1, 2), (2, 3), (3, 4)], True), ([(1, 2), (3, 4), (5, 6)], True), ([(1, 3), (2, 4), (5, 6)], False), ([(1, 4), (2, 3), (5, 6)], False), ], ) @pytest.mark.parametrize( "closed", ["left", "right", "neither"], ) @pytest.mark.parametrize( "date_type", ["timestamp", "numpy", "datetime", "timedelta", None], ) @pytest.mark.parametrize( "method", ["supplied", "accessor", "package"], ) def test_isdisjoint_left_right_neither( interval_index, tuples, expected, closed, date_type, method ): interval_array = make_ia_from_tuples(interval_index, tuples, closed) interval_array = map_to_dates(interval_array, date_type) result = perform_op( interval_array, method=method, function=piso_intervalarray.isdisjoint ) assert result == expected @pytest.mark.parametrize( "interval_index", [True, False], ) @pytest.mark.parametrize( "tuples, expected", [ ([], True), ([(1, 2), (2, 3)], False), ([(1, 2), (3, 3)], True), ([(1, 2), (3, 4)], True), ([(1, 3), (2, 4)], False), ([(1, 4), (2, 3)], False), ([(1, 
2), (2, 3), (3, 4)], False), ([(1, 2), (3, 4), (5, 6)], True), ([(1, 3), (2, 4), (5, 6)], False), ([(1, 4), (2, 3), (5, 6)], False), ], ) @pytest.mark.parametrize( "date_type", ["timestamp", "numpy", "datetime", "timedelta", None], ) @pytest.mark.parametrize( "method", ["supplied", "accessor", "package"], ) def test_isdisjoint_both(interval_index, tuples, expected, date_type, method): interval_array = make_ia_from_tuples(interval_index, tuples, "both") interval_array = map_to_dates(interval_array, date_type) result = perform_op( interval_array, method=method, function=piso_intervalarray.isdisjoint ) assert result == expected @pytest.mark.parametrize( "interval_index", [True, False], ) @pytest.mark.parametrize( "domain, expected_fraction, expected_sum", [ (None, 10 / 12, 10), ((0, 10), 0.8, 8), (pd.Interval(0, 10), 0.8, 8), ((15, 20), 0, 0), (pd.IntervalIndex.from_tuples([(0, 6), (10, 12)]), 1, 8), (pd.IntervalIndex.from_tuples([(6, 7), (9, 10)]), 0, 0), ], ) @pytest.mark.parametrize( "closed", ["left", "right"], ) @pytest.mark.parametrize( "method", ["supplied", "accessor", "package"], ) @pytest.mark.parametrize( "how", ["fraction", "sum"], ) def test_coverage( interval_index, domain, expected_fraction, expected_sum, closed, method, how ): if hasattr(domain, "set_closed"): domain = domain.set_closed(closed) ia = make_ia1(interval_index, closed) result = perform_op( ia, method=method, function=piso_intervalarray.coverage, domain=domain, how=how, ) expected = expected_fraction if how == "fraction" else expected_sum assert result == expected @pytest.mark.parametrize( "interval_index", [True, False], ) @pytest.mark.parametrize( "domain_interval_index", [True, False], ) @pytest.mark.parametrize( "closed", ["left", "right"], ) @pytest.mark.parametrize( "method", ["supplied", "accessor", "package"], ) @pytest.mark.parametrize( "how", ["fraction", "sum"], ) def test_coverage_bins(interval_index, domain_interval_index, closed, method, how): domain = 
pd.arrays.IntervalArray.from_tuples( [(0, 2), (3, 7), (8, 10)], closed=closed, ) if domain_interval_index: domain = pd.IntervalIndex(domain) ia = make_ia1(interval_index, closed) result = perform_op( ia, method=method, function=piso_intervalarray.coverage, domain=domain, bins=True, how=how, ) values = [1, 0.75, 0.5] if how == "fraction" else [2.0, 3.0, 1.0] expected = pd.Series(values, index=domain) pd.testing.assert_series_equal(result, expected) @pytest.mark.parametrize( "interval_index", [True, False], ) @pytest.mark.parametrize( "closed", ["left", "right"], ) @pytest.mark.parametrize( "method", ["supplied", "accessor", "package"], ) def test_coverage_edge_case(interval_index, closed, method): ia = make_ia_from_tuples(interval_index, [], closed) result = perform_op( ia, method=method, function=piso_intervalarray.coverage, domain=None, ) assert result == 0.0 @pytest.mark.parametrize( "interval_index", [True, False], ) @pytest.mark.parametrize( "closed", ["left", "right"], ) @pytest.mark.parametrize( "method", ["supplied", "accessor", "package"], ) def test_coverage_exception(interval_index, closed, method): domain = (1, 2, 3) with pytest.raises(ValueError): ia = make_ia1(interval_index, closed) perform_op( ia, method=method, function=piso_intervalarray.coverage, domain=domain, ) @pytest.mark.parametrize( "interval_index", [True, False], ) @pytest.mark.parametrize( "closed", ["left", "right"], ) @pytest.mark.parametrize( "method", ["supplied", "accessor", "package"], ) def test_coverage_exception2(interval_index, closed, method): domain = (1, 2) with pytest.raises(ValueError): ia = make_ia1(interval_index, closed) perform_op( ia, method=method, function=piso_intervalarray.coverage, domain=domain, bins=True, ) @pytest.mark.parametrize( "interval_index", [True, False], ) @pytest.mark.parametrize( "closed", ["left", "right"], ) @pytest.mark.parametrize( "method", ["supplied", "accessor", "package"], ) def test_coverage_exception3(interval_index, closed, method): 
domain = pd.IntervalIndex.from_tuples([(1, 3), (2, 4)]) with pytest.raises(ValueError): ia = make_ia1(interval_index, closed) perform_op( ia, method=method, function=piso_intervalarray.coverage, domain=domain, bins=True, ) @pytest.mark.parametrize( "interval_index", [True, False], ) @pytest.mark.parametrize( "domain, expected_tuples", [ (None, [(6, 7), (9, 10)]), ((-5, 15), [(-5, 0),
<gh_stars>1-10 # Copyright (c) 2016 Civic Knowledge. This file is licensed under the terms of the # MIT License, included in this distribution as LICENSE.txt """ Code generation for processing columns """ import ast def file_loc(): """Return file and line number""" import sys import inspect try: raise Exception except: file_ = '.../' + '/'.join((inspect.currentframe().f_code.co_filename.split('/'))[-3:]) line_ = sys.exc_info()[2].tb_frame.f_back.f_lineno return "{}:{}".format(file_, line_) const_args = ('row', 'row_n', 'scratch', 'errors', 'accumulator', 'pipe', 'manager', 'source') var_args = ('v', 'i_s', 'i_d', 'header_s', 'header_d') all_args = var_args + const_args # Full lambda definition for a column, including variable parts col_code_def = 'lambda {}:'.format(','.join(all_args)) # lambda definition for the who;e row. Includes only the arguments # that are the same for every column code_def = 'lambda {}:'.format(','.join(const_args)) col_args_t = """col_args = dict(v=v, i_s=i_s, i_d=i_d, header_s=header_s, header_d=header_d, scratch=scratch, errors=errors, accumulator = accumulator, row=row, row_n=row_n)""" file_header = """ # {} import sys from rowgenerators.valuetype import resolve_value_type from rowgenerators.rowpipe.exceptions import CasterExceptionError """.format(file_loc()) column_template = """ # {loc} def {f_name}(v, i_s, i_d, header_s, header_d, row, row_n, errors, scratch, accumulator, pipe, manager, source): try: {stack} except Exception as exc: {exception} return v """ indent = ' ' row_template = """ # {loc} def row_{table}_{stage}(row, row_n, errors, scratch, accumulator, pipe, manager, source): return [ {stack} ] """ class CodeGenError(Exception): pass def exec_context(**kwargs): """Base environment for evals, the stuff that is the same for all evals. 
Primarily used in the Caster pipe""" import dateutil.parser import datetime import random from functools import partial from rowgenerators.valuetype.types import parse_date, parse_time, parse_datetime import rowgenerators.valuetype.types import rowgenerators.valuetype.exceptions import rowgenerators.valuetype.test import rowgenerators.valuetype import rowgenerators.rowpipe.transforms def set_from(f, frm): # This maybe isn't used anymore, or maybe it is for debugging? try: try: f.ambry_from = frm except AttributeError: # for instance methods f.im_func.ambry_from = frm except (TypeError, AttributeError): # Builtins, non python code pass return f test_env = dict( parse_date=parse_date, parse_time=parse_time, parse_datetime=parse_datetime, partial=partial ) test_env.update(kwargs) test_env.update(dateutil.parser.__dict__) test_env.update(datetime.__dict__) test_env.update(random.__dict__) test_env.update(rowgenerators.valuetype.core.__dict__) test_env.update(rowgenerators.valuetype.types.__dict__) test_env.update(rowgenerators.valuetype.exceptions.__dict__) test_env.update(rowgenerators.valuetype.test.__dict__) test_env.update(rowgenerators.valuetype.__dict__) test_env.update(rowgenerators.rowpipe.transforms.__dict__) localvars = {} for f_name, func in test_env.items(): if not isinstance(func, (str, tuple)): localvars[f_name] = set_from(func, 'env') # The 'b' parameter of randint is assumed to be a manager, but # replacing it with a lambda prevents the param assignment localvars['randint'] = lambda a, b: random.randint(a, b) localvars['round'] = lambda a, b: round(a, b) return localvars def make_row_processors(source_headers, dest_table, env=None): """ Make multiple row processors for all of the columns in a table. 
:param source_headers: :param dest_table: :param env: :return: """ import re if env is None: env = exec_context() assert len(dest_table.columns) > 0 # Convert the transforms to a list of list, with each list being a # segment of column transformations, and each segment having one entry per column. row_processors = [] out = [] preamble = [] transforms = dest_table.stage_transforms for i, segments in enumerate(transforms): # Iterate over each stage column_names = [] column_types = [] seg_funcs = [] # Iterate over each column, linking it to the segments for this stage for col_num, (segment, column) in enumerate(zip(segments, dest_table), 0): assert column assert column.name, (dest_table.name, i) assert column.name == segment['column'].name col_name = column.name preamble_parts, try_lines, exception, passthrough = make_stack(env, i, segment) preamble += preamble_parts column_names.append(col_name) column_types.append(column.datatype) # Optimization to remove unecessary functions. Without this, the column function will # have just 'v = v' if len(segment['transforms']) == 1 and segment['transforms'][0] == 'v': seg_funcs.append('row[{}]'.format(col_num)) continue column_name = re.sub(r'[^\w]+', '_', col_name, ) table_name = re.sub(r'[^\w]+', '_', dest_table.name) assert column_name, (dest_table.name, i, col_name) assert table_name f_name = "{}_{}_{}".format(table_name, column_name, i) exception = (exception if exception else 'raise CasterExceptionError("' + f_name + '",header_d, v, exc, sys.exc_info())') try: # The input values for the first stage is the input dataset, # which may have different columns that the later stages if i == 0: i_s = source_headers.index(column.name) header_s = column.name else: i_s = col_num header_s = None v = 'row[{}]'.format(i_s) except ValueError as e: # The col name is not in the source dataset # This is the typical case when the output dataset has different columns from the # input, whcih should only occur on the first stage. 
i_s = 'None' header_s = None v = 'None' if col_num >= 1 else 'row_n' # Give the id column (first column) the row number header_d = column.name # Seg funcs holds the calls to the function for each column, called in the row stage function seg_funcs.append(f_name + ('({v}, {i_s}, {i_d}, {header_s}, \'{header_d}\', ' 'row, row_n, errors, scratch, accumulator, pipe, manager, source)') .format(v=v, i_s=i_s, i_d=col_num, header_s="'" + header_s + "'" if header_s else 'None', header_d=header_d)) # This creates the column manipulation function. out.append(column_template.format( f_name=f_name, table_name=dest_table.name, column_name=col_name, stage=i, i_s=i_s, i_d=col_num, header_s=header_s, header_d=header_d, v=v, exception=indent + exception, stack='\n'.join(indent + l for l in try_lines), col_args='', # col_args not implemented yet loc=file_loc())) # This stack assembles all of the function calls that will generate the next row stack = '\n'.join("{}{}, # column {}".format(indent, l, cn) for l, cn, dt in zip(seg_funcs, column_names, column_types)) out.append(row_template.format( table=re.sub(r'[^\w]+', '_', dest_table.name), stage=i, stack=stack, loc=file_loc() )) row_processors.append('row_{table}_{stage}'.format(stage=i, table=re.sub(r'[^\w]+', '_', dest_table.name))) # Add the final datatype cast, which is done seperately to avoid an unecessary function call. 
stack = '\n'.join("{}cast_{}(row[{}], '{}', errors),".format(indent, c.datatype.__name__, i, c.name) for i, c in enumerate(dest_table)) out.append(row_template.format( table=re.sub(r'[^\w]+', '_', dest_table.name), stage=len(transforms), stack=stack, loc=file_loc() )) row_processors.append('row_{table}_{stage}'.format(stage=len(transforms), table=re.sub(r'[^\w]+', '_', dest_table.name))) out.append('row_processors = [{}]'.format(','.join(row_processors))) return '\n'.join([file_header] + list(set(preamble)) + out) def calling_code(f, f_name=None, raise_for_missing=True): """Return the code string for calling a function. """ import inspect from rowgenerators.exceptions import ConfigurationError if inspect.isclass(f): try: args = inspect.signature(f.__init__).parameters.keys() except TypeError as e: raise TypeError("Failed to inspect {}: {}".format(f, e)) else: args = inspect.signature(f).parameters.keys() if len(args) > 1 and list(args)[0] == 'self': args = list(args)[1:] if 'self' in args: # Python3 gets self, but not Python2 args.remove('self') for a in args: if a not in all_args + ('exception',): # exception arg is only for exception handlers if raise_for_missing: # In CPython, inspecting __init__ for IntMeasure, FloatMeasure, etc, # raises a TypeError 12 lines up, but that does not happen in PyPy. This hack # raises the TypeError. if a == 'obj': raise TypeError() raise TypeError() raise ConfigurationError('Caster code {} has unknown argument ' 'name: \'{}\'. 
Must be one of: {} '.format(f, a, ','.join(all_args))) arg_map = {e: e for e in var_args} args = [arg_map.get(a, a) for a in args] return "{}({})".format(f_name if f_name else f.__name__, ','.join(args)) def make_stack(env, stage, segment): """For each transform segment, create the code in the try/except block with the assignements for pipes in the segment """ import string import random from rowgenerators.valuetype import ValueType passthrough = False # If true, signal that the stack will just return its input value column = segment['column'] def make_line(column, t): preamble = [] line_t = "v = {} # {}" env_t = env.get(t,t) if isinstance(env_t, type) and issubclass(env_t, ValueType): # A valuetype class, from the datatype column. try: cc, fl = calling_code(env_t, env_t.__name__), file_loc() except TypeError as e: cc, fl = "{}(v)".format(env_t.__name__), file_loc() preamble.append("{} = resolve_value_type('{}') # {}".format(env_t.__name__, env_t.vt_code, file_loc())) elif isinstance(t, type): # A python type, from the datatype columns. cc, fl = "parse_{}(v, header_d)".format(t.__name__), file_loc() elif callable(env.get(t)): # Transform function try: cc, fl = calling_code(env.get(t), t), file_loc() except TypeError as e: raise else: # A transform generator, or python code. 
rnd = (''.join(random.choice(string.ascii_lowercase) for _ in range(6))) name = 'tg_{}_{}_{}'.format(column.name, stage, rnd) try: a, b, fl = rewrite_tg(env, name, t) except (CodeGenError, AttributeError) as e: raise CodeGenError("Failed to re-write pipe code '{}' in column '{}': {} " .format(t, column, e)) cc = str(a) if b: preamble.append("{} = {} # {}".format(name, b, file_loc())) line = line_t.format(cc, fl) return line, preamble preamble = [] try_lines = [] for t in list(segment): if not t: continue line, col_preamble = make_line(column, t) preamble += col_preamble try_lines.append(line) if segment['exception']: exception, col_preamble = make_line(column, segment['exception']) else: exception = None if len(try_lines) == 0: try_lines.append('pass # Empty pipe segment') assert len(try_lines) > 0, column.name return preamble, try_lines, exception, passthrough def mk_kwd_args(fn, fn_name=None): import inspect fn_name = fn_name or fn.__name__ fn_args = inspect.getargspec(fn).args if len(fn_args) > 1 and fn_args[0] == 'self': args = fn_args[1:] kwargs = dict((a, a) for a in all_args if a in args) return "{}({})".format(fn_name,
<filename>library/genome_selection/strategy.py """ Genome selection strategies. :Authors: <NAME> <<EMAIL>> """ import os import pickle import sys from collections import Counter, defaultdict from itertools import accumulate, chain from random import random, shuffle import numpy as np from sklearn.cluster import AgglomerativeClustering EXCLUDED_GENOMES = {} # The id to use with Genome holdout genome strategies # when a taxid has a single genome. SINGLETON = -1 EPS = 2e-24 class DistRecordError(Exception): pass def filter_genomes(accessions, index): """ Filter unwanted genomes. Parameters ---------- accessions: iterable An iterable of genome accession strings. index: dict The genomes index. Returns ------- include, exclude Two lists of accessions. One to include and one to exclude. """ def genbank_duplicate(accession_info, index): return (accession_info['section'] == 'genbank' and accession_info['gbrs_paired_asm'] != '' and accession_info['paired_asm_comp'] == 'identical' and index['genomes'][accession_info['gbrs_paired_asm']] and (index['genomes'][accession_info['gbrs_paired_asm']] ['species_taxid'] == accession_info['species_taxid'])) include = [] exclude = [] for accession in accessions: if 'contig_sum' not in index['genomes'][accession]: exclude.append(accession) EXCLUDED_GENOMES[accession] = 'the contig_sum does not exist' elif len(accessions) == 1: include.append(accession) elif genbank_duplicate(index['genomes'][accession], index): exclude.append(accession) EXCLUDED_GENOMES[accession] = 'duplicate genome found in refseq' else: include.append(accession) return include, exclude def genome_sort(genomes, index): """ Sort the genomes based on the quality of the assembly, etc. First, representative genomes are chosen. This list is sorted by assembly level and then in reverse order by the number of bases divided by the number of contigs. Second, the same is done for reference genomes. Third, the same is done for all other genomes. 
Parameters ---------- genomes: list A list of genome accession ids. index: dict The genomes index. This will be used to lookup information about each genome to do sorting. Returns ------- A sorted list of genome accessions. """ assembly_level = ["Complete Genome", "Chromosome", "Scaffold", "Contig"] def contig_key(genome): try: return float(genome['contig_sum'] / genome['contig_count']) except KeyError: print(genome, file=sys.stderr) raise KeyError def sort_by_assembly_and_contig(genomes_data_list): assembly_level_dict = {level: [] for level in assembly_level} for genome_data in genomes_data_list: assembly_level_dict[genome_data['assembly_level']].append( genome_data) for level in assembly_level_dict: assembly_level_dict[level].sort(key=contig_key, reverse=True) sorted_list = [] for level in assembly_level: sorted_list += assembly_level_dict[level] return sorted_list ref_genomes = [ index['genomes'][accession] for accession in genomes if index['genomes'][accession]['refseq_category'] == 'reference genome' ] repr_genomes = [ index['genomes'][accession] for accession in genomes if index['genomes'][accession]['refseq_category'] == 'representative genome' ] other_genomes = [ index['genomes'][accession] for accession in genomes if index['genomes'][accession]['refseq_category'] != 'reference genome' and index['genomes'][accession]['refseq_category'] != 'representative genome' ] my_genomes = sort_by_assembly_and_contig(ref_genomes) my_genomes += sort_by_assembly_and_contig(repr_genomes) my_genomes += sort_by_assembly_and_contig(other_genomes) return [genome_data['assembly_accession'] for genome_data in my_genomes] def select_equal(list_lists, select_amount): """ Select equal elements from a list of lists. Parameters ---------- list_lists: list A list of lists of elements. The lists could be unequal in size. select_amount: int An integer of samples to take. Examples -------- list_lists is of form [[a, b, c] [1, 2, 3] [x, y]. 
If select_amount is 5, then the function will return [a, 1, x, b, 2]. Returns ------- list A list of sampled elements """ pos = [0] * len(list_lists) empty = [0] * len(list_lists) output_list = [] iterations = 0 while len(output_list) < select_amount and sum(empty) < len(list_lists): i = iterations % len(list_lists) j = pos[i] if j < len(list_lists[i]): output_list.append(list_lists[i][j]) pos[i] += 1 else: empty[i] = 1 iterations = i + 1 return output_list def select_genomes(genome_list, index, down_select="random", select_amount=None, X=None, mapping=None): """ Get a list of genomes in a sorted list or in a random list. Parameters ---------- genome_list: list A list of genome accessions. index: dict The Radogest index structure down_select: str The type of list ordering to perform. 'random' puts the list in random order 'sort' puts the list in sorted order 'dist' uses genome distances (e.g. Mash) select_amount: int The number of genomes to select. If this is None, then the list will not be cut. dist_location: str The location of the distance matrices, if any. Returns ------- A list of genome accessions. """ def splice(a_list): if select_amount is not None and select_amount < len(a_list): return a_list[0:select_amount] else: return a_list if down_select.startswith("random"): shuffle(genome_list) return splice(genome_list) elif down_select.startswith("sort"): return splice(genome_sort(genome_list, index)) elif down_select.startswith("dist"): return cluster(genome_list, select_amount, X, mapping)[0] else: raise NotImplementedError def cluster(genome_list, n_clusters, X, mapping): """ Get genomes representing each cluster from a taxid. The genomes are sorted in descending order of cluster size. Parameters ---------- genome_list: list A list of genome accessions. taxid: int The taxonomic id to get clusters for. n_clusters: int The number of clusters to request. dist_location: The directory where distances are stored. 
Returns ------- genome_list: list A list of up to n_clusters genomes. labels: numpy array A list of cluster labels """ # If there is only one genome, just include it. # Include all genomes if the number of clusters is too high. if len(genome_list) == 1 or n_clusters > len(genome_list): return (genome_list, np.zeros((1, 1))) # Cluster the genomes. labels = AgglomerativeClustering(n_clusters=n_clusters, affinity="precomputed", linkage="average").fit(X).labels_ # Find a genome that best represents the cluster # by finding the genome that has the least distance from # all other genomes. id_label = defaultdict(list) genome_list = [] for i, label in enumerate(labels): id_label[label].append(i) labels_count = Counter(labels) for label in id_label: rows = {} shuffle(id_label[label]) for i in id_label[label]: rows[i] = sum([X[i][j] for j in id_label[label]]) min_value = min(rows.values()) min_pos = [pos for pos in rows if abs(rows[pos] - min_value) <= EPS][0] genome_list.append((mapping[min_pos], label, labels_count[label])) # Sort the genomes in descending order of cluster size. genome_list.sort(key=lambda x: x[2], reverse=True) genome_list = [item[0] for item in genome_list] return genome_list, labels class GenomeSelection: """The base class for genome selection strategies.""" def __init__(self, index): """ Initialize the GenomeSelection class. Parameters ---------- index: dict A dictionary representing the index of genomes. """ self.index = index def set_all_genomes(self, boolean=False): """ Set all genome inclusions to the given value in the index. Parameters ---------- boolean: primitive A value to set all genome inclusions to. Returns ------- None """ taxids_dict = self.index['taxids'] for taxid in taxids_dict: for accession in taxids_dict[taxid]: taxids_dict[taxid][accession] = boolean class StandardSelect(GenomeSelection): """ """ def __init__(self, index, select_amount, down_select, dist_location=None): """ Initialize the StandardSelect class. 
Parameters ---------- index: dict A dictionary representing the index of genomes. select_amount: int The number of genomes to select at each level. down_select: str A string indicating which down selection to use. e.g. 'random' or 'sort' """ GenomeSelection.__init__(self, index) self.select_amount = select_amount self.down_select = down_select self.dist_location=dist_location self.set_all_genomes(boolean=False) class TreeSelect(StandardSelect): """ Perform down selection at each level of the tree. """ def select(self, parent, children): """ Perform down selection at each level of the tree. Parameters ---------- parent: int Taxonomic id of the parent. children: iterable An iterable of children taxonomic ids of the parent. A leaf node is represented by [] or False. Returns ------- i: int The number of genomes selected. """ if children: children_genomes = [] for child in children: include, _ = filter_genomes(self.index['taxids'][child].keys(), self.index) child_genomes = select_genomes(include, self.index, down_select=self.down_select, select_amount=None) children_genomes.append(child_genomes) my_genomes = select_equal(children_genomes, self.select_amount) for accession in my_genomes: self.index['taxids'][parent][accession] = True return len(my_genomes) else: include, _ = filter_genomes(self.index['taxids'][parent].keys(), self.index) my_genomes = select_genomes(include, self.index, down_select=self.down_select, select_amount=None) i = 0 for accession in my_genomes: if i >= self.select_amount: break self.index['taxids'][parent][accession] = True i += 1 return i class AllGenomes(GenomeSelection): """Choose all of the genomes.""" def __init__(self, index): """ Initialize the class. Parameters ---------- index: dict A dictionary representing the index of genomes. """ super().__init__(index) self.set_all_genomes(boolean=True) def select(self, parent, children): """ Set all genomes in the index to true. Filter out some genomes. 
Parameters ---------- parent: int Taxonomic id of the parent. children: iterable An iterable of children taxonomic ids of the parent. A leaf node is given when children is []. Returns ------- int The number of genomes selected. """ include, exclude = filter_genomes(self.index['taxids'][parent].keys(), self.index) for accession in exclude: self.index['taxids'][parent][accession] = False return len(include) class TreeDistSuper(GenomeSelection): """Choose genomes with maximum distance in the taxonomic tree.""" def __init__(self, index, select_amount, down_select, dist_location): """ Initialize the class. Parameters ---------- index: dict A dictionary representing the index of genomes. select_amount: int The amount of genomes to select. down_select: str Down selection method: random, sort """ super().__init__(index) self.select_amount = select_amount self.down_select = down_select self.dist_location = dist_location self.set_all_genomes(boolean=False) def _merge(self, l_lists): """ Merge a list of lists
tabla " + nodo.id + " ya existe. \n"


def AlterDatabase(nodo, tablaSimbolos):
    """Handle ALTER DATABASE; only RENAME is acted on here.

    Renames the database in the storage layer (jBase) first and, on
    success, in the symbol table, updating every FK/constraint record
    that referenced the old name.
    """
    global consola
    if nodo.rename:
        # jBase status codes (per the branches below): 0 ok, 1 operation
        # error, 2 database missing, 3 new name already taken.
        b = jBase.alterDatabase(nodo.id.valor, nodo.idnuevo)
        if b == 0:
            base = tablaSimbolos.renameBase(nodo.id.valor, nodo.idnuevo)
            if base:
                # Keep the global FK / constraint registries consistent
                # with the renamed database.
                for fk in listaFK:
                    if fk.idbase == nodo.id.valor:
                        fk.idbase = nodo.idnuevo
                for cons in listaConstraint:
                    if cons.idbase == nodo.id.valor:
                        cons.idbase = nodo.idnuevo
                consola += "La base se renombró con éxito " + nodo.idnuevo + " \n"
            else:
                consola += "Error no se pudo renombrar la base " + \
                    nodo.id.valor + " en la tabla de simbolos \n"
        elif b == 2:
            listaSemanticos.append(Error.ErrorS(
                "Error Semantico", "La base de datos " + nodo.id.valor + " no existe"))
        elif b == 3:
            listaSemanticos.append(Error.ErrorS(
                "Error Semantico", "La base de datos ya existe " + nodo.idnuevo))
        elif b == 1:
            listaSemanticos.append(Error.ErrorS(
                "Error Semantico", "Error en la operacion."))


def AlterAddColumn(nodo, tablaSimbolos):
    """Handle ALTER TABLE ... ADD COLUMN for every column in the node."""
    global consola
    global useActual
    base = tablaSimbolos.get(useActual)
    tabla = base.getTabla(nodo.idtabla)
    for col in nodo.listaColumnas:
        # Build the column symbol; len(tabla.columnas) is passed as its
        # position (presumably the ordinal index -- confirm in TS).
        auxcol = TS.SimboloColumna(
            col.idcolumna, col.tipo, False, None, None, None, True, None,
            len(tabla.columnas))
        if tabla.crearColumna(col.idcolumna, auxcol):
            b = jBase.alterAddColumn(useActual, nodo.idtabla, col.idcolumna)
            if b == 0:
                consola += "La columna " + col.idcolumna + \
                    " se agregó a la tabla " + nodo.idtabla + " \n"
            elif b == 1:
                listaSemanticos.append(Error.ErrorS(
                    "Error Semantico", "Error en la operacion."))
            elif b == 2:
                # NOTE(review): message is missing a space before "no existe".
                listaSemanticos.append(Error.ErrorS(
                    "Error Semantico", "Error la base " + useActual + "no existe"))
            elif b == 3:
                # NOTE(review): same missing space as above.
                listaSemanticos.append(Error.ErrorS(
                    "Error Semantico", "Error la tabla " + nodo.idtabla + "no existe"))
        else:
            consola += "Error al crear la columna " + col.idcolumna + " \n"


def AlterRenameColumn(nodo, tablaSimbolos):
    """Handle ALTER TABLE ... RENAME COLUMN.

    Propagates the new name to FK records (both the referenced and the
    local side) and to registered constraints.
    """
    base = tablaSimbolos.get(useActual)
    tabla = base.getTabla(nodo.idtabla)
    global consola
    op = tabla.renameColumna(nodo.idcolumna, nodo.idnuevo)
    if op == 0:
        for fk in listaFK:
            if fk.idcfk == nodo.idcolumna:
                # Renamed column is the *referenced* side of the FK: also
                # patch the referencing column's foreign_key metadata.
                fk.idcfk = nodo.idnuevo
                tablaRF = base.getTabla(fk.idtlocal)
                columnaRF = tablaRF.getColumna(fk.idclocal)
                columnaRF.foreign_key["columna"] = nodo.idnuevo
            elif fk.idclocal == nodo.idcolumna:
                fk.idclocal = nodo.idnuevo
        for cons in listaConstraint:
            if cons.idcol == nodo.idcolumna:
                cons.idcol = nodo.idnuevo
        consola += "Se cambio el nombre de la columna " + \
            nodo.idcolumna + " a " + nodo.idnuevo + " con exito \n"
    elif op == 1:
        listaSemanticos.append(Error.ErrorS(
            "Error Semantico", "La columna con nombre " + nodo.idnuevo + " ya existe"))
    elif op == 2:
        # NOTE(review): this branch reads nodo.idactual while the rest of
        # the function uses nodo.idcolumna -- confirm the node carries
        # both attributes, otherwise this raises AttributeError.
        listaSemanticos.append(Error.ErrorS(
            "Error Semantico", "La columna con nombre " + nodo.idactual + " no existe"))


def AlterRenameTable(nodo, tablaSimbolos):
    """Handle ALTER TABLE ... RENAME TO.

    Renames in the symbol table first, then in jBase, keeping the
    FK/constraint registries in sync on full success.
    """
    global useActual
    global consola
    base = tablaSimbolos.get(useActual)
    op = base.renameTable(nodo.idactual, nodo.idnuevo)
    if op == 0:
        lib = jBase.alterTable(useActual, nodo.idactual, nodo.idnuevo)
        if lib == 0:
            for fk in listaFK:
                if fk.idtfk == nodo.idactual:
                    # Renamed table is referenced by a FK: patch the
                    # referencing column's metadata as well.
                    fk.idtfk = nodo.idnuevo
                    tablaRF = base.getTabla(fk.idtlocal)
                    columnaRF = tablaRF.getColumna(fk.idclocal)
                    columnaRF.foreign_key["tabla"] = nodo.idnuevo
                elif fk.idtlocal == nodo.idactual:
                    fk.idtlocal = nodo.idnuevo
            for cons in listaConstraint:
                if cons.idtabla == nodo.idactual:
                    cons.idtabla = nodo.idnuevo
            consola += "La tabla " + nodo.idactual + \
                " se cambio a " + nodo.idnuevo + " exitosamente \n"
        elif lib == 1:
            listaSemanticos.append(Error.ErrorS(
                "Error Semantico", "Error en la operacion."))
        elif lib == 2:
            listaSemanticos.append(Error.ErrorS(
                "Error Semantico", "La base de datos " + useActual + " no existe"))
        elif lib == 3:
            listaSemanticos.append(Error.ErrorS(
                "Error Semantico", "La tabla " + nodo.idactual + " no existe"))
        elif lib == 4:
            listaSemanticos.append(Error.ErrorS(
                "Error Semantico", "La tabla " + nodo.idnuevo + " ya existe"))
        # NOTE(review): when jBase fails here the symbol-table rename has
        # already happened, leaving the two stores inconsistent.
    elif op == 1:
        listaSemanticos.append(Error.ErrorS(
            "Error Semantico", "La tabla con nombre " + nodo.idnuevo + " ya existe"))
    elif op == 2:
        listaSemanticos.append(Error.ErrorS(
            "Error Semantico", "La tabla con nombre " + nodo.idactual + " no existe"))


def AlterTableCheck(nodo, tablaSimbolos):
    """Handle ALTER TABLE ... ADD CHECK.

    The checked column is taken from the left operand of the condition;
    the constraint name defaults to "<column>_check" when none is given.
    """
    global useActual
    base = tablaSimbolos.get(useActual)
    tabla = base.getTabla(nodo.idtabla)
    condicion = nodo.expresion
    opIzq = condicion.opIzq
    idcol = opIzq.valor
    result = False
    global consola
    if nodo.idcons == None:
        result = tabla.modificarCheck(idcol, condicion, idcol + "_check")
        listaConstraint.append(TS.Constraints(
            useActual, nodo.idtabla, idcol + "_check", idcol, "check"))
        consola += "Se agrego el check a la columna " + idcol + " exitosamente \n"
    else:
        result = tabla.modificarCheck(idcol, condicion, nodo.idcons)
        listaConstraint.append(TS.Constraints(
            useActual, nodo.idtabla, nodo.idcons, idcol, "check"))
        consola += "Se agrego el check a la columna " + idcol + " exitosamente \n"
    # NOTE(review): success is reported and the constraint registered
    # *before* result is checked; on failure the registry keeps a
    # constraint that was never applied.
    if result != True:
        listaSemanticos.append(Error.ErrorS(
            "Error Semantico", "No se encontró la columna con id " + idcol))


def AlterTableUnique(nodo, tablaSimbolos):
    """Handle ALTER TABLE ... ADD UNIQUE on a single column."""
    global consola
    global useActual
    base = tablaSimbolos.get(useActual)
    tabla = base.getTabla(nodo.idtabla)
    if tabla.modificarUnique(nodo.idcolumna, True, nodo.idconstraint):
        listaConstraint.append(TS.Constraints(
            useActual, nodo.idtabla, nodo.idconstraint, nodo.idcolumna, "unique"))
        consola += "Se agrego el unique a la columna " + \
            nodo.idcolumna + " exitosamente \n"
    else:
        listaSemanticos.append(
            Error.ErrorS("Error Semantico", "No se encontró la columna con id " + nodo.idcolumna))


def AlterTableFK(nodo, tablaSimbolos):
    """Handle ALTER TABLE ... ADD FOREIGN KEY.

    Pairs nodo.idlocal[i] with nodo.idfk[i]; both columns must exist and
    share the same type.
    """
    global useActual
    global consola
    base = tablaSimbolos.get(useActual)
    tabla = base.getTabla(nodo.idtabla)
    for i in range(len(nodo.idlocal)):
        idlocal = nodo.idlocal[i].valor
        idfk = nodo.idfk[i].valor
        columnafk = tablaSimbolos.getColumna(useActual, nodo.idtablafk, idfk)
        columnalocal = tabla.getColumna(idlocal)
        if columnafk != None and columnalocal != None:
            if columnafk.tipo.tipo == columnalocal.tipo.tipo:
                tabla.modificarFk(idlocal,
                                  nodo.idtablafk, idfk)
                if nodo.idconstraint != None:
                    # NOTE(review): this record stores the column *object*
                    # (columnalocal) where the other handlers store the
                    # column name -- confirm this asymmetry is intended.
                    listaConstraint.append(
                        TS.Constraints(useActual, nodo.idtabla, nodo.idconstraint, columnalocal, "FK"))
                listaFK.append(TS.llaveForanea(
                    useActual, nodo.idtabla, nodo.idtablafk, idlocal, idfk))
                consola += "Se agrego la llave foranea a " + idlocal + " exitosamente \n"
            else:
                listaSemanticos.append(Error.ErrorS("Error Semantico", "La columna %s y la columna %s no tienen el mismo tipo" % (
                    idlocal, idfk)))
        else:
            listaSemanticos.append(
                Error.ErrorS("Error Semantico", "No se encontró la columna"))


def AlterTableDropColumn(nodo, tablaSimbolos):
    """Handle ALTER TABLE ... DROP COLUMN for every listed column."""
    global useActual
    global consola
    base = tablaSimbolos.get(useActual)
    tabla = base.getTabla(nodo.idtabla)
    for col in nodo.listaColumnas:
        # jBase drops by positional index; 0 means success.
        if jBase.alterDropColumn(useActual, nodo.idtabla,
                                 tabla.getIndex(col.idcolumna)) == 0:
            if tabla.deleteColumn(col.idcolumna):
                consola += "Se eliminó con exito la columna " + col.idcolumna + "\n"
            else:
                listaSemanticos.append(Error.ErrorS(
                    "Error Semantico", "La columna " + col.idcolumna + " no existe"))


def AlterTableDropConstraint(nodo, tablaSimbolos):
    """Handle ALTER TABLE ... DROP CONSTRAINT.

    NOTE(review): despite its name, nodo.listaColumnas holds the
    constraint *name* here (it is compared against cons.idconstraint
    and concatenated into messages), not a column list.
    """
    global useActual
    global consola
    base = tablaSimbolos.get(useActual)
    tabla = base.getTabla(nodo.idtabla)
    bandera = False
    for cons in listaConstraint:
        if cons.idconstraint == nodo.listaColumnas:
            bandera = True
            # Dispatch on constraint kind recorded at creation time.
            if cons.tipo == "unique":
                if tabla.deleteUnique(cons.idcol):
                    consola += "Se eliminó con éxito el constraint " + nodo.listaColumnas + "\n"
                else:
                    consola += "Error no se pudo eliminar el constraint " + nodo.listaColumnas + "\n"
            elif cons.tipo == "check":
                if tabla.deleteCheck(cons.idcol):
                    consola += "Se eliminó con éxito el constraint " + nodo.listaColumnas + "\n"
                else:
                    consola += "Error no se pudo eliminar el constraint " + nodo.listaColumnas + "\n"
            elif cons.tipo == "FK":
                if tabla.deleteFk(cons.idcol):
                    consola += "Se eliminó con éxito el constraint " + nodo.listaColumnas + "\n"
                else:
                    consola += "Error no se pudo eliminar el constraint " + nodo.listaColumnas + "\n"
    if bandera == False:
        listaSemanticos.append(Error.ErrorS(
            "Error Semantico", "No se encontro el constraint " + nodo.listaColumnas))


def AlterColumnNotNull(nodo, tablaSimbolos):
    """Handle ALTER COLUMN ... SET NOT NULL for every listed column."""
    global useActual
    global consola
    base = tablaSimbolos.get(useActual)
    tabla = base.getTabla(nodo.idtabla)
    for col in nodo.columnas:
        if tabla.modificarNull(col.idcolumna):
            consola += "Se cambió a not null con exito la columna " + col.idcolumna + " \n"
        else:
            listaSemanticos.append(Error.ErrorS(
                "Error Semantico", "No se encontro la columna" + col.idcolumna))


def AlterColumnCTipo(nodo, tablaSimbolos):
    """Handle ALTER COLUMN ... TYPE (change a column's data type)."""
    global useActual
    global consola
    base = tablaSimbolos.get(useActual)
    tabla = base.getTabla(nodo.idtabla)
    for col in nodo.columnas:
        # Status codes per the branches: 0 ok, 1 new size smaller than
        # current, 2 type mismatch, 3 column missing.
        b = tabla.modificarTipo(
            col.idcolumna, col.valcambio.tipo, col.valcambio.cantidad)
        if b == 0:
            consola += "Se modificó el tipo exitosamente a la columna " + col.idcolumna + " \n"
        elif b == 1:
            listaSemanticos.append(Error.ErrorS(
                "Error Semantico", "El valor es menor al actual"))
        elif b == 2:
            listaSemanticos.append(Error.ErrorS(
                "Error Semantico", "Los tipos no coinciden"))
        elif b == 3:
            listaSemanticos.append(Error.ErrorS(
                "Error Semantico", "la columna no existe " + col.idcolumna))


def InsertTable(nodo, tablaSimbolos):
    """Handle INSERT INTO, validating column count, nullability and types."""
    global consola
    # NOTE(review): flag is never read in the visible portion.
    flag = False
    base = tablaSimbolos.get(useActual)
    if base != None:
        tabla = base.getTabla(nodo.id)
        if tabla != None:
            if nodo.listaColumnas != None:
                if len(nodo.listaColumnas) == len(nodo.listValores):
                    result = False
                    # check the column count and which columns allow null
                    b = tabla.comprobarNulas(nodo.listaColumnas)
                    if b["cod"] == 0:
                        # validate value types column by column
                        for i in range(len(nodo.listaColumnas)):
                            col = tabla.getColumna(nodo.listaColumnas[i].valor)
                            val = Interpreta_Expresion(nodo.listValores[i], tablaSimbolos, tabla)
                            if hasattr(col.tipo, "valor"):
                                # ENUM-like column: the value must belong
                                # to the declared collection in `types`.
                                el_tipo = types[col.tipo.valor]
                                if el_tipo is not None:
                                    if nodo.listValores[i].valor in el_tipo.keys():
                                        result = True
                                    else:
                                        result = False
                                        listaSemanticos.append(Error.ErrorS(
                                            "Error Semantico", "El valor: '%s' no ha sido definido en la colección: '%s'." % (nodo.listValores[i].valor, col.tipo.valor)))
                                        return
                                else:
                                    listaSemanticos.append(Error.ErrorS(
                                        "Error Semantico", "Error en ENUM TYPE: la colección " + col.tipo.valor + " no ha sido definida. "))
                            else:
                                if col.tipo.tipo == TipoDato.NUMERICO:
                                    result = validarTiposNumericos(
                                        col.tipo.dato.lower(), val)
                                elif col.tipo.tipo == TipoDato.CHAR:
                                    if val.tipo == Expresion.CADENA:
                                        result = validarTiposChar(col.tipo, val)
                                    else:
                                        result
+ o0oOOo0O0Ooo - Oo0Ooo iII1iI1IIiI . request_nonce_sent = None lisp . lprint ( "Stop request-nonce mode for {}" . format ( lisp . red ( iII1iI1IIiI . rloc_str , False ) ) ) if 27 - 27: OoO0O00 + OoOoOO00 * ooOoO0o iII1iI1IIiI . last_good_echo_nonce_rcvd = lisp . lisp_get_timestamp ( ) else : o0o = "none" if ( iII1iI1IIiI . request_nonce_sent ) : o0o = lisp . lisp_hex_string ( iII1iI1IIiI . request_nonce_sent ) if 71 - 71: i1IIi . I11i + Oo0Ooo * oO0o * iIii1I11I1II1 + I1ii11iIi11i lisp . lprint ( ( "Received echo-nonce 0x{} from {}, but request-" + "nonce is {}" ) . format ( lisp . lisp_hex_string ( oO ) , # OoooooooOO lisp . red ( iII1iI1IIiI . rloc_str , False ) , o0o ) ) if 38 - 38: OoOoOO00 / iIii1I11I1II1 % i11iIiiIii - IiII * iII111i / OoOoOO00 if 13 - 13: OoO0O00 * I1ii11iIi11i - I1Ii111 return if 79 - 79: oO0o % o0oOOo0O0Ooo % OoOoOO00 if 45 - 45: I1IiiI * OOooOOo % OoO0O00 if 24 - 24: ooOoO0o - I11i * oO0o if 87 - 87: Ii1I - I1ii11iIi11i % I1ii11iIi11i . oO0o / I1ii11iIi11i if 6 - 6: OoOoOO00 / iIii1I11I1II1 * OoooooooOO * i11iIiiIii o0O0OOo0oO = { "lisp xtr-parameters" : [ oo000o , { "rloc-probing" : [ True , "yes" , "no" ] , "nonce-echoing" : [ True , "yes" , "no" ] , "data-plane-security" : [ True , "yes" , "no" ] , "data-plane-logging" : [ True , "yes" , "no" ] , "frame-logging" : [ True , "yes" , "no" ] , "flow-logging" : [ True , "yes" , "no" ] , "nat-traversal" : [ True , "yes" , "no" ] , "checkpoint-map-cache" : [ True , "yes" , "no" ] , "ipc-data-plane" : [ True , "yes" , "no" ] , "decentralized-push-xtr" : [ True , "yes" , "no" ] , "decentralized-pull-xtr-modulus" : [ True , 1 , 0xff ] , "decentralized-pull-xtr-dns-suffix" : [ True ] , "register-reachable-rtrs" : [ True , "yes" , "no" ] , "program-hardware" : [ True , "yes" , "no" ] } ] , "lisp interface" : [ lispconfig . 
lisp_interface_command , { "interface-name" : [ True ] , "device" : [ True ] , "instance-id" : [ True , 0 , 0xffffffff ] , "dynamic-eid" : [ True ] , "multi-tenant-eid" : [ True ] , "lisp-nat" : [ True , "yes" , "no" ] , "dynamic-eid-device" : [ True ] , "dynamic-eid-timeout" : [ True , 0 , 0xff ] } ] , "lisp map-resolver" : [ O0O , { "mr-name" : [ True ] , "ms-name" : [ True ] , "dns-name" : [ True ] , "address" : [ True ] } ] , "lisp database-mapping" : [ II111 , { "prefix" : [ ] , "mr-name" : [ True ] , "ms-name" : [ True ] , "instance-id" : [ True , 0 , 0xffffffff ] , "secondary-instance-id" : [ True , 0 , 0xffffffff ] , "eid-prefix" : [ True ] , "group-prefix" : [ True ] , "dynamic-eid" : [ True , "yes" , "no" ] , "signature-eid" : [ True , "yes" , "no" ] , "rloc" : [ ] , "rloc-record-name" : [ True ] , "elp-name" : [ True ] , "geo-name" : [ True ] , "rle-name" : [ True ] , "json-name" : [ True ] , "address" : [ True ] , "interface" : [ True ] , "priority" : [ True , 0 , 255 ] , "weight" : [ True , 0 , 100 ] } ] , "lisp map-cache" : [ lispconfig . lisp_map_cache_command , { "prefix" : [ ] , "instance-id" : [ True , 0 , 0xffffffff ] , "eid-prefix" : [ True ] , "group-prefix" : [ True ] , "send-map-request" : [ True , "yes" , "no" ] , "rloc" : [ ] , "rloc-record-name" : [ True ] , "rle-name" : [ True ] , "elp-name" : [ True ] , "address" : [ True ] , "priority" : [ True , 0 , 255 ] , "weight" : [ True , 0 , 100 ] } ] , "lisp itr-map-cache" : [ lispconfig . lisp_map_cache_command , { "prefix" : [ ] , "instance-id" : [ True , 0 , 0xffffffff ] , "eid-prefix" : [ True ] , "group-prefix" : [ True ] , "rloc" : [ ] , "rloc-record-name" : [ True ] , "rle-name" : [ True ] , "elp-name" : [ True ] , "address" : [ True ] , "priority" : [ True , 0 , 255 ] , "weight" : [ True , 0 , 100 ] } ] , "lisp explicit-locator-path" : [ lispconfig . 
lisp_elp_command , { "elp-name" : [ False ] , "elp-node" : [ ] , "address" : [ True ] , "probe" : [ True , "yes" , "no" ] , "strict" : [ True , "yes" , "no" ] , "eid" : [ True , "yes" , "no" ] } ] , "lisp replication-list-entry" : [ lispconfig . lisp_rle_command , { "rle-name" : [ False ] , "rle-node" : [ ] , "address" : [ True ] , "level" : [ True , 0 , 255 ] } ] , "lisp geo-coordinates" : [ lispconfig . lisp_geo_command , { "geo-name" : [ False ] , "geo-tag" : [ False ] } ] , "show itr-map-cache" : [ IIiiIiI1 , { } ] , "show itr-rloc-probing" : [ I1i1iii , { } ] , "show itr-keys" : [ oo , { } ] , "show itr-dynamic-eid" : [ lispconfig . lisp_show_dynamic_eid_command , { } ] } if 42 - 42: II111iiii / O0 . iIii1I11I1II1 / O0 / OoO0O00 / OoooooooOO if 62 - 62: O0 . Oo0Ooo if 33 - 33: Oo0Ooo / iIii1I11I1II1 % i1IIi if 76 - 76: Ii1I + iIii1I11I1II1 + OoOoOO00 . OoO0O00 if 49 - 49: IiII / ooOoO0o / OOooOOo if 25 - 25: I1IiiI % O0 + i1IIi - ooOoO0o if ( I111I1Iiii1i ( ) == False ) : lisp . lprint ( "lisp_itr_startup() failed" ) lisp . lisp_print_banner ( "ITR abnormal exit" ) exit ( 1 ) if 38 - 38: o0oOOo0O0Ooo % I1Ii111 + i11iIiiIii + iII111i + ooOoO0o / i11iIiiIii if 94 - 94: iII111i - Oo0Ooo + oO0o O0oooOoO = [ i111I , oO0oIIII , II1Ii1iI1i , Oo0oO0oo0oO00 ] if 62 - 62: OOooOOo / II111iiii + OoOoOO00 % ooOoO0o / OoOoOO00 + I1ii11iIi11i if 2 - 2: i11iIiiIii - I1Ii111 + OoO0O00 % I11i * Ii1I if 54 - 54: O0 - iII111i . OOooOOo % iII111i + iII111i if 36 - 36: OOooOOo % i11iIiiIii Iiii1Ii = True ooOOo00oo0 = [ i111I ] * 3 IIIII1Ii = [ II1Ii1iI1i ] * 3 if 13 - 13: II111iiii while ( True ) : try : o0o000Oo , oO0o0O0o0OO00 , oO0oOO0o = select . select ( O0oooOoO , [ ] , [ ] ) except : break if 23 - 23: OoO0O00 + i11iIiiIii if 20 - 20: I1ii11iIi11i if 3 - 3: OoO0O00 * i1IIi . I1IiiI . O0 - OoOoOO00 if 81 - 81: I1IiiI - iIii1I11I1II1 / I1IiiI / O0 if ( lisp . lisp_ipc_data_plane and Oo0oO0oo0oO00 in o0o000Oo ) : lisp . 
lisp_process_punt ( Oo0oO0oo0oO00 , II1iII1i , iiI1iIiI ) if 34 - 34: Ii1I * Ii1I - I1ii11iIi11i - O0 . i11iIiiIii if 32 - 32: iIii1I11I1II1 . OoO0O00 * oO0o / OOooOOo . II111iiii - Oo0Ooo if 10 - 10: I1ii11iIi11i / i11iIiiIii - Ii1I + oO0o * I1IiiI if 94 - 94: I1IiiI + iIii1I11I1II1 / O0 - OoooooooOO % I1ii11iIi11i if 64 - 64: I11i + OoO0O00 if ( i111I in o0o000Oo ) : o0O
    save_nifti = True if options['debug'] is True else False
    t1 = test_scan(model,
                   test_x_data,
                   options,
                   save_nifti=save_nifti,
                   cuda=cuda)

    # second network: only voxels the first network scored > 0.8 are
    # re-classified as candidates.
    options['test_name'] = options['experiment'] + '_prob_1.nii.gz'
    model = models[1]
    t2 = test_scan(model,
                   test_x_data,
                   options,
                   save_nifti=True,
                   candidate_mask=(t1 > 0.8))

    # postprocess the output segmentation
    # obtain the orientation from the first scan used for testing
    scans = test_x_data.keys()
    flair_scans = [test_x_data[s]['FLAIR'] for s in scans]
    flair_image = load_nii(flair_scans[0])
    options['test_name'] = options['experiment'] + '_hard_seg.nii.gz'
    out_segmentation = post_process_segmentation(t2,
                                                 options,
                                                 save_nifti=True,
                                                 orientation=flair_image.affine)

    # return out_segmentation
    return out_segmentation
    # return t1


def load_test_patches(test_x_data,
                      patch_size,
                      batch_size,
                      voxel_candidates=None,
                      datatype=np.float32):
    """
    Function generator to load test patches with size equal to patch_size,
    given a list of selected voxels.  Patches are returned in batches to
    reduce the amount of RAM used.

    Inputs:
      - x_data: list containing all subject image paths for a single modality
      - selected_voxels: list where each element contains the subject binary
        mask for selected voxels [len(x), len(y), len(z)]
      - tuple containing patch size, either 2D (p1, p2, 1) or 3D (p1, p2, p3)
      - Voxel candidates: a binary mask containing voxels for testing

    Outputs (in batches):
      - X: Train X data matrix for the each channel [num_samples, p1, p2, p3]
      - voxel_coord: list of tuples with voxel coordinates (x,y,z) of
        selected patches

    NOTE(review): despite the docstring, the current implementation no
    longer yields batches -- it builds all patches at once and returns a
    single array (batch_size and datatype are unused).
    """
    # get scan names and number of modalities used
    scans = list(test_x_data.keys())
    modalities = list(test_x_data[scans[0]].keys())

    # load all image modalities and normalize intensities
    images = []
    for m in modalities:
        raw_images = [load_nii(test_x_data[s][m]).get_data() for s in scans]
        images.append([normalize_data(im) for im in raw_images])

    # select voxels for testing. Discard CSF and darker WM in FLAIR.
    # If voxel_candidates is not selected, using intensity > 0.5 in FLAIR,
    # else use the binary mask to extract candidate voxels
    if voxel_candidates is None:
        flair_scans = [test_x_data[s]['FLAIR'] for s in scans]
        selected_voxels = [get_mask_voxels(mask)
                           for mask in select_training_voxels(flair_scans,
                                                              0.5)][0]
    else:
        selected_voxels = get_mask_voxels(voxel_candidates)

    # yield data for testing with size equal to batch_size
    # for i in range(0, len(selected_voxels), batch_size):
    #     c_centers = selected_voxels[i:i+batch_size]
    #     X = []
    #     for m, image_modality in zip(modalities, images):
    #         X.append(get_patches(image_modality[0], c_centers, patch_size))
    #     yield np.stack(X, axis=1), c_centers
    X = []
    for image_modality in images:
        # only the first scan of each modality is used here
        X.append(get_patches(image_modality[0], selected_voxels, patch_size))
    # print(len(X), len(X[0]))
    Xs = np.stack(X, axis=1)
    # print(Xs.shape)
    return Xs, selected_voxels


def get_mask_voxels(mask):
    """
    Compute x,y,z coordinates of a binary mask

    Input:
      - mask: binary mask

    Output:
      - list of tuples containing the (x,y,z) coordinate for each of the
        input voxels
    """
    indices = np.stack(np.nonzero(mask), axis=1)
    indices = [tuple(idx) for idx in indices]
    return indices


def get_patches(image, centers, patch_size=(15, 15, 15)):
    """
    Get image patches of arbitrary size based on a set of centers
    """
    # If the size has even numbers, the patch will be centered. If not,
    # it will try to create an square almost centered. By doing this we allow
    # pooling when using encoders/unets.
    patches = []
    list_of_tuples = all([isinstance(center, tuple) for center in centers])
    sizes_match = [len(center) == len(patch_size) for center in centers]
    # NOTE(review): sizes_match is a *list*; `and sizes_match` only tests
    # non-emptiness, not that every length matches (should be
    # all(sizes_match)).
    if list_of_tuples and sizes_match:
        patch_half = tuple([idx//2 for idx in patch_size])
        # shift centers by patch_half to account for the padding below
        new_centers = [map(add, center, patch_half) for center in centers]
        padding = tuple((idx, size-idx)
                        for idx, size in zip(patch_half, patch_size))
        new_image = np.pad(image, padding, mode='constant', constant_values=0)
        slices = [[slice(c_idx-p_idx, c_idx+(s_idx-p_idx))
                   for (c_idx, p_idx, s_idx) in zip(center,
                                                    patch_half,
                                                    patch_size)]
                  for center in new_centers]
        # NOTE(review): indexing with a *list* of slices is deprecated in
        # recent numpy -- tuple(idx) would be the safe form.
        patches = [new_image[idx] for idx in slices]
        # patches = np.array(patches)
    return patches


def test_scan(model,
              test_x_data,
              options,
              save_nifti=True,
              candidate_mask=None,
              cuda=True):
    """
    Test data based on one model

    Input:
      - test_x_data: a nested dictionary containing training image paths:
        train_x_data['scan_name']['modality'] = path_to_image_modality
      - save_nifti: save image segmentation
      - candidate_mask: a binary mask containing voxels to classify

    Output:
      - test_scan = Output image containing the probability output
        segmentation
      - If save_nifti --> Saves a nifti file at specified location
        options['test_folder']/['test_scan']
    """
    # get_scan name and create an empty nifti image to store segmentation
    scans = list(test_x_data.keys())
    flair_scans = [test_x_data[s]['FLAIR'] for s in scans]
    flair_image = load_nii(flair_scans[0])
    seg_image = np.zeros_like(flair_image.get_data().astype('float32'))

    if candidate_mask is not None:
        all_voxels = np.sum(candidate_mask)
    else:
        all_voxels = np.sum(flair_image.get_data() > 0)

    if options['debug'] is True:
        print ("> DEBUG ", scans[0], "Voxels to classify:", all_voxels)

    # compute lesion segmentation in batches of size options['batch_size']
    batch, centers = load_test_patches(test_x_data,
                                       options['patch_size'],
                                       options['batch_size'],
                                       candidate_mask)
    if options['debug'] is True:
        print ("> DEBUG: testing current_batch:", batch.shape,)
    with torch.no_grad():
        model.eval()
        # hand-rolled ceil division over the batch dimension
        iter_num = len(batch)//options['batch_size'] if len(batch) % options['batch_size'] == 0 else len(batch)//options['batch_size'] + 1
        for i in range(iter_num):
            start = i*options['batch_size']
            end = start+options['batch_size']
            data_source_valid = batch[start:end, :]
            current_centers = centers[start:end]
            # last batch not completed
            # last iter from batches less than batch_size
            if i == iter_num-1 and len(batch) % options['batch_size'] != 0:
                # data_source_valid = batch[start:, :]
                # current_centers = centers[start:]
                # Pad the final batch with patches wrapped from the start
                # of the volume.  NOTE(review): their predictions are
                # written into seg_image a second time below -- harmless
                # only because inference is deterministic.
                end = options['batch_size']-len(data_source_valid)
                data_source_valid = np.concatenate((data_source_valid, batch[:end, :]), axis=0)
                current_centers = np.concatenate((current_centers, centers[:end]), axis=0)
            data_source_valid = torch.from_numpy(data_source_valid)
            if cuda:
                data_source_valid = data_source_valid.cuda()
            data_source_valid = Variable(data_source_valid)
            s_output, _ = model(data_source_valid)
            # F.log_softmax(s_output, dim = 1)
            # sum up batch loss
            y_pred = s_output.data.max(1)[1]  # get the index of the max log-probability
            y_pred = y_pred.detach().cpu().numpy()
            # NOTE(review): no-op -- reshape result is discarded
            y_pred.reshape(-1, 1)
            # y_pred = y_pred.numpy()
            [x, y, z] = np.stack(current_centers, axis=1)
            seg_image[x, y, z] = y_pred

    if options['debug'] is True:
        print ("...done!")

    # check if the computed volume is lower than the minimum accuracy given
    # by the min_error parameter
    if check_min_error(seg_image, options, flair_image.header.get_zooms()):
        if options['debug']:
            print ("> DEBUG ", scans[0], "lesion volume below ", \
                options['min_error'], 'ml')
        seg_image = np.zeros_like(flair_image.get_data().astype('float32'))

    if save_nifti:
        out_scan = nib.Nifti1Image(seg_image, affine=flair_image.affine)
        out_scan.to_filename(os.path.join(options['test_folder'],
                                          options['test_scan'],
                                          options['experiment'],
                                          options['test_name']))
    return seg_image


def check_min_error(input_scan, options, voxel_size):
    """
    check that the output volume is higher than the minimum accuracy given
    by the parameter min_error

    Returns True when the thresholded, size-filtered lesion volume (in ml)
    is below options['min_error'].
    """
    from scipy import ndimage
    t_bin = options['t_bin']
    l_min = options['l_min']

    # voxel volume in ml (mm^3 / 1000)
    voxel_size = np.prod(voxel_size) / 1000.0

    # threshold input segmentation
    output_scan = np.zeros_like(input_scan)
    t_segmentation = input_scan >= t_bin

    # filter candidates by size and store those > l_min
    labels, num_labels = ndimage.label(t_segmentation)
    label_list = np.unique(labels)
    num_elements_by_lesion = ndimage.labeled_comprehension(t_segmentation,
                                                           labels,
                                                           label_list,
                                                           np.sum,
                                                           float, 0)
    # NOTE(review): l is an index into num_elements_by_lesion but is
    # compared against label values (labels == l); this only lines up
    # because ndimage.label yields consecutive labels starting at 0.
    for l in range(len(num_elements_by_lesion)):
        if num_elements_by_lesion[l] > l_min:
            # assign voxels to output
            current_voxels = np.stack(np.where(labels == l), axis=1)
            output_scan[current_voxels[:, 0],
                        current_voxels[:, 1],
                        current_voxels[:, 2]] = 1

    return (np.sum(output_scan == 1) * voxel_size) < options['min_error']


def select_voxels_from_previous_model(model, train_x_data, options):
    """
    Select training voxels from image segmentation masks
    """
    # get_scan names and number of modalities used
    scans = list(train_x_data.keys())

    # select voxels for training. Discard CSF and darker WM in FLAIR.
    # flair_scans = [train_x_data[s]['FLAIR'] for s in scans]
    # selected_voxels = select_training_voxels(flair_scans, options['min_th'])

    # evaluate training scans using the learned model and extract voxels with
    # probability higher than 0.5
    seg_masks = []
    for scan, s in zip(train_x_data.keys(), range(len(scans))):
        # print(train_x_data.items())
        # print(dict(list(train_x_data.items())[s:s+1]))
        # run the model on one subject at a time
        seg_mask = test_scan(model,
                             dict(list(train_x_data.items())[s:s+1]),
                             options,
                             save_nifti=False)
        seg_masks.append(seg_mask > 0.5)

        if options['debug']:
            flair = nib.load(train_x_data[scan]['FLAIR'])
            tmp_seg = nib.Nifti1Image(seg_mask, affine=flair.affine)
            # tmp_seg.to_filename(os.path.join(options['weight_paths'],
            #                                  options['experiment'],
            #                                  '.train',
            #                                  scan + '_it0.nii.gz'))

    # check candidate segmentations:
    # if no voxels have been selected, return candidate voxels on
    # FLAIR modality > 2
    flair_scans = [train_x_data[s]['FLAIR'] for s in scans]
    images = [load_nii(name).get_data() for name in flair_scans]
    images_norm = [normalize_data(im) for im in images]
    seg_mask = [im > 2 if np.sum(seg) == 0 else seg
                for im, seg in zip(images_norm, seg_masks)]

    return seg_mask


def post_process_segmentation(input_scan,
                              options,
                              save_nifti=True,
                              orientation=np.eye(4)):
    """
    Post-process the probabilistic segmentation using params t_bin and l_min
    t_bin: threshold to binarize the output segmentations
    l_min: minimum lesion volume

    Inputs:
      - input_scan: probabilistic input image (segmentation)
      - options dictionary
      - save_nifti: save the result as nifti

    Output:
      - output_scan: final binarized segmentation
    """
    from scipy import ndimage
    t_bin = options['t_bin']
    l_min = options['l_min']
    output_scan = np.zeros_like(input_scan)

    # threshold input segmentation
    t_segmentation = input_scan >= t_bin

    # filter candidates by size and store those > l_min
    labels, num_labels = ndimage.label(t_segmentation)
    label_list = np.unique(labels)
    num_elements_by_lesion = ndimage.labeled_comprehension(t_segmentation,
                                                           labels,
                                                           label_list,
                                                           np.sum,
                                                           float, 0)
    # NOTE(review): l indexes num_elements_by_lesion and is also compared
    # against label values; this relies on ndimage.label producing
    # consecutive labels starting at 0.
    for l in range(len(num_elements_by_lesion)):
        if num_elements_by_lesion[l] > l_min:
            # assign voxels to output
            current_voxels = np.stack(np.where(labels == l), axis=1)
            output_scan[current_voxels[:, 0],
                        current_voxels[:, 1],
                        current_voxels[:, 2]] = 1

    # save the output segmentation as Nifti1Image
                       **kwargs):  # noqa: E501
        """Unbookmark run  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.unbookmark_run(owner, entity, uuid, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str owner: Owner of the namespace (required)
        :param str entity: Entity: project name, hub name, registry name, ... (required)
        :param str uuid: Uuid identifier of the sub-entity (required)
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: delegate to the *_with_http_info variant and
        # return only the response data.
        kwargs['_return_http_data_only'] = True
        return self.unbookmark_run_with_http_info(owner, entity, uuid, **kwargs)  # noqa: E501

    def unbookmark_run_with_http_info(self, owner, entity, uuid, **kwargs):  # noqa: E501
        """Unbookmark run  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.unbookmark_run_with_http_info(owner, entity, uuid, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str owner: Owner of the namespace (required)
        :param str entity: Entity: project name, hub name, registry name, ... (required)
        :param str uuid: Uuid identifier of the sub-entity (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() must be captured before any other local variable
        # is created, so it only contains the call arguments.
        local_var_params = locals()

        all_params = [
            'owner',
            'entity',
            'uuid'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method unbookmark_run" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'owner' is set
        if self.api_client.client_side_validation and ('owner' not in local_var_params or  # noqa: E501
                                                       local_var_params['owner'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `owner` when calling `unbookmark_run`")  # noqa: E501
        # verify the required parameter 'entity' is set
        if self.api_client.client_side_validation and ('entity' not in local_var_params or  # noqa: E501
                                                       local_var_params['entity'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `entity` when calling `unbookmark_run`")  # noqa: E501
        # verify the required parameter 'uuid' is set
        if self.api_client.client_side_validation and ('uuid' not in local_var_params or  # noqa: E501
                                                       local_var_params['uuid'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `uuid` when calling `unbookmark_run`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'owner' in local_var_params:
            path_params['owner'] = local_var_params['owner']  # noqa: E501
        if 'entity' in local_var_params:
            path_params['entity'] = local_var_params['entity']  # noqa: E501
        if 'uuid' in local_var_params:
            path_params['uuid'] = local_var_params['uuid']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['ApiKey']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/{owner}/{entity}/runs/{uuid}/unbookmark', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def update_run(self, owner, project, run_uuid, body, **kwargs):  # noqa: E501
        """Update run  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_run(owner, project, run_uuid, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str owner: Owner of the namespace (required)
        :param str project: Project where the run will be assigned (required)
        :param str run_uuid: UUID (required)
        :param V1Run body: Run object (required)
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1Run
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: delegate to the *_with_http_info variant and
        # return only the response data.
        kwargs['_return_http_data_only'] = True
        return self.update_run_with_http_info(owner, project, run_uuid, body, **kwargs)  # noqa: E501

    def update_run_with_http_info(self, owner, project, run_uuid, body, **kwargs):  # noqa: E501
        """Update run  # noqa: E501

        This method makes a synchronous HTTP request by default.
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_run_with_http_info(owner, project, run_uuid, body, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str owner: Owner of the namespace (required) :param str project: Project where the run will be assigned (required) :param str run_uuid: UUID (required) :param V1Run body: Run object (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(V1Run, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ 'owner', 'project', 'run_uuid', 'body' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method update_run" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'owner' is set if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501 local_var_params['owner'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `owner` when calling `update_run`") # noqa: E501 # verify the required parameter 'project' is set if self.api_client.client_side_validation and ('project' not in local_var_params or # noqa: E501 local_var_params['project'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `project` when calling `update_run`") # noqa: E501 # 
verify the required parameter 'run_uuid' is set if self.api_client.client_side_validation and ('run_uuid' not in local_var_params or # noqa: E501 local_var_params['run_uuid'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `run_uuid` when calling `update_run`") # noqa: E501 # verify the required parameter 'body' is set if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501 local_var_params['body'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `body` when calling `update_run`") # noqa: E501 collection_formats = {} path_params = {} if 'owner' in local_var_params: path_params['owner'] = local_var_params['owner'] # noqa: E501 if 'project' in local_var_params: path_params['project'] = local_var_params['project'] # noqa: E501 if 'run_uuid' in local_var_params: path_params['run.uuid'] = local_var_params['run_uuid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['ApiKey'] # noqa: E501 return self.api_client.call_api( '/api/v1/{owner}/{project}/runs/{run.uuid}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Run', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def 
upload_run_artifact(self, owner, project, uuid, uploadfile, **kwargs): # noqa: E501 """Upload an artifact file to a store via run access # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.upload_run_artifact(owner, project, uuid, uploadfile, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str owner: Owner of the namespace (required) :param str project: Project having access to the store (required) :param str uuid: Unique integer identifier of the entity (required) :param file uploadfile: The file to upload. (required) :param str path: File path query params. :param bool overwrite: File path query params. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data.
import pygame
import random
import yaml
import os
import Objects

# Texture directories for the different sprite categories.
OBJECT_TEXTURE = os.path.join("texture", "objects")
ENEMY_TEXTURE = os.path.join("texture", "enemies")
ALLY_TEXTURE = os.path.join("texture", "ally")


def create_sprite(img, sprite_size, mmp_tile):
    """Load image file ``img`` and return a (sprite, minimap_sprite) pair.

    ``sprite_size`` is the edge length (px) of the main-view tile and
    ``mmp_tile`` the edge length (px) of the mini-map tile.
    """
    icon = pygame.image.load(img).convert_alpha()
    icon_mmp = pygame.transform.scale(icon, (mmp_tile, mmp_tile))
    icon = pygame.transform.scale(icon, (sprite_size, sprite_size))
    sprite = pygame.Surface((sprite_size, sprite_size), pygame.HWSURFACE)
    sprite_mmp = pygame.Surface((mmp_tile, mmp_tile), pygame.HWSURFACE)
    sprite.blit(icon, (0, 0))
    sprite_mmp.blit(icon_mmp, (0, 0))
    return sprite, sprite_mmp


def reload_game(engine, hero):
    """Advance to the next level (clamped to the last one) and rebuild it."""
    global level_list
    level_list_max = len(level_list) - 1
    engine.level += 1
    hero.position = [1, 1]
    engine.objects = []
    # Stay on the final generator once the hero has passed the last level.
    generator = level_list[min(engine.level, level_list_max)]
    _map = generator['map'].get_map()
    engine.load_map(_map)
    engine.add_objects(generator['obj'].get_objects(_map))
    engine.add_hero(hero)


def restore_hp(engine, hero):
    """Restore the hero's HP; with a 1-in-10 chance curse him instead."""
    if random.randint(1, 10) == 1:
        engine.score -= 0.05
        engine.hero = Objects.EvilEye(hero)
        engine.notify("You were cursed: unlucky")
    else:
        engine.score += 0.1
        hero.hp = hero.max_hp
        engine.notify("HP restored")


def apply_blessing(engine, hero):
    """Buy a random positive effect (Blessing or Berserk) for gold."""
    # Price scales with level and is discounted by intelligence.
    if hero.gold >= int(20 * 1.5 ** engine.level) - 2 * hero.stats["intelligence"]:
        engine.score += 0.2
        hero.gold -= int(20 * 1.5 ** engine.level) - \
            2 * hero.stats["intelligence"]
        if random.randint(0, 1) == 0:
            engine.hero = Objects.Blessing(hero)
            engine.notify("Blessing applied")
        else:
            engine.hero = Objects.Berserk(hero)
            engine.notify("Berserk applied")
    else:
        engine.score -= 0.1
        engine.notify("Nothing happened")


def remove_effect(engine, hero):
    """Pay gold to strip the outermost effect decorator from the hero."""
    # "base" in dir(hero) means the hero is wrapped by at least one effect.
    if hero.gold >= int(10 * 1.5 ** engine.level) - 2 * hero.stats["intelligence"] and "base" in dir(hero):
        hero.gold -= int(10 * 1.5 ** engine.level) - \
            2 * hero.stats["intelligence"]
        engine.hero = hero.base
        engine.hero.calc_max_HP()
        engine.notify("Effect removed")
    else:
        engine.notify("Nothing happened")


def add_gold(engine, hero):
    """Grant a level-scaled random amount of gold; 1-in-10 chance of a curse."""
    if random.randint(1, 10) == 1:
        engine.score -= 0.05
        engine.hero = Objects.Weakness(hero)
        engine.notify("You were cursed: weak")
    else:
        engine.score += 0.1
        gold = int(random.randint(10, 1000) * (1.1 ** (engine.hero.level - 1)))
        hero.gold += gold
        engine.notify(f"{gold} gold added")


def fight(engine, enemy, hero):
    """Resolve combat: hero loses HP until a stat-weighted roll succeeds.

    Each round a uniform roll over the combined stat pool must land inside
    the hero's share; otherwise the hero loses 1 HP. Dying ends the game.
    """
    enemy_value = enemy.stats['strength'] + enemy.stats['endurance'] + \
        enemy.stats['intelligence'] + enemy.stats['luck']
    hero_value = sum(hero.stats.values())
    while random.randint(1, enemy_value + hero_value) > hero_value and hero.hp > 0:
        hero.hp -= 1
    if hero.hp > 0:
        engine.score += 1
        hero.exp += enemy.xp
        engine.notify("Defeated enemy!")
        hero.level_up()
    else:
        engine.game_process = False
        engine.notify("Lost!")
        engine.notify("GAME OVER!!!")


def enhance(engine, hero):
    """Wrap the hero in an Enhance effect and top his HP up to the new max."""
    engine.score += 0.2
    engine.hero = Objects.Enhance(hero)
    hero.hp = max(hero.max_hp, hero.hp)
    engine.notify("You was enhanced!")


class MapFactory(yaml.YAMLObject):
    """Base YAML constructor for level descriptions.

    Subclasses provide nested ``Map`` and ``Objects`` classes. The YAML
    node may carry per-enemy counts (``rat``, ``knight``); they are stored
    in the object generator's ``config``.
    """

    @classmethod
    def from_yaml(cls, loader, node):
        """Build a ``{'map': Map, 'obj': Objects}`` pair from a YAML node."""
        # The original defined four unused inner helpers (get_end,
        # get_random, get_special, get_empty) here -- dead code, removed.
        data = loader.construct_mapping(node)
        _obj = cls.create_objects()
        # Missing enemy counts default to 0 (was try/except KeyError).
        _obj.config = {'rat': data.get("rat", 0),
                       'knight': data.get("knight", 0)}
        return {'map': cls.create_map(), 'obj': _obj}

    @classmethod
    def create_map(cls):
        return cls.Map()

    @classmethod
    def create_objects(cls):
        return cls.Objects()


class EndMap(MapFactory):
    """Final, hand-drawn level with no objects."""

    yaml_tag = "!end_map"

    class Map:
        def __init__(self):
            self.Map = ['000000000000000000000000000000000000000',
                        '0 0',
                        '0 0',
                        '0 0 0 000 0 0 00000 0 0 0',
                        '0 0 0 0 0 0 0 0 0 0 0',
                        '0 000 0 0 00000 0000 0 0 0',
                        '0 0 0 0 0 0 0 0 0 0 0',
                        '0 0 0 000 0 0 00000 00000 0',
                        '0 0 0',
                        '0 0',
                        '000000000000000000000000000000000000000'
                        ]
            self.Map = list(map(list, self.Map))
            # '0' cells become walls; everything else becomes floor.
            for i in self.Map:
                for j in range(len(i)):
                    i[j] = wall if i[j] == '0' else floor1

        def get_map(self):
            return self.Map

    class Objects:
        def __init__(self):
            self.objects = []

        def get_objects(self, _map):
            return self.objects


class RandomMap(MapFactory):
    """Level with randomly scattered walls and randomly placed objects."""

    yaml_tag = "!random_map"

    class Map:
        w, h = 39, 25

        def __init__(self):
            w = self.w
            h = self.h
            self.Map = [[0 for _ in range(w)] for _ in range(h)]
            for i in range(w):
                for j in range(h):
                    if i == 0 or j == 0 or i == w - 1 or j == h - 1:
                        self.Map[j][i] = wall
                    else:
                        # 1/9 wall, 8/9 one of the three floor tiles.
                        self.Map[j][i] = [wall, floor1, floor2, floor3,
                                          floor1, floor2, floor3, floor1,
                                          floor2][random.randint(0, 8)]

        def get_map(self):
            return self.Map

    class Objects:
        def __init__(self):
            self.objects = []

        def get_objects(self, _map):
            w, h = 38, 24
            for obj_name in object_list_prob['objects']:
                prop = object_list_prob['objects'][obj_name]
                for i in range(random.randint(prop['min-count'], prop['max-count'])):
                    coord = (random.randint(1, w), random.randint(1, h))
                    # Re-roll until the spot is neither a wall, nor occupied,
                    # nor the hero's start cell (1, 1).
                    intersect = True
                    while intersect:
                        intersect = False
                        if _map[coord[1]][coord[0]] == wall:
                            intersect = True
                            coord = (random.randint(1, w),
                                     random.randint(1, h))
                            continue
                        for obj in self.objects:
                            if coord == obj.position or coord == (1, 1):
                                intersect = True
                                coord = (random.randint(1, w),
                                         random.randint(1, h))
                    self.objects.append(Objects.Ally(
                        prop['sprite'], prop['action'], coord))
            for obj_name in object_list_prob['ally']:
                prop = object_list_prob['ally'][obj_name]
                for i in range(random.randint(prop['min-count'], prop['max-count'])):
                    coord = (random.randint(1, w), random.randint(1, h))
                    intersect = True
                    while intersect:
                        intersect = False
                        if _map[coord[1]][coord[0]] == wall:
                            intersect = True
                            coord = (random.randint(1, w),
                                     random.randint(1, h))
                            continue
                        for obj in self.objects:
                            if coord == obj.position or coord == (1, 1):
                                intersect = True
                                coord = (random.randint(1, w),
                                         random.randint(1, h))
                    self.objects.append(Objects.Ally(
                        prop['sprite'], prop['action'], coord))
            for obj_name in object_list_prob['enemies']:
                prop = object_list_prob['enemies'][obj_name]
                for i in range(random.randint(0, 5)):
                    coord = (random.randint(1, w), random.randint(1, h))
                    intersect = True
                    while intersect:
                        intersect = False
                        if _map[coord[1]][coord[0]] == wall:
                            intersect = True
                            coord = (random.randint(1, w),
                                     random.randint(1, h))
                            continue
                        for obj in self.objects:
                            if coord == obj.position or coord == (1, 1):
                                intersect = True
                                coord = (random.randint(1, w),
                                         random.randint(1, h))
                    self.objects.append(Objects.Enemy(
                        prop['sprite'], prop, prop['experience'], coord))
            return self.objects


class SpecialMap(MapFactory):
    """Hand-drawn level whose enemy counts come from the YAML config."""

    yaml_tag = "!special_map"

    class Map:
        def __init__(self):
            self.Map = ['000000000000000000000000000000000000000',
                        '0 0',
                        '0 0 0',
                        '0 0 0 0000 0 0 00 00 0 0',
                        '0 0 0 0 0 0 0 0 00 0 0 00',
                        '0 000 0000 0000 0 0 0 00',
                        '0 0 0 0 0 0 0 0 0 0 00',
                        '0 0 0 0 0000 0 0 0 0 0',
                        '0 0 0',
                        '0 0',
                        '000000000000000000000000000000000000000'
                        ]
            self.Map = list(map(list, self.Map))
            for i in self.Map:
                for j in range(len(i)):
                    i[j] = wall if i[j] == '0' else floor1

        def get_map(self):
            return self.Map

    class Objects:
        def __init__(self):
            self.objects = []
            self.config = {}

        def get_objects(self, _map):
            # NOTE(review): here w, h = 10, 38 and coords are drawn as
            # (randint(1, h), randint(1, w)) -- the axis convention is the
            # opposite of RandomMap's; preserved as-is, verify intent.
            w, h = 10, 38
            for obj_name in object_list_prob['objects']:
                prop = object_list_prob['objects'][obj_name]
                for i in range(random.randint(prop['min-count'], prop['max-count'])):
                    coord = (random.randint(1, h), random.randint(1, w))
                    intersect = True
                    while intersect:
                        intersect = False
                        if _map[coord[1]][coord[0]] == wall:
                            intersect = True
                            coord = (random.randint(1, h),
                                     random.randint(1, w))
                            continue
                        for obj in self.objects:
                            if coord == obj.position or coord == (1, 1):
                                intersect = True
                                coord = (random.randint(1, h),
                                         random.randint(1, w))
                    self.objects.append(Objects.Ally(
                        prop['sprite'], prop['action'], coord))
            for obj_name in object_list_prob['ally']:
                prop = object_list_prob['ally'][obj_name]
                for i in range(random.randint(prop['min-count'], prop['max-count'])):
                    coord = (random.randint(1, h), random.randint(1, w))
                    intersect = True
                    while intersect:
                        intersect = False
                        if _map[coord[1]][coord[0]] == wall:
                            intersect = True
                            coord = (random.randint(1, h),
                                     random.randint(1, w))
                            continue
                        for obj in self.objects:
                            if coord == obj.position or coord == (1, 1):
                                intersect = True
                                coord = (random.randint(1, h),
                                         random.randint(1, w))
                    self.objects.append(Objects.Ally(
                        prop['sprite'], prop['action'], coord))
            # Enemy counts come from the per-level YAML config.
            for enemy, count in self.config.items():
                prop = object_list_prob['enemies'][enemy]
                for i in range(random.randint(0, count)):
                    coord = (random.randint(1, h), random.randint(1, w))
                    intersect = True
                    while intersect:
                        intersect = False
                        if _map[coord[1]][coord[0]] == wall:
                            intersect = True
                            coord = (random.randint(1, h),
                                     random.randint(1, w))
                            continue
                        for obj in self.objects:
                            if coord == obj.position or coord == (1, 1):
                                intersect = True
                                coord = (random.randint(1, h),
                                         random.randint(1, w))
                    self.objects.append(Objects.Enemy(
                        prop['sprite'], prop, prop['experience'], coord))
            return self.objects


class EmptyMap(MapFactory):
    """Degenerate level: empty map, no objects, ignores YAML parameters."""

    yaml_tag = "!empty_map"

    @classmethod
    def from_yaml(cls, loader, node):
        return {'map': EmptyMap.Map(), 'obj': EmptyMap.Objects()}

    class Map:
        def __init__(self):
            self.Map = [[]]

        def get_map(self):
            return self.Map

    class Objects:
        def __init__(self):
            self.objects = []

        def get_objects(self, _map):
            return self.objects


# Single-element lists so that service_init can swap the sprite in place
# while every map cell keeps referencing the same tile object.
wall = [0]
floor1 = [0]
floor2 = [0]
floor3 = [0]


def service_init(sprite_size, tile, full=True):
    """Load tile/object sprites and bind YAML actions to the handlers above.

    NOTE(review): this function continues beyond the visible chunk; only
    the portion shown here is reproduced. In particular ``file`` may be
    closed later in the unseen remainder, so it is left as a plain open().
    """
    global object_list_prob, level_list
    global wall
    global floor1
    global floor2
    global floor3
    wall[0] = create_sprite(os.path.join("texture", "wall.png"),
                            sprite_size, tile)
    floor1[0] = create_sprite(os.path.join("texture", "Ground_1.png"),
                              sprite_size, tile)
    floor2[0] = create_sprite(os.path.join("texture", "Ground_2.png"),
                              sprite_size, tile)
    floor3[0] = create_sprite(os.path.join("texture", "Ground_3.png"),
                              sprite_size, tile)
    file = open("objects.yml", "r")
    object_list_tmp = yaml.load(file.read(), Loader=yaml.Loader)
    if full:
        object_list_prob = object_list_tmp
    object_list_actions = {'reload_game': reload_game,
                           'add_gold': add_gold,
                           'apply_blessing': apply_blessing,
                           'remove_effect': remove_effect,
                           'restore_hp': restore_hp,
                           'fight': fight,
                           'enhance': enhance}
    for obj in object_list_prob['objects']:
        prop = object_list_prob['objects'][obj]
        prop_tmp = object_list_tmp['objects'][obj]
        prop['sprite'][0] = create_sprite(
            os.path.join(OBJECT_TEXTURE, prop_tmp['sprite'][0]),
            sprite_size, tile)
        prop['action'] = object_list_actions[prop_tmp['action']]
    for ally in object_list_prob['ally']:
        prop = object_list_prob['ally'][ally]
# <reponame>theislab/disent
import os
from random import shuffle

import anndata
import numpy as np
import scanpy as sc
from matplotlib import pyplot as plt
from scipy import sparse
from sklearn import preprocessing
import pandas as pd

# NOTE(review): several functions below reference `scgen.*`, but no
# `import scgen` is visible in this chunk -- confirm it is supplied
# elsewhere (e.g. at package level) before relying on them.


def data_remover(adata, remain_list, remove_list, cell_type_key, condition_key):
    """
    Removes specific cell type in stimulated condition form `adata`.

    # Parameters
        adata: `~anndata.AnnData`
            Annotated data matrix
        remain_list: list
            list of cell types which are going to be remained in `adata`.
        remove_list: list
            list of cell types which are going to be removed from `adata`.
        cell_type_key: basestring
            obs column holding the cell-type annotation.
        condition_key: basestring
            obs column holding the condition annotation.

    # Returns
        merged_data: list
            returns array of specified cell types in stimulated condition

    # Example
    ```python
    import scgen
    import anndata
    train_data = anndata.read("./data/train_kang.h5ad")
    remove_list = ["CD14+Mono", "CD8T"]
    remain_list = ["CD4T", "Dendritic"]
    filtered_data = data_remover(train_data, remain_list, remove_list)
    ```
    """
    # Kept cell types contribute both conditions (index 3 of extractor);
    # removed cell types contribute only their control cells (index 1).
    source_data = [extractor(adata, cell_type,
                             conditions={"ctrl": "control", "stim": "stimulated"},
                             cell_type_key=cell_type_key,
                             condition_key=condition_key)[3]
                   for cell_type in remain_list]
    target_data = [extractor(adata, cell_type,
                             conditions={"ctrl": "control", "stim": "stimulated"},
                             cell_type_key=cell_type_key,
                             condition_key=condition_key)[1]
                   for cell_type in remove_list]
    merged_data = training_data_provider(source_data, target_data)
    merged_data.var_names = adata.var_names
    return merged_data


def extractor(data, cell_type, conditions, cell_type_key="cell_type", condition_key="condition"):
    """
    Returns a list of `data` files while filtering for a specific `cell_type`.

    # Parameters
        data: `~anndata.AnnData`
            Annotated data matrix
        cell_type: basestring
            specific cell type to be extracted from `data`.
        conditions: dict
            dictionary of stimulated/control of `data`.

    # Returns
        [training, control-only, stimulated-only, both-conditions] views,
        where `training` is everything EXCEPT the stimulated cells of
        `cell_type` (the held-out prediction target).

    # Example
    ```python
    import scgen
    import anndata
    train_data = anndata.read("./data/train.h5ad")
    test_data = anndata.read("./data/test.h5ad")
    train_data_extracted_list = extractor(train_data, "CD4T",
                                          conditions={"ctrl": "control", "stim": "stimulated"})
    ```
    """
    cell_with_both_condition = data[data.obs[cell_type_key] == cell_type]
    condition_1 = data[(data.obs[cell_type_key] == cell_type) &
                       (data.obs[condition_key] == conditions["ctrl"])]
    condition_2 = data[(data.obs[cell_type_key] == cell_type) &
                       (data.obs[condition_key] == conditions["stim"])]
    training = data[~((data.obs[cell_type_key] == cell_type) &
                      (data.obs[condition_key] == conditions["stim"]))]
    return [training, condition_1, condition_2, cell_with_both_condition]


def training_data_provider(train_s, train_t):
    """
    Concatenates two lists containing adata files

    # Parameters
        train_s: list of `~anndata.AnnData`
            Annotated data matrices (source).
        train_t: list of `~anndata.AnnData`
            Annotated data matrices (target).

    # Returns
        Concatenated Annotated data matrix.
    """
    def _collect(adatas):
        # Densify X and flatten the per-adata obs columns into flat lists.
        xs, diets, groups = [], [], []
        for a in adatas:
            xs.append(a.X.A)
            diets.extend(a.obs["condition"].tolist())
            groups.extend(a.obs["cell_type"].tolist())
        return np.concatenate(xs), diets, groups

    train_s_X, train_s_diet, train_s_groups = _collect(train_s)
    train_t_X, train_t_diet, train_t_groups = _collect(train_t)
    train_real = np.concatenate([train_s_X, train_t_X])  # concat all
    train_real = anndata.AnnData(train_real)
    train_real.obs["condition"] = train_s_diet + train_t_diet
    train_real.obs["cell_type"] = train_s_groups + train_t_groups
    return train_real


def balancer(adata, cell_type_key="cell_type", condition_key="condition"):
    """
    Makes cell type population equal.

    Each cell type is upsampled (with replacement) to the size of the
    largest population.

    # Parameters
        adata: `~anndata.AnnData`
            Annotated data matrix.

    # Returns
        balanced_data: `~anndata.AnnData`
            Equal cell type population Annotated data matrix.
    """
    class_names = np.unique(adata.obs[cell_type_key])
    class_pop = {}
    for cls in class_names:
        class_pop[cls] = adata.copy()[adata.obs[cell_type_key] == cls].shape[0]
    max_number = np.max(list(class_pop.values()))
    all_data_x = []
    all_data_label = []
    all_data_condition = []
    for cls in class_names:
        temp = adata.copy()[adata.obs[cell_type_key] == cls]
        index = np.random.choice(range(len(temp)), max_number)
        if sparse.issparse(temp.X):
            temp_x = temp.X.A[index]
        else:
            temp_x = temp.X[index]
        all_data_x.append(temp_x)
        all_data_label.append(np.repeat(cls, max_number))
        # NOTE(review): assumes a single condition per call (e.g. control
        # only); with multiple conditions np.unique would break lengths.
        all_data_condition.append(np.repeat(np.unique(temp.obs[condition_key]),
                                            max_number))
    balanced_data = anndata.AnnData(np.concatenate(all_data_x))
    balanced_data.obs[cell_type_key] = np.concatenate(all_data_label)
    # BUG FIX: the condition column was previously filled from
    # all_data_label (cell types) instead of all_data_condition.
    balanced_data.obs[condition_key] = np.concatenate(all_data_condition)
    return balanced_data


def shuffle_adata(adata):
    """
    Shuffles the `adata`.

    # Parameters
        adata: `~anndata.AnnData`
            Annotated data matrix.

    # Returns
        adata: `~anndata.AnnData`
            Shuffled annotated data matrix (rows permuted; X densified
            in place if it was sparse).
    """
    if sparse.issparse(adata.X):
        adata.X = adata.X.A
    ind_list = [i for i in range(adata.shape[0])]
    shuffle(ind_list)
    new_adata = adata[ind_list, :]
    return new_adata


def batch_removal(network, adata, batch_key="batch", cell_label_key="cell_type"):
    """
    Removes batch effect of adata

    # Parameters
        network: `scgen VAE`
            Variational Auto-encoder class object after training the network.
        adata: `~anndata.AnnData`
            Annotated data matrix. adata must have `batch_key` and
            `cell_label_key` which you pass to the function in its obs.

    # Returns
        corrected: `~anndata.AnnData`
            Annotated matrix of corrected data consisting of all cell types
            whether they have batch effect or not.

    # Example
    ```python
    import scgen
    import anndata
    train = anndata.read("data/pancreas.h5ad")
    train.obs["cell_type"] = train.obs["celltype"].tolist()
    network = scgen.VAEArith(x_dimension=train.shape[1], model_path="./models/batch")
    network.train(train_data=train, n_epochs=20)
    corrected_adata = scgen.batch_removal(network, train)
    ```
    """
    # Work entirely in latent space; correct there, then reconstruct.
    if sparse.issparse(adata.X):
        latent_all = network.to_latent(adata.X.A)
    else:
        latent_all = network.to_latent(adata.X)
    adata_latent = anndata.AnnData(latent_all)
    adata_latent.obs = adata.obs.copy(deep=True)
    unique_cell_types = np.unique(adata_latent.obs[cell_label_key])
    shared_ct = []      # cell types present in >= 2 batches (correctable)
    not_shared_ct = []  # cell types present in a single batch (left as-is)
    for cell_type in unique_cell_types:
        temp_cell = adata_latent[adata_latent.obs[cell_label_key] == cell_type]
        if len(np.unique(temp_cell.obs[batch_key])) < 2:
            cell_type_ann = adata_latent[adata_latent.obs[cell_label_key] == cell_type]
            not_shared_ct.append(cell_type_ann)
            continue
        temp_cell = adata_latent[adata_latent.obs[cell_label_key] == cell_type]
        batch_list = {}
        batch_ind = {}
        max_batch = 0
        max_batch_ind = ""
        batches = np.unique(temp_cell.obs[batch_key])
        for i in batches:
            temp = temp_cell[temp_cell.obs[batch_key] == i]
            temp_ind = temp_cell.obs[batch_key] == i
            if max_batch < len(temp):
                max_batch = len(temp)
                max_batch_ind = i
            batch_list[i] = temp
            batch_ind[i] = temp_ind
        max_batch_ann = batch_list[max_batch_ind]
        # Shift every batch's latent mean onto the largest batch's mean.
        for study in batch_list:
            delta = np.average(max_batch_ann.X, axis=0) - \
                np.average(batch_list[study].X, axis=0)
            batch_list[study].X = delta + batch_list[study].X
            temp_cell[batch_ind[study]].X = batch_list[study].X
        shared_ct.append(temp_cell)
    all_shared_ann = anndata.AnnData.concatenate(*shared_ct,
                                                 batch_key="concat_batch",
                                                 index_unique=None)
    if "concat_batch" in all_shared_ann.obs.columns:
        del all_shared_ann.obs["concat_batch"]
    if len(not_shared_ct) < 1:
        corrected = anndata.AnnData(network.reconstruct(all_shared_ann.X,
                                                        use_data=True))
        corrected.obs = all_shared_ann.obs.copy(deep=True)
        corrected.var_names = adata.var_names.tolist()
        corrected = corrected[adata.obs_names]
        if adata.raw is not None:
            adata_raw = anndata.AnnData(X=adata.raw.X, var=adata.raw.var)
            adata_raw.obs_names = adata.obs_names
            corrected.raw = adata_raw
        return corrected
    else:
        all_not_shared_ann = anndata.AnnData.concatenate(*not_shared_ct,
                                                         batch_key="concat_batch",
                                                         index_unique=None)
        all_corrected_data = anndata.AnnData.concatenate(all_shared_ann,
                                                         all_not_shared_ann,
                                                         batch_key="concat_batch",
                                                         index_unique=None)
        # NOTE(review): this condition checks all_shared_ann but deletes
        # from all_corrected_data; since "concat_batch" was already removed
        # from all_shared_ann above, the delete never fires. Preserved
        # as-is -- confirm against upstream before changing.
        if "concat_batch" in all_shared_ann.obs.columns:
            del all_corrected_data.obs["concat_batch"]
        corrected = anndata.AnnData(network.reconstruct(all_corrected_data.X,
                                                        use_data=True), )
        corrected.obs = pd.concat([all_shared_ann.obs, all_not_shared_ann.obs])
        corrected.var_names = adata.var_names.tolist()
        corrected = corrected[adata.obs_names]
        if adata.raw is not None:
            adata_raw = anndata.AnnData(X=adata.raw.X, var=adata.raw.var)
            adata_raw.obs_names = adata.obs_names
            corrected.raw = adata_raw
        return corrected


def label_encoder(adata):
    """
    Encode labels of Annotated `adata` matrix using
    sklearn.preprocessing.LabelEncoder class.

    Parameters
    ----------
    adata: `~anndata.AnnData`
        Annotated data matrix.

    Returns
    -------
    labels: numpy nd-array
        Column vector (n_obs, 1) of encoded condition labels.
    le: sklearn.preprocessing.LabelEncoder
        The fitted encoder (for inverse transforms).

    Example
    --------
    >>> import scgen
    >>> import scanpy as sc
    >>> train_data = sc.read("./data/train.h5ad")
    >>> train_labels, label_encoder = label_encoder(train_data)
    """
    le = preprocessing.LabelEncoder()
    labels = le.fit_transform(adata.obs["condition"].tolist())
    return labels.reshape(-1, 1), le


def visualize_trained_network_results(network, train, cell_type,
                                      conditions=None,
                                      condition_key="condition",
                                      cell_type_key="cell_type",
                                      path_to_save="./figures/",
                                      plot_umap=True,
                                      plot_reg=True):
    """Plot latent UMAPs and regression diagnostics for a trained network.

    NOTE(review): this function is truncated in the visible chunk; the
    VAEArith branch below stops where the chunk ends.
    """
    # Avoid a mutable default argument; the default dict is created fresh.
    if conditions is None:
        conditions = {"ctrl": "control", "stim": "stimulated"}
    plt.close("all")
    os.makedirs(path_to_save, exist_ok=True)
    sc.settings.figdir = os.path.abspath(path_to_save)
    if isinstance(network, scgen.VAEArithKeras):
        if sparse.issparse(train.X):
            latent = network.to_latent(train.X.A)
        else:
            latent = network.to_latent(train.X)
        latent = sc.AnnData(X=latent,
                            obs={condition_key: train.obs[condition_key].tolist(),
                                 cell_type_key: train.obs[cell_type_key].tolist()})
        if plot_umap:
            sc.pp.neighbors(latent)
            sc.tl.umap(latent)
            sc.pl.umap(latent, color=[condition_key, cell_type_key],
                       save=f"_latent",
                       show=False)
        cell_type_data = train[train.obs[cell_type_key] == cell_type]
        pred, delta = network.predict(adata=cell_type_data,
                                      conditions=conditions,
                                      cell_type_key=cell_type_key,
                                      condition_key=condition_key,
                                      celltype_to_predict=cell_type)
        pred_adata = anndata.AnnData(pred,
                                     obs={condition_key: ["pred"] * len(pred)},
                                     var={"var_names": cell_type_data.var_names})
        all_adata = cell_type_data.concatenate(pred_adata)
        sc.tl.rank_genes_groups(cell_type_data, groupby=condition_key,
                                n_genes=100)
        diff_genes = cell_type_data.uns["rank_genes_groups"]["names"][conditions["stim"]]
        if plot_reg:
            scgen.plotting.reg_mean_plot(all_adata,
                                         condition_key=condition_key,
                                         axis_keys={"x": "pred", "y": conditions["stim"]},
                                         gene_list=diff_genes[:5],
                                         path_to_save=os.path.join(path_to_save, f"reg_mean_all_genes.pdf"))
            scgen.plotting.reg_var_plot(all_adata,
                                        condition_key=condition_key,
                                        axis_keys={"x": "pred", "y": conditions["stim"]},
                                        gene_list=diff_genes[:5],
                                        path_to_save=os.path.join(path_to_save, f"reg_var_all_genes.pdf"))
            all_adata_top_100_genes = all_adata.copy()[:, diff_genes.tolist()]
            scgen.plotting.reg_mean_plot(all_adata_top_100_genes,
                                         condition_key=condition_key,
                                         axis_keys={"x": "pred", "y": conditions["stim"]},
                                         gene_list=diff_genes[:5],
                                         path_to_save=os.path.join(path_to_save, f"reg_mean_top_100_genes.pdf"))
            scgen.plotting.reg_var_plot(all_adata_top_100_genes,
                                        condition_key=condition_key,
                                        axis_keys={"x": "pred", "y": conditions["stim"]},
                                        gene_list=diff_genes[:5],
                                        path_to_save=os.path.join(path_to_save, f"reg_var_top_100_genes.pdf"))
            all_adata_top_50_genes = all_adata.copy()[:, diff_genes.tolist()[:50]]
            scgen.plotting.reg_mean_plot(all_adata_top_50_genes,
                                         condition_key=condition_key,
                                         axis_keys={"x": "pred", "y": conditions["stim"]},
                                         gene_list=diff_genes[:5],
                                         path_to_save=os.path.join(path_to_save, f"reg_mean_top_50_genes.pdf"))
            scgen.plotting.reg_var_plot(all_adata_top_50_genes,
                                        condition_key=condition_key,
                                        axis_keys={"x": "pred", "y": conditions["stim"]},
                                        gene_list=diff_genes[:5],
                                        path_to_save=os.path.join(path_to_save, f"reg_var_top_50_genes.pdf"))
            if plot_umap:
                sc.pp.neighbors(all_adata)
                sc.tl.umap(all_adata)
                sc.pl.umap(all_adata, color=condition_key,
                           save="pred_all_genes",
                           show=False)
                sc.pp.neighbors(all_adata_top_100_genes)
                sc.tl.umap(all_adata_top_100_genes)
                sc.pl.umap(all_adata_top_100_genes, color=condition_key,
                           save="pred_top_100_genes",
                           show=False)
                sc.pp.neighbors(all_adata_top_50_genes)
                sc.tl.umap(all_adata_top_50_genes)
                sc.pl.umap(all_adata_top_50_genes, color=condition_key,
                           save="pred_top_50_genes",
                           show=False)
        sc.pl.violin(all_adata, keys=diff_genes.tolist()[0],
                     groupby=condition_key,
                     save=f"_{diff_genes.tolist()[0]}",
                     show=False)
        plt.close("all")
    elif isinstance(network, scgen.VAEArith):
        if sparse.issparse(train.X):
            latent = network.to_latent(train.X.A)
        else:
            latent = network.to_latent(train.X)
        latent = sc.AnnData(X=latent,
                            obs={condition_key: train.obs[condition_key].tolist(),
                                 cell_type_key: train.obs[cell_type_key].tolist()})
        if plot_umap:
            sc.pp.neighbors(latent)
            sc.tl.umap(latent)
            sc.pl.umap(latent, color=[condition_key, cell_type_key],
                       save=f"_latent",
                       show=False)
        cell_type_data = train[train.obs[cell_type_key] == cell_type]
        pred, delta = network.predict(adata=cell_type_data,
                                      conditions=conditions,
                                      cell_type_key=cell_type_key,
                                      condition_key=condition_key,
                                      celltype_to_predict=cell_type)
        pred_adata = anndata.AnnData(pred,
                                     obs={condition_key: ["pred"] * len(pred)},
                                     var={"var_names": cell_type_data.var_names})
        all_adata = cell_type_data.concatenate(pred_adata)
        sc.tl.rank_genes_groups(cell_type_data, groupby=condition_key,
                                n_genes=100)
        diff_genes = cell_type_data.uns["rank_genes_groups"]["names"][conditions["stim"]]
        if plot_reg:
            scgen.plotting.reg_mean_plot(all_adata,
                                         condition_key=condition_key,
                                         axis_keys={"x": "pred", "y": conditions["stim"]},
                                         gene_list=diff_genes[:5],
                                         path_to_save=os.path.join(path_to_save, f"reg_mean_all_genes.pdf"))
            scgen.plotting.reg_var_plot(all_adata,
                                        condition_key=condition_key,
                                        axis_keys={"x": "pred", "y": conditions["stim"]},
                                        gene_list=diff_genes[:5],
                                        path_to_save=os.path.join(path_to_save, f"reg_var_all_genes.pdf"))
            all_adata_top_100_genes = all_adata.copy()[:, diff_genes.tolist()]
            scgen.plotting.reg_mean_plot(all_adata_top_100_genes,
                                         condition_key=condition_key,
                                         axis_keys={"x": "pred", "y": conditions["stim"]},
                                         gene_list=diff_genes[:5],
                                         path_to_save=os.path.join(path_to_save, f"reg_mean_top_100_genes.pdf"))
            scgen.plotting.reg_var_plot(all_adata_top_100_genes,
                                        condition_key=condition_key,
                                        axis_keys={"x": "pred", "y": conditions["stim"]},
                                        gene_list=diff_genes[:5],
                                        path_to_save=os.path.join(path_to_save, f"reg_var_top_100_genes.pdf"))
            all_adata_top_50_genes = all_adata.copy()[:, diff_genes.tolist()[:50]]
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Contains the RecoverReflectanceShadingLayer.

Contains the RecoverReflectanceShadingLayer that recovers RGB reflectance
and shading from a (possibly scalar) estimation of one of the two and an
input image. See the description of the class.
"""
from __future__ import absolute_import, division, print_function

import sys
import os
import numpy as np
# import timeit
from skimage.color import rgb2lab, lab2rgb
import cv2
try:
    import caffe
except ImportError:
    # fall back to a hard-coded local checkout of caffe's python bindings
    sys.path.insert(0, os.path.join(os.path.expanduser('~'),
                                    'Repositories', 'malabar', 'python'))
    import caffe

# np.finfo(np.float32).eps = 1.1920929e-07
# np.finfo(np.float).eps = 2.2204460492503131e-16
# Smallest positive float32 increment; used to avoid division by zero.
EPS = np.finfo(np.float32).eps
# RGB2LIGHTNESS = [0.299, 0.587, 0.114]  # not used right now


class RecoverReflectanceShadingLayer(caffe.Layer):
    """
    Recover RGB reflectance and shading from estimation and input image.

    Depending on interpretation, the input can be either an estimation of
    reflectance, or shading. Further, it can be a one channel estimation,
    if we limit ourselves to white light, or a three channel estimation.
    """

    def setup(self, bottom, top):
        """Check that layer is correctly set up by checking input pair.

        bottom[0] is the estimation blob, bottom[1] the input image blob;
        both must be 4D with matching batch size, height and width.  The
        layer's ``param_str`` selects (before the first '-') the mode that
        decides how bottom[0] is interpreted.
        """
        if len(bottom) != 2:
            raise Exception("Expected inputs:\n"
                            "0) Estimation of reflectance or shading,\n"
                            "1) input images.")

        def print_shapes():
            """Print the input shapes."""
            print("bottom[0].data.shape:", bottom[0].data.shape,
                  "\nbottom[1].data.shape:", bottom[1].data.shape)

        # check that inputs match
        if not (bottom[0].data.ndim == bottom[1].data.ndim == 4):
            print_shapes()
            raise Exception("Expecting 4D blobs on both inputs.")
        if bottom[0].data.shape[0] != bottom[1].data.shape[0]:
            print_shapes()
            raise Exception("Batch sizes of the inputs do not match!")
        if bottom[0].data.shape[2] != bottom[1].data.shape[2]:
            print_shapes()
            raise Exception("Heights of the input images do not match!")
        if bottom[0].data.shape[3] != bottom[1].data.shape[3]:
            print_shapes()
            raise Exception("Widths of the input images do not match!")
        self.eps = EPS
        # now set up which function to use (how to interpret input)
        params = self.param_str.split('-')
        mode = params[0]
        # print("Setting up RecoverReflectanceShadingLayer.",
        #       "mode is:", mode)
        possible_modes = ['sAbs', 'S',
                          'rAbs', 'R',
                          'RS',
                          'rRelNorm', 'rRelMean', 'rRelY', 'rRelMax',
                          'sRelNorm', 'sRelMean', 'sRelY', 'sRelMax',
                          'CIELAB', 'HLS', 'HSV',
                          ]
        if mode not in possible_modes:
            # NOTE(review): the message claims rRelNorm is the fallback, but
            # the code actually selects 'rRelMax' — confirm which is intended.
            print("given first part (before '-') of param_str '", mode,
                  "' in RecoverReflectanceShadingLayer was not one of",
                  "the expected: " + str(possible_modes) + ".",
                  "rRelNorm is chosen as standard")
            mode = 'rRelMax'
        if mode == 'sAbs':
            self.f = self.interpret_input_as_shading_intensity_absolute
            if bottom[0].data.shape[1] != 1:
                raise Exception("The input (interpreted as scalar shading) "
                                "is supposed to have one channel")
        elif mode == 'sRelNorm':
            self.f = self.interpret_input_as_shading_intensity_relative
            if bottom[0].data.shape[1] != 1:
                raise Exception("The input (interpreted as scalar shading) "
                                "is supposed to have one channel")
            self.norm = _norm_L2norm
        elif mode == 'sRelMean':
            # NOTE(review): this s* mode (and sRelY/sRelMax below) assigns the
            # *reflectance* interpreter and reflectance-worded error text,
            # unlike sRelNorm above — looks like a copy-paste slip; confirm.
            self.f = self.interpret_input_as_reflectance_intensity_relative
            if bottom[0].data.shape[1] != 1:
                raise Exception("The input (interpreted as scalar reflectance)"
                                " is supposed to have one channel")
            self.norm = _norm_Mean
        elif mode == 'sRelY':
            self.f = self.interpret_input_as_reflectance_intensity_relative
            if bottom[0].data.shape[1] != 1:
                raise Exception("The input (interpreted as scalar reflectance)"
                                " is supposed to have one channel")
            self.norm = _norm_Lightness
        elif mode == 'sRelMax':
            self.f = self.interpret_input_as_reflectance_intensity_relative
            if bottom[0].data.shape[1] != 1:
                raise Exception("The input (interpreted as scalar reflectance)"
                                " is supposed to have one channel")
            self.norm = _norm_Max
        elif mode == 'S':
            self.f = self.interpret_input_as_shading_RGB
            if bottom[0].data.shape[1] != 3:
                raise Exception("The input (interpreted as RGB shading) "
                                "is supposed to have three channels")
        elif mode == 'rAbs':
            self.f = self.interpret_input_as_reflectance_intensity_absolute
            if bottom[0].data.shape[1] != 1:
                raise Exception("The input (interpreted as scalar reflectance)"
                                " is supposed to have one channel")
        elif mode == 'rRelNorm':
            self.f = self.interpret_input_as_reflectance_intensity_relative
            if bottom[0].data.shape[1] != 1:
                raise Exception("The input (interpreted as scalar reflectance)"
                                " is supposed to have one channel")
            self.norm = _norm_L2norm
        elif mode == 'rRelMean':
            self.f = self.interpret_input_as_reflectance_intensity_relative
            if bottom[0].data.shape[1] != 1:
                raise Exception("The input (interpreted as scalar reflectance)"
                                " is supposed to have one channel")
            self.norm = _norm_Mean
        elif mode == 'rRelY':
            self.f = self.interpret_input_as_reflectance_intensity_relative
            if bottom[0].data.shape[1] != 1:
                raise Exception("The input (interpreted as scalar reflectance)"
                                " is supposed to have one channel")
            self.norm = _norm_Lightness
        elif mode == 'rRelMax':
            self.f = self.interpret_input_as_reflectance_intensity_relative
            if bottom[0].data.shape[1] != 1:
                raise Exception("The input (interpreted as scalar reflectance)"
                                " is supposed to have one channel")
            self.norm = _norm_Max
        elif mode == 'R':
            self.f = self.interpret_input_as_reflectance_RGB
            if bottom[0].data.shape[1] != 3:
                raise Exception("The input (interpreted as RGB reflectance)"
                                " is supposed to have three channels")
        elif mode == 'RS':
            self.f = self.interpret_input_as_concatenation_of_R_and_S
            if bottom[0].data.shape[1] != 6:
                raise Exception("The input (interpreted as RGB reflectance "
                                "concatenated with RGB shading) is supposed "
                                "to have six channels")

    def reshape(self, bottom, top):
        """Define dimensions of data."""
        # output:
        # 0) reflectance
        # 1) shading
        # Both outputs take the full shape of the input image blob.
        batch_size, channels, height, width = bottom[1].data.shape
        top[0].reshape(batch_size, channels, height, width)
        top[1].reshape(batch_size, channels, height, width)

    def forward(self, bottom, top):
        """Forward pass of the layer.

        Delegates to the interpreter function selected in ``setup``.
        """
        # start = timeit.default_timer()
        # print("RecoverReflectanceShadingLayer: min, max, mean R_i:",
        #       np.min(R_i), np.max(R_i), np.mean(R_i))
        self.f(bottom, top)
        # stop = timeit.default_timer()
        # print("RecoverReflectanceShadingLayer: ",
        #       "Time to recover reflectance and shading:",
        #       stop-start, "seconds.")

    def backward(self, top, propagate_down, bottom):
        """Backward pass of the layer.

        Combines the top diffs with the per-pixel derivatives cached by the
        forward interpreter (``self.diff_reflectance`` / ``self.diff_shading``)
        and writes the result into bottom[0].diff.  No gradient is propagated
        to the image input (bottom[1]).
        """
        # print("RecoverReflectanceShadingLayer: "
        #       "Computing backward to R_i (bottom[0]):", propagate_down[0])
        # print("RecoverReflectanceShadingLayer: "
        #       "Computing backward to images (bottom[1]):", propagate_down[1])
        if propagate_down[0]:
            diff_reflectance = top[0].diff * self.diff_reflectance
            diff_shading = top[1].diff * self.diff_shading
            # print(top[0].diff.shape, top[1].diff.shape,
            #       self.diff_reflectance.shape, self.diff_shading.shape)
            if bottom[0].data.shape[1] == 6:
                # RS mode: reflectance diff goes to the first three channels,
                # shading diff to the last three.
                diff_input = np.concatenate((diff_reflectance, diff_shading),
                                            axis=1)
            elif bottom[0].data.shape[1] == 3:
                # diff_input = diff_reflectance
                # diff_input = diff_shading
                # diff_input = np.sqrt(diff_reflectance**2 + diff_shading**2)
                diff_input = diff_reflectance + diff_shading
            elif bottom[0].data.shape[1] == 1:
                # since every RGB component has same influence, we propagate
                # their sum
                diff_input = np.sum(np.concatenate((diff_reflectance,
                                                    diff_shading), axis=1),
                                    axis=1, keepdims=True)
                # TODO: do we need to compute the correct weighting when using
                # the proper RGB weighting for Y?
            else:
                # NOTE(review): missing `raise` — this constructs the
                # exception but never raises it, so an unexpected channel
                # count falls through with diff_input undefined; confirm.
                Exception("Num channels is expected to be one of [1, 3, 6].",
                          bottom[0].data.shape[1])
            bottom[0].diff[...] = diff_input
        if propagate_down[1]:
            print("RecoverReflectanceShadingLayer: "
                  "can't propagate down to images!")
            # raise Exception("There should be no diff towards images!")

    def interpret_input_as_reflectance_intensity_relative(self, bottom, top):
        """Interpret input as reflectance intensity.

        The scalar estimate R_i scales the image normalized by its intensity
        (as measured by ``self.norm``) to form reflectance; shading is the
        constant intensity/R_i.  Caches the derivatives for ``backward``.
        """
        # use real input
        # R_i = bottom[0].data
        # image = bottom[1].data
        # threshold with eps to avoid division by zero
        R_i = _threshold(bottom[0].data)
        # image = np.maximum(bottom[1].data, eps)
        image = bottom[1].data
        # probably better to threshold intensity, instead of image, see below
        # NOTE: diff of R_i should not change when changing intens, since
        # only dependent on input image, not estimation!
        # threshold
        image_intensity = _threshold(self.norm(image))
        # recover reflectance and shading by equation (5) in iiw:
        # first get the factor: Ri/intens
        factor = 1. / image_intensity
        # normalize image
        normalized_image = factor * image
        # get reflectance
        reflectance = R_i * normalized_image
        # now we want intens/Ri as the factor for shading
        Ri_inv = 1. / R_i
        factor = image_intensity * Ri_inv
        # now get shading
        shading = factor * np.ones_like(image)
        # assign the output:
        top[0].data[...] = reflectance
        top[1].data[...] = shading
        # save already computed values for backprop
        self.diff_reflectance = normalized_image
        self.diff_shading = -Ri_inv * shading

    def interpret_input_as_reflectance_intensity_absolute(self, bottom, top):
        """Interpret input as reflectance intensity.

        Absolute variant: reflectance = R_i * image, shading = 1 / R_i.
        NOTE(review): the source chunk is truncated here — the remainder of
        this method (and any following methods) is not visible.
        """
        # threshold with eps to avoid division by zero
        R_i = _threshold(bottom[0].data)
        image = bottom[1].data
        # get reflectance
        reflectance = R_i * image
        Ri_inv = 1. / R_i
        # now get shading
        shading = Ri_inv * np.ones_like(image)
        # assign the output:
        top[0].data[...] = reflectance
import io
from PIL import Image
import logging
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from demo_dash import header_v3 as header
from database import query_database, query_reid
from urllib.parse import parse_qs, urlencode
import base64
import sys
import pandas as pd
from datetime import datetime, timedelta

# Make the sibling re-identification package importable.
sys.path.append('../')
sys.path.append(f'../reid')
from inference import reid_inference
from utils import to_sqlite

# Module-level state: currently selected database path and loaded re-id model.
_reid_db_path = None
_reid = None

external_stylesheets = [
    dbc.themes.COSMO,
    'https://codepen.io/chriddyp/pen/bWLwgP.css',
    "https://use.fontawesome.com/releases/v5.7.2/css/all.css",
]

app = dash.Dash(
    __name__,
    title="RE-ID Dash",
    external_stylesheets=external_stylesheets,
    suppress_callback_exceptions=True,
    meta_tags=[{
        'name': 'viewport',
        'content': 'width=device-width, initial-scale=1.0'
    }])

app.layout = dbc.Container(
    id='app-layout',
    children=[
        dcc.Location(id='url', refresh=False),
        header.title_block,
        header.subtitle,
        html.Hr(),
        dbc.Container(id='page-content', fluid=True),
    ],
    fluid=True,
)


@app.callback(
    Output(component_id='page-content', component_property='children'),
    Input(component_id='url', component_property='pathname'),
    Input(component_id='url', component_property='search'),
)
def display_page(pathname, search):
    """Route the URL to either the results page or the home page."""
    params = extract_results_search_params(pathname, search)
    layout_page = []
    try:
        if params is not None and pathname[1:] == 'results':
            layout_page.append(results_page_content(params))
        else:
            layout_page.append(home_page_content())
    except Exception as ex:
        # Fall through with whatever was built so far; errors only logged.
        logging.error(ex)
    return layout_page  # , title


SIDEBAR_STYLE = {
    "position": "static",
    "top": 0,
    "left": 0,
    "bottom": 0,
    "width": "20rem",
    "padding": "2rem 1rem",
    "background-color": "#f8f9fa",
    'height': '100%',
}


def home_page_content():
    """Build the home page: database/date/camera pickers plus image browser."""
    global _reid_db_path
    # Reset the selected database whenever the home page is rebuilt.
    _reid_db_path = None
    headerColWidth = 2
    content = dbc.Col(
        id='home-page',
        children=[
            # Select Database for use
            dbc.Card(dbc.CardBody([
                dbc.Row([
                    dbc.Col(html.P('Select Database',
                                   style={'font-weight': 'bold'}),
                            width=headerColWidth),
                    dbc.Col(dcc.Dropdown(id='database-id',
                                         options=get_database_options()),
                            width=True),
                ]),
            ])),
            html.Br(),
            dbc.Card(dbc.CardBody([
                dbc.Row([
                    dbc.Col(html.P('Start Date & Time',
                                   style={'font-weight': 'bold'}),
                            width=headerColWidth),
                    dbc.Col([
                        dbc.Row([
                            dbc.Col(html.P('Date:',
                                           style={'font-weight': 'bold'}),
                                    width='auto', align='center'),
                            dbc.Col(dcc.DatePickerSingle(
                                id='db-date-start-id',
                                display_format='DD-MM-YYYY'), width='auto'),
                            dbc.Col(html.P('Hour (0 ~ 23):',
                                           style={'font-weight': 'bold'}),
                                    width='auto', align='center'),
                            dbc.Col(dbc.Input(id='db-time-start-hr-id',
                                              type='number', min=0, max=23),
                                    width=1),
                            dbc.Col(html.P('Minute (0 ~ 59):',
                                           style={'font-weight': 'bold'}),
                                    width='auto', align='center'),
                            dbc.Col(dbc.Input(id='db-time-start-min-id',
                                              type='number', min=0, max=59),
                                    width=1),
                        ]),
                        # dash_datetimepicker.DashDatetimepicker(id='datetime-range-id'),
                    ],
                        width=True
                    ),
                ]),
                dbc.Row([
                    dbc.Col(html.P('End Date & Time',
                                   style={'font-weight': 'bold'}),
                            width=headerColWidth),
                    dbc.Col([
                        dbc.Row([
                            dbc.Col(html.P('Date:',
                                           style={'font-weight': 'bold'}),
                                    width='auto', align='center'),
                            dbc.Col(dcc.DatePickerSingle(
                                id='db-date-end-id',
                                display_format='DD-MM-YYYY'), width='auto'),
                            dbc.Col(html.P('Hour (0 ~ 23):',
                                           style={'font-weight': 'bold'}),
                                    width='auto', align='center'),
                            dbc.Col(dbc.Input(id='db-time-end-hr-id',
                                              type='number', min=0, max=23),
                                    width=1),
                            dbc.Col(html.P('Minute (0 ~ 59):',
                                           style={'font-weight': 'bold'}),
                                    width='auto', align='center'),
                            dbc.Col(dbc.Input(id='db-time-end-min-id',
                                              type='number', min=0, max=59),
                                    width=1),
                        ]),
                    ],
                        width=True
                    ),
                ]),
                html.Br(),
                dbc.Row([
                    dbc.Col(html.P('Select Camera ID',
                                   style={'font-weight': 'bold'}),
                            width=headerColWidth),
                    dbc.Col(dcc.Dropdown(id='camera-id'), width=True),
                ]),
                # Line separator
                dbc.Row([
                    dbc.Col(html.Hr(), align='center'),
                    dbc.Col(html.P("or", style={'font-weight': 'bold'}),
                            align='center', width='auto'),
                    dbc.Col(html.Hr(), align='center'),
                ],
                    align='start',
                ),
                dbc.Row([
                    dbc.Col(html.P('Upload an image',
                                   style={'font-weight': 'bold'}),
                            width=headerColWidth),
                    dbc.Col(
                        dcc.Upload(
                            id='upload-image',
                            accept='image/*',
                            multiple=True,
                            children=[
                                dbc.Button('Click to upload',
                                           id='upload-image-button',
                                           color='primary', block=True,
                                           size="lg",
                                           style={'word-wrap': 'normal'})
                            ],
                        ),
                        width='auto',
                    ),
                ]),
                html.P('Picture Requirement:', style={'font-size': 'small'}),
                html.P('• Best with aspect ratio of 1:2 i.e. 128W, 256H',
                       style={'font-size': 'small'}),
                html.P('• Full body image from head to toe',
                       style={'font-size': 'small'}),
            ])),
            html.Br(),
            # Upload image
            dbc.Card(dbc.CardBody([
                dbc.Row([
                    dbc.Col([
                        html.P('Select Human Image',
                               style={'font-weight': 'bold'}),
                        html.P('(Narrow the search by date & time)',
                               style={'font-size': 'small',
                                      'font-style': 'italic'}),
                    ], width=headerColWidth),
                    dbc.Col(
                        id='display-col',
                        children=[
                            dbc.Spinner(dbc.Row(
                                id='view-db-images',
                                form=True,
                                style={
                                    'display': 'flex',
                                    'flex-wrap': 'wrap',
                                    'overflow': 'auto',
                                },
                                # no_gutters=True,
                                # fluid=True,
                            )),
                        ],
                        width=True,
                        align='stretch'
                    ),
                ]),
            ])),
        ],
        width=True,
    )
    return dbc.Row(children=[
        content,
    ])


def results_page_content(params):
    """Build the results page: selected-image sidebar plus search filter.

    `params` is the decoded query-string dict; recognised keys are
    'database', 'image_id', 'image' (data URI) and 'image_filename'.
    """
    if 'database' in params:
        path_db = params['database']
    else:
        path_db = None
    if 'image_id' in params:
        img_id = params['image_id']
    else:
        img_id = None
    if 'image' in params:
        img = params['image']
    else:
        img = None
    if 'image_filename' in params:
        img_name = params['image_filename']
    else:
        img_name = None
    sidebar_contents = []
    # Show selected image
    if path_db is not None and os.path.exists(path_db):
        dbquery = query_database.DbQuery(path_db)
        minDate, maxDate = dbquery.get_date_range()
        details_row = []
        image = None
        if img_id is not None:
            # Image chosen from the database: fetch it and its metadata.
            df = dbquery.get_images(img_id=img_id)
            row = df.iloc[0]
            encoded_image = base64.b64encode(row.img)
            image = 'data:image/png;base64,{}'.format(encoded_image.decode())
            if row.img_id is not None:
                details_row.append(dbc.Row(
                    [
                        html.B('Image ID:', style={'margin-right': '5px'}),
                        html.P(row.img_id),
                    ],
                    # className="card-text",
                ))
            if row.timestamp is not None:
                details_row.append(dbc.Row(
                    [
                        html.B('Date/Time:', style={'margin-right': '5px'}),
                        html.P(row.timestamp),
                    ],
                    # className="card-text",
                ))
            if row.cam_id is not None:
                details_row.append(dbc.Row(
                    [
                        html.B('Camera ID:', style={'margin-right': '5px'}),
                        html.P(row.cam_id),
                    ],
                    # className="card-text",
                ))
            if "loc" in df.columns and row["loc"] is not None:
                details_row.append(dbc.Row(
                    [
                        html.B('Location:', style={'margin-right': '5px'}),
                        html.P(row["loc"]),
                    ],
                    # className="card-text",
                ))
        elif img is not None:
            # Uploaded image: already a data URI; only the filename is known.
            image = img
            if img_name is not None:
                details_row.append(dbc.Row(
                    [
                        html.B('File Name:', style={'margin-right': '5px'}),
                        html.P(img_name),
                    ],
                ))
        if image is not None:
            sidebar_contents.append(
                dbc.Card(
                    children=[
                        dbc.CardImg(
                            id='results-sidebar-image',
                            src=image,
                            style={
                                'width': '8vw',
                                'object-fit': 'contain',
                            },
                        ),
                        dbc.CardBody(details_row),
                    ],
                    style={
                        'padding': '5%',
                    },
                )
            )
        # filter
        # NOTE(review): this card uses minDate/maxDate, which only exist
        # inside this branch, so it is reconstructed as nested here —
        # confirm against the original file's indentation.
        sidebar_contents.append(
            dbc.Card([
                dbc.CardBody([
                    html.H6('Search Filter',
                            style={'font-weight': 'bold',
                                   'color': '#007fcf', }),
                    # html.Br(),
                    dbc.Col([
                        html.P('Select Start Date & Time',
                               style={'font-weight': 'bold'}),
                        dbc.Col([
                            dbc.Row([
                                dbc.Col(html.P('Date:'), width='auto',
                                        align='center'),
                                dbc.Col(
                                    dcc.DatePickerSingle(
                                        id='results-filter-date-start-id',
                                        display_format='DD-MM-YYYY',
                                        min_date_allowed=minDate.strftime('%Y-%m-%d'),
                                        max_date_allowed=maxDate.strftime('%Y-%m-%d')),
                                    width=True),
                            ]),
                            dbc.Row([
                                dbc.Col(html.P('Hour (0 ~ 23):'),
                                        width='auto', align='center'),
                                dbc.Col(dbc.Input(
                                    id='results-filter-time-start-hr-id',
                                    type='number', min=0, max=23),
                                    width=True),
                            ]),
                            dbc.Row([
                                dbc.Col(html.P('Minute (0 ~ 59):'),
                                        width='auto', align='center'),
                                dbc.Col(dbc.Input(
                                    id='results-filter-time-start-min-id',
                                    type='number', min=0, max=59),
                                    width=True),
                            ]),
                        ]),
                        # dash_datetimepicker.DashDatetimepicker(id='results-filter-datetime'),
                    ],
                        style={'padding': '1%'}),
                    dbc.Col([
                        html.P('Select End Date & Time',
                               style={'font-weight': 'bold'}),
                        dbc.Col([
                            dbc.Row([
                                dbc.Col(html.P('Date:'), width='auto',
                                        align='center'),
                                dbc.Col(
                                    dcc.DatePickerSingle(
                                        id='results-filter-date-end-id',
                                        display_format='DD-MM-YYYY',
                                        min_date_allowed=minDate.strftime('%Y-%m-%d'),
                                        max_date_allowed=maxDate.strftime('%Y-%m-%d')),
                                    width=True),
                            ]),
                            dbc.Row([
                                dbc.Col(html.P('Hour (0 ~ 23):'),
                                        width='auto', align='center'),
                                dbc.Col(dbc.Input(
                                    id='results-filter-time-end-hr-id',
                                    type='number', min=0, max=23),
                                    width=True),
                            ]),
                            dbc.Row([
                                dbc.Col(html.P('Minute (0 ~ 59):'),
                                        width='auto', align='center'),
                                dbc.Col(dbc.Input(
                                    id='results-filter-time-end-min-id',
                                    type='number', min=0, max=59),
                                    width=True),
                            ]),
                        ]),
                        # dash_datetimepicker.DashDatetimepicker(id='results-filter-datetime'),
                    ],
                        style={'padding': '1%'}),
                    dbc.Col([
                        html.P('Camera ID', style={'font-weight': 'bold'}),
                        dcc.Dropdown(id='results-filter-cam-id',
                                     options=dbquery.get_cam_id_options()),
                    ], style={'padding': '1%'}),
                    dbc.Col([
                        html.P(children='Threshold (Default is 0.60)',
                               style={'font-weight': 'bold'}),
                        dbc.Input(id='results-filter-threshold',
                                  type='number', step=0.01, value=0.6),
                    ], style={'padding': '1%'}),
                    html.Br(),
                    dbc.Button(children="Filter", id='results-filter-button',
                               color="primary", block=True, size='lg'),
                ]),
            ])
        )
    return dbc.Row(children=[
        dbc.Col(
            id='results-page-sidebar',
            children=sidebar_contents,
            width=3,
            style=SIDEBAR_STYLE,
        ),
        dbc.Col(dbc.Spinner(
            id='display-results-col',
            # width=True,
        ), width=True, ),
    ])


@app.callback(
    Output(component_id='camera-id', component_property='options'),
    Input(component_id='database-id', component_property='value'),
)
def update_camera_ids(path_db):
    """Populate the camera dropdown from the selected database."""
    if path_db is not None:
        dbquery = query_database.DbQuery(path_db)
        return dbquery.get_cam_id_options()
    else:
        return []


@app.callback(
    Output(component_id='upload-image-button', component_property='disabled'),
    Input(component_id='database-id', component_property='value'),
)
def update_camera_ids(path_db):
    # NOTE(review): duplicate function name — shadows the camera-options
    # callback above at module level. Dash registers both at decoration
    # time so both callbacks still fire, but the name should differ
    # (e.g. update_upload_button_state). Disable upload until a DB is chosen.
    return path_db is None


@app.callback(
    Output(component_id='db-date-start-id',
           component_property='min_date_allowed'),
    Output(component_id='db-date-end-id',
           component_property='max_date_allowed'),
    Input(component_id='database-id', component_property='value'),
)
def update_db_start_date_min_end_date_max(path_db):
    """Bound both date pickers to the selected database's date range."""
    if path_db is not None:
        dbquery = query_database.DbQuery(path_db)
        minDate, maxDate = dbquery.get_date_range()
        return minDate.strftime('%Y-%m-%d'), maxDate.strftime('%Y-%m-%d')
    else:
        return None, None


@app.callback(
    Output(component_id='db-date-start-id',
           component_property='max_date_allowed'),
    Input(component_id='db-date-end-id', component_property='date'),
    Input(component_id='db-date-end-id',
          component_property='max_date_allowed'),
)
def update_db_start_date_max(end_date, end_max_date):
    """Keep the home-page start date no later than the chosen end date."""
    if end_date is not None:
        return end_date
    else:
        return end_max_date


@app.callback(
    Output(component_id='db-date-end-id',
           component_property='min_date_allowed'),
    Input(component_id='db-date-start-id', component_property='date'),
    Input(component_id='db-date-start-id',
          component_property='min_date_allowed'),
)
def update_db_end_date_min(start_date, start_min_date):
    """Keep the home-page end date no earlier than the chosen start date."""
    if start_date is not None:
        return start_date
    else:
        return start_min_date


@app.callback(
    Output(component_id='results-filter-date-start-id',
           component_property='max_date_allowed'),
    Input(component_id='results-filter-date-end-id',
          component_property='date'),
    Input(component_id='results-filter-date-end-id',
          component_property='max_date_allowed'),
)
def update_results_start_date_max(end_date, end_max_date):
    """Keep the results-filter start date no later than its end date."""
    if end_date is not None:
        return end_date
    else:
        return end_max_date


@app.callback(
    Output(component_id='results-filter-date-end-id',
           component_property='min_date_allowed'),
    Input(component_id='results-filter-date-start-id',
          component_property='date'),
    Input(component_id='results-filter-date-start-id',
          component_property='min_date_allowed'),
)
def update_results_end_date_min(start_date, start_min_date):
    """Keep the results-filter end date no earlier than its start date."""
    if start_date is not None:
        return start_date
    else:
        return start_min_date


@app.callback(
    Output(component_id='view-db-images', component_property='children'),
    Input(component_id='database-id', component_property='value'),
    Input(component_id='db-date-start-id', component_property='date'),
    Input(component_id='db-time-start-hr-id', component_property='value'),
    Input(component_id='db-time-start-min-id', component_property='value'),
    Input(component_id='db-date-end-id', component_property='date'),
    Input(component_id='db-time-end-hr-id', component_property='value'),
    Input(component_id='db-time-end-min-id', component_property='value'),
    Input(component_id='camera-id', component_property='value'),
    Input(component_id='upload-image', component_property='contents'),
    State(component_id='upload-image', component_property='filename'),
)
def show_database_images(path_db, start_date, start_hour, start_minute,
                         end_date, end_hour, end_minute, cam_id,
                         upload_img, upload_filename):
    """Render image cards: uploaded files, or DB images for the filter.

    If the upload control triggered the callback, show the uploaded images;
    otherwise query the selected database by camera and date/time range.
    Each card links to the results page for that image.
    """
    dict_trig = get_callback_trigger()
    if 'upload-image' in dict_trig:
        images_col = []
        for img, filename in zip(upload_img, upload_filename):
            # NOTE(review): f-string with no placeholder — the tooltip always
            # reads "(unknown)"; presumably {filename} was intended.
            tooltip_msg = f"File name: (unknown)"
            images_col.append(
                dbc.Card([
                    dbc.CardLink(
                        dbc.CardImg(
                            src=img,
                            title=tooltip_msg.strip(),
                            style={
                                'width': '8vw',
                                'object-fit': 'contain'
                            },
                        ),
                        key=filename,
                        # f'{urlResults}?{urlencode(url_dict)}'
                        href=get_results_href(
                            path_db, img=img, img_filename=filename)
                    ),
                ])
            )
        return images_col
    elif path_db is not None and cam_id is not None:
        dbimage = query_database.DbQuery(path_db)
        start_datetime = compile_start_datetime(start_date, start_hour,
                                                start_minute)
        end_datetime = compile_end_datetime(end_date, end_hour, end_minute)
        df_images = dbimage.get_images(
            cam_id=cam_id,
            start_datetime=start_datetime,
            end_datetime=end_datetime)
        images_col = []
        for _, row in df_images.iterrows():
            encoded_image = base64.b64encode(row.img)
            # NOTE(review): `components` is built up below but never attached
            # to the emitted card — dead code, or a card body was dropped.
            components = [
                # html.P(f'Camera {row.cam_id}', style={'text-overflow': 'ellipsis', 'width': '8vw', 'margin': '0'})
            ]
            timestamp = row.timestamp
            if timestamp is not None:
                if type(timestamp) == str:
                    timestamp = datetime.strptime(timestamp,
                                                  '%Y-%m-%d %H:%M:%S')
                components.extend([
                    html.P(timestamp.date(),
                           style={'text-overflow': 'ellipsis',
                                  'width': '8vw', 'margin': '0'}),
                    html.P(timestamp.strftime("%X"),
                           style={'text-overflow': 'ellipsis',
                                  'width': '8vw', 'margin': '0'}),
                ])
            components.append(
                html.Img(
                    src='data:image/png;base64,{}'.format(
                        encoded_image.decode()),
                    title=row.img_id,
                    style={
                        'width': '8vw',
                        'object-fit': 'contain'
                    }
                ))
            tooltip_msg = ""
            if (row.img_id is not None):
                tooltip_msg += f"Image ID: {row.img_id}\r\n"
            if (timestamp is not None):
                tooltip_msg += f"Datetime: {timestamp}\r\n"
            if (row.cam_id is not None):
                tooltip_msg += f"Camera ID: {row.cam_id}\r\n"
            if "loc" in df_images.columns:
                tooltip_msg += f'Location: {row["loc"]}\r\n'
            images_col.append(
                dbc.Card([
                    dbc.CardLink(
                        dbc.CardImg(
                            src='data:image/png;base64,{}'.format(
                                encoded_image.decode()),
                            title=tooltip_msg.strip(),
                            style={
                                'width': '8vw',
                                'object-fit': 'contain'
                            },
                        ),
                        key=row.img_id,
                        href=get_results_href(path_db, img_id=row.img_id)  # f'{urlResults}?{urlencode(url_dict)}'
                    ),
                ])
            )
        return images_col
    else:
        return None


@app.callback(
    Output(component_id='results-filter-button', component_property='href'),
    Input(component_id='results-filter-date-start-id',
          component_property='date'),
    Input(component_id='results-filter-time-start-hr-id',
          component_property='value'),
    Input(component_id='results-filter-time-start-min-id',
          component_property='value'),
    Input(component_id='results-filter-date-end-id',
          component_property='date'),
    Input(component_id='results-filter-time-end-hr-id',
          component_property='value'),
    Input(component_id='results-filter-time-end-min-id',
          component_property='value'),
    Input(component_id='results-filter-cam-id', component_property='value'),
    Input(component_id='results-filter-threshold',
          component_property='value'),
    State(component_id='url', component_property='pathname'),
    State(component_id='url', component_property='search'),
)
def update_filter_link(start_date, start_hour, start_minute,
                       end_date, end_hour, end_minute,
                       filter_cam_id, filter_threshold,
                       pathname, search):
    """Rebuild the Filter button's href from the current filter controls.

    NOTE(review): the source chunk is truncated inside this function — the
    remainder (end-datetime handling and the returned URL) is not visible.
    """
    path_db, img_id, img, img_filename, start_datetime, end_datetime, cam_id, threshold = decode_results_search_params(pathname, search)
    # dict_trig = get_callback_trigger()
    # date=None
    # hour=None
    # minute=None
    # if start_datetime is not None:
    #     date = start_datetime.date()
    #     hour = start_datetime.hour
    #     minute = start_datetime.minute
    # if 'results-filter-date-start-id' in dict_trig:
    #     date = start_date
    # if 'results-filter-time-start-hr-id' in dict_trig:
    #     hour = start_hour
    # if 'results-filter-time-start-min-id' in dict_trig:
    #     minute = start_minute
    # start_datetime = compile_datetime(date, hour, minute)
    start_datetime = compile_datetime(start_date, start_hour, start_minute)
    # date = None
    # hour = None
    # minute = None
    # if end_datetime is not None:
    #     date = end_datetime.date()
    #     hour = end_datetime.hour
    #     minute = end_datetime.minute
    # if 'results-filter-date-end-id' in dict_trig:
    #     date = end_date
    #