hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
11fd108bc639c00e56343449b15b63cee71ed98f | 27,541 | py | Python | talent/google/cloud/talent_v4beta1/gapic/company_service_client.py | DaveCheez/google-cloud-python | fc03d4d41f13e9d13db7206438163b3a471fdabd | [
"Apache-2.0"
] | null | null | null | talent/google/cloud/talent_v4beta1/gapic/company_service_client.py | DaveCheez/google-cloud-python | fc03d4d41f13e9d13db7206438163b3a471fdabd | [
"Apache-2.0"
] | null | null | null | talent/google/cloud/talent_v4beta1/gapic/company_service_client.py | DaveCheez/google-cloud-python | fc03d4d41f13e9d13db7206438163b3a471fdabd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.talent.v4beta1 CompanyService API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.cloud.talent_v4beta1.gapic import company_service_client_config
from google.cloud.talent_v4beta1.gapic import enums
from google.cloud.talent_v4beta1.gapic.transports import company_service_grpc_transport
from google.cloud.talent_v4beta1.proto import application_pb2
from google.cloud.talent_v4beta1.proto import application_service_pb2
from google.cloud.talent_v4beta1.proto import application_service_pb2_grpc
from google.cloud.talent_v4beta1.proto import company_pb2
from google.cloud.talent_v4beta1.proto import company_service_pb2
from google.cloud.talent_v4beta1.proto import company_service_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
# Version of this client library, reported in the user-agent of API requests.
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-talent").version
class CompanyServiceClient(object):
    """A service that handles company management, including CRUD and enumeration."""

    # Default host:port used unless an endpoint is supplied via client_options.
    SERVICE_ADDRESS = "jobs.googleapis.com:443"
    """The default address of the service."""

    # The name of the interface for this client. This is the key used to
    # find the method configuration in the client_config dictionary.
    _INTERFACE_NAME = "google.cloud.talent.v4beta1.CompanyService"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CompanyServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def company_path(cls, project, tenant, company):
"""Return a fully-qualified company string."""
return google.api_core.path_template.expand(
"projects/{project}/tenants/{tenant}/companies/{company}",
project=project,
tenant=tenant,
company=company,
)
@classmethod
def company_without_tenant_path(cls, project, company):
"""Return a fully-qualified company_without_tenant string."""
return google.api_core.path_template.expand(
"projects/{project}/companies/{company}", project=project, company=company
)
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
"projects/{project}", project=project
)
@classmethod
def tenant_path(cls, project, tenant):
"""Return a fully-qualified tenant string."""
return google.api_core.path_template.expand(
"projects/{project}/tenants/{tenant}", project=project, tenant=tenant
)
    def __init__(
        self,
        transport=None,
        channel=None,
        credentials=None,
        client_config=None,
        client_info=None,
        client_options=None,
    ):
        """Constructor.

        Args:
            transport (Union[~.CompanyServiceGrpcTransport,
                    Callable[[~.Credentials, type], ~.CompanyServiceGrpcTransport]): A transport
                instance, responsible for actually making the API calls.
                The default transport uses the gRPC protocol.
                This argument may also be a callable which returns a
                transport instance. Callables will be sent the credentials
                as the first argument and the default transport class as
                the second argument.
            channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
                through which to make calls. This argument is mutually exclusive
                with ``credentials``; providing both will raise an exception.
            credentials (google.auth.credentials.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is mutually exclusive with providing a
                transport instance to ``transport``; doing so will raise
                an exception.
            client_config (dict): DEPRECATED. A dictionary of call options for
                each method. If not specified, the default configuration is used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            client_options (Union[dict, google.api_core.client_options.ClientOptions]):
                Client options used to set user options on the client. API Endpoint
                should be set through client_options.
        """
        # Raise deprecation warnings for things we want to go away.
        if client_config is not None:
            warnings.warn(
                "The `client_config` argument is deprecated.",
                PendingDeprecationWarning,
                stacklevel=2,
            )
        else:
            # Fall back to the bundled per-method retry/timeout defaults.
            client_config = company_service_client_config.config

        if channel:
            warnings.warn(
                "The `channel` argument is deprecated; use " "`transport` instead.",
                PendingDeprecationWarning,
                stacklevel=2,
            )

        # Resolve the API endpoint: a client_options value (dict or
        # ClientOptions instance) overrides the default service address.
        api_endpoint = self.SERVICE_ADDRESS
        if client_options:
            if type(client_options) == dict:
                client_options = google.api_core.client_options.from_dict(
                    client_options
                )
            if client_options.api_endpoint:
                api_endpoint = client_options.api_endpoint

        # Instantiate the transport.
        # The transport is responsible for handling serialization and
        # deserialization and actually sending data to the service.
        if transport:
            if callable(transport):
                # `transport` is a factory: it receives the credentials and the
                # default transport class and must return a transport instance.
                self.transport = transport(
                    credentials=credentials,
                    default_class=company_service_grpc_transport.CompanyServiceGrpcTransport,
                    address=api_endpoint,
                )
            else:
                # A concrete transport instance already carries credentials, so
                # passing separate credentials as well is ambiguous.
                if credentials:
                    raise ValueError(
                        "Received both a transport instance and "
                        "credentials; these are mutually exclusive."
                    )
                self.transport = transport
        else:
            self.transport = company_service_grpc_transport.CompanyServiceGrpcTransport(
                address=api_endpoint, channel=channel, credentials=credentials
            )

        if client_info is None:
            client_info = google.api_core.gapic_v1.client_info.ClientInfo(
                gapic_version=_GAPIC_LIBRARY_VERSION
            )
        else:
            # Ensure the reported gapic version matches this library build.
            client_info.gapic_version = _GAPIC_LIBRARY_VERSION
        self._client_info = client_info

        # Parse out the default settings for retry and timeout for each RPC
        # from the client configuration.
        # (Ordinarily, these are the defaults specified in the `*_config.py`
        # file next to this one.)
        self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
            client_config["interfaces"][self._INTERFACE_NAME]
        )

        # Save a dictionary of cached API call functions.
        # These are the actual callables which invoke the proper
        # transport methods, wrapped with `wrap_method` to add retry,
        # timeout, and the like.
        self._inner_api_calls = {}
# Service calls
def create_company(
self,
parent,
company,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a new company entity.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.CompanyServiceClient()
>>>
>>> parent = client.tenant_path('[PROJECT]', '[TENANT]')
>>>
>>> # TODO: Initialize `company`:
>>> company = {}
>>>
>>> response = client.create_company(parent, company)
Args:
parent (str): Required. Resource name of the tenant under which the company is
created.
The format is "projects/{project\_id}/tenants/{tenant\_id}", for
example, "projects/api-test-project/tenant/foo".
Tenant id is optional and a default tenant is created if unspecified,
for example, "projects/api-test-project".
company (Union[dict, ~google.cloud.talent_v4beta1.types.Company]): Required. The company to be created.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.Company`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Company` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_company" not in self._inner_api_calls:
self._inner_api_calls[
"create_company"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_company,
default_retry=self._method_configs["CreateCompany"].retry,
default_timeout=self._method_configs["CreateCompany"].timeout,
client_info=self._client_info,
)
request = company_service_pb2.CreateCompanyRequest(
parent=parent, company=company
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_company"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_company(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Retrieves specified company.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.CompanyServiceClient()
>>>
>>> name = client.company_path('[PROJECT]', '[TENANT]', '[COMPANY]')
>>>
>>> response = client.get_company(name)
Args:
name (str): Required. The resource name of the company to be retrieved.
The format is
"projects/{project\_id}/tenants/{tenant\_id}/companies/{company\_id}",
for example, "projects/api-test-project/tenants/foo/companies/bar".
Tenant id is optional and the default tenant is used if unspecified, for
example, "projects/api-test-project/companies/bar".
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Company` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_company" not in self._inner_api_calls:
self._inner_api_calls[
"get_company"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_company,
default_retry=self._method_configs["GetCompany"].retry,
default_timeout=self._method_configs["GetCompany"].timeout,
client_info=self._client_info,
)
request = company_service_pb2.GetCompanyRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_company"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def update_company(
self,
company,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates specified company.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.CompanyServiceClient()
>>>
>>> # TODO: Initialize `company`:
>>> company = {}
>>>
>>> response = client.update_company(company)
Args:
company (Union[dict, ~google.cloud.talent_v4beta1.types.Company]): Required. The company resource to replace the current resource in the
system.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.Company`
update_mask (Union[dict, ~google.cloud.talent_v4beta1.types.FieldMask]): Optional but strongly recommended for the best service experience.
If ``update_mask`` is provided, only the specified fields in ``company``
are updated. Otherwise all the fields are updated.
A field mask to specify the company fields to be updated. Only top level
fields of ``Company`` are supported.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Company` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_company" not in self._inner_api_calls:
self._inner_api_calls[
"update_company"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_company,
default_retry=self._method_configs["UpdateCompany"].retry,
default_timeout=self._method_configs["UpdateCompany"].timeout,
client_info=self._client_info,
)
request = company_service_pb2.UpdateCompanyRequest(
company=company, update_mask=update_mask
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("company.name", company.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["update_company"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def delete_company(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes specified company.
Prerequisite: The company has no jobs associated with it.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.CompanyServiceClient()
>>>
>>> name = client.company_path('[PROJECT]', '[TENANT]', '[COMPANY]')
>>>
>>> client.delete_company(name)
Args:
name (str): Required. The resource name of the company to be deleted.
The format is
"projects/{project\_id}/tenants/{tenant\_id}/companies/{company\_id}",
for example, "projects/api-test-project/tenants/foo/companies/bar".
Tenant id is optional and the default tenant is used if unspecified, for
example, "projects/api-test-project/companies/bar".
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_company" not in self._inner_api_calls:
self._inner_api_calls[
"delete_company"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_company,
default_retry=self._method_configs["DeleteCompany"].retry,
default_timeout=self._method_configs["DeleteCompany"].timeout,
client_info=self._client_info,
)
request = company_service_pb2.DeleteCompanyRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls["delete_company"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_companies(
self,
parent,
page_size=None,
require_open_jobs=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists all companies associated with the project.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.CompanyServiceClient()
>>>
>>> parent = client.tenant_path('[PROJECT]', '[TENANT]')
>>>
>>> # Iterate over all results
>>> for element in client.list_companies(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_companies(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. Resource name of the tenant under which the company is
created.
The format is "projects/{project\_id}/tenants/{tenant\_id}", for
example, "projects/api-test-project/tenant/foo".
Tenant id is optional and the default tenant is used if unspecified, for
example, "projects/api-test-project".
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
require_open_jobs (bool): Optional. Set to true if the companies requested must have open jobs.
Defaults to false.
If true, at most ``page_size`` of companies are fetched, among which
only those with open jobs are returned.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~google.cloud.talent_v4beta1.types.Company` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_companies" not in self._inner_api_calls:
self._inner_api_calls[
"list_companies"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_companies,
default_retry=self._method_configs["ListCompanies"].retry,
default_timeout=self._method_configs["ListCompanies"].timeout,
client_info=self._client_info,
)
request = company_service_pb2.ListCompaniesRequest(
parent=parent, page_size=page_size, require_open_jobs=require_open_jobs
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_companies"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="companies",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
| 41.53997 | 151 | 0.612033 |
6c14cccfb30e297b0d006cf699609b330d4f4f0d | 6,118 | py | Python | esrally/log.py | alexsapran/rally | 6c4164bcc681b267feac969c100809964aac9c8f | [
"Apache-2.0"
] | 2 | 2019-04-11T10:45:11.000Z | 2019-06-05T15:24:05.000Z | esrally/log.py | alexsapran/rally | 6c4164bcc681b267feac969c100809964aac9c8f | [
"Apache-2.0"
] | null | null | null | esrally/log.py | alexsapran/rally | 6c4164bcc681b267feac969c100809964aac9c8f | [
"Apache-2.0"
] | null | null | null | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import logging.config
import json
import time
import os
import hashlib
from esrally.utils import io
def configure_utc_formatter(*args, **kwargs):
    """
    Logging formatter that renders timestamps in UTC to ensure consistent
    timestamps across all deployments regardless of machine settings.
    """
    utc_formatter = logging.Formatter(fmt=kwargs["format"], datefmt=kwargs["datefmt"])
    # Render %(asctime)s in UTC instead of local time.
    utc_formatter.converter = time.gmtime
    return utc_formatter
def log_config_path():
    """
    :return: The absolute path to Rally's log configuration file.
    """
    rally_home = os.path.join(os.path.expanduser("~"), ".rally")
    return os.path.join(rally_home, "logging.json")
def default_log_path():
    """
    :return: The absolute path to the directory that contains Rally's log file.
    """
    rally_home = os.path.join(os.path.expanduser("~"), ".rally")
    return os.path.join(rally_home, "logs")
def remove_obsolete_default_log_config():
    """
    Log rotation is problematic because Rally uses multiple processes and there is a lurking race condition when
    rolling log files. Hence, we do not rotate logs from within Rally and leverage established tools like logrotate for that.

    Checks whether the user has a problematic out-of-the-box logging configuration delivered with Rally 1.0.0 which
    used log rotation and removes it so it can be replaced by a new one in a later step.
    """
    log_config = log_config_path()
    if io.exists(log_config):
        # Render the bundled 1.0.0 template exactly as it would have been
        # installed (with the log path placeholder substituted) ...
        source_path = io.normalize_path(os.path.join(os.path.dirname(__file__), "resources", "logging_1_0_0.json"))
        with open(source_path, "r", encoding="UTF-8") as src:
            contents = src.read().replace("${LOG_PATH}", default_log_path())
            source_hash = hashlib.sha512(contents.encode()).hexdigest()
        with open(log_config, "r", encoding="UTF-8") as target:
            target_hash = hashlib.sha512(target.read().encode()).hexdigest()
        # ... and only rename the user's config aside if it is an unmodified
        # copy of that template; user-customized configs are left untouched.
        if source_hash == target_hash:
            os.rename(log_config, "{}.bak".format(log_config))
def install_default_log_config():
    """
    Ensures a log configuration file is present on this machine. The default
    log configuration is based on the template in resources/logging.json.

    It also ensures that the default log path has been created so log files
    can be successfully opened in that directory.
    """
    log_config = log_config_path()
    # Never overwrite an existing (possibly user-modified) configuration.
    if not io.exists(log_config):
        io.ensure_dir(io.dirname(log_config))
        source_path = io.normalize_path(os.path.join(os.path.dirname(__file__), "resources", "logging.json"))
        with open(log_config, "w", encoding="UTF-8") as target:
            with open(source_path, "r", encoding="UTF-8") as src:
                # Substitute the placeholder so the installed config points at
                # this user's log directory.
                contents = src.read().replace("${LOG_PATH}", default_log_path())
                target.write(contents)
    io.ensure_dir(default_log_path())
def load_configuration():
    """
    Loads the logging configuration. This is a low-level method and usually
    `configure_logging()` should be used instead.

    :return: The logging configuration as `dict` instance.
    """
    # The config file is written as UTF-8 by install_default_log_config(), so
    # read it back with an explicit encoding instead of the locale default
    # (consistent with every other open() call in this module).
    with open(log_config_path(), encoding="UTF-8") as f:
        return json.load(f)
def post_configure_actor_logging():
    """
    Reconfigures all loggers in actor processes.

    See https://groups.google.com/forum/#!topic/thespianpy/FntU9umtvhc for the rationale.
    """
    # see configure_logging()
    logging.captureWarnings(True)

    # at this point we can assume that a log configuration exists. It has been created already during startup.
    logger_configuration = load_configuration()
    if "root" in logger_configuration and "level" in logger_configuration["root"]:
        root_logger = logging.getLogger()
        root_logger.setLevel(logger_configuration["root"]["level"])
    # Reuse the configuration loaded above instead of re-reading the file
    # (the original called load_configuration() a second time here).
    if "loggers" in logger_configuration:
        for lgr, cfg in logger_configuration["loggers"].items():
            if "level" in cfg:
                logging.getLogger(lgr).setLevel(cfg["level"])
def configure_logging():
    """
    Configures logging for the current process by applying the configuration
    returned by `load_configuration()` (i.e. the file at `log_config_path()`).
    """
    logging.config.dictConfig(load_configuration())

    # Avoid failures such as the following (shortened a bit):
    #
    # ---------------------------------------------------------------------------------------------
    # "esrally/driver/driver.py", line 220, in create_client
    # "thespian-3.8.0-py3.5.egg/thespian/actors.py", line 187, in createActor
    # [...]
    # "thespian-3.8.0-py3.5.egg/thespian/system/multiprocCommon.py", line 348, in _startChildActor
    # "python3.5/multiprocessing/process.py", line 105, in start
    # "python3.5/multiprocessing/context.py", line 267, in _Popen
    # "python3.5/multiprocessing/popen_fork.py", line 18, in __init__
    # sys.stderr.flush()
    #
    # OSError: [Errno 5] Input/output error
    # ---------------------------------------------------------------------------------------------
    #
    # This is caused by urllib3 wanting to send warnings about insecure SSL connections to stderr when we disable them (in client.py) with:
    #
    # urllib3.disable_warnings()
    #
    # The filtering functionality of the warnings module causes the error above on some systems. If we instead redirect the warning output
    # to our logs instead of stderr (which is the warnings module's default), we can disable warnings safely.
    logging.captureWarnings(True)
| 40.25 | 139 | 0.680615 |
a3725bdc3f12580b8310b1115e34abf0f1e083ca | 674 | py | Python | students/K33401/Klishin_Nikita/Lr_3/peer_review_system/manage.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 4 | 2020-09-03T15:41:42.000Z | 2021-12-24T15:28:20.000Z | students/K33401/Klishin_Nikita/Lr_3/peer_review_system/manage.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 48 | 2020-09-13T20:22:42.000Z | 2021-04-30T11:13:30.000Z | students/K33401/Klishin_Nikita/Lr_3/peer_review_system/manage.py | ShubhamKunal/ITMO_ICT_WebDevelopment_2020-2021 | bb91c91a56d21cec2b12ae4cc722eaa652a88420 | [
"MIT"
] | 69 | 2020-09-06T10:32:37.000Z | 2021-11-28T18:13:17.000Z | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: configure settings, then dispatch to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'peer_review_system.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while chaining the original cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 29.304348 | 82 | 0.683976 |
44523a50564b4376c24be98aad1f8b2090959c7f | 7,638 | py | Python | chrome/common/extensions/docs/server2/api_list_data_source_test.py | shaochangbin/chromium-crosswalk | 634d34e4cf82b4f7400357c53ec12efaffe94add | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2019-01-16T03:57:28.000Z | 2021-01-23T15:29:45.000Z | chrome/common/extensions/docs/server2/api_list_data_source_test.py | shaochangbin/chromium-crosswalk | 634d34e4cf82b4f7400357c53ec12efaffe94add | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | chrome/common/extensions/docs/server2/api_list_data_source_test.py | shaochangbin/chromium-crosswalk | 634d34e4cf82b4f7400357c53ec12efaffe94add | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2017-03-15T13:21:38.000Z | 2017-03-15T13:21:38.000Z | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import json
from api_list_data_source import APIListDataSource
from extensions_paths import CHROME_EXTENSIONS
from server_instance import ServerInstance
from test_file_system import TestFileSystem
def _ToTestData(obj):
'''Transforms |obj| into test data by turning a list of files into an object
mapping that file to its contents (derived from its name).
'''
return dict((name, name) for name in obj)
def _ToTestFeatures(names):
'''Transforms a list of strings into a minimal JSON features object.
'''
def platforms_to_extension_types(platforms):
return ['platform_app' if platform == 'apps' else 'extension'
for platform in platforms]
features = dict((name, {
'name': name,
'extension_types': platforms_to_extension_types(platforms),
}) for name, platforms in names)
features['sockets.udp']['channel'] = 'dev'
return features
def _ToTestApiData(names):
api_data = dict((name, [{'namespace': name, 'description': description}])
for name, description in names)
return api_data
def _ToTestApiSchema(names, apis):
  '''Serializes the test API data for each (name, json_file) pair in |names|
  into apis['api'][json_file], and returns the mutated |apis| dict.
  Relies on the module-level _TEST_API_DATA table.
  '''
  for name, json_file in names:
    apis['api'][json_file] = json.dumps(_TEST_API_DATA[name])
  return apis
_TEST_API_FEATURES = _ToTestFeatures([
('alarms', ['apps', 'extensions']),
('app.window', ['apps']),
('browserAction', ['extensions']),
('experimental.bluetooth', ['apps']),
('experimental.history', ['extensions'],),
('experimental.power', ['apps', 'extensions']),
('infobars', ['extensions']),
('something_internal', ['apps']),
('something_else_internal', ['extensions']),
('storage', ['apps', 'extensions']),
('sockets.udp', ['apps', 'extensions'])
])
_TEST_API_DATA = _ToTestApiData([
('alarms', u'<code>alarms</code>'),
('app.window', u'<code>app.window</code>'),
('browserAction', u'<code>browserAction</code>'),
('experimental.bluetooth', u'<code>experimental.bluetooth</code>'),
('experimental.history', u'<code>experimental.history</code>'),
('experimental.power', u'<code>experimental.power</code>'),
('infobars', u'<code>infobars</code>'),
('something_internal', u'<code>something_internal</code>'),
('something_else_internal', u'<code>something_else_internal</code>'),
('storage', u'<code>storage</code>'),
('sockets.udp', u'<code>sockets.udp</code>')
])
_TEST_API_SCHEMA = [
('alarms', 'alarms.json'),
('app.window', 'app_window.json'),
('browserAction', 'browser_action.json'),
('experimental.bluetooth', 'experimental_bluetooth.json'),
('experimental.history', 'experimental_history.json'),
('experimental.power', 'experimental_power.json'),
('infobars', 'infobars.json'),
('something_internal', 'something_internal.json'),
('something_else_internal', 'something_else_internal.json'),
('storage', 'storage.json'),
('sockets.udp', 'sockets_udp.json')
]
_TEST_DATA = _ToTestApiSchema(_TEST_API_SCHEMA, {
'api': {
'_api_features.json': json.dumps(_TEST_API_FEATURES),
'_manifest_features.json': '{}',
'_permission_features.json': '{}',
},
'docs': {
'templates': {
'json': {
'api_availabilities.json': '{}',
'manifest.json': '{}',
'permissions.json': '{}',
},
'public': {
'apps': _ToTestData([
'alarms.html',
'app_window.html',
'experimental_bluetooth.html',
'experimental_power.html',
'storage.html',
'sockets_udp.html'
]),
'extensions': _ToTestData([
'alarms.html',
'browserAction.html',
'experimental_history.html',
'experimental_power.html',
'infobars.html',
'storage.html',
'sockets_udp.html'
]),
},
},
},
})
class APIListDataSourceTest(unittest.TestCase):
def setUp(self):
server_instance = ServerInstance.ForTest(
TestFileSystem(_TEST_DATA, relative_to=CHROME_EXTENSIONS))
# APIListDataSource takes a request but doesn't use it,
# so put None
self._api_list = APIListDataSource(server_instance, None)
self.maxDiff = None
def testApps(self):
self.assertEqual({
'stable': [
{
'name': 'alarms',
'platforms': ['apps', 'extensions'],
'version': 5,
'description': u'<code>alarms</code>'
},
{
'name': 'app.window',
'platforms': ['apps'],
# Availability logic will look for a camelCase format filename
# (i.e. 'app.window.html') at version 20 and below, but the
# unix_name format above won't be found at these versions.
'version': 21,
'description': u'<code>app.window</code>'
},
{
'name': 'storage',
'platforms': ['apps', 'extensions'],
'last': True,
'version': 5,
'description': u'<code>storage</code>'
}],
'dev': [
{
'name': 'sockets.udp',
'platforms': ['apps', 'extensions'],
'last': True,
'description': u'<code>sockets.udp</code>'
}],
'beta': [],
'trunk': []
}, self._api_list.get('apps').get('chrome'))
def testExperimentalApps(self):
self.assertEqual([
{
'name': 'experimental.bluetooth',
'platforms': ['apps'],
'description': u'<code>experimental.bluetooth</code>'
},
{
'name': 'experimental.power',
'platforms': ['apps', 'extensions'],
'last': True,
'description': u'<code>experimental.power</code>'
}], self._api_list.get('apps').get('experimental'))
def testExtensions(self):
self.assertEqual({
'stable': [
{
'name': 'alarms',
'platforms': ['apps', 'extensions'],
'version': 5,
'description': u'<code>alarms</code>'
},
{
'name': 'browserAction',
'platforms': ['extensions'],
# See comment above for 'app.window'.
'version': 21,
'description': u'<code>browserAction</code>'
},
{
'name': 'infobars',
'platforms': ['extensions'],
'version': 5,
'description': u'<code>infobars</code>'
},
{
'name': 'storage',
'platforms': ['apps', 'extensions'],
'last': True,
'version': 5,
'description': u'<code>storage</code>'
}],
'dev': [
{
'name': 'sockets.udp',
'platforms': ['apps', 'extensions'],
'last': True,
'description': u'<code>sockets.udp</code>'
}],
'beta': [],
'trunk': []
}, self._api_list.get('extensions').get('chrome'))
def testExperimentalExtensions(self):
self.assertEqual([
{
'name': 'experimental.history',
'platforms': ['extensions'],
'description': u'<code>experimental.history</code>'
},
{
'name': 'experimental.power',
'platforms': ['apps', 'extensions'],
'description': u'<code>experimental.power</code>',
'last': True
}], self._api_list.get('extensions').get('experimental'))
if __name__ == '__main__':
unittest.main()
| 31.17551 | 80 | 0.567819 |
641aad101b3666e6b27e675bc56de8904532fbbb | 7,134 | py | Python | contexto/utils/stanza_funcs.py | sergiomora03/ConTexto | 6e67f13222c9b6310b4c3d112bd5c7fead7fe100 | [
"X11"
] | null | null | null | contexto/utils/stanza_funcs.py | sergiomora03/ConTexto | 6e67f13222c9b6310b4c3d112bd5c7fead7fe100 | [
"X11"
] | null | null | null | contexto/utils/stanza_funcs.py | sergiomora03/ConTexto | 6e67f13222c9b6310b4c3d112bd5c7fead7fe100 | [
"X11"
] | null | null | null | import os
import stanza
import torch
def stanza_pipeline(lenguaje, procesadores='tokenize, pos, lemma', modelo_lemas='',
                    modelo_ner='', modelo_pos=''):
    """
    Load and return a Stanza pipeline for the given language with the
    processors requested by the user (tokenization, Part of Speech,
    lemmatization, Named Entity Recognition, ...). For details on the
    available processors and pipelines see
    https://stanfordnlp.github.io/stanza/pipeline.html#processors.

    :param lenguaje: (str). Language for which the Stanza models are loaded.
        Available models per language are listed at
        https://stanfordnlp.github.io/stanza/available_models.html
    :param procesadores: (str). Default: 'tokenize, pos, lemma'.
        Comma-separated list of processors (tasks) to include in the pipeline.
    :param modelo_lemas: (str). Default: ''. Path of a custom lemmatization
        model file. If empty, Stanza's stock processor for the language is used.
    :param modelo_ner: (str). Default: ''. Path of a custom NER model file.
        If empty, Stanza's stock processor for the language is used.
    :param modelo_pos: (str). Default: ''. Path of a custom POS model file.
        If empty, Stanza's stock processor for the language is used.
    :return: (stanza Pipeline). Pipeline for the given language with the
        requested processors. If the required models are not already on disk,
        they are downloaded first, which may take several minutes.
    """
    # Base pipeline configuration.
    config = {
        'processors': procesadores,
        'lang': lenguaje,
    }
    # Register any custom model paths supplied by the caller.
    if modelo_pos != '':
        config['pos_model_path'] = modelo_pos
    if modelo_lemas != '':
        config['lemma_model_path'] = modelo_lemas
    if modelo_ner != '':
        config['ner_model_path'] = modelo_ner
    # Try to build the pipeline. If the language model has not been
    # downloaded yet, Stanza raises, so download it and retry once.
    # NOTE: the previous code caught BaseException, which also swallowed
    # KeyboardInterrupt/SystemExit; Exception is broad enough here.
    try:
        nlp_pipe = stanza.Pipeline(**config, verbose=0)
    except Exception:
        print('[INFO] Descargando modelo. Este proceso puede tardar varios minutos.\n')
        stanza.download(lenguaje)
        nlp_pipe = stanza.Pipeline(**config, verbose=0)
    # Return the assembled pipeline
    return nlp_pipe
def modificar_modelo(nlp_pipe, tipo, nuevo_diccionario, archivo_entrada='',
                     archivo_salida='', gpu=False):
    """
    Modify a processor of an existing Stanza pipeline using the entries of an
    input dictionary.

    :param nlp_pipe: (stanza Pipeline). Pipeline containing the processor to modify.
    :param tipo: (str) ['lemma', 'pos', 'tokenize']. Type of processor/model to modify.
    :param nuevo_diccionario: (dict). Elements to add or change in the model.
        Keys and values depend on the model type; e.g. for the lemmatizer
        (tipo='lemma') keys are words (or tuples for compounds) and values are lemmas.
    :param archivo_entrada: (str). Default: ''. Path of the model file to modify.
        If empty, the processor currently loaded in the pipeline is used.
    :param archivo_salida: (str). Default: ''. Path where the modified model is
        saved. If empty, the pipeline with the modified model is still returned,
        but the model file itself is not kept.
    :param gpu: (bool). Default: False. Whether to load the PyTorch objects on GPU.
    :return: (stanza Pipeline). Input pipeline after modifying the given processor.
    """
    # Determine the location of the model to modify
    tipo = tipo.lower()
    if archivo_entrada == '':
        # Pick the first loaded processor whose repr mentions the type name.
        procesador = [
            i for i in nlp_pipe.loaded_processors if tipo in str(i).lower()][0]
        archivo_entrada = procesador.config['model_path']
    # Load the model and its dictionaries
    locacion = 'gpu' if gpu else 'cpu'
    modelo = torch.load(archivo_entrada, map_location=locacion)
    if 'lemma' in tipo:
        dict_palabras, dict_compuesto = modelo['dicts']
        # Add the new words; tuple keys go into the compound dictionary.
        for key in nuevo_diccionario:
            if isinstance(key, tuple):
                dict_compuesto[key] = nuevo_diccionario[key]
            else:
                dict_palabras[key] = nuevo_diccionario[key]
    # TODO: the remaining cases are still to be completed, to be looked at
    # when POS and NER support is added to the library
    else:
        dict_vocab = modelo['vocab']
        # The per-case process is still missing here
    # Decide where the modified model will be saved
    borrar_modelo = False
    if archivo_salida == '':
        borrar_modelo = True
        archivo_salida = "{}.pt".format(os.getpid())
    # Save the modified model
    torch.save(modelo, archivo_salida)
    # Load the modified model into a fresh pipeline
    tipo = tipo.lower()
    if tipo == 'lemma':
        nlp_pipe = stanza_pipeline('es', modelo_lemas=archivo_salida)
    elif tipo == 'pos':
        nlp_pipe = stanza_pipeline('es', modelo_pos=archivo_salida)
    elif tipo == 'ner':
        nlp_pipe = stanza_pipeline('es', modelo_ner=archivo_salida)
    # If no output location was given, the temporary model file is deleted
    if borrar_modelo:
        os.remove(archivo_salida)
    # Return the pipeline with the modified model
    return nlp_pipe
| 57.532258 | 121 | 0.695402 |
7eeb4d9cc91b04bdce54dd0fdf76feb816774470 | 7,147 | py | Python | common_files/slack_helpers.py | ycaoT/socless-slack | 887a486eb5ff29e168d4dba183193f4920a52247 | [
"Apache-2.0"
] | 8 | 2019-10-21T03:53:53.000Z | 2020-05-21T23:53:31.000Z | common_files/slack_helpers.py | ycaoT/socless-slack | 887a486eb5ff29e168d4dba183193f4920a52247 | [
"Apache-2.0"
] | 11 | 2019-10-23T22:02:56.000Z | 2021-08-04T19:14:10.000Z | common_files/slack_helpers.py | ycaoT/socless-slack | 887a486eb5ff29e168d4dba183193f4920a52247 | [
"Apache-2.0"
] | 12 | 2019-10-03T20:45:52.000Z | 2021-12-07T19:08:17.000Z | import os
import boto3
from slack_sdk import WebClient
from slack_sdk.signature import SignatureVerifier
from slack_sdk.web.slack_response import SlackResponse
CACHE_USERS_TABLE = os.environ.get("CACHE_USERS_TABLE")
SOCLESS_BOT_TOKEN = os.environ["SOCLESS_BOT_TOKEN"]
class SlackError(Exception):
    """Raised for Slack-specific failures (bad API responses, missing
    request parameters)."""

    pass
def is_event_from_slack(event: dict, signing_secret: str) -> bool:
    """Verify that an incoming request was signed by Slack.

    Uses slack_sdk's SignatureVerifier to check the signature headers in
    ``event["headers"]`` against the raw request body in ``event["body"]``.
    """
    signature_verifier = SignatureVerifier(signing_secret)
    return signature_verifier.is_valid_request(event["body"], event["headers"])
def get_bot_friendly_name_from_endpoint_query_params(event: dict) -> str:
    """Extract the bot's friendly name from the ``bot`` query-string parameter.

    Raises SlackError when the parameter (or the whole query string) is absent.
    """
    try:
        return event["queryStringParameters"]["bot"]
    except (KeyError, TypeError) as err:
        raise SlackError(
            f"No bot friendly name provided via query params. Example: <aws_url>/<stage>/slack?bot=tsirt_dev \n {err}"
        )
class SlackHelper:
    """Convenience wrapper around slack_sdk's WebClient with user lookup,
    optional DynamoDB caching of username -> slack_id mappings, and a
    post-message helper."""

    def __init__(self, token="") -> None:
        # Fall back to the deployment-wide bot token when none is supplied.
        if not token:
            token = SOCLESS_BOT_TOKEN
        self.client = WebClient(token)
    def find_user(self, name: str, page_limit=1000, include_locale="false"):
        """Find a user's Slack profile based on their full or display name.
        Args:
            name: A user's Full Name or Display Name
        """
        name_lower = name.lower()
        paginate = True
        next_cursor = ""
        # Walk users.list pages via cursor pagination until a match is found
        # or the cursor comes back empty.
        while paginate:
            resp = self.client.users_list(
                cursor=next_cursor, limit=page_limit, include_locale=include_locale
            )
            data = resp.data
            next_cursor = resp.data["response_metadata"].get("next_cursor", "")
            if not next_cursor:
                paginate = False
            for user in data["members"]:
                # Compare case-insensitively against the handle, top-level
                # real name, and profile real name.
                user_names = list(
                    map(
                        str.lower,
                        [
                            user.get("name", ""),
                            user.get("real_name", ""),
                            user.get("profile", {}).get("real_name", ""),
                        ],
                    )
                )
                if name_lower in user_names:
                    return {"found": True, "user": user}
        return {"found": False}
    def get_slack_id_from_username(self, username: str):
        """Fetch user's slack_id from their username.
        Checks against the dynamoDB cache (if enabled), or paginates through slack API users.list
        looking for the supplied username. If cache enabled, saves the found slack_id
        Args:
            username : (string) slack username (usually display_name)
        Returns:
            slack_id
        """
        slack_id = get_id_from_cache(username) if CACHE_USERS_TABLE else ""
        if not slack_id:
            search = self.find_user(username)
            if not search["found"]:
                raise Exception(f"Unable to find user: {username}")
            slack_id = search["user"]["id"]
            if CACHE_USERS_TABLE:
                save_user_to_cache(username=username, slack_id=slack_id)
        return slack_id
    def get_user_info_via_id(self, slack_id):
        """API Docs https://api.slack.com/methods/users.info"""
        resp = self.client.users_info(user=slack_id)
        return resp["user"]
    def resolve_slack_target(self, target_name: str, target_type: str) -> str:
        """Fetches the ID of a Slack Channel or User.
        Args:
            target_name: (string) The name of the channel / name of user / slack id of channel/user
            target_type: (string) The Channel type, either "user" or "channel" or "slack_id"
        Returns:
            (string) A Slack ID that can be used to message the channel directly
        """
        if target_type == "slack_id":
            slack_id = target_name
        elif target_type == "user":
            slack_id = self.get_slack_id_from_username(target_name)
        elif target_type == "channel":
            # Channels are addressed by "#name"; prepend the hash if missing.
            slack_id = target_name if target_name.startswith("#") else f"#{target_name}"
        else:
            raise Exception(
                f"target_type is not 'channel|user|slack_id'. failed target_type: {target_type} for target: {target_name}"
            )
        return slack_id
    def slack_post_msg_wrapper(self, target, target_type, **kwargs) -> SlackResponse:
        """Resolve ``target`` and post a chat message to it, raising
        SlackError when the API reports a failure."""
        target_id = self.resolve_slack_target(target, target_type)
        resp = self.client.chat_postMessage(channel=target_id, **kwargs)
        if not resp.data["ok"]:
            raise SlackError(
                f"Slack error during post_message to {target}: {resp.data['error']}"
            )
        print(f'returned channel: {resp["channel"]}')
        return resp
def get_id_from_cache(username: str) -> str:
    """Look up a cached username -> slack_id mapping in DynamoDB.

    Args:
        username: slack username

    Returns:
        The cached slack_id, or an empty string when the user is not cached.

    Raises:
        Exception: if the CACHE_USERS_TABLE env var is not configured.
    """
    # Validate configuration before creating any AWS resources.
    if not CACHE_USERS_TABLE:
        raise Exception(
            "env var CACHE_USERS_TABLE is not set, please check socless-slack serverless.yml"
        )
    dynamodb = boto3.resource("dynamodb")
    table_resource = dynamodb.Table(CACHE_USERS_TABLE)
    key_obj = {"username": username}
    response = table_resource.get_item(TableName=CACHE_USERS_TABLE, Key=key_obj)
    # Return a str (not False) on a miss so the annotated return type holds;
    # "" is falsy, so existing truthiness checks in callers are unaffected.
    return response["Item"]["slack_id"] if "Item" in response else ""
def save_user_to_cache(username: str, slack_id: str):
    """Save a username -> slack_id mapping to the cache table
    Args:
        username: slack username
        slack_id: user's slack id
    """
    dynamodb = boto3.resource("dynamodb")
    # Fail fast when the cache table is not configured for this deployment.
    if not CACHE_USERS_TABLE:
        raise Exception(
            "env var CACHE_USERS_TABLE is not set, please check socless-slack serverless.yml"
        )
    table_resource = dynamodb.Table(CACHE_USERS_TABLE)
    new_item = {"username": username, "slack_id": slack_id}
    # put_item overwrites any existing mapping for this username.
    response = table_resource.put_item(TableName=CACHE_USERS_TABLE, Item=new_item)
    print(response)
def paginated_api_call(api_method, response_objects_name, **kwargs):
    """Call a cursor-paginated API method and collect names across all pages.

    Args:
        api_method: API method to invoke repeatedly
        response_objects_name: key of the collection in each response page
        kwargs: extra url params forwarded to every call; ``limit`` and
            ``cursor`` are supplied automatically
    """
    names = []
    page_size = 1000
    next_cursor = None
    # An empty-string cursor means the final page has been consumed.
    while next_cursor != "":
        if next_cursor is None:
            page = api_method(limit=page_size, **kwargs)
        else:
            page = api_method(limit=page_size, cursor=next_cursor, **kwargs)
        items = page.get(response_objects_name)
        if items is not None:
            for item in items:
                # Entries may be plain strings or dicts carrying a "name" key.
                names.append(item if isinstance(item, str) else item.get("name"))
        meta = page.get("response_metadata")
        next_cursor = meta["next_cursor"] if meta is not None else ""
    return names
| 36.09596 | 122 | 0.613404 |
59e76e2cd233cf99cdb241fb7d26926426a14dc4 | 7,236 | py | Python | src/cogent3/maths/stats/period.py | tla256/cogent3 | 58533d11852d0cafbc0cfc6ae26429a6c0b2cb75 | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/maths/stats/period.py | tla256/cogent3 | 58533d11852d0cafbc0cfc6ae26429a6c0b2cb75 | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/maths/stats/period.py | tla256/cogent3 | 58533d11852d0cafbc0cfc6ae26429a6c0b2cb75 | [
"BSD-3-Clause"
] | null | null | null | from random import choice, random, shuffle
import numpy
from cogent3.maths.stats.special import igam
try:
from math import factorial
except ImportError: # python version < 2.6
from cogent3.maths.stats.special import Gamma
factorial = lambda x: Gamma(x + 1)
__author__ = "Hua Ying, Julien Epps and Gavin Huttley"
__copyright__ = "Copyright 2007-2019, The Cogent Project"
__credits__ = ["Julien Epps", "Hua Ying", "Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2019.12.6a"
__maintainer__ = "Gavin Huttley"
__email__ = "Gavin.Huttley@anu.edu.au"
__status__ = "Production"
def chi_square(x, p, df=1):
    """Return the chi-square statistic for lag ``p`` and its probability.

    ``x`` is a binary (0/1) sequence; the statistic measures how often
    symbols ``p`` positions apart disagree.
    """
    n = len(x)
    # 1 where x[i] == x[i+p], 0 where they differ
    matches = numpy.logical_not(numpy.logical_xor(x[: n - p], x[p:n])) * 1
    mismatch_total = ((numpy.ones((n - p,), float) - matches) ** 2).sum()
    stat = mismatch_total / (n - p)
    prob = 1 - igam(df / 2.0, stat / 2)
    return stat, prob
def g_statistic(X, p=None, idx=None):
    """Return Fisher's g statistic and its p-value for a periodicity profile.

    arguments:
        X - the periodicity profile (e.g. DFT magnitudes, autocorrelation etc).
            X needs to contain only those period values being considered,
            i.e. only periods in the range [llim, ulim]
        p - optional pre-computed peak power; when given, idx must locate it
        idx - index of the peak power within X
    """
    # work on magnitudes so the profile is real-valued
    spectrum = abs(numpy.array(X))
    if p is None:
        peak = spectrum.max(0)
        idx = spectrum.argmax(0)
    else:
        assert idx is not None
        peak = spectrum[idx]
    g_obs = peak / spectrum.sum()
    num_periods = len(spectrum)
    upper = numpy.floor(1 / g_obs)
    terms = numpy.zeros((int(upper + 1),), float)
    n_fact = factorial(num_periods)
    # alternating-series expansion of the exact p-value
    for k in range(1, min(num_periods, int(upper)) + 1):
        term = (-1) ** (k - 1) * n_fact / factorial(num_periods - k) / factorial(k)
        terms[k] = term * (1 - k * g_obs) ** (num_periods - 1)
    return g_obs, terms.sum()
def _seq_to_symbols(seq, motifs, motif_length, result=None):
"""return symbolic represetation of the sequence
Parameters
----------
seq
a sequence
motifs
a list of sequence motifs
motif_length
length of first motif
"""
if result is None:
result = numpy.zeros(len(seq), numpy.uint8)
else:
result.fill(0)
if motif_length is None:
motif_length = len(motifs[0])
for i in range(len(seq) - motif_length + 1):
if seq[i : i + motif_length] in motifs:
result[i] = 1
return result
try:
from cogent3.maths._period import seq_to_symbols
except ImportError:
seq_to_symbols = _seq_to_symbols
class SeqToSymbols(object):
    """class for converting all occurrences of motifs in passed sequence
    to 1/0 otherwise"""
    def __init__(self, motifs, length=None, motif_length=None):
        super(SeqToSymbols, self).__init__()
        # Normalize to a list of bytes motifs; str motifs are utf8-encoded,
        # anything already bytes-like is left alone.
        if type(motifs) == str:
            motifs = [motifs]
        for i in range(len(motifs)):
            try:
                motifs[i] = motifs[i].encode("utf8")
            except AttributeError:
                pass
        self.motifs = motifs
        self.length = length
        self.motif_length = motif_length or len(motifs[0])
        # Reusable output buffer, allocated lazily (or now, if length given).
        self.working = None
        if length is not None:
            self.setResultArray(length)
    def setResultArray(self, length):
        """sets a result array for length"""
        self.working = numpy.zeros(length, numpy.uint8)
        self.length = length
    def __call__(self, seq, result=None):
        # NOTE(review): in this first branch the freshly allocated
        # self.working is not bound to `result`, so seq_to_symbols allocates
        # its own output on the first call — confirm whether intentional.
        if result is None and self.working is None:
            self.setResultArray(len(seq))
        elif self.working is not None:
            # Re-allocate the shared buffer if the sequence length changed.
            if len(seq) != self.working.shape[0]:
                self.setResultArray(len(seq))
            result = self.working
            result.fill(0)
        # Coerce the sequence to bytes: encode str, join iterables of bytes.
        if type(seq) == str:
            seq = seq.encode("utf8")
        elif type(seq) != bytes:
            seq = b"".join(seq)
        return seq_to_symbols(seq, self.motifs, self.motif_length, result)
def circular_indices(vector, start, length, num):
    """Return ``num`` consecutive entries of ``vector`` beginning at ``start``,
    wrapping around to the beginning when the end is passed.
    """
    if start > length:
        start -= length
    end = start + num
    if end < length:
        return vector[start:end]
    # take the tail, then wrap to the head for the remainder
    return vector[start:] + vector[: end - length]
def sampled_places(block_size, length):
    """Return a random resampling of positions [0, length) composed of
    contiguous (circular) blocks of ``block_size``.

    The final partial block (when ``length`` is not a multiple of
    ``block_size``) is taken immediately after the last full block, or from a
    fresh random start when no full block fits.
    """
    # Main condition is to identify when a draw would run off end, we want to
    # draw from beginning
    num_seg, remainder = divmod(length, block_size)
    vector = list(range(length))
    result = []
    start = None
    for seg_num in range(num_seg):
        start = choice(vector)
        result += circular_indices(vector, start, length, block_size)
    if remainder:
        if start is None:
            # length < block_size: no full segment was drawn, so the previous
            # implementation hit an unbound variable here; draw a fresh
            # random start for the single partial block instead.
            start = choice(vector)
            result += circular_indices(vector, start, length, remainder)
        else:
            result += circular_indices(vector, start + block_size, length, remainder)
    assert len(result) == length, len(result)
    return result
def blockwise_bootstrap(
    signal, calc, block_size, num_reps, seq_to_symbols=None, num_stats=None
):
    """returns observed statistic and the probability from the bootstrap
    test of observing more `power' by chance than that estimated from the
    observed signal

    Parameters
    ----------
    signal
        a series, can be a sequence object
    calc
        function to calculate the period power, e.g. ipdft, hybrid,
        auto_corr or any other statistic.
    block_size
        size of contiguous values for resampling
    num_reps
        number of randomly generated permutations
    seq_to_symbols
        function to convert a sequence to 1/0. If not
        provided, the raw data is used.
    num_stats
        the number of statistics being evaluated for each
        interation. Default to 1.
    """
    signal_length = len(signal)
    # When converting to symbols, store the signal as single characters.
    if seq_to_symbols is not None:
        dtype = "c"
    else:
        dtype = None  # let numpy guess
    signal = numpy.array(list(signal), dtype=dtype)
    # Statistic is computed on the symbolic (0/1) form when a converter is given.
    if seq_to_symbols is not None:
        symbolic = seq_to_symbols(signal)
        data = symbolic
    else:
        data = signal
    obs_stat = calc(data)
    if seq_to_symbols is not None:
        if sum(symbolic) == 0:
            # No motif occurrences at all: p-value is 1 by construction.
            # The [..][num_stats == 1] index picks the scalar form when a
            # single statistic is evaluated, else a 3-element array.
            p = [numpy.array([1.0, 1.0, 1.0]), 1.0][num_stats == 1]
            return obs_stat, p
    if num_stats is None:
        try:
            num_stats = calc.getNumStats()
        except AttributeError:
            num_stats = 1
    # Scalar counter for one statistic, vector counter otherwise.
    if num_stats == 1:
        count = 0
    else:
        count = numpy.zeros(num_stats)
    for rep in range(num_reps):
        # get sample positions
        sampled_indices = sampled_places(block_size, signal_length)
        new_signal = signal.take(sampled_indices)
        if seq_to_symbols is not None:
            symbolic = seq_to_symbols(new_signal)
            data = symbolic
        else:
            data = new_signal
        sim_stat = calc(data)
        # count if > than observed
        if num_stats > 1:
            count[sim_stat >= obs_stat] += 1
        elif sim_stat >= obs_stat:
            count += 1
    return obs_stat, count / num_reps
# def percrb4():
# """Return SNR and CRB for periodicity estimates from symbolic signals"""
# # TODO: complete the function according to Julien's percrb4.m
# pass
#
| 28.046512 | 88 | 0.61194 |
ca1d27b8cc6cdac0f667035199bf809c88b3e5b1 | 4,013 | py | Python | yoyo/tests/test_connections.py | Varriount/Yoyo | 4530c575af6e749875ca12aeaba574d1c8ac21e4 | [
"Apache-2.0"
] | null | null | null | yoyo/tests/test_connections.py | Varriount/Yoyo | 4530c575af6e749875ca12aeaba574d1c8ac21e4 | [
"Apache-2.0"
] | null | null | null | yoyo/tests/test_connections.py | Varriount/Yoyo | 4530c575af6e749875ca12aeaba574d1c8ac21e4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Oliver Cope
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from mock import patch, call, MagicMock
import pytest
from yoyo.connections import parse_uri, BadConnectionURI
class MockDatabaseError(Exception):
    """Stand-in for a DB driver's DatabaseError in the mocked dbapi module."""

    pass
class TestParseURI:
    """Tests for yoyo.connections.parse_uri covering field extraction,
    percent-escaping, round-tripping, and sqlite path handling."""

    @pytest.mark.skipif(
        sys.version_info < (2, 7, 4),
        reason="Requires python>=2.7.4 " "(http://bugs.python.org/issue7904)",
    )
    def test_it_parses_all_fields(self):
        parsed = parse_uri("protocol://scott:tiger@server:666/db?k=1")
        # Tuple order: scheme, username, password, host, port, database, args.
        assert tuple(parsed) == (
            "protocol",
            "scott",
            "tiger",
            "server",
            666,
            "db",
            {"k": "1"},
        )
    def test_it_parses_escaped_username(self):
        # %40 decodes to '@' in the username component.
        parsed = parse_uri("protocol://scott%40example.org:tiger@localhost/db")
        assert parsed.username == "scott@example.org"
    def test_it_requires_scheme(self):
        with pytest.raises(BadConnectionURI):
            parse_uri("//scott:tiger@localhost/db")
    def test_it_roundtrips(self):
        # Re-serializing a parsed URI must reproduce the input exactly,
        # including '+' and percent-escapes in query values.
        cases = [
            "proto://scott%40example.org:tiger@localhost/db",
            "proto://localhost/db?a=1+2",
            "proto://localhost/db?a=a%3D1",
        ]
        for case in cases:
            parsed = parse_uri(case)
            assert parsed.uri == case
    def test_it_returns_relative_paths_for_sqlite(self):
        # Three slashes -> path relative to the cwd.
        assert parse_uri("sqlite:///foo/bar.db").database == "foo/bar.db"
    def test_it_returns_absolute_paths_for_sqlite(self):
        # Four slashes -> absolute filesystem path.
        assert parse_uri("sqlite:////foo/bar.db").database == "/foo/bar.db"
    def test_passwords_with_slashes_dont_break_netloc(self):
        parsed = parse_uri("postgresql://user:a%2Fb@localhost:5432/db")
        assert parsed.netloc == 'user:a%2Fb@localhost:5432'
        assert parsed.port == 5432
        assert parsed.password == 'a/b'
@patch(
"yoyo.backends.get_dbapi_module",
return_value=MagicMock(
DatabaseError=MockDatabaseError, paramstyle="qmark"
),
)
def test_connections(get_dbapi_module):
from yoyo import backends
u = parse_uri("odbc://scott:tiger@db.example.org:42/northwind?foo=bar")
cases = [
(
backends.ODBCBackend,
"pyodbc",
call(
"UID=scott;PWD=tiger;ServerName=db.example.org;"
"Port=42;Database=northwind;foo=bar"
),
),
(
backends.MySQLBackend,
"pymysql",
call(
user="scott",
passwd="tiger",
host="db.example.org",
port=42,
db="northwind",
foo="bar",
),
),
(
backends.SQLiteBackend,
"sqlite3",
call(
"file:northwind?cache=shared",
uri=True,
detect_types=get_dbapi_module("sqlite3").PARSE_DECLTYPES,
),
),
(
backends.PostgresqlBackend,
"psycopg2",
call(
user="scott",
password="tiger",
port=42,
host="db.example.org",
dbname="northwind",
foo="bar",
),
),
]
for cls, driver_module, connect_args in cases:
cls(u, "_yoyo_migration")
assert get_dbapi_module.call_args == call(driver_module)
assert get_dbapi_module().connect.call_args == connect_args
| 30.172932 | 79 | 0.57837 |
a9bccb9c56473a7c84d744f92e85edb4d9714d6b | 7,808 | py | Python | insights/util/__init__.py | akshay196/insights-core | 598865e6563119089c77152599300de38a77c72c | [
"Apache-2.0"
] | null | null | null | insights/util/__init__.py | akshay196/insights-core | 598865e6563119089c77152599300de38a77c72c | [
"Apache-2.0"
] | 10 | 2018-04-16T15:38:04.000Z | 2018-05-15T18:43:02.000Z | insights/util/__init__.py | akshay196/insights-core | 598865e6563119089c77152599300de38a77c72c | [
"Apache-2.0"
] | null | null | null | import collections
import inspect
import logging
import functools
import platform
import os
import warnings
TMP_DIR = os.path.join("/tmp", "insights-web")
logger = logging.getLogger(__name__)
# English words (and numeric strings) mapped to their boolean value.
TRUTH = {
    "true": True,
    "false": False,
    "yes": True,
    "no": False,
    "1": True,
    "0": False
}


def parse_bool(s, default=False):
    """Return the boolean meaning of an English string.

    ``None`` and unrecognised strings yield ``default``.
    """
    if s is None:
        return default
    return TRUTH.get(s.lower(), default)
def which(cmd, env=None):
    """Locate ``cmd`` on the PATH, mimicking the shell's ``which``.

    Args:
        cmd: command name, or an absolute path (checked directly).
        env: optional mapping used in place of ``os.environ``; a falsy value
            (None, {}) falls back to the real environment.

    Returns:
        The resolved executable path, or None when not found.
    """
    env = env or os.environ
    if cmd.startswith("/"):
        if os.access(cmd, os.X_OK) and os.path.isfile(cmd):
            return cmd
        return None
    # A mapping without PATH previously raised AttributeError
    # (None.split); treat a missing PATH the same as an empty one.
    paths = env.get("PATH", "").split(os.pathsep)
    for path in paths:
        c = os.path.join(path, cmd)
        if os.access(c, os.X_OK) and os.path.isfile(c):
            return c
    return None
class KeyPassingDefaultDict(collections.defaultdict):
    """A defaultdict whose factory receives the missing key as an argument."""

    def __init__(self, *args, **kwargs):
        super(KeyPassingDefaultDict, self).__init__(*args, **kwargs)

    def __missing__(self, key):
        factory = self.default_factory
        if not factory:
            # No factory configured: defer to defaultdict, which raises KeyError.
            return super(KeyPassingDefaultDict, self).__missing__(key)
        self[key] = factory(key)
        return self[key]
def enum(*e):
    """Create a simple enum-like class mapping each name to its position."""
    members = {name: index for index, name in enumerate(e)}
    return type("Enum", (), members)
def defaults(default=None):
    """
    Decorator factory: catch any exception raised by the wrapped function
    and return `default` instead.

    Parameters
    ----------
    default : object
        The value to return when the wrapped function raises.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except Exception:
                return default
        return wrapper
    return decorator
def keys_in(items, *args):
    """
    Use this utility function to ensure multiple keys are in one or more
    dicts. Returns `True` if all keys are present in at least one of the
    given dicts, otherwise returns `False`.

    :Parameters:
    - `items`: Iterable of required keys
    - Variable number of subsequent arguments, each one being a dict to check.
    """
    # Materialize once so generator/iterator inputs are handled correctly
    # (the previous implementation iterated `items` multiple times).
    required = list(items)
    return all(any(key in d for d in args) for key in required)
def logging_level(logger, level):
    """Decorator factory: run the wrapped function only when ``logger`` is
    enabled at ``level`` or more verbose; otherwise return None.
    """
    def decorator(func):
        @functools.wraps(func)
        def check_log_level(*args, **kwargs):
            if logger.getEffectiveLevel() > level:
                return None
            return func(*args, **kwargs)
        return check_log_level
    return decorator
def deprecated(func, solution):
    """
    Mark a parser or combiner as deprecated, and give a message of how to fix
    this. This will emit a warning in the logs when the function is used.
    When combined with modifications to conftest, this causes deprecations to
    become fatal errors when testing, so they get fixed.
    Arguments:
        func (function): the function or method being deprecated.
        solution (str): a string describing the replacement class, method or
            function that replaces the thing being deprecated. For example,
            "use the `fnord()` function" or "use the `search()` method with
            the parameter `name='(value)'`".
    """
    def get_name_line(src):
        # Skip decorator lines ('@...') to report the def/class line itself;
        # returns None when every line contains '@'.
        for line in src:
            if "@" not in line:
                return line.strip()
    path = inspect.getsourcefile(func)
    src, line_no = inspect.getsourcelines(func)
    name = get_name_line(src) or "Unknown"
    the_msg = "<{c}> at {p}:{l} is deprecated: {s}".format(
        c=name, p=path, l=line_no, s=solution
    )
    warnings.warn(the_msg, DeprecationWarning)
def make_iter(item):
    """Return `item` unchanged if it is already a list, otherwise wrap it in one."""
    return item if isinstance(item, list) else [item]
def ensure_dir(path, dirname=False):
    """
    Ensure that the directory `path` exists, creating it (and any missing
    parents) if necessary.

    Parameters
    ----------
    path : str
        Directory to ensure, or a file path when `dirname` is True.
    dirname : bool
        When True, operate on ``os.path.dirname(path)`` instead of `path`.

    Returns
    -------
    bool
        True when the directory exists on return, False when it could not
        be created.
    """
    log = logging.getLogger(__name__)
    try:
        if dirname:
            path = os.path.dirname(path)
        log.debug("Ensure dir '%s'", path)
        # An already-existing directory satisfies "ensure"; the previous
        # version let makedirs raise here and wrongly reported False.
        if not os.path.isdir(path):
            os.makedirs(path)
    except Exception as e:
        # Lost a creation race with another process/thread: the directory
        # exists now, so this still counts as success.
        if os.path.isdir(path):
            return True
        if log.level <= logging.DEBUG:
            log.debug("Failed to ensure dir: %s", e)
        return False
    return True
def _create_log_record(msg, date, level, machine_id):
log_record = logging.LogRecord("upload_client", logging.getLevelName(level),
machine_id, None, msg.strip(), None, None)
log_record.asctime = date
return log_record
class objectview(object):
    """Thin wrapper exposing a dict's keys as attributes."""
    def __init__(self, dict_):
        # Bind the instance __dict__ to the caller's dict directly: attribute
        # reads/writes go straight to (and mutate) that same dict — no copy.
        self.__dict__ = dict_
def parse_keypair_lines(content, delim='|', kv_sep='='):
    """
    Parses a set of entities, where each entity is a set of key-value pairs
    contained all on one line. Each entity is parsed into a dictionary and
    added to the list returned from this function.

    Parameters
    ----------
    content : iterable of str
        Lines to parse; falsy lines (and a falsy `content`) are skipped.
    delim : str
        Separator between key-value pairs on a line.
    kv_sep : str
        Separator between a key and its value.

    Returns
    -------
    list of dict
        One dict per non-empty input line.
    """
    r = []
    if content:
        for row in [line for line in content if line]:
            item_dict = {}
            for item in row.split(delim):
                # Split on the *first* kv_sep only, so values may themselves
                # contain it (e.g. base64 payloads ending in '='); the old
                # unrestricted split raised ValueError on such input.
                key, value = [i.strip("'\"").strip() for i in item.strip().split(kv_sep, 1)]
                item_dict[key] = value
            r.append(item_dict)
    return r
def rsplit(_str, seps):
    """
    Splits _str by the first sep in seps that is found from the right side.
    Returns a tuple without the separator, or None when no separator occurs.
    """
    for idx, ch in enumerate(reversed(_str)):
        if ch in seps:
            # Use a positive split point. The previous negative-index form
            # (`_str[-idx:]`) returned the *whole* string — separator
            # included — when the separator was the last character, because
            # -0 == 0 in Python slicing.
            pos = len(_str) - 1 - idx
            return _str[:pos], _str[pos + 1:]
def check_path(path):
    """Return True when `path` exists on disk, logging the check via the module-level `logger`."""
    found = os.path.exists(path)
    logger.debug("Checking for path [%s], found = %s.", path, found)
    return found
def get_addr():
    """Return this host's web endpoint URL, using the port from insights' web settings."""
    # Imported lazily so merely loading this module doesn't require settings.
    from insights.settings import web as config
    return "http://%s:%s" % (platform.node(), config["port"])
def get_path_for_system_id(category, system_id):
    """Return the per-system temp path: TMP_DIR/<category>/<id prefix>/<id>."""
    # The two-character id prefix shards systems across subdirectories.
    return os.path.join(TMP_DIR, category, system_id[:2], system_id)
def word_wrap(line, wrap_len=72):
    """
    Yield `line` wrapped to at most `wrap_len` characters, breaking at the
    last space within the limit; each yielded segment is stripped of
    surrounding whitespace. A segment with no space in its first `wrap_len`
    characters is hard-broken at `wrap_len` (the previous version silently
    dropped such text, because the generator yielded nothing for it).
    """
    if len(line) > wrap_len:
        for i, c in enumerate(reversed(line[:wrap_len])):
            if c == " ":
                break_point = wrap_len - i
                yield line[:break_point].strip()
                for more in word_wrap(line[break_point:], wrap_len):
                    yield more
                break
        else:
            # No space available to break on: hard-break so no text is lost.
            yield line[:wrap_len].strip()
            for more in word_wrap(line[wrap_len:], wrap_len):
                yield more
    else:
        yield line.strip()
def case_variants(*elements):
    """
    For configs which take case-insensitive options, it is necessary to extend the list with
    various common case variants (all combinations are not practical). In the future, this should
    be removed, when parser filters are made case-insensitive.

    Args:
        *elements (str): list of elements which need case-sensitive expansion, you should use
            default case such as `Ciphers`, `MACs`, `UsePAM`, `MaxAuthTries`

    Returns:
        list: list of all expanded elements
    """
    def decamel(word):
        # Lower-case any letter that directly follows an upper-case one, so
        # e.g. `MACs` becomes `Macs` and `UsePAM` becomes `UsePam`.
        chars = [word[:1]]
        for prev, cur in zip(word, word[1:]):
            chars.append(cur.lower() if prev.isupper() else cur)
        return "".join(chars)

    expanded = []
    for element in elements:
        candidates = (
            element,
            decamel(element),
            element.lower(),
            element.upper(),
            element.title(),
        )
        for candidate in candidates:
            # Keep the first occurrence only, preserving insertion order.
            if candidate not in expanded:
                expanded.append(candidate)
    return expanded
| 28.49635 | 97 | 0.604892 |
1eef578a496b10f4f5962426bb1cfdb80ec976b5 | 3,868 | py | Python | blink/identify.py | S-Anmol/MishMash | 6b260cf61c3a960d06a47dbf64b78332c68d3229 | [
"MIT"
] | null | null | null | blink/identify.py | S-Anmol/MishMash | 6b260cf61c3a960d06a47dbf64b78332c68d3229 | [
"MIT"
] | 18 | 2020-06-06T01:00:30.000Z | 2022-03-29T22:29:04.000Z | blink/identify.py | S-Anmol/MishMash | 6b260cf61c3a960d06a47dbf64b78332c68d3229 | [
"MIT"
import face_recognition
import cv2
import numpy as np
# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the
# other example, but it includes some basic performance tweaks to make things run a lot faster:
# 1. Process each video frame at 1/4 resolution (though still display it at full resolution)
# 2. Only detect faces in every other frame of video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# Load a sample picture and learn how to recognize it.
vishal_image = face_recognition.load_image_file("vishal.jpg")
vishal_face_encoding = face_recognition.face_encodings(vishal_image)[0]
# Load a second sample picture and learn how to recognize it.
# Create arrays of known face encodings and their names
known_face_encodings = [
    vishal_face_encoding,
]
known_face_names = [
    "Vishal",
]
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # NOTE(review): `ret` is never checked — if the grab fails, `frame` is
    # None and cv2.resize below will raise. Consider `if not ret: break`.
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]
    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            # # If a match was found in known_face_encodings, just use the first one.
            # if True in matches:
            #     first_match_index = matches.index(True)
            #     name = known_face_names[first_match_index]
            # Or instead, use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]
            face_names.append(name)
    # Flip the skip flag: the expensive recognition above runs on alternate
    # frames only; the drawing below reuses the last results in between.
    process_this_frame = not process_this_frame
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    # Display the resulting image
    cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
| 38.68 | 116 | 0.691055 |
3839e69ccbfad005398b3eb8a3c198cb1adfb097 | 4,579 | py | Python | ryu/lib/packet/packet_utils.py | umkcdcrg01/ryu_openflow | 37ed5b88f7d119344e07c95314a7450235c037a8 | [
"Apache-2.0"
] | 269 | 2015-03-08T11:32:45.000Z | 2022-03-30T11:18:16.000Z | ryu/lib/packet/packet_utils.py | umkcdcrg01/ryu_openflow | 37ed5b88f7d119344e07c95314a7450235c037a8 | [
"Apache-2.0"
] | 2 | 2018-12-23T13:52:26.000Z | 2021-10-31T13:01:43.000Z | ryu/lib/packet/packet_utils.py | umkcdcrg01/ryu_openflow | 37ed5b88f7d119344e07c95314a7450235c037a8 | [
"Apache-2.0"
] | 205 | 2015-01-13T04:52:25.000Z | 2022-03-30T13:37:33.000Z | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import socket
import struct
from ryu.lib import addrconv
def carry_around_add(a, b):
    """One's-complement 16-bit addition: any carry out of bit 15 wraps back in."""
    total = a + b
    carry = total >> 16
    return (total & 0xffff) + carry
def checksum(data):
    """Compute the standard 16-bit Internet checksum (RFC 1071) over `data`."""
    # NOTE(review): Python 2-style byte handling ('\x00' str pad, str());
    # confirm target interpreter — on Python 3 this needs bytes throughout.
    if len(data) % 2:
        # Pad odd-length input with a zero byte so it splits into 16-bit words.
        data += '\x00'
    data = str(data) # input can be bytearray.
    s = sum(array.array('H', data))
    # Fold carries above bit 15 back into the low 16 bits (twice suffices).
    s = (s & 0xffff) + (s >> 16)
    s += (s >> 16)
    # One's complement of the folded sum, swapped between host/network order.
    return socket.ntohs(~s & 0xffff)
# avoid circular import
_IPV4_PSEUDO_HEADER_PACK_STR = '!4s4sxBH'
_IPV6_PSEUDO_HEADER_PACK_STR = '!16s16sI3xB'
def checksum_ip(ipvx, length, payload):
    """
    calculate checksum of IP pseudo header

    IPv4 pseudo header
    UDP RFC768
    TCP RFC793 3.1
    0      7 8     15 16    23 24    31
    +--------+--------+--------+--------+
    |          source address           |
    +--------+--------+--------+--------+
    |        destination address        |
    +--------+--------+--------+--------+
    |  zero  |protocol|   length        |
    +--------+--------+--------+--------+

    IPv6 pseudo header
    RFC2460 8.1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    +                                                               +
    |                                                               |
    +                         Source Address                        +
    |                                                               |
    +                                                               +
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    +                                                               +
    |                                                               |
    +                      Destination Address                      +
    |                                                               |
    +                                                               +
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                   Upper-Layer Packet Length                   |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                      zero                     |  Next Header  |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    Arguments:
        ipvx: an ipv4 or ipv6 header object exposing `version`, `src`/`dst`
              text addresses, and `proto` (v4) or `nxt` (v6).
        length: upper-layer packet length used in the pseudo header.
        payload: serialized upper-layer packet.
    Returns the 16-bit Internet checksum of pseudo header + payload.
    """
    # Build the pseudo header for whichever IP version this packet carries.
    if ipvx.version == 4:
        header = struct.pack(_IPV4_PSEUDO_HEADER_PACK_STR,
                             addrconv.ipv4.text_to_bin(ipvx.src),
                             addrconv.ipv4.text_to_bin(ipvx.dst),
                             ipvx.proto, length)
    elif ipvx.version == 6:
        header = struct.pack(_IPV6_PSEUDO_HEADER_PACK_STR,
                             addrconv.ipv6.text_to_bin(ipvx.src),
                             addrconv.ipv6.text_to_bin(ipvx.dst),
                             length, ipvx.nxt)
    else:
        raise ValueError('Unknown IP version %d' % ipvx.version)
    # Checksum is taken over the pseudo header followed by the payload.
    buf = header + payload
    return checksum(buf)
_MODX = 4102
def fletcher_checksum(data, offset):
    """
    Fletcher Checksum -- Refer to RFC1008
    calling with offset == _FLETCHER_CHECKSUM_VALIDATE will validate the
    checksum without modifying the buffer; a valid checksum returns 0.
    """
    c0 = 0
    c1 = 0
    pos = 0
    length = len(data)
    # Work on a local copy; the caller's buffer is never modified.
    data = bytearray(data)
    # Zero the two checksum bytes so they don't contribute to the sums.
    data[offset:offset + 2] = [0] * 2
    while pos < length:
        # Defer the modulo: sums of up to _MODX bytes cannot overflow before
        # the reduction, so we only take % 255 once per chunk (RFC 1008 opt).
        tlen = min(length - pos, _MODX)
        for d in data[pos:pos + tlen]:
            c0 += d
            c1 += c0
        c0 %= 255
        c1 %= 255
        pos += tlen
    # Solve for check bytes x, y so the checksum over the whole buffer,
    # with x and y placed at `offset`, comes out to zero (RFC 1008).
    x = ((length - offset - 1) * c0 - c1) % 255
    if x <= 0:
        x += 255
    y = 510 - c0 - x
    if y > 255:
        y -= 255
    data[offset] = x
    data[offset + 1] = y
    # Combined 16-bit checksum value: x in the high byte, y in the low byte.
    return (x << 8) | (y & 0xff)
| 33.423358 | 72 | 0.389168 |
5eadc7c231376ea282676802b0603e744ee7faab | 4,994 | py | Python | src/neocities_sync/cmdline.py | kugland/neocities-sync | 35b75871b8a0c1fce793ea82a2641a24d1a959e0 | [
"MIT"
] | 2 | 2021-11-30T17:39:56.000Z | 2021-12-05T02:00:36.000Z | src/neocities_sync/cmdline.py | kugland/neocities-sync | 35b75871b8a0c1fce793ea82a2641a24d1a959e0 | [
"MIT"
] | null | null | null | src/neocities_sync/cmdline.py | kugland/neocities-sync | 35b75871b8a0c1fce793ea82a2641a24d1a959e0 | [
"MIT"
] | null | null | null | """Parse command line arguments."""
from argparse import ArgumentParser
from dataclasses import dataclass
from typing import List
from .config import config_file_path, config_file_path_unexpanded
@dataclass
class CmdlineOptions:
    """Command line options."""
    sites: List[str] = None      # sites chosen via -s/--site; empty list means "all sites"
    quietness: int = 0           # (-q count) minus (-v count); negative = more verbose
    dry_run: bool = False        # --dry-run: do not actually upload anything
    config_file: str = None      # path given via -C/--config-file
    help: bool = False           # whether -h/--help was requested
def parse(args: List[str]) -> CmdlineOptions:
    """
    Parse command line arguments.
    Parameters
    ----------
    args : List[str]
        List of command line arguments.
    Returns
    -------
    CmdlineOptions
        Command line options.
    >>> parse(['-C', 'myconf', '-qqq', '--dry-run', '--site=foo.com', '--site=bar.com'])
    CmdlineOptions(sites=['foo.com', 'bar.com'], quietness=3, dry_run=True, config_file='myconf', help=False)
    >>> parse(['-C', 'myconf', '-v', '--site=foo.com', '--site=bar.com'])
    CmdlineOptions(sites=['foo.com', 'bar.com'], quietness=-1, dry_run=False, config_file='myconf', help=False)
    >>> parse(['-C', 'myconf', '-vvqds', 'site'])
    CmdlineOptions(sites=['site'], quietness=-1, dry_run=True, config_file='myconf', help=False)
    """
    # add_help=False: we render our own help text (help_message) below
    # instead of argparse's auto-generated one.
    argparser = ArgumentParser(
        description="Sync local directories with a neocities.org remote.",
        add_help=False,
    )
    argparser.add_argument(
        "-s",
        "--site",
        help="which site to sync (can be used multiple times)",
        action="append",
        default=[],
    )
    argparser.add_argument('-C', '--config-file', default=config_file_path)
    argparser.add_argument("-d", "--dry-run", action="store_true")
    argparser.add_argument("-v", "--verbose", action="count", default=0)
    argparser.add_argument("-q", "--quiet", action="count", default=0)
    argparser.add_argument('-h', '--help', action='store_true', default=False)
    parsed = argparser.parse_args(args)
    # -h/--help short-circuits: print the custom help text and exit 0,
    # so the CmdlineOptions below is only built for real runs.
    if parsed.help:
        print(help_message())
        exit(0)
    return CmdlineOptions(
        sites=parsed.site,
        quietness=parsed.quiet - parsed.verbose,
        dry_run=parsed.dry_run,
        config_file=parsed.config_file,
        help=parsed.help,
    )
def help_message() -> str:
    """
    Return the full CLI help text.

    Interpolates `config_file_path_unexpanded` (module-level import) so the
    displayed default config location matches the real one.

    Returns
    -------
    str
        Help message.
    """
    # Usage line fixed: it previously read "neocities-sync options]" (missing
    # the opening bracket) and advertised "-c CONFIG" although the parser
    # defines "-C".
    msg = f"""neocities-sync
Sync local directories with neocities.org sites.
Usage:
    neocities-sync [options] [--dry-run] [-C CONFIG] [-s SITE1] [-s SITE2] ...
Options:
    -C CONFIG_FILE  Path to the config file to use.
                    (defaults to "{config_file_path_unexpanded}".)
    -s SITE         Which site to sync (as specified in the config file).
                    The default is to sync all sites in the config file.
    --dry-run       Do not actually upload anything.
    -v              Verbose output.
    -q              Quiet output.
    -h, --help      Show this help message and exit.
Config file:
    The config file is an ini file, located at "{config_file_path_unexpanded}".
    Each section of the config file describes a different site (the name of the
    section doesn't need to be the same as the site's domain, since the api_key
    suffices to identify the site).
    The keys of the config file are:
        api_key (str) [required]
            The api key of the site.
        root_dir (path) [required]
            The local directory to sync.
        sync_disallowed (yes/no) [default: no]
            Whether to sync files that are only allowed for paying users.
        sync_hidden (yes/no) [default: no]
            Whether to sync hidden files.
        sync_vcs (yes/no) [default: no]
            Whether to sync version control files.
        allowed_extensions (list of str) [default: not set]
            Which file extensions to sync. If not set, all files are synced.
        remove_empty_dirs (yes/no) [default: yes]
            Whether to remove empty directories after sync.
    Example config:
        [site1]
        api_key = 6b9b522e7d8d93e88c464aafc421a61b
        root_dir = ~/path/to/site1
        allowed_extensions = .html .css .js
        remove_empty_dirs = no
        [site2]
        api_key = 78559e6ebc35fe33eec21de05666a243
        root_dir = /var/www/path/to/site2
        allowed_extensions = .html .css .js .woff2
    .neocitiesignore
        In any subdirectory of the root directory, a file named ".neocitiesignore"
        can be used to specify which files to ignore. The syntax is the same as
        the one for ".gitignore".
    Credits:
        This software was developed by Andre Kugland <kugland@gmail.com>."""
    return msg
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30.45122 | 111 | 0.588706 |
88026160ff81ab0d77d58fba18f73fa09f515df5 | 460 | py | Python | data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_warren_large_fog_red.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_warren_large_fog_red.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_warren_large_fog_red.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the Tangible template for this lair asset (autogenerated file:
	hand edits belong only between the MODIFICATIONS markers below)."""
	result = Tangible()
	result.template = "object/tangible/lair/base/shared_poi_all_lair_warren_large_fog_red.iff"
	result.attribute_template_id = -1
	result.stfName("lair_n","warren")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
649ec0ac2cdb92cbf61a4d2da6320d334cbc683f | 2,097 | py | Python | color_recognition/scripts/color_recognition_server.py | Nishida-Lab/motoman_interaction | bbe5119c08ef4e84d5559e243dcc19e19de55c77 | [
"BSD-3-Clause"
] | 1 | 2018-08-20T07:16:23.000Z | 2018-08-20T07:16:23.000Z | color_recognition/scripts/color_recognition_server.py | Nishida-Lab/motoman_interaction | bbe5119c08ef4e84d5559e243dcc19e19de55c77 | [
"BSD-3-Clause"
] | 4 | 2018-01-16T14:40:11.000Z | 2018-02-02T03:58:08.000Z | color_recognition/scripts/color_recognition_server.py | Nishida-Lab/motoman_interaction | bbe5119c08ef4e84d5559e243dcc19e19de55c77 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import rospy
from motoman_interaction_msgs.msg import StringArrayWithStatus
from motoman_interaction_msgs.msg import ImageArray
from motoman_interaction_msgs.srv import *
from cv_bridge import CvBridge, CvBridgeError
import cv2
from recognition_class_ import *
class ColorRecognitionServer:
    """ROS service node that classifies the colour of each image in a request."""
    # NOTE(review): the node/service names spell "recongnition" (sic);
    # renaming them would break existing clients, so they are left as-is.
    def __init__(self):
        rospy.init_node('color_recongnition_server')
        s = rospy.Service('color_recongnition', ImageRecognitionWithStatus, self.callback)
        self.bridge = CvBridge()
        self.recognition = ColorRecognition()
        self.image_array = []
        print "waiting for client to connect"
    def get_color_string_array(self,ros_image_array):
        # Convert each ROS image to OpenCV, run colour recognition, and
        # collect (result string, status) per image. Failed conversions are
        # reported in-band as ("error", 0) instead of aborting the batch.
        color_string_array = []
        status_array = []
        # image_cnt = 0
        for ros_image in ros_image_array:
            try:
                cv_image = self.bridge.imgmsg_to_cv2(ros_image, "8UC3")
                recognition_result, recognition_status = self.recognition(cv_image)
                color_string_array.append(recognition_result)
                status_array.append(recognition_status)
                # cv2.imwrite(str(image_cnt)+".jpg", cv_image)
                # image_cnt += 1
            except CvBridgeError as e:
                print(e)
                color_string_array.append("error")
                status_array.append(0)
        return color_string_array, status_array
    def callback(self,data):
        # Service handler: deep-copy the request images so recognition works
        # on a snapshot, then echo the request header into the response.
        color_msg = StringArrayWithStatus()
        self.image_array = copy.deepcopy(data.images.images)
        color_msg.header.stamp = data.images.header.stamp
        color_msg.header.frame_id = data.images.header.frame_id
        color_msg.strings, color_msg.status = self.get_color_string_array(self.image_array)
        rospy.loginfo(color_msg.header.frame_id)
        rospy.loginfo(color_msg.strings)
        return ImageRecognitionWithStatusResponse(color_msg)
def main():
    """Start the colour-recognition service node and spin until shutdown."""
    ColorRecognitionServer()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
if __name__ == '__main__':
main()
| 27.592105 | 91 | 0.671435 |
948807f69e7efeb733ce61c512217dfe6e7038b7 | 5,186 | py | Python | var/spack/repos/builtin/packages/pocl/package.py | kehw/spack | 4f49b1a9301447a8cf880c99820cad65e5c2d7e3 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2020-09-10T22:50:08.000Z | 2021-01-12T22:18:54.000Z | var/spack/repos/builtin/packages/pocl/package.py | kehw/spack | 4f49b1a9301447a8cf880c99820cad65e5c2d7e3 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2021-01-08T22:23:53.000Z | 2022-03-30T11:08:17.000Z | var/spack/repos/builtin/packages/pocl/package.py | kehw/spack | 4f49b1a9301447a8cf880c99820cad65e5c2d7e3 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
from spack.package_test import compile_c_and_execute, compare_output_file
class Pocl(CMakePackage):
    """Portable Computing Language (pocl) is an open source implementation
    of the OpenCL standard which can be easily adapted for new targets
    and devices, both for homogeneous CPU and heterogeneous
    GPUs/accelerators."""
    homepage = "http://portablecl.org"
    url = "https://github.com/pocl/pocl/archive/v1.1.tar.gz"
    git = "https://github.com/pocl/pocl.git"
    version("master", branch="master")
    version('1.5', sha256='4fcf4618171727d165fc044d465a66e3119217bb4577a97374f94fcd8aed330e')
    version('1.4', sha256='ec237faa83bb1c803fbdf7c6e83d8a2ad68b6f0ed1879c3aa16c0e1dcc478742')
    version('1.3', sha256='6527e3f47fab7c21e96bc757c4ae3303901f35e23f64642d6da5cc4c4fcc915a')
    version('1.2', sha256='0c43e68f336892f3a64cba19beb99d9212f529bedb77f7879c0331450b982d46')
    version('1.1', sha256='1e8dd0693a88c84937754df947b202871a40545b1b0a97ebefa370b0281c3c53')
    version('1.0', sha256='94bd86a2f9847c03e6c3bf8dca12af3734f8b272ffeacbc3fa8fcca58844b1d4')
    version('0.14', sha256='2127bf925a91fbbe3daf2f1bac0da5c8aceb16e2a9434977a3057eade974106a')
    version('0.13', sha256='a17f37d8f26819c0c8efc6de2b57f67a0c8a81514fc9cd5005434e49d67499f9')
    version('0.12', sha256='5160d7a59721e6a7d0fc85868381c0afceaa7c07b9956c9be1e3b51e80c29f76')
    version('0.11', sha256='24bb801fb87d104b66faaa95d1890776fdeabb37ad1b12fb977281737c7f29bb')
    version('0.10', sha256='e9c38f774a77e61f66d850b705a5ba42d49356c40e75733db4c4811e091e5088')
    # This is Github's pocl/pocl#373
    patch("uint.patch", when="@:0.13")
    patch("vecmathlib.patch", when="@:0.13")
    # Note: We should describe correctly which pocl versions provide
    # which version of the OpenCL standard
    # OpenCL standard versions are: 1.0, 1.1, 1.2, 2.0, 2.1, 2.2
    provides('opencl@:2.0')
    depends_on("cmake @2.8.12:", type="build")
    depends_on("hwloc")
    depends_on("hwloc@:1.99.99", when="@:1.1.99")
    depends_on("libtool", type=("build", "link", "run"))
    depends_on("pkgconfig", type="build")
    # We don't request LLVM's shared libraries because these are not
    # enabled by default, and also because they fail to build for us
    # (see #1616)
    # These are the supported LLVM versions
    depends_on("llvm +clang @6.0:10.0", when="@master")
    depends_on("llvm +clang @6.0:10.0", when="@1.5")
    depends_on("llvm +clang @6.0:9.0", when="@1.4")
    depends_on("llvm +clang @5.0:8.0", when="@1.3")
    depends_on("llvm +clang @5.0:7.0", when="@1.2")
    depends_on("llvm +clang @5.0:6.0", when="@1.1")
    depends_on("llvm +clang @4.0:5.0", when="@1.0")
    depends_on("llvm +clang @3.7:4.0", when="@0.14")
    depends_on("llvm +clang @3.7:3.8", when="@0.13")
    depends_on("llvm +clang @3.2:3.7", when="@0.12")
    depends_on("llvm +clang @3.2:3.6", when="@0.11")
    depends_on("llvm +clang @3.2:3.5", when="@0.10")
    variant("distro", default=False,
            description=("Support several CPU architectures, "
                         "suitable e.g. in a build "
                         "that will be made available for download"))
    variant("icd", default=False,
            description="Support a system-wide ICD loader")
    depends_on('ocl-icd', when='+icd')
    def url_for_version(self, version):
        """Return the source URL: GitHub for >= 1.0, portablecl.org before that."""
        if version >= Version('1.0'):
            url = "https://github.com/pocl/pocl/archive/v{0}.tar.gz"
        else:
            url = "http://portablecl.org/downloads/pocl-{0}.tar.gz"
        return url.format(version.up_to(2))
    def cmake_args(self):
        """Assemble pocl-specific CMake options from the spec's variants."""
        spec = self.spec
        args = ["-DINSTALL_OPENCL_HEADERS=ON"]
        if "~shared" in spec["llvm"]:
            args += ["-DSTATIC_LLVM"]
        if "+distro" in spec:
            args += ["-DKERNELLIB_HOST_CPU_VARIANTS=distro"]
        args += ["-DENABLE_ICD=%s" % ("ON" if "+icd" in spec else "OFF")]
        return args
    @run_after('install')
    def symlink_opencl(self):
        """Expose the installed CL headers under the conventional OpenCL/ name."""
        os.symlink("CL", self.prefix.include.OpenCL)
    @run_after('install')
    @on_package_attributes(run_tests=True)
    def check_install(self):
        """Compile and run example1.c against the install and diff its output."""
        # Build and run a small program to test the installed OpenCL library
        spec = self.spec
        print("Checking pocl installation...")
        checkdir = "spack-check"
        with working_dir(checkdir, create=True):
            source = join_path(os.path.dirname(self.module.__file__),
                               "example1.c")
            cflags = spec["pocl"].headers.cpp_flags.split()
            # ldflags = spec["pocl"].libs.ld_flags.split()
            ldflags = ["-L%s" % spec["pocl"].prefix.lib,
                       "-lOpenCL", "-lpoclu"]
            output = compile_c_and_execute(source, cflags, ldflags)
            compare_output_file(
                output,
                join_path(os.path.dirname(self.module.__file__),
                          "example1.out"))
| 44.324786 | 94 | 0.653876 |
a8eadf9e95926072e2a30c69aa323df0f81d3ae3 | 5,620 | py | Python | tests/test_randomized_propensity.py | zoltuz/bioscrape | bf0f1d4b6f68f265fc208733b75ec16c36a28688 | [
"MIT"
] | 16 | 2017-11-16T02:22:25.000Z | 2020-06-15T20:36:44.000Z | tests/test_randomized_propensity.py | zoltuz/bioscrape | bf0f1d4b6f68f265fc208733b75ec16c36a28688 | [
"MIT"
] | 39 | 2018-10-11T18:01:53.000Z | 2020-06-08T19:13:16.000Z | tests/test_randomized_propensity.py | zoltuz/bioscrape | bf0f1d4b6f68f265fc208733b75ec16c36a28688 | [
"MIT"
] | 15 | 2018-04-18T03:09:07.000Z | 2020-06-26T18:02:23.000Z | import warnings, os
# We don't want warnings in dependencies to show up in bioscrape's tests.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import numpy as np
import pylab as plt
import random
import pytest
import test_utils
from bioscrape.simulator import *
from bioscrape.types import *
# Seed RNG value. All tests use this value.
seed = 54173
# Set true to get more diagnostic prints
debug = False
# Parameter ranges to randomly choose parameters (on a log scale)
param_min = -4
param_max = 4
# parameter names required for each propensity (general will be treated by
# itself)
propensity_param_requirements = {
'massaction':['k'],
'hillpositive':['k', 'K', 'n'],
'hillnegative':['k', 'K', 'n'],
'proportionalhillpositive':["k", "K", "n"],
'proportionalhillnegative':["k", "K", "n"]
}
# species (passed in as parameters) requires for each propensity (general
# will be treated by itself)
propensity_species_requirements = {
'hillpositive':['s1'],
'hillnegative':['s1'],
'proportionalhillpositive':['s1', 'd'],
'proportionalhillnegative':['s1', 'd'],
"massaction":[]
}
all_prop_types = ['hillpositive',
'proportionalhillpositive',
'hillnegative',
'proportionalhillnegative',
'massaction', 'general']
TEST_NAME = "random_propensities"
def random_prop_model(prop_type):
    '''
    Returns a randomish model with a specified propensity type. Set to always
    return the same model, for any particular propensity type.
    WARNING: To produce consistent Models, this function resets the random seeds
    used during Model construction. This may have unexpected effects on random
    number generation outside this function as a side-effect.
    '''
    test_utils.set_seed(seed)
    #Will always consider the reaction: A+B-->C
    inputs = ["A", "B"]
    outputs = ["C"]
    all_species = inputs + outputs
    x0 = {"A":25, "B": 25, "C":0}
    if debug:
        print('simulating propensity type ', prop_type)
    param_dict = {}
    # Here we will use a random(ish) rational function
    if prop_type == 'general':
        # Build a rate string of the form (1 + sum_i c_i * s_i^e_i) /
        # (1 + sum_j c_j * s_j^e_j) with random coefficients and exponents.
        rate_str = "(1+"
        numerator_terms = np.random.randint(0, 5)
        denominator_terms = np.random.randint(0, 5)
        for i in range(numerator_terms):
            coef = str(round(np.exp(np.random.uniform(low = param_min,
                                                      high = param_max)), 3))
            exp = str(round(np.random.uniform(low = 0,high = param_max), 3))
            species = all_species[np.random.randint(len(all_species))]
            rate_str += coef + "*" + species + "^" + exp + "+"
        # Drop the trailing '+' and close the parenthesis (works for zero
        # terms too: "(1+" collapses to "(1)").
        rate_str = rate_str[:-1] + ")"
        rate_str += "/(1+"
        for i in range(denominator_terms):
            coef =str(round(np.exp(np.random.uniform(low = param_min,
                                                     high = param_max)), 3))
            exp = str(round(np.random.uniform(low = 0,high = param_max), 3))
            species = all_species[np.random.randint(len(all_species))]
            rate_str += coef + "*" + species + "^" + exp + "+"
        rate_str = rate_str[:-1] + ")"
        param_dict['rate'] = rate_str
    else:
        # Non-general propensities: draw each required rate parameter on a
        # log scale and bind the required species slots to the inputs.
        required_params = propensity_param_requirements[prop_type]
        required_species = propensity_species_requirements[prop_type]
        param_dict = {}
        for p in required_params:
            param_dict[p] = \
                round(np.exp(np.random.uniform(low = param_min,
                                               high = param_max)), 3)
        for i in range(len(required_species)):
            k = required_species[i]
            param_dict[k] = inputs[i]
    if debug:
        print('\t params =', param_dict)
    # Bioscrape reaction tuple: (inputs, outputs, propensity type, params).
    rxn = (inputs, outputs, prop_type, param_dict)
    M = Model(reactions = [rxn], initial_condition_dict = x0)
    M.set_species(x0)
    return M
# def test_debug():
# import bioscrape.sbmlutil
# bioscrape.sbmlutil.import_sbml("frozen_sbml_outputs/random_propensities/hillnegative.sbml.tmp")
@pytest.mark.parametrize('prop_type', all_prop_types)
def test_random_propensity_outputs(prop_type):
    """Simulate the frozen random model (deterministic and stochastic) and
    hand both trajectories to test_utils.check_sim_results for validation."""
    test_results = dict()
    model = random_prop_model(prop_type)
    timepoints = np.arange(0, 50, .01)
    results_d = py_simulate_model(timepoints, Model = model, stochastic = False, return_dataframe = False).py_get_result()
    results_s = py_simulate_model(timepoints, Model = model, stochastic = True, return_dataframe = False).py_get_result()
    test_results[prop_type + "_deterministic"] = results_d
    test_results[prop_type + "_stochastic"] = results_s
    test_utils.check_sim_results(TEST_NAME, test_results)
@pytest.mark.parametrize('prop_type', all_prop_types)
def test_random_propensity_sbml(prop_type):
    """Check SBML input/output for the frozen random model via test_utils.check_sbml_IO."""
    model_dict = dict()
    model_dict[prop_type] = random_prop_model(prop_type)
    test_utils.check_sbml_IO(TEST_NAME, model_dict)
def debug_random_prop_tests():
    '''
    This is not a test.
    Plot frozen results for debugging purposes.
    '''
    propensity_types = ['hillpositive', 'proportionalhillpositive',
                        'hillnegative', 'proportionalhillnegative',
                        'massaction', 'general']
    colors = {
        'massaction':'blue',
        'hillpositive': 'cyan',
        'hillnegative': 'red',
        'proportionalhillpositive': 'orange',
        'proportionalhillnegative': 'purple',
        'general': 'black'
    }
    test_loc = os.path.join(test_utils.frozen_results_loc, TEST_NAME)
    plt.figure()
    for prop_type in propensity_types:
        # Column 0 is time; column 3 is the plotted trajectory (presumably
        # species C, per the ylabel below — confirm against result layout).
        results_d = np.load(os.path.join(test_loc,
                                         prop_type + "_deterministic.npy"))
        plt.plot(results_d[:,0], results_d[:,3],
                 label = "deterministic "+str(prop_type),
                 # +"params = "+str(param_dict),
                 color = colors[prop_type])
        results_s = np.load(os.path.join(test_loc,
                                         prop_type + "_stochastic.npy"))
        # Stochastic runs are drawn dotted in the same per-propensity color.
        plt.plot(results_s[:,0], results_s[:,3], ":",
                 label = "stochastic "+str(prop_type),
                 # +"params = "+str(param_dict),
                 color = colors[prop_type])
    # plt.legend()
    plt.xlabel("time")
    plt.ylabel("C")
    plt.legend()
    plt.show()
a03b1098e2b08062c34ae83892b66e60509e74fb | 34 | py | Python | sdk/template/azure-template/azure/template/_version.py | 16pierre/azure-sdk-for-python | 1505d348c6660c1d5a39630522a059a2e3e38839 | [
"MIT"
] | 1 | 2020-05-12T23:29:15.000Z | 2020-05-12T23:29:15.000Z | sdk/template/azure-template/azure/template/_version.py | 16pierre/azure-sdk-for-python | 1505d348c6660c1d5a39630522a059a2e3e38839 | [
"MIT"
] | null | null | null | sdk/template/azure-template/azure/template/_version.py | 16pierre/azure-sdk-for-python | 1505d348c6660c1d5a39630522a059a2e3e38839 | [
"MIT"
] | null | null | null | # matches SEMVER
VERSION = "0.0.3"
6e27cf1e12d65217cd89d7b4e2f1ee181ecfda68 | 1,896 | py | Python | 2021/day14.py | astonshane/AdventOfCode | 25c7380e73eede3f79287de6a9dedc8314ab7965 | [
"MIT"
] | null | null | null | 2021/day14.py | astonshane/AdventOfCode | 25c7380e73eede3f79287de6a9dedc8314ab7965 | [
"MIT"
] | null | null | null | 2021/day14.py | astonshane/AdventOfCode | 25c7380e73eede3f79287de6a9dedc8314ab7965 | [
"MIT"
] | null | null | null | from pprint import pprint
import math
def factorize(base):
    """Count adjacent character pairs in the polymer template `base`."""
    counts = {}
    for left, right in zip(base, base[1:]):
        pair = left + right
        counts[pair] = counts.get(pair, 0) + 1
    return counts
def iterate(base, rules, steps):
    """Apply `steps` rounds of pair-insertion `rules` to the pair counts of `base`."""
    pairs = factorize(base)
    for _ in range(steps):
        next_pairs = {}

        def bump(pair, amount):
            next_pairs[pair] = next_pairs.get(pair, 0) + amount

        for pair, count in pairs.items():
            if pair in rules:
                # "AB -> C" turns every AB into an AC pair plus a CB pair.
                inserted = rules[pair]
                bump(pair[0] + inserted, count)
                bump(inserted + pair[1], count)
            else:
                # No rule: the pair survives unchanged.
                bump(pair, count)
        pairs = next_pairs
    return pairs
# What do you get if you take the quantity of the most common element and subtract the quantity of the least common element?
def score(pairs):
    """Most-common minus least-common element count for a pair-count map."""
    letter_totals = {}
    for pair, amount in pairs.items():
        for letter in pair:
            letter_totals[letter] = letter_totals.get(letter, 0) + amount
    # Every letter is shared by two adjacent pairs (except the polymer's
    # two end letters), so halving and rounding up recovers the true
    # per-letter counts.
    for letter in letter_totals:
        letter_totals[letter] = math.ceil(letter_totals[letter] / 2)
    #pprint(letter_totals)
    return max(letter_totals.values()) - min(letter_totals.values())
# Parse the puzzle input: first line is the polymer template, then a blank
# line, then one "XY -> Z" insertion rule per line.
with open("inputs/day14.txt") as f:
    base = f.readline().strip()
    f.readline()  # skip the blank separator line
    rules = {}
    for line in f:
        line = line.strip().split(" -> ")
        rules[line[0]] = line[1]

# Sanity checks against the worked example from the puzzle description.
#assert(iterate("NNCB", rules, 1) == factorize("NCNBCHB"))
#assert(iterate("NNCB", rules, 2) == factorize("NBCCNBBBCBHCB"))
#assert(iterate("NNCB", rules, 3) == factorize("NBBBCNCCNBBNBNBBCHBHHBCHB"))
#assert(iterate("NNCB", rules, 4) == factorize("NBBNBNBBCCNBCNCCNBBNBBNBBBNBBNBBCBHCBHHNHCBBCBHCB"))

print("part1", score(iterate(base, rules, 10)))
print("part2", score(iterate(base, rules, 40)))
b90a5b1ffd56522eaaa1c63b6798d07be85e5f3f | 2,789 | py | Python | jmapi/routes/data.py | genwch/jmapi | 7a473801dc2f7c487f6cf59046a2dbeaa93e4c2c | [
"MIT"
] | null | null | null | jmapi/routes/data.py | genwch/jmapi | 7a473801dc2f7c487f6cf59046a2dbeaa93e4c2c | [
"MIT"
] | null | null | null | jmapi/routes/data.py | genwch/jmapi | 7a473801dc2f7c487f6cf59046a2dbeaa93e4c2c | [
"MIT"
] | null | null | null | from abc import ABC
from fastapi_pagination import Page, paginate, add_pagination
from typing import Optional
from fastapi import APIRouter, Depends
from .auth import oauth2_scheme, token2user
import models as mod
from jmapi import lib as lib
class data_rt(ABC):
    """Generic paginated CRUD router for one registered data model.

    Looks up the model class named *mod_name* in the ``mod.__model__``
    registry and exposes paginated GET/POST endpoints (with and without
    a ``{code}`` path parameter) on a FastAPI ``APIRouter``.
    """

    def __init__(self, mod_name, debug=False):
        self.__mod_name = mod_name
        # NOTE(review): assumes ``mod.__model__`` maps names to model
        # classes; an unknown name returns None and the instantiation
        # below fails -- confirm against the ``models`` package.
        self.__data_mod = mod.__model__.get(mod_name)
        self.debug = debug
        self.data_mod = self.__data_mod(debug=self.debug)
        self.model = self.data_mod._model  # response schema, presumably a pydantic model
        self.router = self.set_route()

    def get(self, token: str = Depends(oauth2_scheme), code: Optional[str] = None):
        """Return the owner's records, optionally filtered by key == *code*."""
        owner = token2user(token)
        self.data_mod.set_owner(owner)
        filt = {}
        if code != None:
            # Filter every key column by the same path-parameter value.
            keycols = self.data_mod.cols(attr="key")
            filt = {c: code for c in keycols}
        df = self.data_mod.select(filt)
        if df.empty:
            return paginate([])
        rtn = self.data_mod.to_dict(df)
        return paginate(rtn)

    # NOTE(review): ``data: dict = {}`` is a mutable default that
    # ``data.update(...)`` below mutates; under FastAPI the default dict
    # also doubles as the request-body default, so changing it would
    # alter the API schema -- flagging rather than fixing here.
    def post(self, token: str = Depends(oauth2_scheme), code: Optional[str] = None, data: dict = {}):
        """Upsert one record for the authenticated owner.

        Returns the upserted rows as a page; raises HTTP 422 when the
        key values are rejected by the model's ``upsert``.
        """
        owner = token2user(token)
        self.data_mod.set_owner(owner)
        updcols = self.data_mod.cols(attr="updcol")
        keycols = self.data_mod.cols(attr="key")
        if code != None:
            # A path-parameter code overrides key values in the body.
            data.update({c: code for c in keycols})
        upd = {k: v for k, v in data.items() if k in updcols}
        key = {k: v for k, v in data.items() if k in keycols and v != None}
        upd.update(key)
        rtn, dt = self.data_mod.upsert(upd)
        if not(rtn):
            raise lib.http_exception(
                status_code=422, loc=[], msg="Invalid key value")
        self.data_mod.save()
        return paginate(dt)

    def set_route(self):
        """Build the APIRouter: GET/POST, each with and without ``{code}``."""
        tags = [self.__mod_name]
        # dependencies = [Depends(oauth2_scheme)]
        dependencies = None
        path = "/{}".format(self.__mod_name)
        pathwithpara = "%s/{code}" % (path)
        apiroute = APIRouter(tags=tags, dependencies=dependencies)
        apiroute.add_api_route(
            path=path, methods=["get"], name=f"Get {self.__mod_name}",
            endpoint=self.get, response_model=Page[self.model])
        apiroute.add_api_route(
            path=pathwithpara, methods=["get"], name=f"Get {self.__mod_name}",
            endpoint=self.get, response_model=Page[self.model])
        apiroute.add_api_route(
            path=path, methods=["post"], name=f"Post {self.__mod_name}",
            endpoint=self.post, response_model=Page[self.model])
        apiroute.add_api_route(
            path=pathwithpara, methods=["post"], name=f"Post {self.__mod_name}",
            endpoint=self.post, response_model=Page[self.model])
        return {"route": apiroute}
| 40.42029 | 101 | 0.617784 |
b838bed42a05bbe4d6e29cc3a6eeb85890dfcc73 | 673 | py | Python | bin/rst2s5.py | incognitoRepo/hdlogger | c738161ef3144469ba0f47caf89770613031e96e | [
"BSD-2-Clause"
] | null | null | null | bin/rst2s5.py | incognitoRepo/hdlogger | c738161ef3144469ba0f47caf89770613031e96e | [
"BSD-2-Clause"
] | null | null | null | bin/rst2s5.py | incognitoRepo/hdlogger | c738161ef3144469ba0f47caf89770613031e96e | [
"BSD-2-Clause"
] | null | null | null | #!/Users/alberthan/VSCodeProjects/hdlogger/bin/python3
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <cliechti@gmx.net>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='s5', description=description)
| 26.92 | 74 | 0.7474 |
3291b3f7d3f248276f426e76c152f45a7aa6e8a8 | 14,039 | py | Python | neural_compressor/ux/utils/workload/workload.py | mdfaijul/neural-compressor | e1d59da3790f9ff9647a21d2ff23da98c78a9682 | [
"Apache-2.0"
] | 100 | 2020-12-01T02:40:12.000Z | 2021-09-09T08:14:22.000Z | neural_compressor/ux/utils/workload/workload.py | mdfaijul/neural-compressor | e1d59da3790f9ff9647a21d2ff23da98c78a9682 | [
"Apache-2.0"
] | 25 | 2021-01-05T00:16:17.000Z | 2021-09-10T03:24:01.000Z | neural_compressor/ux/utils/workload/workload.py | mdfaijul/neural-compressor | e1d59da3790f9ff9647a21d2ff23da98c78a9682 | [
"Apache-2.0"
] | 25 | 2020-12-01T19:07:08.000Z | 2021-08-30T14:20:07.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Workload module."""
import json
import os
from copy import deepcopy
from datetime import datetime, timezone
from enum import Enum
from pathlib import Path
from typing import Any, Dict, Optional
from neural_compressor.ux.components.model.repository import ModelRepository
from neural_compressor.ux.components.optimization import Optimizations
from neural_compressor.ux.utils.consts import Precisions
from neural_compressor.ux.utils.exceptions import ClientErrorException, InternalException
from neural_compressor.ux.utils.json_serializer import JsonSerializer
from neural_compressor.ux.utils.logger import log
from neural_compressor.ux.utils.utils import (
get_file_extension,
get_framework_from_path,
get_predefined_config_path,
)
from neural_compressor.ux.utils.workload.config import Config
class Workload(JsonSerializer):
    """Single optimization workload: model, datasets, mode and generated config.

    Built from a request ``data`` dict; validates the model and dataset
    locations, prepares the workspace directories and loads/initializes
    the YAML config for either tuning or graph optimization.
    """

    def __init__(self, data: Dict[str, Any]):
        """Initialize the workload from a request dictionary.

        Raises ClientErrorException on any missing/invalid required field
        (id, project_name, model_path, domain, dataset/model locations).
        """
        super().__init__()
        self.config: Config = Config()
        self.id: str = str(data.get("id", ""))
        if not self.id:
            raise ClientErrorException("Workload ID not specified.")
        self.project_name: str = str(data.get("project_name", ""))
        if not self.project_name:
            raise ClientErrorException("project_name not specified.")
        now = datetime.now(timezone.utc)
        self.created_at: str = str(data.get("created_at", now.isoformat()))
        self.model_path: str = data.get("model_path", "")
        if not self.model_path:
            raise ClientErrorException("Model path is not defined!")
        self.model_name = Path(self.model_path).stem
        self.domain: str = data.get("domain", None)
        if not self.domain:
            raise ClientErrorException("Domain is not defined!")
        self.domain_flavour: str = data.get("domain_flavour", "")
        # Framework is guessed from the model file when not given explicitly.
        self.framework: str = data.get(
            "framework",
            get_framework_from_path(self.model_path),
        )
        self.workspace_path = os.path.join(os.environ.get("HOME", ""), "workdir")
        # Per-workload directory: <workspace>/workloads/<model>_<id> by default.
        self.workload_path = data.get(
            "workload_path",
            os.path.join(
                self.workspace_path,
                "workloads",
                f"{self.model_name}_{self.id}",
            ),
        )
        self.set_workspace()
        self.eval_dataset_path: str = data.get("eval_dataset_path", "")
        # NOTE(review): this reads "eval_dataset_path" for the *calibration*
        # path -- looks like a copy/paste slip, though set_dataset_paths()
        # below usually overwrites both values; confirm intent.
        self.calib_dataset_path: str = data.get("eval_dataset_path", "")
        self.set_dataset_paths(data)
        for dataset_path in [self.eval_dataset_path, self.calib_dataset_path]:
            # "no_dataset_location" is a sentinel meaning "no dataset needed".
            if dataset_path != "no_dataset_location" and not os.path.exists(
                dataset_path,
            ):
                raise ClientErrorException(
                    f'Could not found dataset in specified location: "{dataset_path}".',
                )
        if not ModelRepository.is_model_path(self.model_path):
            raise ClientErrorException(
                f'Could not found model in specified location: "{self.model_path}".',
            )
        self.supports_profiling: bool = self.check_if_supports_profiling()
        self.accuracy_goal: float = data.get("accuracy_goal", 0.01)
        self.config_name = "config.yaml"
        self.predefined_config_path = data.get(
            "config_path",
            get_predefined_config_path(self.framework, self.domain, self.domain_flavour),
        )
        self.config_path = os.path.join(
            self.workload_path,
            self.config_name,
        )
        self.input_precision = Precisions.FP32  # TODO: Detect input model precision
        self.output_precision = data.get("precision", data.get("output_precision"))
        self.mode = self.get_optimization_mode()
        self.tune = data.get("tune", data.get("tuning", False))
        # FP32 precision works with GraphOptimization without auto-tune
        if self.output_precision == "fp32":
            self.tune = False
        self.execution_mode = ExecutionMode.BASIC
        self.set_execution_mode(data)
        self.initialize_config(data)
        self.input_nodes: Optional[str] = data.get("inputs", data.get("input_nodes"))
        self.output_nodes: Optional[str] = data.get("outputs", data.get("output_nodes"))
        self.model_output_path = os.path.join(
            self.workload_path,
            self.model_output_name,
        )
        # Serialized workload format version (see WorkloadMigrator).
        self.version = "4.0"

    def initialize_config(self, data: dict) -> None:
        """Load the config (existing or predefined) and apply mode-specific setup."""
        # Prefer an already-generated per-workload config over the template.
        if not os.path.isfile(self.config_path):
            self.config.load(self.predefined_config_path)
        else:
            self.config.load(self.config_path)
        config_initializers = {
            Optimizations.TUNING: self.initialize_tuning_config,
            Optimizations.GRAPH: self.initialize_graph_optimization_config,
        }
        initializer = config_initializers.get(self.mode, None)
        if initializer is None:
            raise ClientErrorException(f"Could not initialize config for {self.mode} mode.")
        self.config.model.name = self.model_name
        self.config.set_evaluation_dataset_path(self.eval_dataset_path)
        self.config.set_workspace(self.workload_path)
        self.config.set_accuracy_goal(self.accuracy_goal)
        initializer()

    def initialize_tuning_config(self) -> None:
        """Initialize the config for quantization/tuning mode."""
        self.config.set_quantization_dataset_path(self.calib_dataset_path)
        # Without auto-tune, only benchmark performance of the quantized model.
        if not self.tune:
            self.config.tuning.set_performance_only(True)

    def initialize_graph_optimization_config(self) -> None:
        """Initialize the config for graph optimization mode."""
        self.config.pruning = None  # pruning is not applicable to graph optimization
        self.config.set_optimization_precision(self.framework, self.output_precision)

    def get_optimization_mode(self) -> str:
        """Map the requested output precision to an optimization mode."""
        modes_map = {
            Precisions.INT8: Optimizations.TUNING,
            Precisions.FP32: Optimizations.GRAPH,
            Precisions.MIXED: Optimizations.GRAPH,
        }
        mode = modes_map.get(self.output_precision, None)
        if mode is None:
            raise ClientErrorException(
                f"Could not found optimization mode for {self.output_precision} precision.",
            )
        return mode

    def check_if_supports_profiling(self) -> bool:
        """
        Check if supports profiling.

        returns true if model supports profiling else returns false.
        """
        # NLP and recommendation models are explicitly excluded from profiling.
        if self.domain.lower() in ["nlp", "recommendation"]:
            return False
        model = ModelRepository().get_model(self.model_path)
        return model.supports_profiling

    def set_dataset_paths(self, data: dict) -> None:
        """Set calibration and evaluation dataset paths.

        Nested ``evaluation``/``quantization`` entries take precedence;
        a top-level ``dataset_path`` is the fallback for both.
        """
        if data.get("evaluation", {}).get("dataset_path"):
            self.eval_dataset_path = data.get("evaluation", {}).get("dataset_path")
        if data.get("quantization", {}).get("dataset_path"):
            self.calib_dataset_path = data.get("quantization", {}).get("dataset_path")
        if not self.eval_dataset_path:
            self.eval_dataset_path = data.get("dataset_path", "")
        if not self.calib_dataset_path:
            self.calib_dataset_path = data.get("dataset_path", "")

    def set_workspace(self) -> None:
        """Create (if missing) necessary folders for workloads."""
        os.makedirs(self.workspace_path, exist_ok=True)
        os.makedirs(self.workload_path, exist_ok=True)

    def dump(self) -> None:
        """Serialize this workload to ``workload.json`` in its directory."""
        json_path = os.path.join(self.workload_path, "workload.json")
        with open(json_path, "w") as f:
            json.dump(self.serialize(), f, indent=4)

        log.debug(f"Successfully saved workload to {json_path}")

    @property
    def model_output_name(self) -> str:
        """Build the output model filename from mode, tuning flag and precision."""
        output_name = self.model_name
        if self.mode == Optimizations.TUNING:
            output_name += "_tuned_" + self.output_precision
        elif self.mode == Optimizations.GRAPH:
            output_name = self.model_name + "_optimized_"
            if self.tune:
                output_name += "tuned_"
            # Mixed precision may be a comma-separated list, e.g. "bf16,fp32".
            output_name += "_".join(
                [precision.strip() for precision in self.output_precision.split(",")],
            )
        else:
            raise ClientErrorException(f"Mode {self.mode} is not supported.")
        # Preserve the input model's file extension (directories have none).
        if os.path.isfile(self.model_path):
            output_name += "." + get_file_extension(self.model_path)
        return output_name

    def set_execution_mode(self, data: dict) -> None:
        """Set execution_mode from the request, defaulting safely to BASIC."""
        if "execution_mode" in data:
            try:
                execution_mode = data.get("execution_mode", ExecutionMode.BASIC)
                self.execution_mode = ExecutionMode(execution_mode)
            except ValueError:
                # Unknown mode string falls back to the basic flow.
                self.execution_mode = ExecutionMode.BASIC
        elif data.get("tuning", False):
            self.execution_mode = ExecutionMode.ADVANCED
class ExecutionMode(Enum):
    """Execution modes a workload can run in.

    ``BASIC`` is the default flow; ``ADVANCED`` is selected when tuning
    is requested or explicitly asked for by the caller.
    """

    BASIC = "basic"        # default, simplified flow
    ADVANCED = "advanced"  # full tuning flow
class WorkloadMigrator:
    """Step-wise migrator for persisted ``workload.json`` files.

    Upgrades a workload dict one format version at a time (v1 -> v2 ->
    v3 -> v4) using the registered per-version migration functions.
    """

    def __init__(self, workload_json_path: str):
        """Initialize the migrator for the given workload.json path."""
        self.workload_json = workload_json_path
        self.workload_data: dict = {}
        # Maps a *target* version to the function that migrates to it.
        self.version_migrators = {
            2: self._migrate_to_v2,
            3: self._migrate_to_v3,
            4: self._migrate_to_v4,
        }

    @property
    def current_version(self) -> int:
        """Get the (integer) version of the loaded workload format."""
        self.ensure_workload_loaded()
        # float() first: historical files may store "4.0"-style strings.
        return int(float(self.workload_data.get("version", 1)))

    @property
    def require_migration(self) -> bool:
        """Check whether the workload file exists and is below the latest version."""
        if not os.path.isfile(self.workload_json):
            log.debug("Workload does not exits.")
            return False
        if self.current_version >= max(self.version_migrators.keys()):
            log.debug("Workload already up to date.")
            return False
        return True

    def load_workload_data(self) -> None:
        """Load workload data from json."""
        with open(self.workload_json, encoding="utf-8") as workload_json:
            self.workload_data = json.load(workload_json)

    def ensure_workload_loaded(self) -> None:
        """Load the workload data lazily, only once and only if the file exists."""
        if not self.workload_data and os.path.isfile(self.workload_json):
            self.load_workload_data()

    def dump(self) -> None:
        """Write the (possibly migrated) workload data back to json."""
        with open(self.workload_json, "w") as workload_json:
            json.dump(self.workload_data, workload_json, indent=4)

    def migrate(self) -> None:
        """Migrate the workload step by step up to the latest version."""
        self.ensure_workload_loaded()
        if not self.require_migration:
            return
        # Apply each migration in order: current -> current+1 -> ... -> latest.
        migration_steps = range(self.current_version, max(self.version_migrators.keys()))
        for step in migration_steps:
            migration_version = step + 1
            self._migrate_workload(migration_version)

    def _migrate_workload(self, migration_version: int) -> None:
        """Migrate the workload one version up, to *migration_version*."""
        print(f"Migrate called with {migration_version} migration version.")
        migrate = self.version_migrators.get(migration_version, None)
        if migrate is None:
            raise InternalException(f"Could not parse workload from version {migration_version}")
        migrate()

    def _migrate_to_v2(self) -> None:
        """Parse workload from v1 to v2 (adds precision/mode/tune fields)."""
        print("Migrating workload.json to v2...")
        new_data = {
            "input_precision": "fp32",
            "output_precision": "int8",
            "mode": "tuning",
            "tune": True,
            "version": 2,
        }
        parsed_workload = deepcopy(self.workload_data)
        parsed_workload.update(new_data)
        try:
            parsed_workload["config"]["tuning"].update({"objective": "performance"})
        except KeyError:
            log.debug("Could not set tuning objective.")
        # v1 stored node names as comma-separated strings; v2 uses lists.
        try:
            input_nodes = self.workload_data["config"]["model"]["inputs"].split(",")
        except KeyError:
            input_nodes = []
        parsed_workload.update({"input_nodes": input_nodes})
        try:
            output_nodes = self.workload_data["config"]["model"]["outputs"].split(",")
        except KeyError:
            output_nodes = []
        parsed_workload.update({"output_nodes": output_nodes})
        self.workload_data = parsed_workload

    def _migrate_to_v3(self) -> None:
        """Parse workload from v2 to v3 (adds project_name/created_at)."""
        print("Migrating workload.json to v3...")
        self.workload_data.update(
            {
                "project_name": os.path.basename(self.workload_data.get("model_path", "")),
                # Fixed placeholder timestamp for pre-v3 workloads.
                "created_at": "2021-07-15T14:19:18.860579",
                "version": 3,
            },
        )

    def _migrate_to_v4(self) -> None:
        """Parse workload from v3 to v4 (adds supports_profiling flag)."""
        print("Migrating workload.json to v4...")
        self.workload_data.update(
            {
                # Conservative default: profiling support is re-detected later.
                "supports_profiling": False,
                "version": 4,
            },
        )
| 37.140212 | 97 | 0.630387 |
b5318e367ba3b13c80e226b40ff7c29cc7d4fc2d | 573 | py | Python | diventi/landing/migrations/0076_auto_20190524_0740.py | flavoi/diven | 3173ca3ca3fbedc191b8eab3639a6bceb3c442c4 | [
"Apache-2.0"
] | 2 | 2019-06-27T16:00:17.000Z | 2020-08-14T07:46:05.000Z | diventi/landing/migrations/0076_auto_20190524_0740.py | flavoi/diven | 3173ca3ca3fbedc191b8eab3639a6bceb3c442c4 | [
"Apache-2.0"
] | 26 | 2020-02-15T22:39:35.000Z | 2022-02-19T21:09:01.000Z | diventi/landing/migrations/0076_auto_20190524_0740.py | flavoi/diven | 3173ca3ca3fbedc191b8eab3639a6bceb3c442c4 | [
"Apache-2.0"
] | 1 | 2021-11-12T22:30:15.000Z | 2021-11-12T22:30:15.000Z | # Generated by Django 2.2.1 on 2019-05-24 05:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Restrict ``Feature.color`` to the predefined palette choices.

    Auto-generated by Django (2019-05-24); default becomes "warning".
    """

    dependencies = [
        ('landing', '0075_auto_20190524_0738'),
    ]

    operations = [
        migrations.AlterField(
            model_name='feature',
            name='color',
            field=models.CharField(blank=True, choices=[('info', 'Blue'), ('primary', 'Rose'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('default', 'Gray')], default='warning', max_length=30, verbose_name='color'),
        ),
    ]
| 30.157895 | 237 | 0.596859 |
bf9a2fc56e674683ec4c009f32bf55ff63e08fc7 | 5,425 | py | Python | glance/common/scripts/api_image_import/main.py | bwLehrpool/glance | d4119be0543bdaefe78fc11e16c3a01b55aa9e3a | [
"Apache-2.0"
] | null | null | null | glance/common/scripts/api_image_import/main.py | bwLehrpool/glance | d4119be0543bdaefe78fc11e16c3a01b55aa9e3a | [
"Apache-2.0"
] | null | null | null | glance/common/scripts/api_image_import/main.py | bwLehrpool/glance | d4119be0543bdaefe78fc11e16c3a01b55aa9e3a | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__all__ = [
'run',
]
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from glance.api.v2 import images as v2_api
from glance.common import exception
from glance.common.scripts import utils as script_utils
from glance.common import store_utils
from glance.i18n import _
LOG = logging.getLogger(__name__)
def run(t_id, context, task_repo, image_repo, image_factory):
    """Entry point for the image-import task script.

    Logs the start of task *t_id* and delegates the work to
    ``_execute``.  ``context`` is accepted for interface compatibility
    but is not used here.
    """
    LOG.info('Task %(task_id)s beginning image import execution.',
             {'task_id': t_id})
    _execute(t_id, task_repo, image_repo, image_factory)
# NOTE(nikhil): This lock prevents more than N number of threads to be spawn
# simultaneously. The number N represents the number of threads in the
# executor pool. The value is set to 10 in the eventlet executor.
@lockutils.synchronized("glance_image_import")
def _execute(t_id, task_repo, image_repo, image_factory):
    """Resolve the task *t_id* and mark it succeeded or failed.

    Loads the task from *task_repo*; silently returns when it no longer
    exists.  On any error while unpacking the task input, the task is
    marked failed with a diagnostic message.  In all cases the task is
    saved back to the repository.
    """
    task = script_utils.get_task(task_repo, t_id)
    if task is None:
        # NOTE: This happens if task is not found in the database. In
        # such cases, there is no way to update the task status so,
        # it's ignored here.
        return

    try:
        task_input = script_utils.unpack_task_input(task)
        image_id = task_input.get('image_id')
        task.succeed({'image_id': image_id})
    except Exception as e:
        # Note: The message string contains Error in it to indicate
        # in the task.message that it's a error message for the user.
        err_msg = ("Error: " + str(type(e)) + ': ' +
                   encodeutils.exception_to_unicode(e))
        # Keep a separator between the message and the task id (the
        # previous concatenation ran them together with no space).
        log_msg = err_msg + (" Task ID %s" % task.task_id)
        LOG.exception(log_msg)

        # Translating a message built at runtime with _() is a no-op --
        # there is no catalog entry for a dynamic string (the old call
        # carried a "# noqa" to silence exactly this check) -- so pass
        # the plain string.
        task.fail(err_msg)
    finally:
        task_repo.save(task)
def import_image(image_repo, image_factory, task_input, task_id, uri):
    """Create an image record, download its data from *uri* and activate it.

    Returns the new image's id.  If the image is no longer in 'saving'
    state after the data transfer (e.g. it was deleted meanwhile), the
    stale data locations are cleaned up and a Conflict is raised.
    """
    original_image = v2_api.create_image(image_repo,
                                         image_factory,
                                         task_input.get('image_properties'),
                                         task_id)
    # NOTE: set image status to saving just before setting data
    original_image.status = 'saving'
    image_repo.save(original_image)
    image_id = original_image.image_id

    # NOTE: Retrieving image from the database because the Image object
    # returned from create_image method does not have appropriate factories
    # wrapped around it.
    new_image = image_repo.get(image_id)
    set_image_data(new_image, uri, task_id)

    try:
        # NOTE: Check if the Image is not deleted after setting the data
        # before saving the active image. Here if image status is
        # saving, then new_image is saved as it contains updated location,
        # size, virtual_size and checksum information and the status of
        # new_image is already set to active in set_image_data() call.
        image = image_repo.get(image_id)
        if image.status == 'saving':
            image_repo.save(new_image)
            return image_id
        else:
            msg = _("The Image %(image_id)s object being created by this task "
                    "%(task_id)s, is no longer in valid status for further "
                    "processing.") % {"image_id": image_id,
                                      "task_id": task_id}
            raise exception.Conflict(msg)
    except (exception.Conflict, exception.NotFound,
            exception.NotAuthenticated):
        # Re-raise after best-effort cleanup of any stored data locations,
        # so the failed import does not leak backend storage.
        with excutils.save_and_reraise_exception():
            if new_image.locations:
                for location in new_image.locations:
                    store_utils.delete_image_location_from_backend(
                        new_image.context,
                        image_id,
                        location)
def set_image_data(image, uri, task_id, backend=None):
    """Stream image bytes from *uri* into *image*, optionally into *backend*.

    Failures are logged and re-raised; the data iterator is always
    closed (when it exposes ``close``).
    """
    data_iter = None
    try:
        LOG.info("Task %(task_id)s: Got image data uri %(data_uri)s to be "
                 "imported", {"data_uri": uri, "task_id": task_id})
        data_iter = script_utils.get_image_data_iter(uri)
        image.set_data(data_iter, backend=backend)
    except Exception as e:
        # Re-raise after logging so the caller sees the original error.
        with excutils.save_and_reraise_exception():
            LOG.warning("Task %(task_id)s failed with exception %(error)s" %
                        {"error": encodeutils.exception_to_unicode(e),
                         "task_id": task_id})
            LOG.info("Task %(task_id)s: Could not import image file"
                     " %(image_data)s", {"image_data": uri,
                                         "task_id": task_id})
    finally:
        if hasattr(data_iter, 'close'):
            data_iter.close()
| 39.889706 | 79 | 0.640737 |
0ba76c194fbedcd7c1f5b9e6978060d2039b1ff2 | 23,824 | py | Python | elephant/test/test_statistics.py | mer0mingian/elephant | 28e37f7e7769b67023361236d59692056c564815 | [
"BSD-3-Clause"
] | 2 | 2015-07-21T12:29:50.000Z | 2021-07-27T12:54:10.000Z | elephant/test/test_statistics.py | mer0mingian/elephant | 28e37f7e7769b67023361236d59692056c564815 | [
"BSD-3-Clause"
] | 2 | 2017-01-23T12:33:33.000Z | 2017-04-24T09:49:24.000Z | elephant/test/test_statistics.py | ccluri/elephant | f2af72cac2c65ee01ecf5ae1de1a1ef624b042ee | [
"BSD-3-Clause"
] | 1 | 2019-04-11T08:22:24.000Z | 2019-04-11T08:22:24.000Z | # -*- coding: utf-8 -*-
"""
Unit tests for the statistics module.
:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
import unittest
import neo
import numpy as np
from numpy.testing.utils import assert_array_almost_equal, assert_array_equal
import quantities as pq
import scipy.integrate as spint
import elephant.statistics as es
import elephant.kernels as kernels
import warnings
class isi_TestCase(unittest.TestCase):
    """Tests for ``elephant.statistics.isi`` on SpikeTrain, Quantity and
    plain-array inputs, in 1D and 2D (both axes)."""

    def setUp(self):
        self.test_array_2d = np.array([[0.3, 0.56, 0.87, 1.23],
                                       [0.02, 0.71, 1.82, 8.46],
                                       [0.03, 0.14, 0.15, 0.92]])
        # Expected intervals along axis 0 (differences between rows).
        self.targ_array_2d_0 = np.array([[-0.28, 0.15, 0.95, 7.23],
                                         [0.01, -0.57, -1.67, -7.54]])
        # Expected intervals along axis 1 (differences within each row);
        # axis=1 is also the default axis for 2D input.
        self.targ_array_2d_1 = np.array([[0.26, 0.31, 0.36],
                                         [0.69, 1.11, 6.64],
                                         [0.11, 0.01, 0.77]])
        self.targ_array_2d_default = self.targ_array_2d_1

        self.test_array_1d = self.test_array_2d[0, :]
        self.targ_array_1d = self.targ_array_2d_1[0, :]

    def test_isi_with_spiketrain(self):
        st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
        target = pq.Quantity(self.targ_array_1d, 'ms')
        res = es.isi(st)
        assert_array_almost_equal(res, target, decimal=9)

    def test_isi_with_quantities_1d(self):
        st = pq.Quantity(self.test_array_1d, units='ms')
        target = pq.Quantity(self.targ_array_1d, 'ms')
        res = es.isi(st)
        assert_array_almost_equal(res, target, decimal=9)

    def test_isi_with_plain_array_1d(self):
        st = self.test_array_1d
        target = self.targ_array_1d
        res = es.isi(st)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_isi_with_plain_array_2d_default(self):
        st = self.test_array_2d
        target = self.targ_array_2d_default
        res = es.isi(st)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_isi_with_plain_array_2d_0(self):
        st = self.test_array_2d
        target = self.targ_array_2d_0
        res = es.isi(st, axis=0)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_isi_with_plain_array_2d_1(self):
        st = self.test_array_2d
        target = self.targ_array_2d_1
        res = es.isi(st, axis=1)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)
class isi_cv_TestCase(unittest.TestCase):
    """The CV of the ISIs of a perfectly regular spike train must be zero."""

    def setUp(self):
        self.test_array_regular = np.arange(1, 6)

    def test_cv_isi_regular_spiketrain_is_zero(self):
        st = neo.SpikeTrain(self.test_array_regular, units='ms', t_stop=10.0)
        targ = 0.0
        res = es.cv(es.isi(st))
        self.assertEqual(res, targ)

    def test_cv_isi_regular_array_is_zero(self):
        st = self.test_array_regular
        targ = 0.0
        res = es.cv(es.isi(st))
        self.assertEqual(res, targ)
class mean_firing_rate_TestCase(unittest.TestCase):
    """Tests for ``elephant.statistics.mean_firing_rate``.

    Covers SpikeTrain, Quantity and plain 1D/2D/3D arrays, the ``axis``
    argument, explicit ``t_start``/``t_stop`` windows, and the
    type-error cases for mixed Quantity/plain boundary arguments.
    """

    def setUp(self):
        self.test_array_3d = np.ones([5, 7, 13])
        self.test_array_2d = np.array([[0.3, 0.56, 0.87, 1.23],
                                       [0.02, 0.71, 1.82, 8.46],
                                       [0.03, 0.14, 0.15, 0.92]])

        # Expected spike counts per axis (and without an axis).
        self.targ_array_2d_0 = np.array([3, 3, 3, 3])
        self.targ_array_2d_1 = np.array([4, 4, 4])
        self.targ_array_2d_None = 12
        self.targ_array_2d_default = self.targ_array_2d_None

        # Maximum spike time per axis: the implicit t_stop when none is given.
        self.max_array_2d_0 = np.array([0.3, 0.71, 1.82, 8.46])
        self.max_array_2d_1 = np.array([1.23, 8.46, 0.92])
        self.max_array_2d_None = 8.46
        self.max_array_2d_default = self.max_array_2d_None

        self.test_array_1d = self.test_array_2d[0, :]
        self.targ_array_1d = self.targ_array_2d_1[0]
        self.max_array_1d = self.max_array_2d_1[0]

    def test_mean_firing_rate_with_spiketrain(self):
        st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
        target = pq.Quantity(self.targ_array_1d/10., '1/ms')
        res = es.mean_firing_rate(st)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_spiketrain_set_ends(self):
        st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
        target = pq.Quantity(2/0.5, '1/ms')
        res = es.mean_firing_rate(st, t_start=0.4, t_stop=0.9)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_quantities_1d(self):
        st = pq.Quantity(self.test_array_1d, units='ms')
        target = pq.Quantity(self.targ_array_1d/self.max_array_1d, '1/ms')
        res = es.mean_firing_rate(st)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_quantities_1d_set_ends(self):
        st = pq.Quantity(self.test_array_1d, units='ms')
        # t_start in microseconds, t_stop dimensionless: window is 0.4-1.0 ms.
        target = pq.Quantity(2/0.6, '1/ms')
        res = es.mean_firing_rate(st, t_start=400*pq.us, t_stop=1.)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_1d(self):
        st = self.test_array_1d
        target = self.targ_array_1d/self.max_array_1d
        res = es.mean_firing_rate(st)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_1d_set_ends(self):
        st = self.test_array_1d
        target = self.targ_array_1d/(1.23-0.3)
        res = es.mean_firing_rate(st, t_start=0.3, t_stop=1.23)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_2d_default(self):
        st = self.test_array_2d
        target = self.targ_array_2d_default/self.max_array_2d_default
        res = es.mean_firing_rate(st)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_2d_0(self):
        st = self.test_array_2d
        target = self.targ_array_2d_0/self.max_array_2d_0
        res = es.mean_firing_rate(st, axis=0)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_2d_1(self):
        st = self.test_array_2d
        target = self.targ_array_2d_1/self.max_array_2d_1
        res = es.mean_firing_rate(st, axis=1)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_3d_None(self):
        st = self.test_array_3d
        target = np.sum(self.test_array_3d, None)/5.
        res = es.mean_firing_rate(st, axis=None, t_stop=5.)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_3d_0(self):
        st = self.test_array_3d
        target = np.sum(self.test_array_3d, 0)/5.
        res = es.mean_firing_rate(st, axis=0, t_stop=5.)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_3d_1(self):
        st = self.test_array_3d
        target = np.sum(self.test_array_3d, 1)/5.
        res = es.mean_firing_rate(st, axis=1, t_stop=5.)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_3d_2(self):
        st = self.test_array_3d
        target = np.sum(self.test_array_3d, 2)/5.
        res = es.mean_firing_rate(st, axis=2, t_stop=5.)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_2d_1_set_ends(self):
        st = self.test_array_2d
        target = np.array([4, 1, 3])/(1.23-0.14)
        res = es.mean_firing_rate(st, axis=1, t_start=0.14, t_stop=1.23)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_2d_None(self):
        st = self.test_array_2d
        target = self.targ_array_2d_None/self.max_array_2d_None
        res = es.mean_firing_rate(st, axis=None)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_and_units_start_stop_typeerror(self):
        # Mixing plain-array input with Quantity boundaries (or vice versa)
        # must raise TypeError.
        st = self.test_array_2d
        self.assertRaises(TypeError, es.mean_firing_rate, st,
                          t_start=pq.Quantity(0, 'ms'))
        self.assertRaises(TypeError, es.mean_firing_rate, st,
                          t_stop=pq.Quantity(10, 'ms'))
        self.assertRaises(TypeError, es.mean_firing_rate, st,
                          t_start=pq.Quantity(0, 'ms'),
                          t_stop=pq.Quantity(10, 'ms'))
        self.assertRaises(TypeError, es.mean_firing_rate, st,
                          t_start=pq.Quantity(0, 'ms'),
                          t_stop=10.)
        self.assertRaises(TypeError, es.mean_firing_rate, st,
                          t_start=0.,
                          t_stop=pq.Quantity(10, 'ms'))
class FanoFactorTestCase(unittest.TestCase):
    """Tests for ``elephant.statistics.fanofactor``.

    Cross-validates against var/mean of the spike counts for random
    spike trains (seeded for reproducibility) and checks the degenerate
    cases: identical trains (FF == 0) and empty input (FF is NaN).
    """

    def setUp(self):
        np.random.seed(100)  # fixed seed so sp_counts is reproducible
        num_st = 300
        self.test_spiketrains = []
        self.test_array = []
        self.test_quantity = []
        self.test_list = []
        self.sp_counts = np.zeros(num_st)

        for i in range(num_st):
            r = np.random.rand(np.random.randint(20) + 1)
            st = neo.core.SpikeTrain(r * pq.ms,
                                     t_start=0.0 * pq.ms,
                                     t_stop=20.0 * pq.ms)
            self.test_spiketrains.append(st)
            self.test_array.append(r)
            self.test_quantity.append(r * pq.ms)
            self.test_list.append(list(r))
            # for cross-validation
            self.sp_counts[i] = len(st)

    def test_fanofactor_spiketrains(self):
        # Test with list of spiketrains
        self.assertEqual(
            np.var(self.sp_counts) / np.mean(self.sp_counts),
            es.fanofactor(self.test_spiketrains))

        # One spiketrain in list
        st = self.test_spiketrains[0]
        self.assertEqual(es.fanofactor([st]), 0.0)

    def test_fanofactor_empty(self):
        # Test with empty list
        self.assertTrue(np.isnan(es.fanofactor([])))
        self.assertTrue(np.isnan(es.fanofactor([[]])))

        # Test with empty quantity
        self.assertTrue(np.isnan(es.fanofactor([] * pq.ms)))

        # Empty spiketrain
        st = neo.core.SpikeTrain([] * pq.ms, t_start=0 * pq.ms,
                                 t_stop=1.5 * pq.ms)
        self.assertTrue(np.isnan(es.fanofactor(st)))

    def test_fanofactor_spiketrains_same(self):
        # Test with same spiketrains in list
        sts = [self.test_spiketrains[0]] * 3
        self.assertEqual(es.fanofactor(sts), 0.0)

    def test_fanofactor_array(self):
        self.assertEqual(es.fanofactor(self.test_array),
                         np.var(self.sp_counts) / np.mean(self.sp_counts))

    def test_fanofactor_array_same(self):
        lst = [self.test_array[0]] * 3
        self.assertEqual(es.fanofactor(lst), 0.0)

    def test_fanofactor_quantity(self):
        self.assertEqual(es.fanofactor(self.test_quantity),
                         np.var(self.sp_counts) / np.mean(self.sp_counts))

    def test_fanofactor_quantity_same(self):
        lst = [self.test_quantity[0]] * 3
        self.assertEqual(es.fanofactor(lst), 0.0)

    def test_fanofactor_list(self):
        self.assertEqual(es.fanofactor(self.test_list),
                         np.var(self.sp_counts) / np.mean(self.sp_counts))

    def test_fanofactor_list_same(self):
        lst = [self.test_list[0]] * 3
        self.assertEqual(es.fanofactor(lst), 0.0)
class LVTestCase(unittest.TestCase):
    """Tests for es.lv (local variation of inter-spike intervals)."""

    def setUp(self):
        self.test_seq = [1, 28, 4, 47, 5, 16, 2, 5, 21, 12,
                         4, 12, 59, 2, 4, 18, 33, 25, 2, 34,
                         4, 1, 1, 14, 8, 1, 10, 1, 8, 20,
                         5, 1, 6, 5, 12, 2, 8, 8, 2, 8,
                         2, 10, 2, 1, 1, 2, 15, 3, 20, 6,
                         11, 6, 18, 2, 5, 17, 4, 3, 13, 6,
                         1, 18, 1, 16, 12, 2, 52, 2, 5, 7,
                         6, 25, 6, 5, 3, 15, 4, 3, 16, 3,
                         6, 5, 24, 21, 3, 3, 4, 8, 4, 11,
                         5, 7, 5, 6, 8, 11, 33, 10, 7, 4]
        # Expected LV value for test_seq; presumably cross-validated
        # against a reference implementation - TODO confirm origin.
        self.target = 0.971826029994

    def test_lv_with_quantities(self):
        seq = pq.Quantity(self.test_seq, units='ms')
        assert_array_almost_equal(es.lv(seq), self.target, decimal=9)

    def test_lv_with_plain_array(self):
        seq = np.array(self.test_seq)
        assert_array_almost_equal(es.lv(seq), self.target, decimal=9)

    def test_lv_with_list(self):
        seq = self.test_seq
        assert_array_almost_equal(es.lv(seq), self.target, decimal=9)

    def test_lv_raise_error(self):
        # Empty/scalar input -> AttributeError; 2d input -> ValueError.
        seq = self.test_seq
        self.assertRaises(AttributeError, es.lv, [])
        self.assertRaises(AttributeError, es.lv, 1)
        self.assertRaises(ValueError, es.lv, np.array([seq, seq]))
class RateEstimationTestCase(unittest.TestCase):
    """Tests for es.instantaneous_rate (kernel-smoothed firing rate)."""

    def setUp(self):
        # create a poisson spike train:
        self.st_tr = (0, 20.0)  # seconds
        self.st_dur = self.st_tr[1] - self.st_tr[0]  # seconds
        self.st_margin = 5.0  # seconds
        self.st_rate = 10.0  # Hertz
        st_num_spikes = np.random.poisson(
            self.st_rate * (self.st_dur - 2 * self.st_margin))
        spike_train = np.random.rand(
            st_num_spikes) * (self.st_dur - 2 * self.st_margin) + self.st_margin
        spike_train.sort()
        # convert spike train into neo objects
        self.spike_train = neo.SpikeTrain(spike_train * pq.s,
                                          t_start=self.st_tr[0] * pq.s,
                                          t_stop=self.st_tr[1] * pq.s)
        # generation of a multiply used specific kernel
        self.kernel = kernels.TriangularKernel(sigma=0.03 * pq.s)

    def test_instantaneous_rate_and_warnings(self):
        """Estimate has requested sampling period, units and time range."""
        st = self.spike_train
        sampling_period = 0.01 * pq.s
        with warnings.catch_warnings(record=True) as w:
            inst_rate = es.instantaneous_rate(
                st, sampling_period, self.kernel, cutoff=0)
            self.assertEqual("The width of the kernel was adjusted to a minimally "
                             "allowed width.", str(w[-2].message))
            self.assertEqual("Instantaneous firing rate approximation contains "
                             "negative values, possibly caused due to machine "
                             "precision errors.", str(w[-1].message))
        self.assertIsInstance(inst_rate, neo.core.AnalogSignalArray)
        # `assertEquals` is a deprecated alias (removed in Python 3.12);
        # use `assertEqual` instead.
        self.assertEqual(
            inst_rate.sampling_period.simplified, sampling_period.simplified)
        self.assertEqual(inst_rate.simplified.units, pq.Hz)
        self.assertEqual(inst_rate.t_stop.simplified, st.t_stop.simplified)
        self.assertEqual(inst_rate.t_start.simplified, st.t_start.simplified)

    def test_error_instantaneous_rate(self):
        """Invalid argument types/values raise TypeError or ValueError."""
        self.assertRaises(
            TypeError, es.instantaneous_rate, spiketrain=[1, 2, 3] * pq.s,
            sampling_period=0.01 * pq.ms, kernel=self.kernel)
        self.assertRaises(
            TypeError, es.instantaneous_rate, spiketrain=[1, 2, 3],
            sampling_period=0.01 * pq.ms, kernel=self.kernel)
        st = self.spike_train
        self.assertRaises(
            TypeError, es.instantaneous_rate, spiketrain=st,
            sampling_period=0.01, kernel=self.kernel)
        self.assertRaises(
            ValueError, es.instantaneous_rate, spiketrain=st,
            sampling_period=-0.01 * pq.ms, kernel=self.kernel)
        self.assertRaises(
            TypeError, es.instantaneous_rate, spiketrain=st,
            sampling_period=0.01 * pq.ms, kernel='NONE')
        self.assertRaises(TypeError, es.instantaneous_rate, self.spike_train,
                          sampling_period=0.01 * pq.s, kernel='wrong_string',
                          t_start=self.st_tr[0] * pq.s,
                          t_stop=self.st_tr[1] * pq.s,
                          trim=False)
        self.assertRaises(
            TypeError, es.instantaneous_rate, spiketrain=st,
            sampling_period=0.01 * pq.ms, kernel=self.kernel,
            cutoff=20 * pq.ms)
        self.assertRaises(
            TypeError, es.instantaneous_rate, spiketrain=st,
            sampling_period=0.01 * pq.ms, kernel=self.kernel, t_start=2)
        self.assertRaises(
            TypeError, es.instantaneous_rate, spiketrain=st,
            sampling_period=0.01 * pq.ms, kernel=self.kernel,
            t_stop=20 * pq.mV)
        self.assertRaises(
            TypeError, es.instantaneous_rate, spiketrain=st,
            sampling_period=0.01 * pq.ms, kernel=self.kernel, trim=1)

    def test_rate_estimation_consistency(self):
        """
        Test, whether the integral of the rate estimation curve is (almost)
        equal to the number of spikes of the spike train.
        """
        # Collect every concrete kernel class exported by `kernels`.
        kernel_types = [obj for obj in kernels.__dict__.values()
                        if isinstance(obj, type) and
                        issubclass(obj, kernels.Kernel) and
                        hasattr(obj, "_evaluate") and
                        obj is not kernels.Kernel and
                        obj is not kernels.SymmetricKernel]
        kernel_list = [kernel_type(sigma=0.5 * pq.s, invert=False)
                       for kernel_type in kernel_types]
        kernel_resolution = 0.01 * pq.s
        for kernel in kernel_list:
            rate_estimate_a0 = es.instantaneous_rate(
                self.spike_train,
                sampling_period=kernel_resolution,
                kernel='auto',
                t_start=self.st_tr[0] * pq.s,
                t_stop=self.st_tr[1] * pq.s,
                trim=False)
            rate_estimate0 = es.instantaneous_rate(
                self.spike_train,
                sampling_period=kernel_resolution,
                kernel=kernel)
            rate_estimate1 = es.instantaneous_rate(
                self.spike_train,
                sampling_period=kernel_resolution,
                kernel=kernel,
                t_start=self.st_tr[0] * pq.s,
                t_stop=self.st_tr[1] * pq.s,
                trim=False)
            rate_estimate2 = es.instantaneous_rate(
                self.spike_train,
                sampling_period=kernel_resolution,
                kernel=kernel,
                t_start=self.st_tr[0] * pq.s,
                t_stop=self.st_tr[1] * pq.s,
                trim=True)
            # test consistency: the area under each curve should match
            # the spike count within 5%.
            rate_estimate_list = [rate_estimate0, rate_estimate1,
                                  rate_estimate2, rate_estimate_a0]
            for rate_estimate in rate_estimate_list:
                num_spikes = len(self.spike_train)
                auc = spint.cumtrapz(
                    y=rate_estimate.magnitude[:, 0],
                    x=rate_estimate.times.rescale('s').magnitude)[-1]
                self.assertAlmostEqual(num_spikes, auc,
                                       delta=0.05 * num_spikes)
class TimeHistogramTestCase(unittest.TestCase):
    """Tests for es.time_histogram (pooled PSTH over spiketrains)."""

    def setUp(self):
        self.spiketrain_a = neo.SpikeTrain(
            [0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
        self.spiketrain_b = neo.SpikeTrain(
            [0.1, 0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
        self.spiketrains = [self.spiketrain_a, self.spiketrain_b]

    def tearDown(self):
        del self.spiketrain_a
        self.spiketrain_a = None
        del self.spiketrain_b
        self.spiketrain_b = None

    def test_time_histogram(self):
        """Raw spike counts per 1 s bin, pooled over both trains."""
        targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])
        histogram = es.time_histogram(self.spiketrains, binsize=pq.s)
        assert_array_equal(targ, histogram[:, 0].magnitude)

    def test_time_histogram_binary(self):
        """binary=True counts each train at most once per bin."""
        targ = np.array([2, 2, 1, 1, 2, 2, 1, 0, 1, 0])
        histogram = es.time_histogram(self.spiketrains, binsize=pq.s,
                                      binary=True)
        assert_array_equal(targ, histogram[:, 0].magnitude)

    def test_time_histogram_tstart_tstop(self):
        """Window limits restrict the histogram; omitted limits default."""
        # Start, stop short range
        targ = np.array([2, 1])
        histogram = es.time_histogram(self.spiketrains, binsize=pq.s,
                                      t_start=5 * pq.s, t_stop=7 * pq.s)
        assert_array_equal(targ, histogram[:, 0].magnitude)
        # Test without t_stop
        targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])
        histogram = es.time_histogram(self.spiketrains, binsize=1 * pq.s,
                                      t_start=0 * pq.s)
        assert_array_equal(targ, histogram[:, 0].magnitude)
        # Test without t_start
        histogram = es.time_histogram(self.spiketrains, binsize=1 * pq.s,
                                      t_stop=10 * pq.s)
        assert_array_equal(targ, histogram[:, 0].magnitude)

    def test_time_histogram_output(self):
        """'mean' and 'rate' outputs are normalized; bad output raises."""
        # Normalization mean
        histogram = es.time_histogram(self.spiketrains, binsize=pq.s,
                                      output='mean')
        targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0], dtype=float) / 2
        assert_array_equal(targ.reshape(targ.size, 1), histogram.magnitude)
        # Normalization rate
        histogram = es.time_histogram(self.spiketrains, binsize=pq.s,
                                      output='rate')
        assert_array_equal(histogram.view(pq.Quantity),
                           targ.reshape(targ.size, 1) * 1 / pq.s)
        # Normalization unspecified, raises error
        self.assertRaises(ValueError, es.time_histogram, self.spiketrains,
                          binsize=pq.s, output=' ')
class ComplexityPdfTestCase(unittest.TestCase):
    """Tests for es.complexity_pdf."""

    def setUp(self):
        self.spiketrain_a = neo.SpikeTrain(
            [0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
        self.spiketrain_b = neo.SpikeTrain(
            [0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
        self.spiketrain_c = neo.SpikeTrain(
            [0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
        self.spiketrains = [
            self.spiketrain_a, self.spiketrain_b, self.spiketrain_c]

    def tearDown(self):
        # Release all three fixture spike trains; the original omitted
        # spiketrain_c, leaking it between tests.
        del self.spiketrain_a
        self.spiketrain_a = None
        del self.spiketrain_b
        self.spiketrain_b = None
        del self.spiketrain_c
        self.spiketrain_c = None

    def test_complexity_pdf(self):
        """The complexity PDF sums to 1 and has len(spiketrains)+1 bins."""
        targ = np.array([0.92, 0.01, 0.01, 0.06])
        complexity = es.complexity_pdf(self.spiketrains, binsize=0.1 * pq.s)
        assert_array_equal(targ, complexity[:, 0].magnitude)
        self.assertEqual(1, complexity[:, 0].magnitude.sum())
        self.assertEqual(len(self.spiketrains) + 1, len(complexity))
        self.assertIsInstance(complexity, neo.AnalogSignalArray)
        self.assertEqual(complexity.units, 1 * pq.dimensionless)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 43.00361 | 101 | 0.594317 |
bd84c11e13b68e5d17369d37e755648c2c02f6df | 655 | py | Python | .history/my_classes/FirstClassFunctions/reducing_functions_20210707135727.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/FirstClassFunctions/reducing_functions_20210707135727.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/FirstClassFunctions/reducing_functions_20210707135727.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """Reducing Functions in Python
These are functions that recombine an iterable recursively, ending up with a single return value
Also called accumulators, aggregators, or folding functions
Example: Finding the maximum value in an iterable
a0, a1, a2, ..., aN-1

max(a, b) -> maximum of a and b

result = a0
result = max(result, a1)
result = max(result, a2)
...
result = max(result, aN-1)

# max value in a0, a1, a2, ..., aN-1
the special case of sequences
(i.e. we can use indexes to access elements in the sequence)
Using a loop
"""
# Sample data to reduce. (The original `l = l[5, 8, 6, 10, 9]` referenced
# `l` before assignment - a straight list literal was intended.)
l = [5, 8, 6, 10, 9]

# max_value(a, b) -> the larger of a and b
max_value = lambda a, b: a if a > b else b


def max_sequence(sequence):
    """Return the maximum element of *sequence* via the folding pattern.

    Implements the reduction described in the module docstring:
    result = a0; result = max(result, a1); ... (original snippet had the
    malformed header `def max_sequence(sequence())`).
    """
    result = sequence[0]
    for element in sequence[1:]:
        result = max_value(result, element)
    return result
| 21.833333 | 96 | 0.679389 |
331efde6c68516999db7065bdbbada22ce0ad7c4 | 1,648 | py | Python | departure/provider/transilien/transilien.py | Woll78/departure-python | 86eccd40291ad560d5859a120dd6c99aa5c4bd12 | [
"MIT"
] | 4 | 2020-10-18T18:12:04.000Z | 2022-03-31T19:13:08.000Z | departure/provider/transilien/transilien.py | Woll78/departure-python | 86eccd40291ad560d5859a120dd6c99aa5c4bd12 | [
"MIT"
] | null | null | null | departure/provider/transilien/transilien.py | Woll78/departure-python | 86eccd40291ad560d5859a120dd6c99aa5c4bd12 | [
"MIT"
] | 1 | 2021-11-19T10:37:26.000Z | 2021-11-19T10:37:26.000Z | from . import data, commons, api
def check_params(station_id: str = None):
    """Validate request parameters.

    Raises commons.TransilienException when station_id is not a known
    station code. A missing/empty station_id is accepted (no-op).
    """
    # The original `if not any((station_id))` crashed with a TypeError for
    # station_id=None (any() over a non-iterable) and iterated the string's
    # characters otherwise; test truthiness directly instead.
    if not station_id:
        return

    stations = data.STATIONS
    try:
        _ = stations[station_id]
    except KeyError as e:
        raise commons.TransilienException(
            f"invalid station code {station_id}"
        ) from e
def stations_by_string(string):
    """Return the stations whose name contains *string* (case-insensitive)."""
    needle = str(string).lower()
    stations = data.STATIONS
    # keep every station whose display name matches
    return {
        station_id: stations[station_id]
        for station_id in stations
        if needle in stations[station_id]["nom"].lower()
    }
def next_trains(station_id: str):
    """Return upcoming departures for a station as a list of dicts."""
    # validate parameters first
    check_params(station_id)

    # fetch the departures XML for this station
    departures = api.departures(station_id)
    if departures is None:
        return []

    trains = []
    for train in departures.iter("train"):
        term_id = train.find("term").text
        entry = {
            # date is formatted as "16/08/2020 20:07"; keep only "20:07"
            "time": train.find("date").text[-5:],
            "mission": train.find("miss").text,
            "terminus_id": term_id,
            "terminus": terminus_name(term_id),
        }
        trains.append(entry)
    return trains
def terminus_name(terminus_id: str) -> str:
    """Resolve a terminus id to its display name, '???' when unknown."""
    # Transilien stations take precedence over the national table.
    for table in (data.STATIONS, data.STATIONS_FRANCE):
        if terminus_id in table:
            return table[terminus_id]["nom"]
    return "???"
| 24.235294 | 74 | 0.595267 |
19901ce297d369045adc63a67a17fc1fce71744a | 6,494 | py | Python | homeassistant/components/wled/__init__.py | timmillwood/core | 7737387efe8f592a913fe8c39a6991f9266a0b78 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/wled/__init__.py | timmillwood/core | 7737387efe8f592a913fe8c39a6991f9266a0b78 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/wled/__init__.py | timmillwood/core | 7737387efe8f592a913fe8c39a6991f9266a0b78 | [
"Apache-2.0"
] | null | null | null | """Support for WLED."""
import asyncio
from datetime import timedelta
import logging
from typing import Any, Dict
from wled import WLED, Device as WLEDDevice, WLEDConnectionError, WLEDError
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_NAME, CONF_HOST
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_SOFTWARE_VERSION,
DOMAIN,
)
# How often the coordinator polls the WLED device.
SCAN_INTERVAL = timedelta(seconds=10)
# Platforms set up for each config entry.
WLED_COMPONENTS = (LIGHT_DOMAIN, SENSOR_DOMAIN, SWITCH_DOMAIN)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the WLED components.

    WLED is configured exclusively via config entries, so YAML setup has
    nothing to do.
    """
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up WLED from a config entry."""
    # Create WLED instance for this entry
    coordinator = WLEDDataUpdateCoordinator(hass, host=entry.data[CONF_HOST])
    await coordinator.async_refresh()
    if not coordinator.last_update_success:
        # Device unreachable right now; Home Assistant will retry setup.
        raise ConfigEntryNotReady

    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][entry.entry_id] = coordinator

    # For backwards compat, set unique ID
    if entry.unique_id is None:
        hass.config_entries.async_update_entry(
            entry, unique_id=coordinator.data.info.mac_address
        )

    # Set up all platforms for this device/entry.
    for component in WLED_COMPONENTS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )

    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload WLED config entry."""
    # Unload entities for this entry/device.
    await asyncio.gather(
        *(
            hass.config_entries.async_forward_entry_unload(entry, component)
            for component in WLED_COMPONENTS
        )
    )

    # Cleanup
    del hass.data[DOMAIN][entry.entry_id]
    if not hass.data[DOMAIN]:
        # Last entry removed: drop the domain bucket entirely.
        del hass.data[DOMAIN]

    return True
def wled_exception_handler(func):
    """Decorate WLED calls to handle WLED exceptions.

    A decorator that wraps the passed in function, catches WLED errors,
    and handles the availability of the device in the data coordinator.
    """
    # Local import so the module's import block stays untouched.
    from functools import wraps

    @wraps(func)  # preserve the wrapped coroutine's name and docstring
    async def handler(self, *args, **kwargs):
        try:
            await func(self, *args, **kwargs)
            # On success, refresh coordinator data so entities update.
            await self.coordinator.async_refresh()
        except WLEDConnectionError as error:
            _LOGGER.error("Error communicating with API: %s", error)
            # Mark the device unavailable and notify all listeners.
            self.coordinator.last_update_success = False
            self.coordinator.update_listeners()
        except WLEDError as error:
            _LOGGER.error("Invalid response from API: %s", error)

    return handler
class WLEDDataUpdateCoordinator(DataUpdateCoordinator):
    """Class to manage fetching WLED data from single endpoint."""

    def __init__(
        self, hass: HomeAssistant, *, host: str,
    ):
        """Initialize global WLED data updater.

        host: hostname/IP of the WLED device to poll.
        """
        # Reuse Home Assistant's shared aiohttp session for all requests.
        self.wled = WLED(host, session=async_get_clientsession(hass))

        super().__init__(
            hass, _LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL,
        )

    def update_listeners(self) -> None:
        """Call update on all listeners."""
        for update_callback in self._listeners:
            update_callback()

    async def _async_update_data(self) -> WLEDDevice:
        """Fetch data from WLED."""
        try:
            return await self.wled.update()
        except WLEDError as error:
            # Wrap in UpdateFailed so the coordinator marks us unavailable.
            raise UpdateFailed(f"Invalid response from API: {error}")
class WLEDEntity(Entity):
    """Defines a base WLED entity."""

    def __init__(
        self,
        *,
        entry_id: str,
        coordinator: WLEDDataUpdateCoordinator,
        name: str,
        icon: str,
        enabled_default: bool = True,
    ) -> None:
        """Initialize the WLED entity.

        entry_id: id of the config entry this entity belongs to.
        coordinator: shared data-update coordinator for the device.
        name / icon: static presentation attributes.
        enabled_default: whether the entity starts enabled in the registry.
        """
        self._enabled_default = enabled_default
        self._entry_id = entry_id
        self._icon = icon
        self._name = name
        # NOTE(review): never assigned elsewhere in this module - appears
        # unused; confirm before removing.
        self._unsub_dispatcher = None
        self.coordinator = coordinator

    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self._name

    @property
    def icon(self) -> str:
        """Return the mdi icon of the entity."""
        return self._icon

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        # Availability tracks the coordinator's last poll result.
        return self.coordinator.last_update_success

    @property
    def entity_registry_enabled_default(self) -> bool:
        """Return if the entity should be enabled when first added to the entity registry."""
        return self._enabled_default

    @property
    def should_poll(self) -> bool:
        """Return the polling requirement of the entity."""
        # The coordinator pushes updates; entities never poll themselves.
        return False

    async def async_added_to_hass(self) -> None:
        """Connect to dispatcher listening for entity data notifications."""
        self.coordinator.async_add_listener(self.async_write_ha_state)

    async def async_will_remove_from_hass(self) -> None:
        """Disconnect from update signal."""
        self.coordinator.async_remove_listener(self.async_write_ha_state)

    async def async_update(self) -> None:
        """Update WLED entity."""
        await self.coordinator.async_request_refresh()
class WLEDDeviceEntity(WLEDEntity):
    """Defines a WLED device entity."""

    @property
    def device_info(self) -> Dict[str, Any]:
        """Return device information about this WLED device."""
        # All entities of one WLED device share the MAC-based identifier,
        # so Home Assistant groups them under a single device entry.
        return {
            ATTR_IDENTIFIERS: {(DOMAIN, self.coordinator.data.info.mac_address)},
            ATTR_NAME: self.coordinator.data.info.name,
            ATTR_MANUFACTURER: self.coordinator.data.info.brand,
            ATTR_MODEL: self.coordinator.data.info.product,
            ATTR_SOFTWARE_VERSION: self.coordinator.data.info.version,
        }
10ad1a608f82daaf34cfd2e062e80a1ceae562cd | 8,329 | py | Python | iaso/management/commands/tree_importer.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | [
"MIT"
] | 29 | 2020-12-26T07:22:19.000Z | 2022-03-07T13:40:09.000Z | iaso/management/commands/tree_importer.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | [
"MIT"
] | 150 | 2020-11-09T15:03:27.000Z | 2022-03-07T15:36:07.000Z | iaso/management/commands/tree_importer.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | [
"MIT"
] | 4 | 2020-11-09T10:38:13.000Z | 2021-10-04T09:42:47.000Z | from django.core.management.base import BaseCommand
import csv
from iaso.models import OrgUnit, OrgUnitType, DataSource, SourceVersion, Project
from django.contrib.gis.geos import Point
from uuid import uuid4
from django.db import models, transaction
from unidecode import unidecode
import json
# Default mapping of logical fields to CSV column headers (French headers);
# "parents" lists the ancestor columns, deepest level first.
DEFAULT_DATA_DICT = {
    "name": "Nom",
    "source_ref": "Référence externe",
    "latitude": "Latitude",
    "longitude": "Longitude",
    "parents": ["parent 4", "parent 3", "parent 2", "parent 1"],
}
def get_or_create(
    unit_dict,
    name,
    org_unit_type,
    parent_id,
    version_id,
    longitude,
    latitude,
    source_ref,
    save=True,
):
    """Return an OrgUnit for (name, parent), reusing cache or DB entries.

    unit_dict is an in-memory cache keyed by name+parent_id. With
    save=True an existing DB row is reused or a new one is saved; with
    save=False the instance is only constructed (for later bulk_create).
    """
    id_string = "%s%s" % (name, parent_id)
    org_unit = unit_dict.get(id_string, None)
    if save and org_unit is None:
        # Not cached: look for an existing row in the database.
        org_units = OrgUnit.objects.filter(
            name=name,
            parent_id=parent_id,
            version_id=version_id,
            org_unit_type=org_unit_type,
        )
        # if org_units.count() > 1:
        #     print("POTENTIAL PROBLEM WITH DUPLICATE NAMES %s parent_id %s" % (name, parent_id))
        if org_units.count() > 0:
            org_unit = org_units.first()
    if org_unit is None:
        org_unit = OrgUnit()
        org_unit.org_unit_type = org_unit_type
        org_unit.name = name.strip()
        org_unit.version_id = version_id
        org_unit.source_ref = source_ref
        org_unit.validation_status = "VALID"
        org_unit.parent_id = parent_id
    if longitude and latitude:
        # CSV values arrive as strings; convert before building the point.
        longitude = float(longitude)
        latitude = float(latitude)
        org_unit.location = Point(x=longitude, y=latitude, z=0, srid=4326)
    if save:
        org_unit.save(skip_calculate_path=True)
        # print("save")
    unit_dict[id_string] = org_unit
    return org_unit
def get_or_create_org_unit_type(name, project):
    """Return the project's OrgUnitType with this name, creating it if needed."""
    existing = OrgUnitType.objects.filter(projects=project, name=name).first()
    if existing is not None:
        return existing
    # No type with that name linked to the project yet: create (or fetch)
    # it and attach it to the project.
    unit_type, _created = OrgUnitType.objects.get_or_create(
        name=name, short_name=name[:4]
    )
    unit_type.projects.add(project)
    return unit_type
class Command(BaseCommand):
    help = "Import a complete tree from a csv file"

    def add_arguments(self, parser):
        # CSV file to import plus the column mapping (see DEFAULT_DATA_DICT)
        # and the target data source / version / project.
        parser.add_argument("--org_unit_csv_file", type=str)
        parser.add_argument("--source_name", type=str)
        parser.add_argument("--data_dict", type=str, required=False)
        parser.add_argument("--version_number", type=int)
        parser.add_argument("--project_id", type=int)
        parser.add_argument("--main_org_unit_name", type=str)
        parser.add_argument("--validation_status", type=str)
        # parser.add_argument("project", type=str)

    def handle(self, *args, **options):
        # The whole import runs atomically: either the full tree is
        # created or nothing is.
        with transaction.atomic():
            file_name = options.get("org_unit_csv_file")
            data_dict_name = options.get("data_dict")
            source_name = options.get("source_name")
            version = options.get("version_number")
            project_id = options.get("project_id")
            main_org_unit_name = options.get("main_org_unit_name")
            source, created = DataSource.objects.get_or_create(name=source_name)
            source.projects.add(project_id)
            version, created = SourceVersion.objects.get_or_create(number=version, data_source=source)
            org_unit_dicts = {}
            previous_outs = []
            project = Project.objects.get(id=project_id)
            main_out = get_or_create_org_unit_type(name=main_org_unit_name, project=project)
            print("Creating Org Unit Types")
            data_dict = json.loads(open(data_dict_name, "r").read())
            for parent in data_dict["parents"]:
                out = get_or_create_org_unit_type(name=parent, project=project)
                out.projects.add(project)
                if previous_outs:
                    # Every ancestor type may contain this type and the
                    # leaf (main) type as sub-units.
                    for p in previous_outs:
                        p.sub_unit_types.add(out)
                        p.sub_unit_types.add(main_out)
                previous_outs.append(out)
                d = {"type": out, "units": {}}
                org_unit_dicts[parent] = d
            main_out.projects.add(project)
            leaf_units = []
            parent_units = []
            top_org_units = set([])
            print("Inserting all units")
            index = 0
            with open(file_name, encoding="utf-8-sig") as csvfile:
                csv_reader = csv.reader(csvfile, delimiter=";")
                index = 1
                for row in csv_reader:
                    if index % 1000 == 0:
                        print("index", index)
                    if index == 1:
                        # First row is the header: map column name -> position.
                        headers = row
                        col_indices = {headers[i].strip(): i for i in range(len(headers))}
                        print("col_indices", col_indices)
                    else:
                        try:
                            previous_parent = None
                            # Walk the ancestor columns, creating/reusing each
                            # level; empty / "neant" cells are skipped.
                            for parent in data_dict["parents"]:
                                type = org_unit_dicts[parent]["type"]
                                name = row[col_indices[parent]]
                                simplified_name = unidecode(name).lower().replace("neant", "").strip()
                                if simplified_name:
                                    top = False
                                    if not previous_parent:
                                        top = True
                                    previous_parent = get_or_create(
                                        org_unit_dicts[parent]["units"],
                                        name,
                                        type,
                                        previous_parent.id if previous_parent else None,
                                        version.id,
                                        None,
                                        None,
                                        None,
                                    )
                                    parent_units.append(previous_parent)
                                    if top:
                                        top_org_units.add(previous_parent)
                            name = row[col_indices[data_dict["name"]]]
                            source_ref = row[col_indices[data_dict["source_ref"]]]
                            latitude = row[col_indices[data_dict["latitude"]]]
                            longitude = row[col_indices[data_dict["longitude"]]]
                            # print("previous_parent", previous_parent)
                            # Leaf units are only built here (save=False) and
                            # bulk-created below.
                            unit = get_or_create(
                                {},
                                name,
                                main_out,
                                previous_parent.id,
                                version.id,
                                longitude,
                                latitude,
                                source_ref,
                                save=False,
                            )
                            leaf_units.append(unit)
                            # NOTE(review): index is incremented again below,
                            # so it advances by 2 per data row - row numbers in
                            # error messages are off; confirm intended.
                            index += 1
                        except Exception as e:
                            print("Error %s for row %d" % (e, index), row)
                            break
                    index = index + 1
            print("bulk_creating leafs")
            OrgUnit.objects.bulk_create(leaf_units)
            print("computing paths for parents")
            top_parents = OrgUnit.objects.filter(id__in=[u.id for u in parent_units]).filter(parent=None)
            for ou in top_parents:
                print("computing for", ou)
                # force_recalculate cascades path computation to descendants.
                ou.save(force_recalculate=True)
            # print("bulk updating parents")
            # OrgUnit.objects.bulk_update(top_parents, ['path'])
            print("computing paths for children")
            ou_with_parents = OrgUnit.objects.filter(id__in=[u.id for u in leaf_units]).select_related("parent")
            index = 0
            for ou in ou_with_parents:
                if index % 1000 == 0:
                    print("index", index)
                ou.calculate_paths()
                index += 1
            print("bulk updating children")
            OrgUnit.objects.bulk_update(ou_with_parents, ["path"])
| 39.473934 | 112 | 0.509425 |
963186328c814088eb97a706d1fb4565adc03501 | 1,370 | py | Python | src/molecule/status.py | prity-k/molecule | 4fe7e046b8d24dda91c783e7477331ea66b7dbc1 | [
"MIT"
] | 1,599 | 2015-11-18T01:40:26.000Z | 2018-10-29T16:42:52.000Z | src/molecule/status.py | prity-k/molecule | 4fe7e046b8d24dda91c783e7477331ea66b7dbc1 | [
"MIT"
] | 1,232 | 2015-11-18T16:56:02.000Z | 2018-10-27T03:51:50.000Z | src/molecule/status.py | prity-k/molecule | 4fe7e046b8d24dda91c783e7477331ea66b7dbc1 | [
"MIT"
] | 290 | 2015-11-19T18:16:41.000Z | 2018-10-29T18:09:13.000Z | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Status Module."""
from typing import NamedTuple
class Status(NamedTuple):
    """Scenario status information."""

    # Name of the instance (platform) within the scenario.
    instance_name: str
    # Driver managing the instance.
    driver_name: str
    # Provisioner responsible for converging the instance.
    provisioner_name: str
    # Scenario the instance belongs to.
    scenario_name: str
    # Whether the instance has been created.
    created: bool
    # Whether the instance has been converged.
    converged: bool
| 40.294118 | 79 | 0.753285 |
77711f14c4c39b1345e46a393f5f0ac1712717ca | 3,850 | py | Python | database_helper.py | Bluewolf787/Terminal-Login | bbbdabd7714e70cab99d8c93cdda5ca867823e49 | [
"MIT"
] | null | null | null | database_helper.py | Bluewolf787/Terminal-Login | bbbdabd7714e70cab99d8c93cdda5ca867823e49 | [
"MIT"
] | null | null | null | database_helper.py | Bluewolf787/Terminal-Login | bbbdabd7714e70cab99d8c93cdda5ca867823e49 | [
"MIT"
] | null | null | null | import sqlite3
# This function is used to connect to the DB
def connect():
    """Open and return a connection to the SQLite DB ('test.db').

    Returns None (after printing the error) if the connection fails.
    """
    try:
        return sqlite3.connect('test.db')  # Return the connection
    except sqlite3.Error as error:
        # sqlite3.Error is the base of all sqlite exceptions; the old
        # (Exception, sqlite3.ProgrammingError) tuple was redundant.
        print(error)
# This function is used to create a users table in the DB
def create_users_table():
    """Ensure the 'users' table exists in the DB."""
    try:
        db = connect()  # Open the DB connection
        cur = db.cursor()  # SQLite cursor for executing statements
        # id auto-increments; usernames must be unique.
        cur.execute('''CREATE TABLE IF NOT EXISTS users (
                        id INTEGER NOT NULL UNIQUE,
                        username TEXT NOT NULL UNIQUE,
                        key BLOB NOT NULL,
                        salt BLOB NOT NULL,
                        PRIMARY KEY(id AUTOINCREMENT))''')
        db.commit()  # Persist the change
        db.close()  # Release the connection
    except (Exception, sqlite3.ProgrammingError) as error:
        print('SQL Error: %s' % error)
# This function is used to store data in the users table
def save_user(username, key, salt):
    """Insert a new user row (username plus password-hash key and salt)."""
    try:
        db = connect()
        cur = db.cursor()
        cur.execute(
            'INSERT INTO users (username, key, salt) VALUES (?, ?, ?)',
            (username, key, salt,),
        )
        db.commit()
        db.close()
    except (Exception, sqlite3.ProgrammingError) as error:
        print('SQL Error: %s' % error)
# This function is used to check if a specific username is in the table users
def check_username(username):
    """Return True when the username exists in the users table."""
    try:
        db = connect()
        cur = db.cursor()
        cur.execute('SELECT id FROM users WHERE username = ?', (username,))
        row = cur.fetchone()  # None when no matching row
        db.close()
        return row is not None
    except (Exception, sqlite3.ProgrammingError) as error:
        print('SQL Error: %s' % error)
# This function is used to get the key for a specific user
def get_key(username):
    """Return the stored password-hash key for the given username."""
    try:
        db = connect()
        cur = db.cursor()
        cur.execute('SELECT key FROM users WHERE username = ?', (username,))
        row = cur.fetchone()
        db.close()
        # row is a 1-tuple; a missing user raises and is reported below.
        return row[0]
    except (Exception, sqlite3.ProgrammingError) as error:
        print('SQL Error: %s' % error)
# This function is used to get the salt for a specific user
def get_salt(username):
    """Return the stored password salt for the given username."""
    try:
        db = connect()
        cur = db.cursor()
        cur.execute('SELECT salt FROM users WHERE username = ?', (username,))
        row = cur.fetchone()
        db.close()
        # row is a 1-tuple; a missing user raises and is reported below.
        return row[0]
    except (Exception, sqlite3.ProgrammingError) as error:
        print('SQL Error: %s' % error)
78c5b7831b043d1cffd421202252cb6ca0acc2ad | 1,091 | py | Python | jirani/migrations/0010_auto_20181018_1212.py | samsoluoch/Neighborhood | 8a05b3882290765e06c72c1609767ec36056ba24 | [
"MIT"
] | null | null | null | jirani/migrations/0010_auto_20181018_1212.py | samsoluoch/Neighborhood | 8a05b3882290765e06c72c1609767ec36056ba24 | [
"MIT"
] | null | null | null | jirani/migrations/0010_auto_20181018_1212.py | samsoluoch/Neighborhood | 8a05b3882290765e06c72c1609767ec36056ba24 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-18 09:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jirani', '0009_auto_20181018_0906'),
]
operations = [
migrations.RenameModel(
old_name='Location',
new_name='Neighborhood',
),
migrations.AlterModelOptions(
name='profile',
options={'ordering': ['email']},
),
migrations.RenameField(
model_name='profile',
old_name='location',
new_name='neighborhood',
),
migrations.RemoveField(
model_name='profile',
name='bio',
),
migrations.AddField(
model_name='profile',
name='email',
field=models.CharField(max_length=30, null=True),
),
migrations.AlterField(
model_name='business',
name='phone_number',
field=models.IntegerField(),
),
]
| 25.372093 | 61 | 0.543538 |
d70f28ae827fcc4ac75638c153aa7765f4116496 | 19,652 | py | Python | trainval_net_ori.py | yangdb/RD-IOD | 64beb2e1efe823185adc0feb338a900f1a7df7a7 | [
"AFL-1.1"
] | 1 | 2022-03-11T03:08:04.000Z | 2022-03-11T03:08:04.000Z | trainval_net_ori.py | yangdb/RD-IOD | 64beb2e1efe823185adc0feb338a900f1a7df7a7 | [
"AFL-1.1"
] | 3 | 2022-03-07T03:04:34.000Z | 2022-03-25T12:28:09.000Z | trainval_net_ori.py | yangdb/RD-IOD | 64beb2e1efe823185adc0feb338a900f1a7df7a7 | [
"AFL-1.1"
] | null | null | null | # --------------------------------------------------------
# Pytorch multi-GPU Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import argparse
import pprint
import pdb
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.utils.data.sampler import Sampler
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list
from model.utils.net_utils import adjust_learning_rate, save_checkpoint, clip_gradient
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
def _str2bool(value):
  """argparse ``type=`` converter that parses boolean strings correctly.

  The original code used ``type=bool``, a classic argparse pitfall:
  ``bool('False')`` is ``True``, so ``--r False`` still enabled resuming.
  This converter accepts the usual true/false spellings and raises a clear
  error for anything else, while remaining backward compatible with
  ``--r True`` / ``--r False`` invocations.
  """
  if isinstance(value, bool):
    return value
  lowered = value.lower()
  if lowered in ('yes', 'true', 't', 'y', '1'):
    return True
  if lowered in ('no', 'false', 'f', 'n', '0'):
    return False
  raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


def parse_args():
  """
  Parse input arguments.

  Returns:
    argparse.Namespace holding dataset/network selection, optimization
    hyper-parameters, checkpointing options and hardware flags.
  """
  parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
  parser.add_argument('--dataset', dest='dataset',
                      help='training dataset',
                      default='pascal_voc', type=str)
  parser.add_argument('--net', dest='net',
                      help='vgg16, res101',
                      default='vgg16', type=str)
  parser.add_argument('--start_epoch', dest='start_epoch',
                      help='starting epoch',
                      default=1, type=int)
  parser.add_argument('--epochs', dest='max_epochs',
                      help='number of epochs to train',
                      default=20, type=int)
  parser.add_argument('--disp_interval', dest='disp_interval',
                      help='number of iterations to display',
                      default=100, type=int)
  parser.add_argument('--checkpoint_interval', dest='checkpoint_interval',
                      help='number of iterations between checkpoints',
                      default=10000, type=int)

  parser.add_argument('--load_dir', dest='load_dir',
                      help='directory to load model to finetune', default="",
                      type=str)
  parser.add_argument('--save_dir', dest='save_dir',
                      help='directory to save models', default="models_ori",
                      type=str)
  parser.add_argument('--nw', dest='num_workers',
                      help='number of worker to load data',
                      default=0, type=int)
  parser.add_argument('--cuda', dest='cuda',
                      help='whether use CUDA',
                      action='store_true')
  parser.add_argument('--ls', dest='large_scale',
                      help='whether use large imag scale',
                      action='store_true')
  parser.add_argument('--mGPUs', dest='mGPUs',
                      help='whether use multiple GPUs',
                      action='store_true')
  parser.add_argument('--bs', dest='batch_size',
                      help='batch_size',
                      default=1, type=int)
  parser.add_argument('--cag', dest='class_agnostic',
                      help='whether perform class_agnostic bbox regression',
                      action='store_true')

  # config optimization
  parser.add_argument('--o', dest='optimizer',
                      help='training optimizer',
                      default="sgd", type=str)
  parser.add_argument('--lr', dest='lr',
                      help='starting learning rate',
                      default=0.001, type=float)
  parser.add_argument('--lr_decay_step', dest='lr_decay_step',
                      help='step to do learning rate decay, unit is epoch',
                      default=5, type=int)
  parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',
                      help='learning rate decay ratio',
                      default=0.1, type=float)

  # set training session
  parser.add_argument('--s', dest='session',
                      help='training session',
                      default=1, type=int)

  # resume trained model.  NOTE: type=_str2bool (not type=bool) so that
  # "--r False" is actually interpreted as False.
  parser.add_argument('--r', dest='resume',
                      help='resume checkpoint or not',
                      default=False, type=_str2bool)
  parser.add_argument('--checksession', dest='checksession',
                      help='checksession to load model',
                      default=1, type=int)
  parser.add_argument('--checkepoch', dest='checkepoch',
                      help='checkepoch to load model',
                      default=18, type=int)
  parser.add_argument('--checkpoint', dest='checkpoint',
                      help='checkpoint to load model',
                      default=33101, type=int)
  # log and diaplay
  parser.add_argument('--use_tfb', dest='use_tfboard',
                      help='whether use tensorboard',
                      action='store_true')

  args = parser.parse_args()
  return args
class sampler(Sampler):
    """Batch-aligned random sampler.

    Yields indices such that every consecutive run of ``batch_size`` indices
    is one contiguous batch of the dataset, with the batches visited in a
    random order; any leftover samples that do not fill a whole batch are
    appended at the end in their natural order.
    """

    def __init__(self, train_size, batch_size):
        self.num_data = train_size
        self.num_per_batch = train_size // batch_size
        self.batch_size = batch_size
        # Row vector [0, 1, ..., batch_size-1], added to each batch start.
        self.range = torch.arange(0, batch_size).view(1, batch_size).long()
        self.leftover_flag = bool(train_size % batch_size)
        if self.leftover_flag:
            # Indices of the trailing partial batch.
            self.leftover = torch.arange(self.num_per_batch * batch_size, train_size).long()

    def __iter__(self):
        # Random permutation of batch start offsets, one per full batch.
        batch_starts = torch.randperm(self.num_per_batch).view(-1, 1) * self.batch_size
        self.rand_num = batch_starts.expand(self.num_per_batch, self.batch_size) + self.range
        self.rand_num_view = self.rand_num.view(-1)
        if self.leftover_flag:
            self.rand_num_view = torch.cat((self.rand_num_view, self.leftover), 0)
        return iter(self.rand_num_view)

    def __len__(self):
        return self.num_data
if __name__ == '__main__':

  args = parse_args()

  print('Called with args:')
  print(args)

  # ------------------------------------------------------------------
  # Dataset selection: map the --dataset flag to imdb names and anchor
  # config overrides.  The *_incre variants additionally record the
  # original (pre-incremental) imdb names in args.imdb_name_org.
  # ------------------------------------------------------------------
  if args.dataset == "pascal_voc":
      args.imdb_name = "voc_2007_trainval"
      args.imdbval_name = "voc_2007_test"
      args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
  elif args.dataset == "pascal_voc_0712":
      args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
      args.imdbval_name = "voc_2007_test"
      args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
  elif args.dataset == "pascal_voc_0712_all":
      args.imdb_name = "voc_2007_trainval_all+voc_2012_trainval_all"
      args.imdbval_name = "voc_2007_test"
      args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
  elif args.dataset == "pascal_voc_07_all":
      args.imdb_name = "voc_2007_trainval_all"
      args.imdbval_name = "voc_2007_test"
      args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
  elif args.dataset == "pascal_voc_0712_incre":
      args.imdb_name = "voc_2007_trainval_incre+voc_2012_trainval_incre"
      args.imdbval_name = "voc_2007_test_incre"
      args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
      args.imdb_name_org = 'voc_2007_trainval+voc_2012_trainval'
      args.imdbval_name_org = "voc_2007_test"
  elif args.dataset == "pascal_voc_07_incre":
      args.imdb_name = "voc_2007_trainval_incre"
      args.imdbval_name = "voc_2007_test_incre"
      args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
      args.imdb_name_org = 'voc_2007_trainval'
      args.imdbval_name_org = "voc_2007_test"
  #elif args.dataset == "pascal_voc_07_15":
  #  args.imdb_name = "voc_07_15_train"
  #  args.imdbval_name = "voc_07_15_test"
  #  args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
  elif args.dataset == "pascal_voc_07_15":
      args.imdb_name = "voc_2007_5_incre"
      args.imdbval_name = "voc_2007_15_test"
      args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
      args.imdb_name_org = 'voc_2007_15_train'
      args.imdbval_name_org = "voc_2007_15_test"
  elif args.dataset == "pascal_voc_07_15_plant":
      args.imdb_name = "voc_2007_15_plant"
      args.imdbval_name = "voc_2007_test"
      args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
      args.imdb_name_org = 'voc_2007_15_train'
      args.imdbval_name_org = "voc_2007_test"
  elif args.dataset == "pascal_voc_07_10":
      args.imdb_name = "voc_2007_10_incre"
      args.imdbval_name = "voc_2007_test"
      args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
      args.imdb_name_org = 'voc_2007_10_train'
      args.imdbval_name_org = "voc_2007_test"
  elif args.dataset == "coco":
      args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
      args.imdbval_name = "coco_2014_minival"
      args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
  elif args.dataset == "imagenet":
      args.imdb_name = "imagenet_train"
      args.imdbval_name = "imagenet_val"
      args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '30']
  elif args.dataset == "vg":
      # train sizes: train, smalltrain, minitrain
      # train scale: ['150-50-20', '150-50-50', '500-150-80', '750-250-150', '1750-700-450', '1600-400-20']
      args.imdb_name = "vg_150-50-50_minitrain"
      args.imdbval_name = "vg_150-50-50_minival"
      args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']

  args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)

  # Merge the per-network YAML config, then the per-dataset overrides above.
  if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)
  if args.set_cfgs is not None:
    cfg_from_list(args.set_cfgs)

  print('Using config:')
  pprint.pprint(cfg)
  np.random.seed(cfg.RNG_SEED)

  #torch.backends.cudnn.benchmark = True
  if torch.cuda.is_available() and not args.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")

  # train set
  # -- Note: Use validation set and disable the flipped to enable faster loading.
  cfg.TRAIN.USE_FLIPPED = True
  cfg.USE_GPU_NMS = args.cuda
  imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdb_name)
  train_size = len(roidb)

  print('{:d} roidb entries'.format(len(roidb)))

  # Checkpoints are written to <save_dir>/<net>/<dataset>/.
  if not os.path.exists(args.save_dir):
    os.mkdir(args.save_dir)
  output_dir = args.save_dir + "/" + args.net + "/" + args.dataset
  if not os.path.exists(output_dir):
    os.makedirs(output_dir)

  sampler_batch = sampler(train_size, args.batch_size)

  dataset = roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
                           imdb.num_classes, training=True)

  dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                            sampler=sampler_batch, num_workers=args.num_workers)

  # initilize the tensor holder here.
  # These four tensors are reused every iteration (resize_ + copy_) to avoid
  # reallocating input buffers.
  im_data = torch.FloatTensor(1)
  im_info = torch.FloatTensor(1)
  num_boxes = torch.LongTensor(1)
  gt_boxes = torch.FloatTensor(1)

  # ship to cuda
  if args.cuda:
    im_data = im_data.cuda()
    im_info = im_info.cuda()
    num_boxes = num_boxes.cuda()
    gt_boxes = gt_boxes.cuda()

  # make variable
  im_data = Variable(im_data)
  im_info = Variable(im_info)
  num_boxes = Variable(num_boxes)
  gt_boxes = Variable(gt_boxes)

  if args.cuda:
    cfg.CUDA = True

  # initilize the network here.
  if args.net == 'vgg16':
    fasterRCNN = vgg16(imdb.classes, pretrained=True, class_agnostic=args.class_agnostic)
  elif args.net == 'res101':
    fasterRCNN = resnet(imdb.classes, 101, pretrained=True, class_agnostic=args.class_agnostic)
  elif args.net == 'res50':
    fasterRCNN = resnet(imdb.classes, 50, pretrained=True, class_agnostic=args.class_agnostic)
  elif args.net == 'res152':
    fasterRCNN = resnet(imdb.classes, 152, pretrained=True, class_agnostic=args.class_agnostic)
  else:
    print("network is not defined")
    pdb.set_trace()

  fasterRCNN.create_architecture()

  # NOTE: the cfg learning rate is immediately overwritten by the CLI value.
  lr = cfg.TRAIN.LEARNING_RATE
  lr = args.lr
  #tr_momentum = cfg.TRAIN.MOMENTUM
  #tr_momentum = args.momentum

  # Per-parameter options: biases optionally get a doubled LR and (by
  # default) no weight decay, following the original Faster R-CNN recipe.
  params = []
  for key, value in dict(fasterRCNN.named_parameters()).items():
    if value.requires_grad:
      if 'bias' in key:
        params += [{'params':[value],'lr':lr*(cfg.TRAIN.DOUBLE_BIAS + 1), \
                'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
      else:
        params += [{'params':[value],'lr':lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]

  if args.cuda:
    fasterRCNN.cuda()

  if args.optimizer == "adam":
    lr = lr * 0.1
    optimizer = torch.optim.Adam(params)

  elif args.optimizer == "sgd":
    optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)

  # Resume training from a checkpoint written by this script.
  if args.resume:
    load_name = os.path.join(output_dir,
      'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
    print("loading checkpoint %s" % (load_name))
    checkpoint = torch.load(load_name)
    args.session = checkpoint['session']
    args.start_epoch = checkpoint['epoch']
    fasterRCNN.load_state_dict(checkpoint['model'])
    #optimizer.load_state_dict(checkpoint['optimizer'])
    #lr = optimizer.param_groups[0]['lr']
    if 'pooling_mode' in checkpoint.keys():
      cfg.POOLING_MODE = checkpoint['pooling_mode']
    print("loaded checkpoint %s" % (load_name))

  # Fine-tuning path for incremental detection: load an old-class model and
  # splice its classifier/regressor weights in front of the freshly
  # initialized new-class rows.
  if args.load_dir!='':
    load_dir = args.load_dir #'models/vgg16/pascal_voc_0712'
    load_name_base = load_dir#os.path.join(load_dir,
                             #'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
    print("load checkpoint %s" % (load_name_base))
    if args.cuda > 0:
      checkpoint_org = torch.load(load_name_base)
    else:
      checkpoint_org = torch.load(load_name_base, map_location=(lambda storage, loc: storage))
    frcnn_model_dict = fasterRCNN.state_dict()
    # The second assignment intentionally supersedes the first: final-layer
    # (RCNN_bbox*/RCNN_cls*) weights are excluded from the bulk copy and
    # spliced explicitly below.
    pretrained_dict = {k: v for k, v in checkpoint_org['model'].items()}# if 'cls' not in k and 'bbox' not in k}
    pretrained_dict = {k: v for k, v in checkpoint_org['model'].items() if 'RCNN_bbox' not in k and 'RCNN_cls' not in k} ################## split old and new cls
    frcnn_model_dict.update(pretrained_dict)
    fasterRCNN.load_state_dict(frcnn_model_dict)

    ################################ init old class weights #################################################
    # NOTE: re-imports cfg (already imported at module top) to read
    # cfg.NEW_CLASSES after the YAML merge.
    from model.utils.config import cfg
    n_new_classes=cfg.NEW_CLASSES
    # Old-class rows come from the checkpoint; the trailing rows for the
    # n_new_classes new categories keep their fresh initialization
    # (4 bbox-regression rows per class, 1 classifier row per class).
    new_parameters_boxw = torch.cat((checkpoint_org['model']['RCNN_bbox_pred.weight'],
                                     fasterRCNN.state_dict()['RCNN_bbox_pred.weight'][-n_new_classes*4:]),dim=0)
    new_parameters_boxb = torch.cat((checkpoint_org['model']['RCNN_bbox_pred.bias'],
                                     fasterRCNN.state_dict()['RCNN_bbox_pred.bias'][-n_new_classes*4:]),dim=0)
    new_parameters_clsw = torch.cat((checkpoint_org['model']['RCNN_cls_score.weight'],
                                     fasterRCNN.state_dict()['RCNN_cls_score.weight'][-n_new_classes:]),
                                    dim=0)
    new_parameters_clsb = torch.cat((checkpoint_org['model']['RCNN_cls_score.bias'],
                                     fasterRCNN.state_dict()['RCNN_cls_score.bias'][-n_new_classes:]), dim=0)
    pretrained_dict['RCNN_bbox_pred.weight']=new_parameters_boxw
    pretrained_dict['RCNN_bbox_pred.bias'] = new_parameters_boxb
    pretrained_dict['RCNN_cls_score.weight'] = new_parameters_clsw
    pretrained_dict['RCNN_cls_score.bias'] = new_parameters_clsb
    frcnn_model_dict.update(pretrained_dict)
    fasterRCNN.load_state_dict(frcnn_model_dict)
    #########################################################################################################
  '''
  for k, v in fasterRCNN.named_parameters():
      if 'base' in k:
          v.requires_grad = False  # freeze backbone parameters
  '''
  if args.mGPUs:
    fasterRCNN = nn.DataParallel(fasterRCNN)

  iters_per_epoch = int(train_size / args.batch_size)

  if args.use_tfboard:
    from tensorboardX import SummaryWriter
    logger = SummaryWriter("logs")

  for epoch in range(args.start_epoch, args.max_epochs + 1):
    # setting to train mode
    fasterRCNN.train()
    loss_temp = 0
    start = time.time()

    # Decay the learning rate every (lr_decay_step + 1) epochs.
    if epoch % (args.lr_decay_step + 1) == 0:
        adjust_learning_rate(optimizer, args.lr_decay_gamma)
        lr *= args.lr_decay_gamma

    data_iter = iter(dataloader)
    for step in range(iters_per_epoch):
      data = next(data_iter)
      with torch.no_grad():
              im_data.resize_(data[0].size()).copy_(data[0])
              im_info.resize_(data[1].size()).copy_(data[1])
              gt_boxes.resize_(data[2].size()).copy_(data[2])
              num_boxes.resize_(data[3].size()).copy_(data[3])

      fasterRCNN.zero_grad()
      rois, cls_prob, bbox_pred, \
      rpn_loss_cls, rpn_loss_box, \
      RCNN_loss_cls, RCNN_loss_bbox, \
      rois_label,_ = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)

      # Total loss = RPN objectness + RPN box regression
      #            + RCNN classification + RCNN box regression.
      loss = rpn_loss_cls.mean() + rpn_loss_box.mean() \
           + RCNN_loss_cls.mean() + RCNN_loss_bbox.mean()
      loss_temp += loss.item()

      # backward
      optimizer.zero_grad()
      loss.backward()
      if args.net == "vgg16":
          clip_gradient(fasterRCNN, 10.)
      optimizer.step()

      # Periodic console / tensorboard reporting of averaged losses.
      if step % args.disp_interval == 0:
        end = time.time()
        if step > 0:
          loss_temp /= (args.disp_interval + 1)

        if args.mGPUs:
          loss_rpn_cls = rpn_loss_cls.mean().item()
          loss_rpn_box = rpn_loss_box.mean().item()
          loss_rcnn_cls = RCNN_loss_cls.mean().item()
          loss_rcnn_box = RCNN_loss_bbox.mean().item()
          fg_cnt = torch.sum(rois_label.data.ne(0))
          bg_cnt = rois_label.data.numel() - fg_cnt
        else:
          loss_rpn_cls = rpn_loss_cls.item()
          loss_rpn_box = rpn_loss_box.item()
          loss_rcnn_cls = RCNN_loss_cls.item()
          loss_rcnn_box = RCNN_loss_bbox.item()
          fg_cnt = torch.sum(rois_label.data.ne(0))
          bg_cnt = rois_label.data.numel() - fg_cnt

        print("[session %d][epoch %2d][iter %4d/%4d] loss: %.4f, lr: %.2e" \
                                % (args.session, epoch, step, iters_per_epoch, loss_temp, lr))
        print("\t\t\tfg/bg=(%d/%d), time cost: %f" % (fg_cnt, bg_cnt, end-start))
        print("\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f" \
                      % (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box))
        if args.use_tfboard:
          info = {
            'loss': loss_temp,
            'loss_rpn_cls': loss_rpn_cls,
            'loss_rpn_box': loss_rpn_box,
            'loss_rcnn_cls': loss_rcnn_cls,
            'loss_rcnn_box': loss_rcnn_box
          }
          logger.add_scalars("logs_s_{}/losses".format(args.session), info, (epoch - 1) * iters_per_epoch + step)

        loss_temp = 0
        start = time.time()

    # One checkpoint per epoch (step here is the last iteration index).
    save_name = os.path.join(output_dir, 'faster_rcnn_{}_{}_{}.pth'.format(args.session, epoch, step))
    save_checkpoint({
      'session': args.session,
      'epoch': epoch + 1,
      'model': fasterRCNN.module.state_dict() if args.mGPUs else fasterRCNN.state_dict(),
      'optimizer': optimizer.state_dict(),
      'pooling_mode': cfg.POOLING_MODE,
      'class_agnostic': args.class_agnostic,
    }, save_name)
    print('save model: {}'.format(save_name))

  if args.use_tfboard:
    logger.close()
| 41.812766 | 162 | 0.631539 |
3c9238ce7c081f48a1efefb84c3e4c3e6b9b232b | 1,781 | py | Python | htdocs/views.py | aodarc/bysines_blog | c860af7a9625f454964baf8eae4b5f4a4586cf76 | [
"Apache-2.0"
] | 1 | 2016-09-16T23:38:02.000Z | 2016-09-16T23:38:02.000Z | htdocs/views.py | aodarc/bysines_blog | c860af7a9625f454964baf8eae4b5f4a4586cf76 | [
"Apache-2.0"
] | null | null | null | htdocs/views.py | aodarc/bysines_blog | c860af7a9625f454964baf8eae4b5f4a4586cf76 | [
"Apache-2.0"
] | null | null | null | from django import http
from django.conf import settings
from django.core.urlresolvers import translate_url
from django.utils.http import is_safe_url
from django.utils.translation import (
LANGUAGE_SESSION_KEY, check_for_language,
)
# Query-string parameter carrying the desired language code.
LANGUAGE_QUERY_PARAMETER = 'language'
def set_language(request):
    """
    Redirect to a given url while setting the chosen language in the
    session or cookie. The url and the language code need to be
    specified in the request parameters.

    Since this view changes how the user will see the rest of the site, it must
    only be accessed as a POST request. If called as a GET request, it will
    redirect to the page in the request (the 'next' parameter) without changing
    any state.
    """
    # NOTE(review): the docstring above (inherited from Django's set_language)
    # describes POST-only semantics, but this variant applies the language
    # change on GET — confirm that is intended.
    next = request.GET.get('next', '/')
    # Open-redirect protection: only follow same-host targets; fall back to
    # the referrer, then to the site root. Exact check order matters.
    if not is_safe_url(url=next, host=request.get_host()):
        next = request.META.get('HTTP_REFERER')
        if not is_safe_url(url=next, host=request.get_host()):
            next = '/'
    response = http.HttpResponseRedirect(next)
    if request.method == 'GET':
        lang_code = request.GET.get(LANGUAGE_QUERY_PARAMETER)
        if lang_code and check_for_language(lang_code):
            # Swap the language prefix in the redirect target when the URL
            # translates to a different path under the chosen language.
            next_trans = translate_url(next, lang_code)
            if next_trans != next:
                response = http.HttpResponseRedirect(next_trans)
            if hasattr(request, 'session'):
                request.session[LANGUAGE_SESSION_KEY] = lang_code
            else:
                # No session backend available: persist the choice in a cookie.
                response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,
                                    max_age=settings.LANGUAGE_COOKIE_AGE,
                                    path=settings.LANGUAGE_COOKIE_PATH,
                                    domain=settings.LANGUAGE_COOKIE_DOMAIN)
    return response
| 41.418605 | 79 | 0.663111 |
bbfcfd4d55373439b19ca12bc74f00deb402c8ad | 229 | py | Python | screen.py | Zmotive/xArm_Control | cf0c03fe320772ba8118d29b56c891e63ecca9c3 | [
"MIT"
] | 3 | 2021-01-05T21:57:48.000Z | 2021-03-12T18:15:53.000Z | screen.py | Zmotive/xArm_Control | cf0c03fe320772ba8118d29b56c891e63ecca9c3 | [
"MIT"
] | null | null | null | screen.py | Zmotive/xArm_Control | cf0c03fe320772ba8118d29b56c891e63ecca9c3 | [
"MIT"
] | 1 | 2021-07-29T12:53:29.000Z | 2021-07-29T12:53:29.000Z | from os import system, name
# define our clear function
def clear():
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
| 17.615385 | 53 | 0.554585 |
d88e2162f150b2a0e26435c730962a68ce3d3693 | 6,324 | py | Python | src/indra_cogex/sources/cbioportal/__init__.py | STIRLIN6/indra_cogex | 552cefd71431b08b8118b2cc0428fd8681e6fc83 | [
"BSD-2-Clause"
] | null | null | null | src/indra_cogex/sources/cbioportal/__init__.py | STIRLIN6/indra_cogex | 552cefd71431b08b8118b2cc0428fd8681e6fc83 | [
"BSD-2-Clause"
] | null | null | null | src/indra_cogex/sources/cbioportal/__init__.py | STIRLIN6/indra_cogex | 552cefd71431b08b8118b2cc0428fd8681e6fc83 | [
"BSD-2-Clause"
] | null | null | null | import re
import logging
import pandas as pd
from collections import defaultdict
from pathlib import Path
from typing import Union
import gilda
import pystow
from indra.databases import hgnc_client
from indra_cogex.representation import Node, Relation
from indra_cogex.sources.processor import Processor
logger = logging.getLogger(__name__)
class CcleMutationsProcessor(Processor):
    """Processor for CCLE mutation calls exported from cBioPortal."""

    name = "ccle_mutations"

    def __init__(
        self,
        path: Union[str, Path, None] = None,
    ):
        # Default to the pystow-managed download location when no path given.
        default_path = pystow.join(
            "indra",
            "cogex",
            "cbioportal",
            "ccle_broad_2019",
            name="data_mutations_extended.txt",
        )
        if not path:
            path = default_path
        elif isinstance(path, str):
            path = Path(path)
        self.df = pd.read_csv(path, sep="\t", comment="#")

    def get_nodes(self):
        """Yield one HGNC node per mappable gene and one node per cell line."""
        for symbol in sorted(set(self.df["Hugo_Symbol"])):
            hgnc_id = hgnc_client.get_hgnc_id(symbol)
            if hgnc_id:
                yield Node(db_ns="HGNC", db_id=hgnc_id, labels=["BioEntity"])
        for cell_line in sorted(set(self.df["Tumor_Sample_Barcode"])):
            yield Node(db_ns="CCLE", db_id=cell_line, labels=["BioEntity"])

    def get_relations(self):
        """Yield a mutated_in relation for every row with a protein change."""
        for _, row in self.df.iterrows():
            if pd.isna(row["HGVSp_Short"]):
                continue
            hgnc_id = hgnc_client.get_hgnc_id(row["Hugo_Symbol"])
            if not hgnc_id:
                continue
            yield Relation(
                source_ns="HGNC",
                source_id=hgnc_id,
                target_ns="CCLE",
                target_id=row["Tumor_Sample_Barcode"],
                rel_type="mutated_in",
                data={"HGVSp_Short": row["HGVSp_Short"], "source": "ccle"},
            )
class CcleCnaProcessor(Processor):
    """Processor for CCLE copy-number alteration data exported from cBioPortal."""

    name = "ccle_cna"

    def __init__(
        self,
        path: Union[str, Path, None] = None,
    ):
        # Default to the pystow-managed download location when no path given.
        default_path = pystow.join(
            "indra", "cogex", "cbioportal", "ccle_broad_2019", name="data_CNA.txt"
        )
        if not path:
            path = default_path
        elif isinstance(path, str):
            path = Path(path)
        self.df = pd.read_csv(path, sep="\t")

    def get_nodes(self):
        """Yield one HGNC node per mappable gene and one node per cell-line column."""
        for symbol in sorted(set(self.df["Hugo_Symbol"])):
            hgnc_id = hgnc_client.get_hgnc_id(symbol)
            if hgnc_id:
                yield Node(db_ns="HGNC", db_id=hgnc_id, labels=["BioEntity"])
        for cell_line in sorted(set(self.df.columns.values[1:])):
            yield Node(db_ns="CCLE", db_id=cell_line, labels=["BioEntity"])

    def get_relations(self):
        """Yield a relation for every nonzero CNA value in the matrix."""
        cell_lines = self.df.columns.values[1:]
        for _, row in self.df.iterrows():
            hgnc_id = hgnc_client.get_hgnc_id(row["Hugo_Symbol"])
            if not hgnc_id:
                continue
            for cell_line in cell_lines:
                cna = row[cell_line]
                if cna == 0:
                    continue
                yield Relation(
                    source_ns="HGNC",
                    source_id=hgnc_id,
                    target_ns="CCLE",
                    target_id=cell_line,
                    rel_type="copy_number_altered_in",
                    data={"CNA": cna, "source": "ccle"},
                )
class CcleDrugResponseProcessor(Processor):
    """Processor for CCLE drug response (IC50) data exported from cBioPortal.

    Fix over the original: ``get_relations`` relied on ``get_nodes`` having
    been called first, because only ``get_nodes`` populated
    ``self.drug_mappings``; called on its own it silently yielded nothing.
    The mapping is now built lazily on first use by either method.
    """

    name = "ccle_drug"

    def __init__(self, path: Union[str, Path, None] = None):
        default_path = pystow.join(
            "indra",
            "cogex",
            "cbioportal",
            "ccle_broad_2019",
            name="data_drug_treatment_IC50.txt",
        )
        if not path:
            path = default_path
        elif isinstance(path, str):
            path = Path(path)
        self.df = pd.read_csv(path, sep="\t")
        # Maps ENTITY_STABLE_ID -> (db_ns, db_id); populated lazily.
        self.drug_mappings = {}

    def _ensure_drug_mappings(self):
        """Populate self.drug_mappings once, on first use."""
        if not self.drug_mappings:
            self.get_drug_mappings()
        return self.drug_mappings

    def get_nodes(self):
        """Yield one node per groundable drug and one per cell-line column."""
        drugs = self._ensure_drug_mappings()
        for db_ns, db_id in drugs.values():
            if db_ns and db_id:
                yield Node(db_ns, db_id, labels=["BioEntity"])
        for cell_line in list(self.df.columns[5:]):
            yield Node("CCLE", cell_line, labels=["BioEntity"])

    def get_relations(self):
        """Yield sensitive_to relations for cell lines with IC50 below 10."""
        self._ensure_drug_mappings()
        cell_lines = self.df.columns[5:]
        for _, row in self.df.iterrows():
            drug = row["ENTITY_STABLE_ID"]
            drug_ns, drug_id = self.drug_mappings.get(drug, (None, None))
            if drug_ns and drug_id:
                for cell_line in cell_lines:
                    # IC50 < 10 is used as the sensitivity cutoff.
                    if not pd.isna(row[cell_line]) and row[cell_line] < 10:
                        yield Relation(
                            "CCLE",
                            cell_line,
                            drug_ns,
                            drug_id,
                            rel_type="sensitive_to",
                            data={"IC50": row[cell_line], "source": "ccle"},
                        )

    def get_drug_mappings(self):
        """Ground every ENTITY_STABLE_ID to a (db_ns, db_id) pair via gilda."""
        self.drug_mappings = {}
        for _, row in self.df.iterrows():
            # We skip ones of the form "Afatinib 1/2" because we use the
            # corresponding "Afatinib 2/2" entries instead; for those, ground
            # the stable ID with its trailing "-N" suffix removed.
            if re.match(r"^(.+) 1/2$", row["NAME"]):
                continue
            elif re.match(r"^(.+) 2/2$", row["NAME"]):
                to_ground = [row["ENTITY_STABLE_ID"].rsplit("-", 1)[0]]
            else:
                to_ground = [row["ENTITY_STABLE_ID"]]
            # Additionally try any synonyms listed in the description.
            match = re.search(r"Synonyms:(.+)", row["DESCRIPTION"])
            if match:
                syns = match.groups()[0]
                if syns != "None":
                    to_ground += [syn.strip() for syn in syns.split(",")]
            db_ns, db_id = self.ground_drug(to_ground)
            self.drug_mappings[row["ENTITY_STABLE_ID"]] = (db_ns, db_id)
        return self.drug_mappings

    def ground_drug(self, names):
        """Return (db_ns, db_id) for the first groundable name, else (None, None)."""
        for name in names:
            matches = gilda.ground(name)
            if matches:
                return matches[0].term.db, matches[0].term.id
        logger.info("Could not match %s" % str(names))
        return None, None
| 33.109948 | 82 | 0.528937 |
abb8ebf97cc206ff0ba62a48c6add688ee06c5a4 | 8,731 | py | Python | marcottievents/models/common/overview.py | soccermetrics/marcotti-events | 759f3a4ee130fa155fd48e216ef6197a69ee2a33 | [
"MIT"
] | 22 | 2015-12-14T20:38:32.000Z | 2020-12-26T07:40:26.000Z | marcottievents/models/common/overview.py | soccermetrics/marcotti-events | 759f3a4ee130fa155fd48e216ef6197a69ee2a33 | [
"MIT"
] | null | null | null | marcottievents/models/common/overview.py | soccermetrics/marcotti-events | 759f3a4ee130fa155fd48e216ef6197a69ee2a33 | [
"MIT"
] | 6 | 2015-12-20T05:08:57.000Z | 2020-06-28T23:57:31.000Z | import uuid
from datetime import date
from sqlalchemy import (case, select, cast, Column, Integer, Numeric, Date,
String, Sequence, ForeignKey, Unicode, Index)
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.schema import CheckConstraint
from marcottievents.models import GUID
from marcottievents.models.common import BaseSchema
import marcottievents.models.common.enums as enums
class Countries(BaseSchema):
    """
    Countries data model.

    Countries are defined as FIFA-affiliated national associations.
    """
    __tablename__ = "countries"

    id = Column(GUID, primary_key=True, default=uuid.uuid4)
    name = Column(Unicode(60))  # full association/country name
    code = Column(String(3))  # three-letter (trigram) code
    confederation = Column(enums.ConfederationType.db_type())  # continental confederation

    # NOTE(review): an Index() constructed in the class body with string column
    # names is not attached to the table by SQLAlchemy declarative; it normally
    # belongs in __table_args__ — confirm this index is actually created.
    Index('countries_indx', 'name')

    def __repr__(self):
        # py2-style repr: .encode() makes this return bytes on Python 3.
        return u"<Country(id={0}, name={1}, trigram={2}, confed={3})>".format(
            self.id, self.name, self.code, self.confederation.value).encode('utf-8')
class Years(BaseSchema):
    """
    Years data model.
    """
    __tablename__ = "years"

    id = Column(Integer, Sequence('year_id_seq', start=100), primary_key=True)
    yr = Column(Integer, unique=True)  # calendar year, e.g. 2018

    # NOTE(review): class-body Index() with string column names is likely not
    # attached to the table (belongs in __table_args__) — confirm.
    Index('years_indx', 'yr')

    def __repr__(self):
        return "<Year(yr={0})>".format(self.yr)
class Seasons(BaseSchema):
    """
    Seasons data model.

    A season spans either one calendar year (start == end) or two
    consecutive years (European split-year seasons).
    """
    __tablename__ = "seasons"

    id = Column(Integer, Sequence('season_id_seq', start=100), primary_key=True)

    start_year_id = Column(Integer, ForeignKey('years.id'))
    end_year_id = Column(Integer, ForeignKey('years.id'))

    start_year = relationship('Years', foreign_keys=[start_year_id])
    end_year = relationship('Years', foreign_keys=[end_year_id])

    # NOTE(review): class-body Index() with string column names is likely not
    # attached to the table (belongs in __table_args__) — confirm.
    Index('seasons_indx', 'start_year_id', 'end_year_id')

    @hybrid_property
    def name(self):
        """
        List year(s) that make up season.  Seasons over calendar year will be of form YYYY;
        seasons over two years will be of form YYYY-YYYY.
        """
        if self.start_year.yr == self.end_year.yr:
            return self.start_year.yr
        else:
            return "{0}-{1}".format(self.start_year.yr, self.end_year.yr)

    @name.expression
    def name(cls):
        """
        List year(s) that make up season.  Seasons over calendar year will be of form YYYY;
        seasons over two years will be of form YYYY-YYYY.

        This expression allows `name` to be used as a query parameter.
        """
        # Correlated scalar subqueries resolving the two year values, combined
        # in SQL: "YYYY" when equal, "YYYY-YYYY" otherwise.
        yr1 = select([Years.yr]).where(cls.start_year_id == Years.id).as_scalar()
        yr2 = select([Years.yr]).where(cls.end_year_id == Years.id).as_scalar()
        return cast(yr1, String) + case([(yr1 == yr2, '')], else_='-'+cast(yr2, String))

    @hybrid_property
    def reference_date(self):
        """
        Define the reference date that is used to calculate player ages.

        +------------------------+---------------------+
        | Season type            | Reference date      |
        +========================+=====================+
        | European (Split years) | 30 June             |
        +------------------------+---------------------+
        | Calendar-year          | 31 December         |
        +------------------------+---------------------+

        :return: Date object that expresses reference date.
        """
        if self.start_year.yr == self.end_year.yr:
            return date(self.end_year.yr, 12, 31)
        else:
            return date(self.end_year.yr, 6, 30)

    def __repr__(self):
        return "<Season({0})>".format(self.name)
class Competitions(BaseSchema):
    """
    Competitions common data model.

    Base of a single-table inheritance hierarchy (subclasses define no
    __tablename__); the ``type`` column discriminates the concrete kind.
    """
    __tablename__ = 'competitions'

    id = Column(GUID, primary_key=True, default=uuid.uuid4)
    name = Column(Unicode(80))
    level = Column(Integer)  # competition tier — presumably 1 is the top level; confirm
    discriminator = Column('type', String(20))

    # NOTE(review): class-body Index() with string column names is likely not
    # attached to the table (belongs in __table_args__) — confirm.
    Index('competitions_indx', 'name', 'level')

    __mapper_args__ = {
        'polymorphic_identity': 'competitions',
        'polymorphic_on': discriminator
    }
class DomesticCompetitions(Competitions):
    """
    Domestic Competitions data model, inherited from Competitions model.

    Adds a link to the country whose association organizes the competition;
    reachable from Countries via the 'competitions' backref.
    """
    __mapper_args__ = {'polymorphic_identity': 'domestic'}

    country_id = Column(GUID, ForeignKey('countries.id'))
    country = relationship('Countries', backref=backref('competitions'))

    def __repr__(self):
        # py2-style repr: .encode() makes this return bytes on Python 3.
        return u"<DomesticCompetition(name={0}, country={1}, level={2})>".format(
            self.name, self.country.name, self.level).encode('utf-8')
class InternationalCompetitions(Competitions):
    """
    International Competitions data model, inherited from Competitions model.

    Organized by a continental confederation rather than a single country.
    """
    __mapper_args__ = {'polymorphic_identity': 'international'}

    confederation = Column(enums.ConfederationType.db_type())

    def __repr__(self):
        # py2-style repr: .encode() makes this return bytes on Python 3.
        return u"<InternationalCompetition(name={0}, confederation={1})>".format(
            self.name, self.confederation.value).encode('utf-8')
class Venues(BaseSchema):
    """Match venue: location, geographic coordinates and timezone."""

    __tablename__ = 'venues'

    id = Column(GUID, primary_key=True, default=uuid.uuid4)

    name = Column(Unicode(60), doc="The name of the match venue")
    city = Column(Unicode(60), doc="Name of city/locality where venue resides")
    region = Column(Unicode(60), doc="Name of administrative region (state, province, etc) where venue resides")
    # CheckConstraints bound the coordinates to valid decimal-degree ranges
    # and the altitude to a plausible physical range (meters).
    latitude = Column(Numeric(9, 6), CheckConstraint("latitude >= -90.000000 AND latitude <= 90.000000"),
                      default=0.000000, doc="Venue latitude in decimal degrees")
    longitude = Column(Numeric(9, 6), CheckConstraint("longitude >= -180.000000 AND longitude <= 180.000000"),
                       default=0.000000, doc="Venue longitude in decimal degrees")
    altitude = Column(Integer, CheckConstraint("altitude >= -200 AND altitude <= 4500"),
                      default=0, doc="Venue altitude in meters")

    country_id = Column(GUID, ForeignKey('countries.id'))
    timezone_id = Column(Integer, ForeignKey('timezones.id'))

    country = relationship('Countries', backref=backref('venues'))
    timezone = relationship('Timezones', backref=backref('venues'))

    # NOTE(review): class-body Index() with string column names is likely not
    # attached to the table (belongs in __table_args__) — confirm.
    Index('venues_indx', 'name', 'city', 'country_id')

    def __repr__(self):
        # py2-style repr: .encode() makes this return bytes on Python 3.
        return u"<Venue(name={0}, city={1}, country={2})>".format(
            self.name, self.city, self.country.name).encode('utf-8')
class VenueHistory(BaseSchema):
    """
    Historical venue configuration: pitch dimensions, capacity, seating and
    playing surface, effective from a given date.
    """
    __tablename__ = 'venue_histories'
    id = Column(GUID, primary_key=True, default=uuid.uuid4)
    eff_date = Column(Date, doc="Effective date of venue configuration")
    # Pitch dimensions constrained to the ranges permitted by the Laws of the
    # Game (length 90-120 m, width 45-90 m); defaults are the standard pitch.
    length = Column(Integer, CheckConstraint("length >= 90 AND length <= 120"),
                    default=105, doc="Length of venue playing surface in meters")
    width = Column(Integer, CheckConstraint("width >= 45 AND width <= 90"),
                   default=68, doc="Width of venue playing surface in meters")
    capacity = Column(Integer, CheckConstraint("capacity >= 0"),
                      default=0, doc="Total venue capacity (seated and unseated)")
    seats = Column(Integer, CheckConstraint("seats >= 0"),
                   default=0, doc="Total seats at venue")
    venue_id = Column(GUID, ForeignKey('venues.id'))
    venue = relationship('Venues', backref=backref('histories'))
    surface_id = Column(Integer, ForeignKey('surfaces.id'))
    surface = relationship('Surfaces', backref=backref('venues'))
    def __repr__(self):
        # NOTE(review): returns bytes (Python 2 idiom).
        return u"<VenueHistory(name={0}, date={1}, length={2}, width={3}, capacity={4})>".format(
            self.venue.name, self.eff_date.isoformat(), self.length, self.width, self.capacity).encode('utf-8')
class Timezones(BaseSchema):
    """
    Time zone region with its UTC offset (decimal hours) and confederation.
    """
    __tablename__ = 'timezones'
    # Integer PK drawn from a sequence starting at 1000.
    id = Column(Integer, Sequence('timezone_id_seq', start=1000), primary_key=True)
    name = Column(Unicode(80), doc="Name of the time zone geographic region", nullable=False)
    offset = Column(Numeric(4, 2), doc="Offset of the time zone region from UTC, in decimal hours", nullable=False)
    confederation = Column(enums.ConfederationType.db_type())
    # NOTE(review): Index() in the class body -- same caveat as Venues; verify
    # the index is actually attached to the table.
    Index('timezones_indx', 'name')
    def __repr__(self):
        # NOTE(review): returns bytes (Python 2 idiom).
        return u"<Timezone(name={0}, offset={1:+1.2f}, confederation={2})>".format(
            self.name, self.offset, self.confederation.value).encode('utf-8')
class Surfaces(BaseSchema):
    """
    Playing surface lookup table (description plus enum-backed surface type).
    """
    __tablename__ = 'surfaces'
    id = Column(Integer, Sequence('surface_id_seq', start=10), primary_key=True)
    description = Column(Unicode(60), nullable=False)
    type = Column(enums.SurfaceType.db_type())
    def __repr__(self):
        # NOTE(review): returns bytes (Python 2 idiom).
        return u"<Surface(description={0}, type={1})>".format(self.description, self.type.description).encode('utf-8')
| 36.839662 | 118 | 0.640133 |
89e4859ceed12595af12487412a2e2e34f34b6a9 | 130 | py | Python | python/coursera_python/MICHIGAN/WEB/test/find_1.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 16 | 2018-11-26T08:39:42.000Z | 2019-05-08T10:09:52.000Z | python/coursera_python/MICHIGAN/WEB/test/find_1.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 8 | 2020-05-04T06:29:26.000Z | 2022-02-12T05:33:16.000Z | python/coursera_python/MICHIGAN/WEB/test/find_1.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 5 | 2020-02-11T16:02:21.000Z | 2021-02-05T07:48:30.000Z | # To use the find function
hand = open('test.txt')
for line in hand:
line=line.rstrip()
if line.find("from")>=0:
print(line)
| 16.25 | 26 | 0.661538 |
68b93f53a5d3ddeee03146793b4dd4e86f49d96a | 9,764 | py | Python | machine_learning_course/LAB03/main.py | arekmula/MachineLearningCourse | 5966dd2ad0ee23ef8f84d218a9f64e345900402e | [
"MIT"
] | null | null | null | machine_learning_course/LAB03/main.py | arekmula/MachineLearningCourse | 5966dd2ad0ee23ef8f84d218a9f64e345900402e | [
"MIT"
] | null | null | null | machine_learning_course/LAB03/main.py | arekmula/MachineLearningCourse | 5966dd2ad0ee23ef8f84d218a9f64e345900402e | [
"MIT"
] | null | null | null | from sklearn import datasets, svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
from mlxtend.plotting import plot_decision_regions
from matplotlib import pyplot as plt
from matplotlib import gridspec
import pandas as pd
import pickle
import itertools
import numpy as np
def check_different_classificator(clf, clf_name, x_train, x_test, y_train, y_test, gs, grid, figure, plot=False):
    """Fit ``clf`` on the training split and return its accuracy on the test split.

    When ``plot`` is True, additionally draws the classifier's decision
    regions into the subplot at position ``grid`` of the GridSpec ``gs``.

    :param clf: scikit-learn style estimator (fit/score interface).
    :param clf_name: label used as the subplot title.
    :param x_train, x_test: feature matrices (ndarray or DataFrame).
    :param y_train, y_test: pandas Series of target labels.
    :param gs: matplotlib GridSpec laying out the comparison figure.
    :param grid: (row, col) position of this classifier's subplot.
    :param figure: the enclosing figure (kept for interface compatibility).
    :param plot: whether to render decision regions.
    :return: accuracy score on the test split.
    """
    clf.fit(x_train, y_train)
    score = clf.score(x_test, y_test)
    if plot:
        # Activate the target subplot; plot_decision_regions draws into the
        # current axes.
        plt.subplot(gs[grid[0], grid[1]])
        try:
            # Works directly when x_train is already a numpy array.
            plot_decision_regions(x_train, y_train.values, clf=clf, legend=2)
        except ValueError:
            # A pandas DataFrame raises ValueError -- retry with the raw
            # ndarray behind it.
            plot_decision_regions(x_train.values, y_train.values, clf=clf, legend=2)
        plt.title(clf_name)
    return score
def todo1():
    """Compare raw, standardized and min-max scaled Iris features visually,
    then train an SVC on the standardized first two features and plot its
    decision regions."""
    iris = datasets.load_iris(as_frame=True)
    print(iris.frame.describe())
    # Stratified 80/20 split with a fixed seed for reproducibility.
    x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=42,
                                                        stratify=iris.target)
    min_max_scaler = MinMaxScaler()
    # Fit the scalers on the training set only; transform must then be applied
    # to both sets (avoids leaking test-set statistics).
    min_max_scaler.fit(x_train)
    x_train_min_max_scaled = min_max_scaler.transform(x_train)
    standard_scaler = StandardScaler()
    standard_scaler.fit(x_train)
    x_train_standard_scaled = standard_scaler.transform(x_train)
    # 2x2 grid: raw data, standardized, min-max scaled (one cell unused).
    fig, axs = plt.subplots(2, 2)
    axs[0, 0].scatter(x_train.loc[:, "sepal length (cm)"], x_train.loc[:, "sepal width (cm)"], color='r')
    axs[0, 0].set_title('Dane wejsciowe')
    axs[0, 0].set_xlabel('Sepal length (m)')
    axs[0, 0].set_ylabel('Sepal width (m)')
    axs[0, 0].axvline(x=0)
    axs[0, 0].axhline(y=0)
    axs[0, 1].scatter(x_train_standard_scaled[:, 0], x_train_standard_scaled[:, 1], color='g')
    axs[0, 1].set_title('Dane po standaryzacji')
    axs[0, 1].set_xlabel('Sepal length')
    axs[0, 1].set_ylabel('Sepal width')
    axs[0, 1].axvline(x=0)
    axs[0, 1].axhline(y=0)
    axs[1, 0].scatter(x_train_min_max_scaled[:, 0], x_train_min_max_scaled[:, 1], color='b')
    axs[1, 0].set_title('Dane po min-max scaling')
    axs[1, 0].set_xlabel('Sepal length')
    axs[1, 0].set_ylabel('Sepal width')
    axs[1, 0].axvline(x=0)
    axs[1, 0].axhline(y=0)
    fig.suptitle('Iris features')
    plt.show()
    # Train an SVC on the first two standardized features only, so the
    # decision regions can be drawn in 2D.
    clf = svm.SVC()
    clf.fit(x_train_standard_scaled[:, 0:2], y_train)
    plot_decision_regions(x_train_standard_scaled[:, 0:2], y_train.values, clf=clf, legend=2)
    plt.xlabel('sepal length normalized')
    # NOTE(review): the y axis shows sepal *width* but is labeled "length" --
    # looks like a copy-paste slip; confirm and fix the label text.
    plt.ylabel('sepal length normalized')
    plt.title('SVM on Iris')
    plt.show()
def todo2():
    """Train four classifiers on two Iris features three times (raw data,
    min-max scaled, standard scaled) and plot the decision regions for each
    combination, printing the accuracy scores."""
    iris = datasets.load_iris(as_frame=True)
    print(iris.frame.describe())
    x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=42,
                                                        stratify=iris.target)
    # Scalers are fit on the training split only, then applied to both splits.
    min_max_scaler = MinMaxScaler()
    min_max_scaler.fit(x_train)
    standard_scaler = StandardScaler()
    standard_scaler.fit(x_train)
    x_train_standard_scaled = standard_scaler.transform(x_train)
    x_test_standard_scaled = standard_scaler.transform(x_test)
    x_train_min_max_scaled = min_max_scaler.transform(x_train)
    x_test_min_max_scaled = min_max_scaler.transform(x_test)
    classificators = [svm.SVC(), LinearRegression(), RandomForestClassifier(), DecisionTreeClassifier()]
    classificators_names = ["svc", "linear_regression", "random_forest", "decision_tree"]
    score_dict = {}
    # One 2x2 grid per data variant; itertools.product yields the subplot
    # coordinates (0,0), (0,1), (1,0), (1,1) in step with the classifiers.
    gs = gridspec.GridSpec(2, 2)
    fig = plt.figure(figsize=(10, 8))
    for classifier, clf_name, grd in zip(classificators, classificators_names, itertools.product([0, 1], repeat=2)):
        score_dict[clf_name] = check_different_classificator(classifier, clf_name,
                                                             x_train.loc[:, "sepal length (cm)":"sepal width (cm)"],
                                                             x_test.loc[:, "sepal length (cm)":"sepal width (cm)"],
                                                             y_train, y_test, gs, grd, fig, plot=True)
    print(f"Wyniki klasyfikatorow wytrenowanych na nadanych nieprzeskalowanych: {score_dict}")
    plt.suptitle("Pola decyzyjne dla klasyfikatorów wytrenowanych na danych nieprzeskalowanych")
    fig1 = plt.figure(figsize=(10, 8))
    for classifier, clf_name, grd in zip(classificators, classificators_names, itertools.product([0, 1], repeat=2)):
        score_dict[clf_name] = check_different_classificator(classifier, clf_name,
                                                             x_train_min_max_scaled[:, 0:2],
                                                             x_test_min_max_scaled[:, 0:2],
                                                             y_train, y_test, gs, grd, fig1, plot=True)
    print(f"Wyniki klasyfikatorow wytrenowanych nadanych po min-max scalingu: {score_dict}")
    plt.suptitle("Pola decyzyjne dla klasyfikatorów wytrenowanych na danych po min-max scalingu")
    fig2 = plt.figure(figsize=(10, 8))
    for classifier, clf_name, grd in zip(classificators, classificators_names, itertools.product([0, 1], repeat=2)):
        score_dict[clf_name] = check_different_classificator(classifier, clf_name,
                                                             x_train_standard_scaled[:, 0:2],
                                                             x_test_standard_scaled[:, 0:2],
                                                             y_train, y_test, gs, grd, fig2, plot=True)
    print(f"Wyniki klasyfikatorow wytrenowanych nadanych po standard scalingu: {score_dict}")
    plt.suptitle("Pola decyzyjne dla klasyfikatorów wytrenowanych na danych po standard scalingu")
    plt.show()
def todo3():
    """Compare classifier accuracy when trained on 2 features versus all 4
    features (min-max scaled); only prints scores, no plotting."""
    iris = datasets.load_iris(as_frame=True)
    print(iris.frame.describe())
    x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=42,
                                                        stratify=iris.target)
    # Scalers fit on the training split only.
    min_max_scaler = MinMaxScaler()
    min_max_scaler.fit(x_train)
    standard_scaler = StandardScaler()
    standard_scaler.fit(x_train)
    # NOTE(review): the standard-scaled arrays are computed but never used in
    # this exercise -- only the min-max variant is evaluated below.
    x_train_standard_scaled = standard_scaler.transform(x_train)
    x_test_standard_scaled = standard_scaler.transform(x_test)
    x_train_min_max_scaled = min_max_scaler.transform(x_train)
    x_test_min_max_scaled = min_max_scaler.transform(x_test)
    classificators = [svm.SVC(), LinearRegression(), RandomForestClassifier(), DecisionTreeClassifier()]
    classificators_names = ["svc", "linear_regression", "random_forest", "decision_tree"]
    score_dict = {}
    gs = gridspec.GridSpec(2, 2)
    fig1 = plt.figure(figsize=(10, 8))
    # First pass: only the first two features.
    for classifier, clf_name, grd in zip(classificators, classificators_names, itertools.product([0, 1], repeat=2)):
        score_dict[clf_name] = check_different_classificator(classifier, clf_name,
                                                             x_train_min_max_scaled[:, 0:2],
                                                             x_test_min_max_scaled[:, 0:2],
                                                             y_train, y_test, gs, grd, fig1, plot=False)
    print(f"Wyniki klasyfikatorow wytrenowanych na 2 cechach: {score_dict}")
    # Second pass: all four features.
    for classifier, clf_name, grd in zip(classificators, classificators_names, itertools.product([0, 1], repeat=2)):
        score_dict[clf_name] = check_different_classificator(classifier, clf_name,
                                                             x_train_min_max_scaled[:, 0:4],
                                                             x_test_min_max_scaled[:, 0:4],
                                                             y_train, y_test, gs, grd, fig1, plot=False)
    print(f"Wyniki klasyfikatorow wytrenowanych na 4 cechach: {score_dict}")
def todo4():
    """Grid-search hyperparameters for an SVC and a DecisionTree on min-max
    scaled Iris data, then round-trip the best decision-tree model through
    pickle and re-evaluate it to show serialization works."""
    iris = datasets.load_iris(as_frame=True)
    print(iris.frame.describe())
    x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=42,
                                                        stratify=iris.target)
    # Fit the scaler on the training split only, then transform both splits.
    min_max_scaler = MinMaxScaler()
    min_max_scaler.fit(x_train)
    x_train_min_max_scaled = min_max_scaler.transform(x_train)
    x_test_min_max_scaled = min_max_scaler.transform(x_test)
    tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
                         'C': [1, 10, 100, 1000]},
                        {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
    clf = GridSearchCV(svm.SVC(), param_grid=tuned_parameters)
    clf.fit(x_train_min_max_scaled, y_train)
    print(f"SVC best params: {clf.best_params_}")
    tuned_parameters = [{'criterion': ['gini', 'entropy'], 'splitter': ["best", "random"]}]
    clf = GridSearchCV(DecisionTreeClassifier(), param_grid=tuned_parameters)
    clf.fit(x_train_min_max_scaled, y_train)
    print(f"DecisionTree best params: {clf.best_params_}")
    print(f"DecisionTree score: {clf.score(x_test_min_max_scaled, y_test)}")
    # NOTE(review): despite the file name, `clf` holds the DecisionTree grid
    # search at this point -- the saved model is NOT the SVC.
    filename = 'svc_model.sav'
    # Context managers close the pickle files; the original leaked both
    # handles by passing bare open() calls to pickle.
    with open(filename, 'wb') as model_file:
        pickle.dump(clf, model_file)
    with open(filename, 'rb') as model_file:
        clf_loaded = pickle.load(model_file)
    print(f"DecisionTree score: {clf_loaded.score(x_test_min_max_scaled, y_test)}")
# Run all four lab exercises in order (each blocks on its matplotlib windows
# until they are closed).
todo1()
todo2()
todo3()
todo4()
| 45.203704 | 118 | 0.635293 |
9391d07a63a40ed8b5d33fdffabaa018e777ad68 | 4,517 | py | Python | qa/rpc-tests/mempool_reorg.py | bcgssystem/bcgt | 17b12766577615d48017da72f16129d25025fe8f | [
"MIT"
] | null | null | null | qa/rpc-tests/mempool_reorg.py | bcgssystem/bcgt | 17b12766577615d48017da72f16129d25025fe8f | [
"MIT"
] | null | null | null | qa/rpc-tests/mempool_reorg.py | bcgssystem/bcgt | 17b12766577615d48017da72f16129d25025fe8f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(DigiByteTestFramework):
    """Regression test: the mempool must correctly evict transactions that
    spend coinbase outputs (directly or via descendants) when a re-org makes
    those coinbases immature again."""
    def __init__(self):
        super().__init__()
        # Two connected nodes sharing a pre-mined (non-clean) chain.
        self.num_nodes = 2
        self.setup_clean_chain = False
    alert_filename = None  # Set by setup_network
    def setup_network(self):
        # -checkmempool makes each node continuously self-audit its mempool.
        args = ["-checkmempool", "-debug=mempool"]
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, args))
        self.nodes.append(start_node(1, self.options.tmpdir, args))
        connect_nodes(self.nodes[1], 0)
        self.is_network_split = False
        self.sync_all()
    def run_test(self):
        start_count = self.nodes[0].getblockcount()
        # Mine three blocks. After this, nodes[0] blocks
        # 101, 102, and 103 are spend-able.
        new_blocks = self.nodes[1].generate(4)
        self.sync_all()
        node0_address = self.nodes[0].getnewaddress()
        node1_address = self.nodes[1].getnewaddress()
        # Three scenarios for re-orging coinbase spends in the memory pool:
        # 1. Direct coinbase spend  :  spend_101
        # 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
        # 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidatblock to make all of the above coinbase spends invalid (immature coinbase),
        # and make sure the mempool code behaves correctly.
        b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
        coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
        spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 49.99)
        spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 49.99)
        spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 49.99)
        # Create a block-height-locked transaction which will be invalid after reorg
        timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99})
        # Set the time lock
        # Moving the input's nSequence away from 0xffffffff makes nLockTime
        # enforceable for this transaction.
        timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
        # Overwrite the trailing nLockTime field with "current height + 2".
        timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
        timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
        # Not final yet, so broadcasting must be rejected.
        assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
        # Broadcast and mine spend_102 and 103:
        spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
        spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
        self.nodes[0].generate(1)
        # Still one block short of the lock height.
        assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
        # Create 102_1 and 103_1:
        spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 49.98)
        spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 49.98)
        # Broadcast and mine 103_1:
        spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
        last_block = self.nodes[0].generate(1)
        timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
        # ... now put spend_101 and spend_102_1 in memory pools:
        spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
        spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
        self.sync_all()
        assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
        for node in self.nodes:
            node.invalidateblock(last_block[0])
        # After undoing one block the timelock tx is no longer final and is
        # dropped, while 103_1 falls back out of the chain into the mempool.
        assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
        # Use invalidateblock to re-org back and make all those coinbase spends
        # immature/invalid:
        for node in self.nodes:
            node.invalidateblock(new_blocks[0])
        self.sync_all()
        # mempool should be empty.
        assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
    MempoolCoinbaseTest().main()
| 44.284314 | 122 | 0.684747 |
20003072495e282316a02bfc24153b241c1f5f8f | 3,617 | py | Python | src/02_video_read_write.py | fpecek/opencv-introduction | 7a6cf39b69e8a238364c36736c6750390554e8e3 | [
"Apache-2.0"
] | null | null | null | src/02_video_read_write.py | fpecek/opencv-introduction | 7a6cf39b69e8a238364c36736c6750390554e8e3 | [
"Apache-2.0"
] | null | null | null | src/02_video_read_write.py | fpecek/opencv-introduction | 7a6cf39b69e8a238364c36736c6750390554e8e3 | [
"Apache-2.0"
] | null | null | null | '''
OpenCV provides two clases for working with video: VideoCapture and VideoWriter
'''
import numpy as np
import matplotlib.pyplot as plt
import cv2
import time
# Unique interface for a diverse set of video sources: cv2.VideoCapture
# accepts a device path/index, a file path, or a network stream URL.
usb_cam_address = "/dev/video0" #or just 0
file_address = "img/movie.mp4"
# NOTE(review): credentials are hard-coded in this RTSP URL -- move them to
# configuration/environment before sharing or deploying this example.
stream_url_address = "rtsp://admin:CAMpass001@172.16.50.201:554/Streaming/Channels/101?transportmode=unicast&profile=Profile_1"
def simple_capture(address):
    """Capture frames from a video source and display them until 'q' is pressed.

    :param address: device index/path, file path or stream URL understood by
        cv2.VideoCapture.
    """
    capture = cv2.VideoCapture(address)
    if not capture.isOpened():
        print(f"Cannot open camera '{address}'")
        return
    try:
        while True:
            # Capture the next frame; ret is False when the source is
            # exhausted or the device disappears.
            ret, frame = capture.read()
            if not ret:
                break
            cv2.imshow("frame", frame)
            # waitKey also pumps the GUI event loop; press 'q' to quit.
            if cv2.waitKey(1) == ord('q'):
                break
    finally:
        # The original never released the device handle -- do it explicitly.
        capture.release()
        cv2.destroyAllWindows()
def respect_the_fps_and_loop(file_address):
    """Play a video file at its native FPS, restarting from the first frame
    after the last one is shown, until 'q' is pressed.

    :param file_address: path to the video file.
    """
    capture = cv2.VideoCapture(file_address)
    if not capture.isOpened():
        print(f"Cannot open camera '{file_address}'")
        return
    frame_rate = capture.get(cv2.CAP_PROP_FPS)
    inter_frame_ms = 1000 / frame_rate
    num_frames = capture.get(cv2.CAP_PROP_FRAME_COUNT)
    frame_index = -1
    timestamp = None
    try:
        while True:
            # Rewind to frame 0 once the last frame has been displayed.
            if frame_index == num_frames - 1:
                capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
                frame_index = -1
            ret, frame = capture.read()
            if not ret:
                break
            frame_index += 1
            if timestamp is not None:
                # BUG FIX: time.time() returns seconds, but the original
                # compared the raw difference against inter_frame_ms, mixing
                # units and effectively never sleeping correctly. Convert the
                # elapsed time to milliseconds first.
                elapsed_ms = (time.time() - timestamp) * 1000
                sleep_ms = inter_frame_ms - elapsed_ms
                if sleep_ms > 0:
                    time.sleep(sleep_ms / 1000)
            cv2.imshow("frame", frame)
            timestamp = time.time()
            # Press 'q' to stop playback.
            if cv2.waitKey(1) == ord('q'):
                break
    finally:
        # Release the file handle (the original leaked it).
        capture.release()
        cv2.destroyAllWindows()
def write_video_as_grayscale(address):
    """Capture frames, convert each to grayscale, display them and write the
    result to 'movie_grey.avi' (XVID codec, source frame rate and size).

    :param address: device index/path, file path or stream URL understood by
        cv2.VideoCapture.
    """
    capture = cv2.VideoCapture(address)
    if not capture.isOpened():
        print(f"Cannot open camera '{address}'")
        return
    # why use XVID codec with avi container?
    # h264 with mp4 container would be better solution
    # https://github.com/skvark/opencv-python/issues/100#issuecomment-394159998
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    frame_rate = capture.get(cv2.CAP_PROP_FPS)
    frame_height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    out = cv2.VideoWriter('movie_grey.avi', fourcc, frame_rate, (frame_width, frame_height))
    try:
        while True:
            # Capture the next frame from the capture device.
            ret, frame = capture.read()
            if not ret:
                break
            # Convert to grayscale, then back to 3-channel BGR because the
            # writer was opened in (default) color mode and expects 3 channels.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
            out.write(frame)
            cv2.imshow("frame", frame)
            # Press 'q' to stop recording early.
            if cv2.waitKey(1) == ord('q'):
                break
    finally:
        # Always flush/close the writer and release the device, even if the
        # loop raises.
        capture.release()
        out.release()
        cv2.destroyAllWindows()
0362bc7f1ef6b6aaacf0ae21cb270f196ba06a1e | 1,328 | py | Python | setup.py | proxyport/scrapy-proxyport | cd96f0abf503f89d0b31d40bebce3bd6a0668e64 | [
"MIT"
] | null | null | null | setup.py | proxyport/scrapy-proxyport | cd96f0abf503f89d0b31d40bebce3bd6a0668e64 | [
"MIT"
] | null | null | null | setup.py | proxyport/scrapy-proxyport | cd96f0abf503f89d0b31d40bebce3bd6a0668e64 | [
"MIT"
] | null | null | null | import os
from setuptools import find_packages, setup
# Resolve paths relative to this setup.py so builds work from any CWD.
here = os.path.abspath(os.path.dirname(__file__))

# Package metadata.
NAME = 'scrapyproxyport'
DESCRIPTION = 'Proxy Port integration for Scrapy.'
URL = 'https://github.com/proxyport/scrapy-proxyport'
EMAIL = 'proxyportcom@gmail.com'
AUTHOR = 'Proxy Port'
REQUIRES_PYTHON = '>=3.6.0'
README = ''
VERSION = ''
REQUIRED = ['proxyport']

# Explicit encoding avoids UnicodeDecodeError on platforms whose locale
# default is not UTF-8 (the original relied on the locale encoding).
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    README = f.read()

# Extract __version__ without importing the package (importing would require
# its runtime dependencies to be installed at build time).
with open(os.path.join(here, 'scrapyproxyport', '__version__.py'), encoding='utf-8') as f:
    globs = dict()
    exec(f.read(), globs)
    VERSION = globs['__version__']

setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=README,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    install_requires=REQUIRED,
    url=URL,
    packages=find_packages(exclude=['tests.*']),
    include_package_data=True,
    license='MIT',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
)
| 26.039216 | 72 | 0.669428 |
d67ee5c237adbedf882d3f9a8866a4e929c86a6c | 657 | py | Python | examples/HELICS/setup.py | cloudcomputingabc/volttron | 6495e26e3185a7af8d0d79ad2586bdf8ea83992d | [
"Apache-2.0",
"BSD-2-Clause"
] | 406 | 2015-01-20T03:08:53.000Z | 2022-03-31T20:59:07.000Z | examples/HELICS/setup.py | cloudcomputingabc/volttron | 6495e26e3185a7af8d0d79ad2586bdf8ea83992d | [
"Apache-2.0",
"BSD-2-Clause"
] | 2,031 | 2015-01-05T21:35:45.000Z | 2022-03-29T21:44:36.000Z | examples/HELICS/setup.py | cloudcomputingabc/volttron | 6495e26e3185a7af8d0d79ad2586bdf8ea83992d | [
"Apache-2.0",
"BSD-2-Clause"
] | 219 | 2015-01-20T14:53:57.000Z | 2022-03-06T00:37:41.000Z | from setuptools import setup, find_packages
# Name of the module inside the agent package that provides the entry point.
MAIN_MODULE = 'agent'
# Find the agent package that contains the main module
packages = find_packages('.')
agent_package = 'helics_example'
# Find the version number from the main module
# NOTE(review): this imports the agent module at build time, so the module
# (and anything it imports at top level) must be importable during install.
agent_module = agent_package + '.' + MAIN_MODULE
_temp = __import__(agent_module, globals(), locals(), ['__version__'], 0)
__version__ = _temp.__version__
# Setup
setup(
    name=agent_package + 'agent',
    version=__version__,
    install_requires=['volttron'],
    packages=packages,
    entry_points={
        # VOLTTRON packaging convention: the eggsecutable points at the
        # agent module's main() function.
        'setuptools.installation': [
            'eggsecutable = ' + agent_module + ':main',
        ]
    }
)
| 24.333333 | 73 | 0.684932 |
eccdeefc68ceb73e42111d1001a709402a15a8c8 | 1,023 | py | Python | rdmo/questions/migrations/0043_django2.py | Raspeanut/rdmo | 9f785010a499c372a2f8368ccf76d2ea4150adcb | [
"Apache-2.0"
] | 77 | 2016-08-09T11:40:20.000Z | 2022-03-06T11:03:26.000Z | rdmo/questions/migrations/0043_django2.py | Raspeanut/rdmo | 9f785010a499c372a2f8368ccf76d2ea4150adcb | [
"Apache-2.0"
] | 377 | 2016-07-01T13:59:36.000Z | 2022-03-30T13:53:19.000Z | rdmo/questions/migrations/0043_django2.py | Raspeanut/rdmo | 9f785010a499c372a2f8368ccf76d2ea4150adcb | [
"Apache-2.0"
] | 47 | 2016-06-23T11:32:19.000Z | 2022-03-01T11:34:37.000Z | # Generated by Django 2.2rc1 on 2019-03-26 13:29
from django.db import migrations
class Migration(migrations.Migration):
    """Django 2 upgrade migration: refreshes Meta options (ordering and
    verbose names) for the catalog, question, questionset and section models.
    No database schema changes are involved."""
    dependencies = [
        ('questions', '0042_remove_null_true'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='catalog',
            options={'ordering': ('order',), 'verbose_name': 'Catalog', 'verbose_name_plural': 'Catalogs'},
        ),
        migrations.AlterModelOptions(
            name='question',
            options={'ordering': ('questionset', 'order'), 'verbose_name': 'Question', 'verbose_name_plural': 'Questions'},
        ),
        migrations.AlterModelOptions(
            # NOTE(review): verbose_name_plural is the singular 'Question set'
            # -- looks intentional upstream, but worth confirming.
            name='questionset',
            options={'ordering': ('section', 'order'), 'verbose_name': 'Question set', 'verbose_name_plural': 'Question set'},
        ),
        migrations.AlterModelOptions(
            name='section',
            options={'ordering': ('catalog__order', 'order'), 'verbose_name': 'Section', 'verbose_name_plural': 'Sections'},
        ),
    ]
0bb950e4046a19e5a2e74b131cb17161e61889a4 | 1,356 | py | Python | covidDataset.py | weishancc/COVID19-SA | d4c8e2ed70df67e21e49671d9905312be89e5647 | [
"MIT"
] | 2 | 2020-12-23T01:37:55.000Z | 2022-03-10T09:24:05.000Z | covidDataset.py | weishancc/COVID19-SA | d4c8e2ed70df67e21e49671d9905312be89e5647 | [
"MIT"
] | null | null | null | covidDataset.py | weishancc/COVID19-SA | d4c8e2ed70df67e21e49671d9905312be89e5647 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Turn #Covid-19 of ig posts into Bert input format, which contains:
- tokens_tensor
- segments_tensor
- label_tensor
"""
import torch
from torch.utils.data import Dataset
class CovidDataset(Dataset):
    """Dataset wrapping #Covid-19 Instagram posts as BERT model inputs.

    Each item is a ``(tokens_tensor, segments_tensor, label_tensor)`` triple;
    ``label_tensor`` is ``None`` for the test split.
    """
    def __init__(self, df, tokenizer):
        """Store the dataframe and the BERT tokenizer used to encode posts."""
        self.df = df
        self.len = len(self.df)
        self.tokenizer = tokenizer  # BERT tokenizer
    def __getitem__(self, idx):
        """Return the BERT input tensors for the post at position ``idx``."""
        # NOTE(review): the test split is recognized purely by its row count
        # (618 rows) -- fragile; confirm against the data-preparation step.
        row = self.df.iloc[idx, :].values
        if self.df.shape[0] == 618:  # test set: no label available
            text = row[1]
            label_tensor = None
        else:
            label_tensor = torch.tensor(row[0])
            text = row[1]
        # "[CLS]" marker followed by the WordPiece tokens of the post.
        word_pieces = ['[CLS]'] + self.tokenizer.tokenize(text)
        # Map the token series to vocabulary ids.
        token_ids = self.tokenizer.convert_tokens_to_ids(word_pieces)
        tokens_tensor = torch.tensor(token_ids)
        # Single-sentence input: every position belongs to segment 1.
        segments_tensor = torch.tensor([1] * len(word_pieces), dtype=torch.long)
        return (tokens_tensor, segments_tensor, label_tensor)
    def __len__(self):
        """Number of posts in the dataset."""
        return self.len
| 27.673469 | 70 | 0.584808 |
be3257100615b308971a6423435a8cd272d3d5a3 | 6,214 | py | Python | docs/source/conf.py | aquatix/webhaak | c0636133256105e340df570a64c34170b2a94357 | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | aquatix/webhaak | c0636133256105e340df570a64c34170b2a94357 | [
"Apache-2.0"
] | 3 | 2017-12-06T13:44:23.000Z | 2021-03-25T09:19:48.000Z | docs/source/conf.py | aquatix/webhaak | c0636133256105e340df570a64c34170b2a94357 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
# Make the package importable for autodoc: prepend the repository root.
sys.path.insert(0, os.path.abspath('../../..'))
# -- Project information -----------------------------------------------------
project = 'webhaak'
copyright = '2019, Michiel Scholten'
author = 'Michiel Scholten'
# The short X.Y version: derived from the latest git tag so the rendered docs
# always reflect the checked-out revision.
version = ''
# Make sure the working directory is our project
cwd = os.path.dirname(os.path.realpath(__file__))
try:
    version = subprocess.check_output(["git", "describe", "--always", "--tags"], stderr=None, cwd=cwd).strip()
except (subprocess.CalledProcessError, OSError):
    # CalledProcessError: not a git checkout / `git describe` failed.
    # OSError/FileNotFoundError: git is not installed at all -- the original
    # only caught CalledProcessError and crashed in that case.
    version = ''
try:
    # check_output returns bytes on Python 3; decode to str.
    version = version.decode("utf-8")
except AttributeError:
    # version already was a str
    pass
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.ifconfig',
    'sphinx.ext.todo',
    'recommonmark',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# Both reStructuredText and (via recommonmark) Markdown sources are built.
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# NOTE(review): sphinx_rtd_theme is a third-party theme; it must be listed in
# the docs build requirements for this to work outside readthedocs.org.
#html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'webhaakdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'webhaak.tex', 'webhaak Documentation',
     'Michiel Scholten', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'webhaak', 'webhaak Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'webhaak', 'webhaak Documentation',
     author, 'webhaak', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 29.875 | 110 | 0.648214 |
51d00f328f605b4d6b13f961d92723e7f951204a | 997 | py | Python | src/arrays/anagram.py | tmbothe/Data-Structures-and-algorithms | ba6bd0b302d16f84694acee1e1eb5d7f948ceeb2 | [
"MIT"
] | null | null | null | src/arrays/anagram.py | tmbothe/Data-Structures-and-algorithms | ba6bd0b302d16f84694acee1e1eb5d7f948ceeb2 | [
"MIT"
] | null | null | null | src/arrays/anagram.py | tmbothe/Data-Structures-and-algorithms | ba6bd0b302d16f84694acee1e1eb5d7f948ceeb2 | [
"MIT"
] | null | null | null | from nose.tools import assert_equal
def anagram(s1, s2):
    """Return True when *s1* and *s2* are anagrams of one another.

    Comparison is case-insensitive and ignores spaces. By the original
    contract, an empty string is never an anagram of anything (including
    another empty string), so ('', '') -> False.

    Args:
        s1: first candidate string.
        s2: second candidate string.

    Returns:
        bool: True if the normalized strings contain the same characters
        with the same multiplicities.
    """
    s1 = s1.lower().replace(" ", "")
    s2 = s2.lower().replace(" ", "")
    # Preserve original contract: empty strings are never anagrams.
    if not s1 or not s2:
        return False
    if len(s1) != len(s2):
        return False
    # BUGFIX: the original second loop set letters unseen in s1 to +1 instead
    # of -1, which only produced the right answer by accident. Count up for
    # s1 and symmetrically down for s2; anagrams cancel to all zeros.
    counts = {}
    for ch in s1:
        counts[ch] = counts.get(ch, 0) + 1
    for ch in s2:
        counts[ch] = counts.get(ch, 0) - 1
    return not any(counts.values())
class AnagramTest(object):
    """Tiny fixture that checks an anagram solver against fixed cases."""

    def test(self, sol):
        """Run *sol* on each known input pair and assert its verdict."""
        cases = [
            (('go go go', 'gggooo'), True),
            (('abc', 'cba'), True),
            (('hi man', 'hi man'), True),
            (('aabbcc', 'aabbc'), False),
            (('123', '1 2'), False),
        ]
        for args, expected in cases:
            assert_equal(sol(*args), expected)
        print("ALL TEST CASES PASSED")
# Run Tests: exercise anagram() against the fixed cases in AnagramTest.
t = AnagramTest()
t.test(anagram)
97d9422d75b045bc50a662149d3a3d8c2f67c364 | 2,008 | py | Python | pyrlang/match.py | wayfair-contribs/Pyrlang | 7599a9906840d6e8442b3382f7d3cdcb2208cd12 | [
"Apache-2.0"
] | 312 | 2018-09-25T08:14:04.000Z | 2022-03-30T09:01:52.000Z | pyrlang/match.py | wayfair-contribs/Pyrlang | 7599a9906840d6e8442b3382f7d3cdcb2208cd12 | [
"Apache-2.0"
] | 36 | 2018-09-24T11:04:33.000Z | 2021-09-20T14:37:12.000Z | pyrlang/match.py | wayfair-contribs/Pyrlang | 7599a9906840d6e8442b3382f7d3cdcb2208cd12 | [
"Apache-2.0"
] | 41 | 2018-11-06T20:29:32.000Z | 2021-11-29T15:09:53.000Z | # Copyright 2018, Erlang Solutions Ltd, and S2HC Sweden AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def _make_pattern(p):
    """Coerce *p* into a Pattern, unpacking a tuple/list as constructor args."""
    return p if isinstance(p, Pattern) else Pattern(*p)
def _simple_match(data):
return True
def _simple_run(data):
return data
class Match(object):
    """Ordered collection of Patterns that mimics Erlang-style pattern
    matching as used in calls and, more importantly here, in receive.
    """

    def __init__(self, patterns=None):
        # With no patterns given, install a catch-all that returns raw data.
        specs = patterns or [(None, None)]
        self._patterns = [_make_pattern(spec) for spec in specs]

    def __call__(self, data):
        """Return the first Pattern whose predicate accepts *data*, else False."""
        for candidate in self._patterns:
            if candidate.match(data):
                return candidate
        return False

    def match(self, data):
        """Alias for calling the instance directly."""
        return self(data)
class Pattern(object):
    """A (predicate, transform) pair: ``match`` tests whether a message is
    accepted, ``run`` transforms it once matched. Missing callables fall
    back to a catch-all predicate and an identity transform.
    """

    def __init__(self, match_fun=None, run_fun=None):
        match_fun = match_fun or _simple_match
        run_fun = run_fun or _simple_run
        # Validate both hooks, keeping the original check order and messages.
        for label, fun in (("match", match_fun), ("run", run_fun)):
            if not callable(fun):
                raise AttributeError("{} fun {} not callable".format(label, fun))
        self.__match_fun = match_fun
        self.__run_fun = run_fun

    def match(self, data):
        """Apply the predicate to *data*."""
        return self.__match_fun(data)

    def run(self, data):
        """Apply the transform to *data*."""
        return self.__run_fun(data)
| 27.506849 | 79 | 0.654382 |
4bb13f0b6ace5908035db98ca9ef23ec148ab61c | 2,124 | py | Python | script/Inference/others/demo_KSD.py | lamfeeling/Stein-Density-Ratio-Estimation | f3b8a3975d99ace5875e603414e0e6d989ceb1d6 | [
"MIT"
] | 8 | 2019-08-28T13:24:32.000Z | 2019-11-01T04:04:45.000Z | script/Inference/others/demo_KSD.py | anewgithubname/Stein-Density-Ratio-Estimation | f3b8a3975d99ace5875e603414e0e6d989ceb1d6 | [
"MIT"
] | null | null | null | script/Inference/others/demo_KSD.py | anewgithubname/Stein-Density-Ratio-Estimation | f3b8a3975d99ace5875e603414e0e6d989ceb1d6 | [
"MIT"
] | null | null | null | from sdre.helper import *
from scipy.optimize import minimize, Bounds, NonlinearConstraint
from multiprocessing import Pool
from socket import gethostname
from time import time
from kgof.kernel import KGauss
import kgof.util as util
from scipy import io as sio
# Problem dimensions for the toy Gaussian model.
d = 2           # data dimensionality
n = 100         # number of samples used per inference run
dimTheta = 2    # dimensionality of the parameter vector theta

# Unnormalised log-density of the model, summed over the d axis.
# NOTE(review): `sum`, `ones` and `grad` come from the star-import of
# sdre.helper (autograd-wrapped numpy) -- confirm against that module.
logpBar = lambda x, theta: -sum(x**2 + theta*ones([d, 1])*x, 0) / 2
# Gradient of logpBar w.r.t. its first argument x (autograd's default argnum).
gLogP = grad(logpBar)
def KSD(XData, k1, k2, k3, k4, theta):
    """Empirical (V-statistic) kernelized Stein discrepancy of the model at
    parameter ``theta`` against the sample ``XData``.

    Args:
        XData: sample matrix; assumed shape (d, n) -- TODO confirm.
        k1: kernel Gram matrix k(x_i, x_j), shape (n, n).
        k2: list of length d of d/dx kernel-gradient Gram matrices.
        k3: list of length d of d/dy kernel-gradient Gram matrices.
        k4: sum over dimensions of d^2 k / dx dy, shape (n, n).
        theta: model parameter column vector, shape (dimTheta, 1).

    Returns:
        Scalar discrepancy: average of the Stein kernel over all n**2 pairs.
    """
    # Score function of the model evaluated at every sample column.
    g = gLogP(XData, theta)
    # Cross terms between the score and the per-dimension kernel gradients.
    t2 = zeros([n, n])
    for i in range(d):
        t2 = t2 + k2[i]*g[i,:]
    t3 = zeros([n, n])
    for i in range(d):
        t3 = t3 + (k3[i].T*g[i,:]).T
    # Full Stein kernel: score-score term * k, plus gradient and trace terms.
    return sum((dot(g.T,g)*k1 + t2 + t3 + k4).flatten())/n**2
def callbackF(current_theta):
    """scipy.optimize per-iteration callback: log progress and bump the
    module-level iteration counter.
    """
    global Nfeval
    message = "iter {0:4d}, theta norm: {1:5.4f}".format(
        Nfeval, linalg.norm(current_theta))
    print(message)
    Nfeval += 1
def infer(seed, XData):
    """Estimate theta by minimising the kernelized Stein discrepancy.

    Subsamples n columns of XData, precomputes Gaussian-kernel Gram matrices
    (median-heuristic bandwidth), then runs BFGS on the KSD objective.

    Args:
        seed: RNG seed; also embedded in the output .mat file name.
        XData: data matrix; assumed shape (d, n0) with n0 >= n -- TODO confirm.

    Returns:
        Final objective value at the optimum, or -1 when the optimizer
        reports a negative status.
    """
    random.seed(seed); print('seed:', seed)
    n0 = XData.shape[1]
    # Subsample n columns uniformly without replacement.
    idx = random.permutation(n0)
    XData = XData[:, idx[:n]]

    # Median-heuristic squared bandwidth for the Gaussian kernel.
    sig2 = util.meddistance(XData.T, subsample=1000)**2
    kG = KGauss(sig2)

    # Kernel Gram matrix, its mixed-derivative sum, and the per-dimension
    # gradient Gram matrices consumed by KSD().
    k1 = kG.eval(XData.T, XData.T)
    k4 = kG.gradXY_sum(XData.T, XData.T)
    k2 = []
    for i in range(d):
        k2.append(kG.gradX_Y(XData.T, XData.T, i))
    k3 = []
    for i in range(d):
        k3.append(kG.gradY_X(XData.T, XData.T, i))

    # Objective over a flat theta vector; reshape to the column form KSD expects.
    obj = lambda theta: KSD(XData, k1, k2, k3, k4, array([theta]).T)
    grad_obj = grad(obj)
    hess_obj = jacobian(grad_obj)  # NOTE(review): computed but never used

    x0 = random.randn(dimTheta)
    t0 = time()
    res = minimize(obj, x0, jac=grad_obj, method='BFGS', callback=callbackF,
                   options={'disp': True, 'maxiter': 10000})
    print('elapsed:', time() - t0)
    theta = res.x
    print('estimate', theta)
    # (Typo "optimizaiton" left untouched: it is a runtime string.)
    print('\noptimizaiton result', res.status)
    if res.status < 0:
        return -1
    # Persist the estimate keyed by hostname and seed for later aggregation.
    sio.savemat('out/nn %s %d.mat' % (gethostname(), seed),
                {'theta': theta, 'status': res.status})
    return obj(res.x)
if __name__ == '__main__':
    # NOTE(review): `global` at module scope is a no-op -- Nfeval is already a
    # module-level name here; callbackF's own `global Nfeval` is what matters.
    global Nfeval
    Nfeval = 1
    # n standard-normal samples in d dimensions, shifted by +2 on every axis.
    XData = random.standard_normal((d, n)) + 2
    infer(1, XData)
| 26.55 | 79 | 0.572505 |
578d89434381644d3417fcef2b5fa450ddf89855 | 2,473 | py | Python | tests/local_server.py | EigenLab/WangDaChui | fc032949626c17f9050817c87b67ec933f6cee54 | [
"MIT"
] | 7 | 2018-08-13T15:10:20.000Z | 2021-04-12T12:54:36.000Z | tests/local_server.py | EigenLab/WangDaChui | fc032949626c17f9050817c87b67ec933f6cee54 | [
"MIT"
] | null | null | null | tests/local_server.py | EigenLab/WangDaChui | fc032949626c17f9050817c87b67ec933f6cee54 | [
"MIT"
] | 3 | 2018-08-13T13:48:35.000Z | 2021-05-19T06:38:55.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
启动一个本地服务,用于测试
@author:nikan(859905874@qq.com)
@file: local_server.py
@time: 2018/8/1 上午2:49
"""
import time
import random
from flask import Flask, request
from flask_httpauth import HTTPBasicAuth, HTTPDigestAuth, MultiAuth
from gevent.pywsgi import WSGIServer
from tests.config import TEST_PORT, USER
# Flask app under test, protected by both HTTP Basic and Digest auth.
app = Flask(__name__)
basic_auth = HTTPBasicAuth()
digest_auth = HTTPDigestAuth()
# Combined authenticator: digest is the primary scheme, basic the alternative.
auth = MultiAuth(digest_auth, basic_auth)
# Digest auth needs a secret key to sign its nonces.
app.config['SECRET_KEY'] = 'secret'
@basic_auth.get_password
def get_pw(username):
    """Basic-auth password lookup; records a failure for unknown users."""
    if username not in USER['user']:
        from tests import test_benchs
        test_benchs.local_server_exception.append('No auth')
        return None
    return USER['password']
@digest_auth.get_password
def get_pw(username):
    # NOTE: this intentionally reuses the basic-auth lookup's name; both stay
    # registered because each decorator captures the function object itself.
    """Digest-auth password lookup; records a failure for unknown users."""
    if username in USER['user']:
        return USER['password']
    from tests import test_benchs
    test_benchs.local_server_exception.append('No auth')
    return None
@app.route('/')
def hello_world():
    """Root endpoint: fixed greeting used as a liveness check."""
    return 'Hello, World!'
@app.route('/test_auth')
@auth.login_required
def auth_test():
    """Protected endpoint used to verify that multi-auth login succeeds."""
    # Removed a leftover debug print ('hahahashabi') that spammed the server
    # log on every authenticated request.
    return 'Auth OK!'
@app.route('/random_sleep')
def hello_sleep():
    """Simulate a slow endpoint: sleep 2-3 seconds before responding."""
    delay = random.randint(2, 3)
    time.sleep(delay)
    return 'Hello, Sleep for {}s'.format(delay)
@app.route('/test_get')
def get():
    """Minimal GET endpoint returning a fixed body for client tests."""
    return 'i love h'
@app.route('/test_get2')
def get2():
    """Duplicate of /test_get, for tests that need two distinct URLs."""
    return 'i love h'
@app.route('/test_post_data', methods=['POST'])
def post_data():
    """Echo the raw POST body; record a failure when the body is empty."""
    payload = request.get_data()
    if not payload:
        from tests import test_benchs
        test_benchs.local_server_exception.append('No data')
    return 'test_post_data {}'.format(payload)
@app.route('/test_post_json', methods=['POST'])
def post_json():
    """Echo the parsed JSON body; record a failure when no JSON was sent."""
    body = request.get_json()
    if not body:
        from tests import test_benchs
        test_benchs.local_server_exception.append('No json')
    return 'test_post_json {}'.format(body)
# @app.route('/test_post_file', methods=['POST'])
# def test_post_file():
# file = request.files
# return 'test_post_file'
@app.route('/test_put_data', methods=['PUT'])
def put_data():
    """PUT endpoint: body is ignored, returns a fixed marker string."""
    return 'test_put_data'
@app.route('/test_delete', methods=['DELETE'])
def delete():
    """DELETE endpoint: returns a fixed marker string."""
    return 'test_delete'
def run():
    """Start the blocking gevent WSGI server on localhost:TEST_PORT."""
    banner = 'Testing server is serving on {}...'.format(TEST_PORT)
    print(banner)
    server = WSGIServer(('localhost', TEST_PORT), app)
    server.serve_forever()
# Allow running this module directly as a standalone test server.
if __name__ == '__main__':
    run()
e5562f35f0e512bf15f45029b7e10beabeb96338 | 1,096 | py | Python | plugins/beebeeto/utils/payload/webshell/webshell.py | aliluyala/PocHunter | ff2d7e745eabd81ffb77920fe00813b17fc432cf | [
"MIT"
] | 95 | 2016-07-05T12:44:25.000Z | 2022-01-24T09:16:44.000Z | plugins/beebeeto/utils/payload/webshell/webshell.py | sigma-random/PocHunter | ff2d7e745eabd81ffb77920fe00813b17fc432cf | [
"MIT"
] | 2 | 2016-10-24T09:35:24.000Z | 2017-07-28T08:50:31.000Z | plugins/beebeeto/utils/payload/webshell/webshell.py | sigma-random/PocHunter | ff2d7e745eabd81ffb77920fe00813b17fc432cf | [
"MIT"
] | 39 | 2016-06-13T07:47:39.000Z | 2020-11-26T00:53:48.000Z | #author: fyth
import requests
class Webshell:
_password = ''
_content = ''
_check_statement = ''
_keyword = ''
_check_data = {}
def __init__(self, pwd='', content='', check='', keyword=''):
if pwd:
self._password = pwd
if content:
self._content = content
if check:
self._check_statement = check
if keyword:
self._keyword = keyword
self._check_data[self._password] = self._check_statement
def set_pwd(self, pwd):
self._password = pwd
def get_pwd(self):
return self._password
def get_content(self):
return self._content.format(self._password)
def check(self, url):
try:
content = requests.post(url, data=self._check_data, timeout=10).content
return self._keyword in content
except requests.Timeout:
return False
class VerifyShell(Webshell):
def __init__(self, content='', keyword=''):
Webshell.__init__(self, content=content, keyword=keyword)
self._check_data = {}
| 24.909091 | 83 | 0.596715 |
032329fcbba3051654fc6bd9822c540a3e9f8c44 | 13,691 | py | Python | baselines/a2c/a2c.py | llach/baselines | 22e72ad9d4c9bc61e98186d6dcb09bf53892a8c2 | [
"MIT"
] | null | null | null | baselines/a2c/a2c.py | llach/baselines | 22e72ad9d4c9bc61e98186d6dcb09bf53892a8c2 | [
"MIT"
] | null | null | null | baselines/a2c/a2c.py | llach/baselines | 22e72ad9d4c9bc61e98186d6dcb09bf53892a8c2 | [
"MIT"
] | 1 | 2018-08-15T14:32:26.000Z | 2018-08-15T14:32:26.000Z | import time
import json
import functools
import datetime
import numpy as np
import tensorflow as tf
from baselines import logger
from baselines.common import set_global_seeds, explained_variance
from baselines.common import tf_util, colorize
from baselines.common.policies import build_policy
from forkan.common.utils import log_alg
from forkan.common.csv_logger import CSVLogger
from forkan.common.tf_utils import vector_summary, scalar_summary
from baselines.a2c.utils import Scheduler, find_trainable_variables
from baselines.a2c.runner import Runner
from baselines.common.tf_util import get_session
import datetime
from tensorflow import losses
from tqdm import tqdm
import matplotlib.pyplot as plt
class Model(object):

    """
    We use this class to :
        __init__:
        - Creates the step_model (for sampling actions in the environments)
        - Creates the train_model (for gradient updates over a batch)

        train():
        - Make the training part (feedforward and retropropagation of gradients)

        save/load():
        - Save load the model
    """
    def __init__(self, policy, env, nsteps,
            ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4,
            alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear'):

        sess = tf_util.get_session()
        nenvs = env.num_envs
        nbatch = nenvs*nsteps

        with tf.variable_scope('a2c_model', reuse=tf.AUTO_REUSE):
            # step_model is used for sampling (one step across nenvs envs)
            step_model = policy(nenvs, 1, sess)

            # train_model is used to train our network (full nbatch batch)
            train_model = policy(nbatch, nsteps, sess)

        # Placeholders: taken actions, advantages, returns, learning rate.
        A = tf.placeholder(train_model.action.dtype, train_model.action.shape)
        ADV = tf.placeholder(tf.float32, [nbatch])
        R = tf.placeholder(tf.float32, [nbatch])
        LR = tf.placeholder(tf.float32, [])

        # Calculate the loss
        # Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss

        # Policy loss
        neglogpac = train_model.pd.neglogp(A)
        # L = A(s,a) * -logpi(a|s)
        pg_loss = tf.reduce_mean(ADV * neglogpac)

        # Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy.
        entropy = tf.reduce_mean(train_model.pd.entropy())

        # Value loss
        vf_loss = losses.mean_squared_error(tf.squeeze(train_model.vf), R)

        loss = pg_loss - entropy*ent_coef + vf_loss * vf_coef

        # Update parameters using loss
        # 1. Get the model parameters
        params = find_trainable_variables("a2c_model")

        # 2. Calculate the gradients
        grads = tf.gradients(loss, params)
        if max_grad_norm is not None:
            # Clip the gradients (normalize)
            grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
        grads = list(zip(grads, params))
        # zip aggregate each gradient with parameters associated
        # For instance zip(ABCD, xyza) => Ax, By, Cz, Da

        # 3. Make op for one policy and value update step of A2C
        trainer = tf.train.RMSPropOptimizer(learning_rate=LR, decay=alpha, epsilon=epsilon)

        _train = trainer.apply_gradients(grads)

        # Learning-rate schedule over the whole training run.
        lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)

        def train(obs, states, rewards, masks, actions, values):
            """Run one gradient update on a batch; returns (pg, vf, entropy) losses."""
            # Here we calculate advantage A(s,a) = R + yV(s') - V(s)
            # rewards = R + yV(s')
            advs = rewards - values
            # NOTE(review): the scheduler is stepped once per batch element and
            # only the last value is used -- this matches upstream baselines.
            for step in range(len(obs)):
                cur_lr = lr.value()

            td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, LR:cur_lr}
            if states is not None:
                # Recurrent policies also need hidden states and done masks.
                td_map[train_model.S] = states
                td_map[train_model.M] = masks
            policy_loss, value_loss, policy_entropy, _ = sess.run(
                [pg_loss, vf_loss, entropy, _train],
                td_map
            )
            return policy_loss, value_loss, policy_entropy

        # Public handles used by learn() and the runner.
        self.train = train
        self.train_model = train_model
        self.step_model = step_model
        self.step = self.step_model.step
        self.value = step_model.value
        self.initial_state = step_model.initial_state
        self.save = functools.partial(tf_util.save_variables, sess=sess)
        self.load = functools.partial(tf_util.load_variables, sess=sess)
        tf.global_variables_initializer().run(session=sess)
def learn(
    network,
    env,
    seed=None,
    nsteps=5,
    total_timesteps=int(80e7),
    vf_coef=0.5,
    ent_coef=0.01,
    max_grad_norm=0.5,
    lr=7e-4,
    lrschedule='linear',
    epsilon=1e-5,
    alpha=0.99,
    gamma=0.99,
    reward_average=20,
    log_interval=100,
    load_path=None,
    env_id=None,
    play=False,
    save=True,
    tensorboard=False,
    **network_kwargs):

    '''
    Main entrypoint for the A2C algorithm. Trains a policy with the given
    network architecture on the given environment.

    Parameters:
    -----------

    network:            policy network architecture: a string naming a standard
                        architecture (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small,
                        conv_only -- see baselines.common/models.py) or a function
                        mapping an input tensor to (output_tensor, extra_feed);
                        see baselines.common/policies.py/lstm for recurrent nets.

    env:                RL environment implementing the VecEnv interface
                        (baselines.common/vec_env) or wrapped in DummyVecEnv.

    seed:               seed for reproducibility (None = system noise generator).

    nsteps:             env steps per update; batch size is nsteps * nenv
                        with nenv parallel environment copies.

    total_timesteps:    total number of timesteps to train on (default: 8e8).

    vf_coef, ent_coef:  coefficients of the value-function loss and the policy
                        entropy in the total loss (defaults: 0.5, 0.01).

    max_grad_norm:      gradients clipped to this global L2 norm (default: 0.5).

    lr, lrschedule:     RMSProp learning rate and its schedule ('linear',
                        'constant', or a function [0..1] -> [0..1]).

    epsilon, alpha:     RMSProp epsilon and decay (defaults: 1e-5, 0.99).

    gamma:              reward discount factor (default: 0.99).

    reward_average:     window length of the running mean episode reward.

    log_interval:       print logs every `log_interval` updates (default: 100).

    load_path:          optional checkpoint to restore before training.

    env_id, play, save, tensorboard: bookkeeping flags; `play=True` returns the
                        (optionally restored) model without training.

    **network_kwargs:   forwarded to the policy / network builder
                        (e.g. 'mlp' takes num_hidden and num_layers).
    '''
    set_global_seeds(seed)

    # Derive the VAE tag (if the env wraps one) for the log directory name.
    if hasattr(env, 'vae_name'):
        vae = env.vae_name.split('lat')[0][:-1]
    else:
        vae = None

    savepath, env_id_lower = log_alg('a2c-debug', env_id, locals(), vae, num_envs=env.num_envs, save=save, lr=lr)

    csv_header = ['timestamp', "nupdates", "total_timesteps", "fps", "policy_entropy", "value_loss",
                  "explained_variance", "mean_reward [{}]".format(reward_average), "nepisodes"]
    # Renamed from `csv` so the name does not shadow the stdlib module.
    csv_log = CSVLogger('{}progress.csv'.format(savepath), *csv_header)

    # Get the nb of env
    nenvs = env.num_envs
    policy = build_policy(env, network, **network_kwargs)

    # Calculate the batch_size
    nbatch = nenvs * nsteps

    # Instantiate the model object (that creates step_model and train_model)
    model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
                  max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon,
                  total_timesteps=total_timesteps, lrschedule=lrschedule)

    if load_path is not None:
        print('loading model ... ')
        model.load(load_path)

    if play:
        # Return the (restored) model without training.
        return model

    runner = Runner(env, model, nsteps=nsteps, gamma=gamma)

    # Start total timer
    tstart = time.time()

    # Per-env running returns plus the list of completed episode returns.
    episode_rewards = []
    current_rewards = [0.0] * nenvs
    nepisodes = 0
    # np.inf instead of the deprecated np.infty alias (removed in NumPy 2.0).
    best_rew = -np.inf

    if tensorboard:
        print('logging to tensorboard')
        s = get_session()
        import os
        fw = tf.summary.FileWriter('{}/a2c/{}/'.format(os.environ['HOME'], savepath.split('/')[-2]), s.graph)

        # Track one encoder kernel so its mean magnitude can be logged.
        ft = None
        # Renamed from `vars`, which shadowed the builtin.
        all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        for v in all_vars:
            if v.name == 'enc-conv-3/kernel:0':
                ft = v

        def ftv_out():
            """Mean of the tracked encoder kernel value, or 0.0 when absent."""
            if ft is not None:
                ftv = np.mean(s.run([ft]), axis=-1)
                ftv = np.mean(ftv)
                print(ftv)
                return ftv
            # BUGFIX: the original ended this branch with a bare `0` expression
            # and therefore returned None, which crashes sess.run when the
            # value is fed into weight_ph below.
            return 0.0

        pl_ph = tf.placeholder(tf.float32, (), name='policy-loss')
        pe_ph = tf.placeholder(tf.float32, (), name='policy-entropy')
        vl_ph = tf.placeholder(tf.float32, (), name='value-loss')
        rew_ph = tf.placeholder(tf.float32, (), name='reward')
        # NOTE(review): assumes 1-D continuous actions of shape (nbatch, 1);
        # the second placeholder reuses the name 'actions' (TF uniquifies it).
        ac_ph = tf.placeholder(tf.float32, (nbatch, 1), name='actions')
        ac_clip_ph = tf.placeholder(tf.float32, (nbatch, 1), name='actions')
        weight_ph = tf.placeholder(tf.float32, (), name='encoder-kernel')

        scalar_summary('encoder-conv-kernel', weight_ph)
        tf.summary.histogram('actions-hist', ac_ph)
        tf.summary.histogram('actions-hist-clipped', ac_clip_ph)
        scalar_summary('reward', rew_ph)
        scalar_summary('value-loss', vl_ph)
        scalar_summary('policy-loss', pl_ph)
        scalar_summary('policy-entropy', pe_ph)
        vector_summary('actions', ac_ph)
        merged_ = tf.summary.merge_all()

    for update in range(1, total_timesteps//nbatch+1):
        # Get mini batch of experiences
        obs, states, rewards, masks, actions, values, dones, raw_rewards = runner.run()
        policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)

        # Accumulate undiscounted raw rewards per env; close out an episode
        # whenever its done flag fires.
        for n, (rs, ds) in enumerate(zip(raw_rewards, dones)):
            rs = rs.tolist()
            ds = ds.tolist()
            for r, d in zip(rs, ds):
                if d:
                    episode_rewards.append(current_rewards[n])
                    current_rewards[n] = 0.0
                else:
                    current_rewards[n] += r

        if len(episode_rewards) > reward_average:
            mrew = np.mean(episode_rewards[-reward_average:])
        else:
            mrew = -np.inf

        # NOTE(review): counts at most one episode per update even when several
        # envs finished; kept as-is to preserve the original metric.
        if np.any(dones):
            nepisodes += 1

        if mrew > best_rew:
            logger.log('model improved from {} to {}. saving ...'.format(best_rew, mrew))
            model.save('{}weights_best'.format(savepath))
            best_rew = mrew

        # Calculates if value function is a good predicator of the returns (ev > 1)
        # or if it's just worse than predicting nothing (ev =< 0)
        ev = explained_variance(values, rewards)

        # Calculate the fps (frame per second)
        nseconds = time.time() - tstart
        fps = int((update * nbatch) / nseconds)

        csv_log.writeline(datetime.datetime.now().isoformat(), update, update*nbatch, fps, float(policy_entropy), float(value_loss), float(ev),
                          float(mrew), nepisodes)

        if tensorboard:
            summary = s.run(merged_, feed_dict={
                pl_ph: policy_loss,
                pe_ph: policy_entropy,
                vl_ph: value_loss,
                rew_ph: mrew,
                ac_ph: actions,
                ac_clip_ph: np.clip(actions, -2, 2),
                weight_ph: ftv_out(),
            })
            fw.add_summary(summary, update)

        if update % log_interval == 0 or update == 1:
            logger.record_tabular("nupdates", update)
            logger.record_tabular("total_timesteps", update*nbatch)
            logger.record_tabular("fps", fps)
            logger.record_tabular("policy_entropy", float(policy_entropy))
            logger.record_tabular("value_loss", float(value_loss))
            logger.record_tabular("explained_variance", float(ev))
            logger.record_tabular("mean_reward [{}]".format(reward_average), float(mrew))
            logger.record_tabular("nepisodes", nepisodes)
            logger.dump_tabular()

            # Rough ETA based on average throughput so far.
            perc = ((update * nbatch) / total_timesteps) * 100
            steps2go = total_timesteps - (update * nbatch)
            secs2go = steps2go / fps
            min2go = secs2go / 60
            hrs = int(min2go // 60)
            mins = int(min2go) % 60
            print(colorize('ETA: {}h {}min | done {}% '.format(hrs, mins, int(perc)), color='cyan'))

    model.save('{}weights_latest'.format(savepath))
    return model
| 37.612637 | 186 | 0.627785 |
e75894db5fe16380c2247f7684b6f509d986422f | 13,438 | py | Python | out/python/openapi_client/model/forecast_item.py | energychain/corrently-api | 757b31214b1a82fed3bac4fcd1f51b54a58e7f8f | [
"MIT"
] | null | null | null | out/python/openapi_client/model/forecast_item.py | energychain/corrently-api | 757b31214b1a82fed3bac4fcd1f51b54a58e7f8f | [
"MIT"
] | null | null | null | out/python/openapi_client/model/forecast_item.py | energychain/corrently-api | 757b31214b1a82fed3bac4fcd1f51b54a58e7f8f | [
"MIT"
] | null | null | null | """
Corrently.io
*Corrently - from italian corrente, which is energy* # Introduction The Corrently ecosystem gets maintained by [STROMDAO GmbH](https://www.stromdao.de/) to support green energy services for prosumers, grid operators, regulators, integrators or any other party with an emerging need of consensus driven management. As the [energy product Corrently](https://www.corrently.de/) got first launched in Germany parts of this documentation provide simple translations for better understanding. [Released SKDs for Download](https://github.com/energychain/corrently-api/releases) # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: dev@stromdao.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from openapi_client.exceptions import ApiAttributeError
class ForecastItem(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained properties on this schema.
    allowed_values = {
    }

    # No length/range/regex validations on this schema.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'epochtime': (int,),  # noqa: E501
            'time_stamp': (int,),  # noqa: E501
            'gsi': (int,),  # noqa: E501
            'scaled': (bool,),  # noqa: E501
            'sci': (int,),  # noqa: E501
            'energyprice': (float,),  # noqa: E501
            'co2_g_oekostrom': (int,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This schema has no discriminator property.
        return None

    # Python attribute name -> JSON key as serialized on the wire.
    attribute_map = {
        'epochtime': 'epochtime',  # noqa: E501
        'time_stamp': 'timeStamp',  # noqa: E501
        'gsi': 'gsi',  # noqa: E501
        'scaled': 'scaled',  # noqa: E501
        'sci': 'sci',  # noqa: E501
        'energyprice': 'energyprice',  # noqa: E501
        'co2_g_oekostrom': 'co2_g_oekostrom',  # noqa: E501
    }

    # Attributes only settable when deserializing server data
    # (via _from_openapi_data); __init__ rejects them.
    read_only_vars = {
    }

    # No allOf/oneOf/anyOf composition for this schema.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """ForecastItem - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            epochtime (int): Timestamps in Seconds. [optional]  # noqa: E501
            time_stamp (int): Timestamp in Standard Milliseconds. [optional]  # noqa: E501
            gsi (int): Actual GreenPowerIndex for given Timestamp (between 0-100). [optional]  # noqa: E501
            scaled (bool): Indicates if scaling is in operation to predict values. [optional]  # noqa: E501
            sci (int): Subindex just for Solar Energy. [optional]  # noqa: E501
            energyprice (float): Local/regional energyprice modification (cent per kWh or euro per MWh).. [optional]  # noqa: E501
            co2_g_oekostrom (int): CO2 footprint in Gramm per kwh (only Green Power). [optional]  # noqa: E501
        """

        # Pop framework-internal keyword arguments before treating the rest
        # as schema properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal attribute names that must never be treated as model properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """ForecastItem - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            epochtime (int): Timestamps in Seconds. [optional]  # noqa: E501
            time_stamp (int): Timestamp in Standard Milliseconds. [optional]  # noqa: E501
            gsi (int): Actual GreenPowerIndex for given Timestamp (between 0-100). [optional]  # noqa: E501
            scaled (bool): Indicates if scaling is in operation to predict values. [optional]  # noqa: E501
            sci (int): Subindex just for Solar Energy. [optional]  # noqa: E501
            energyprice (float): Local/regional energyprice modification (cent per kWh or euro per MWh).. [optional]  # noqa: E501
            co2_g_oekostrom (int): CO2 footprint in Gramm per kwh (only Green Power). [optional]  # noqa: E501
        """

        # Near-duplicate of _from_openapi_data by template design: this path
        # additionally rejects read-only attributes at the bottom.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
| 47.822064 | 589 | 0.584164 |
66209ebd81ea57fad38eb2ff05808474acf92f63 | 1,405 | py | Python | built.py | harmy/terraform-aws-lambda | d2a2f4bb223e086d4e0698a000aac62528b5104d | [
"MIT"
] | 2 | 2019-01-29T13:56:35.000Z | 2022-01-18T02:36:31.000Z | built.py | harmy/terraform-aws-lambda | d2a2f4bb223e086d4e0698a000aac62528b5104d | [
"MIT"
] | null | null | null | built.py | harmy/terraform-aws-lambda | d2a2f4bb223e086d4e0698a000aac62528b5104d | [
"MIT"
] | null | null | null | #!/usr/bin/env python2.7
"""Terraform external data source helper for terraform-aws-lambda.

Reads a JSON query on stdin with three keys -- ``build_command``,
``filename_old`` (from the Terraform state) and ``filename_new`` (from
hash.py).  When the filename is unchanged it makes sure the previously
built zip file still exists (rebuilding it if it was cleaned up) and
refreshes its mtime, then prints the resulting filename back to Terraform
as JSON on stdout.
"""
from __future__ import print_function

import json
import os
import subprocess
import sys

# Parse the query.
query = json.load(sys.stdin)
build_command = query['build_command']
filename_old = query['filename_old']
filename_new = query['filename_new']

# If the old filename (from the Terraform state) matches the new filename
# (from hash.py) then the source code has not changed and thus the zip file
# should not have changed.
if filename_old == filename_new:
    if os.path.exists(filename_new):
        # Update the file time so it doesn't get cleaned up,
        # which would result in an unnecessary rebuild.
        os.utime(filename_new, None)
    else:
        # If the file is missing, then it was probably generated on another
        # machine, or it was created a long time ago and cleaned up. This is
        # expected behaviour. However if Terraform needs to upload the file
        # (e.g. someone manually deleted the Lambda function via the AWS
        # console) then it is possible that Terraform will try to upload
        # the missing file. I don't know how to tell if Terraform is going
        # to try to upload the file or not, so always ensure the file exists.
        subprocess.check_output(build_command, shell=True)

# Output the filename to Terraform.
json.dump({
    'filename': filename_new,
}, sys.stdout, indent=2)
sys.stdout.write('\n')
| 35.125 | 77 | 0.712456 |
1e7b3f1d1a69ff8e30aecb2145273b643ce84bbd | 918 | py | Python | src/staffordFile.py | JacobMannix/docker_development | c3f340ed62552078e7a83832aba4ec1da217dce0 | [
"MIT"
] | null | null | null | src/staffordFile.py | JacobMannix/docker_development | c3f340ed62552078e7a83832aba4ec1da217dce0 | [
"MIT"
] | null | null | null | src/staffordFile.py | JacobMannix/docker_development | c3f340ed62552078e7a83832aba4ec1da217dce0 | [
"MIT"
] | null | null | null | # Jacob Mannix [09-09-2020]
# stafford_file function to initially find the postTitle value and it it doesnt exist it will create one with a default value.
# Import Dependencies
import os
# Function
def staffordFile(file_path):
    """Return the previously stored post title, creating the file if absent.

    If *file_path* exists, the first line of the file (including any
    trailing newline, exactly as stored) is returned; an empty file yields
    an empty string instead of crashing.  If the file does not exist yet,
    it is created containing a default placeholder title, and that
    placeholder is returned.

    Args:
        file_path: Path of the file that caches the last post title.

    Returns:
        The stored (or newly written default) post title string.
    """
    if os.path.isfile(file_path):
        # Only the first line matters, so avoid reading the whole file
        # into a list just to index element 0 (which also raised
        # IndexError for an empty file).
        with open(file_path, 'r') as file:
            return file.readline()

    # File does not exist yet: seed it with a default placeholder title.
    post_title = 'no previous race title'
    with open(file_path, "w") as output:
        output.write(post_title)
    return post_title
733bfa46fb9b9e7ef619ec4281b9b626ca55b5d3 | 2,592 | py | Python | conftest.py | BrotherGelo/Software-Testing_first_task | c46d5c000fdad2030c119e4cad3a8ba0a8d8aa3f | [
"Apache-2.0"
] | null | null | null | conftest.py | BrotherGelo/Software-Testing_first_task | c46d5c000fdad2030c119e4cad3a8ba0a8d8aa3f | [
"Apache-2.0"
] | null | null | null | conftest.py | BrotherGelo/Software-Testing_first_task | c46d5c000fdad2030c119e4cad3a8ba0a8d8aa3f | [
"Apache-2.0"
] | null | null | null | from fixture.application import Application
import pytest
import json
import os.path
import importlib
import jsonpickle
from fixture.db import DbFixture
from fixture.orm import ORMFixture
# Module-level caches shared by the fixtures below: ``fixture`` holds the
# lazily created Application wrapper, ``target`` the parsed JSON config.
fixture = None
target = None
def load_config(file):
    """Load the JSON target configuration once and cache it module-wide."""
    global target
    if target is not None:
        return target
    config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), file)
    with open(config_path) as config_file:
        target = json.load(config_file)
    return target
@pytest.fixture(scope="session")
def orm():
    # Direct ORM-level access to the application's database; credentials
    # are hard-coded for the local test environment.
    return ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
@pytest.fixture
def app(request):
    """Return a lazily (re)created, logged-in Application fixture.

    The instance is cached in the module-level ``fixture`` global and is
    rebuilt whenever there is no cached instance or the previous one is no
    longer valid (``fixture.is_valid()`` is false).
    """
    global fixture
    browser = request.config.getoption("--browser")
    web_config = load_config(request.config.getoption("--target"))['web']
    if fixture is None or not fixture.is_valid():
        fixture = Application(browser=browser, base_url=web_config['baseURL'])
    # Re-establish the login on every use in case a previous test logged out.
    fixture.session.ensure_login(username=web_config['username'], password=web_config['password'])
    return fixture
@pytest.fixture(scope="session", autouse=True)
def stop(request):
    """Session-wide teardown: log out and close the browser when the run ends.

    Registered ``autouse`` so the finalizer always runs.  The cleanup is
    skipped when the ``app`` fixture was never instantiated (the module
    global ``fixture`` is still ``None``); previously that case raised
    ``AttributeError: 'NoneType' object has no attribute 'session'``
    during teardown.
    """
    def fin():
        # Guard: no browser session may ever have been created.
        if fixture is not None:
            fixture.session.ensure_logout()
            fixture.destroy()
    request.addfinalizer(fin)
    return fixture
@pytest.fixture
def check_ui(request):
    # True when --check_ui was passed: enables the slower UI-vs-DB checks.
    return request.config.getoption("--check_ui")
@pytest.fixture(scope="session")
def db(request):
    """Session-scoped raw database fixture, destroyed at the end of the run."""
    db_config = load_config(request.config.getoption("--target"))['db']
    dbfixture = DbFixture(host=db_config['host'], name=db_config['name'], user=db_config['user'], password=db_config['password'])
    def fin():
        # Close the database connection once the whole session is finished.
        dbfixture.destroy()
    request.addfinalizer(fin)
    return dbfixture
def pytest_addoption(parser):
    """Register the command line options consumed by the fixtures above."""
    option_specs = (
        ("--browser", {"action": "store", "default": "firefox"}),
        ("--target", {"action": "store", "default": "target.json"}),
        ("--check_ui", {"action": "store_true"}),
    )
    for option_name, option_kwargs in option_specs:
        parser.addoption(option_name, **option_kwargs)
def pytest_generate_tests(metafunc):
    """Parametrize tests from data modules or JSON files by fixture name.

    Fixtures named ``data_<x>`` are fed from ``data/<x>.py`` (its
    ``testdata`` attribute); fixtures named ``json_<x>`` are fed from
    ``data/<x>.json``.  Test ids are the ``str()`` of each data item.
    """
    # NOTE(review): the loop variable shadows the module-level ``fixture``
    # global; it is only read here, so this is harmless but confusing.
    for fixture in metafunc.fixturenames:
        if fixture.startswith("data_"):
            testdata = load_from_module(fixture[5:])
            metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
        elif fixture.startswith("json_"):
            testdata = load_from_json(fixture[5:])
            metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
def load_from_module(module):
    """Import ``data.<module>`` and return its ``testdata`` attribute."""
    data_module = importlib.import_module("data." + module)
    return data_module.testdata
def load_from_json(file):
    """Load parametrization data from ``data/<file>.json`` via jsonpickle."""
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, "data/%s.json" % file)) as source:
        return jsonpickle.decode(source.read())
f1df767f88132177919617cd5d2d5987554c3909 | 490 | py | Python | stackdio/api/volumes/migrations/0005_0_8_0_migrations.py | hdmillerdr/stackdio | 84be621705031d147e104369399b872d5093ef64 | [
"Apache-2.0"
] | 9 | 2015-12-18T22:44:55.000Z | 2022-02-07T19:34:44.000Z | stackdio/api/volumes/migrations/0005_0_8_0_migrations.py | hdmillerdr/stackdio | 84be621705031d147e104369399b872d5093ef64 | [
"Apache-2.0"
] | 77 | 2015-01-12T17:49:38.000Z | 2017-02-24T17:57:46.000Z | stackdio/api/volumes/migrations/0005_0_8_0_migrations.py | hdmillerdr/stackdio | 84be621705031d147e104369399b872d5093ef64 | [
"Apache-2.0"
] | 11 | 2015-01-23T15:50:19.000Z | 2022-02-07T19:34:45.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-03-14 16:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.9.13): alter ``Volume.volume_id``
    to a blank-able ``CharField(max_length=64)``."""

    dependencies = [
        ('volumes', '0004_0_8_0_migrations'),
    ]

    operations = [
        migrations.AlterField(
            model_name='volume',
            name='volume_id',
            field=models.CharField(blank=True, max_length=64, verbose_name='Volume ID'),
        ),
    ]
| 23.333333 | 88 | 0.628571 |
fa030a3b22cdfc79cbffb1b67f515e1792960903 | 6,619 | py | Python | venv/Lib/site-packages/sqlalchemy/orm/scoping.py | svercillo/flaskwebapi | 48e3417c25fc25166203cb88f959345f548a38bc | [
"Apache-2.0"
] | 2 | 2020-05-27T19:53:05.000Z | 2020-05-27T19:53:07.000Z | venv/Lib/site-packages/sqlalchemy/orm/scoping.py | svercillo/flaskwebapi | 48e3417c25fc25166203cb88f959345f548a38bc | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/sqlalchemy/orm/scoping.py | svercillo/flaskwebapi | 48e3417c25fc25166203cb88f959345f548a38bc | [
"Apache-2.0"
] | null | null | null | # orm/scoping.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import class_mapper
from . import exc as orm_exc
from .session import Session
from .. import exc as sa_exc
from ..util import ScopedRegistry
from ..util import ThreadLocalRegistry
from ..util import warn
__all__ = ["scoped_session"]
class scoped_session(object):
    """Provides scoped management of :class:`.Session` objects.

    See :ref:`unitofwork_contextual` for a tutorial.

    """

    session_factory = None
    """The `session_factory` provided to `__init__` is stored in this
    attribute and may be accessed at a later time.  This can be useful when
    a new non-scoped :class:`.Session` or :class:`_engine.Connection` to the
    database is needed."""

    def __init__(self, session_factory, scopefunc=None):
        """Construct a new :class:`.scoped_session`.

        :param session_factory: a factory to create new :class:`.Session`
         instances. This is usually, but not necessarily, an instance
         of :class:`.sessionmaker`.

        :param scopefunc: optional function which defines
         the current scope.   If not passed, the :class:`.scoped_session`
         object assumes "thread-local" scope, and will use
         a Python ``threading.local()`` in order to maintain the current
         :class:`.Session`.  If passed, the function should return
         a hashable token; this token will be used as the key in a
         dictionary in order to store and retrieve the current
         :class:`.Session`.

        """
        self.session_factory = session_factory

        # A custom scope function selects a key-based registry; otherwise
        # fall back to one Session per thread via threading.local().
        if scopefunc:
            self.registry = ScopedRegistry(session_factory, scopefunc)
        else:
            self.registry = ThreadLocalRegistry(session_factory)

    def __call__(self, **kw):
        r"""Return the current :class:`.Session`, creating it
        using the :attr:`.scoped_session.session_factory` if not present.

        :param \**kw: Keyword arguments will be passed to the
         :attr:`.scoped_session.session_factory` callable, if an existing
         :class:`.Session` is not present.  If the :class:`.Session` is present
         and keyword arguments have been passed,
         :exc:`~sqlalchemy.exc.InvalidRequestError` is raised.

        """
        if kw:
            # Keyword arguments can only be honored when constructing a
            # fresh Session; refuse to silently ignore them otherwise.
            if self.registry.has():
                raise sa_exc.InvalidRequestError(
                    "Scoped session is already present; "
                    "no new arguments may be specified."
                )
            else:
                sess = self.session_factory(**kw)
                self.registry.set(sess)
                return sess
        else:
            return self.registry()

    def remove(self):
        """Dispose of the current :class:`.Session`, if present.

        This will first call :meth:`.Session.close` method
        on the current :class:`.Session`, which releases any existing
        transactional/connection resources still being held; transactions
        specifically are rolled back.  The :class:`.Session` is then
        discarded.   Upon next usage within the same scope,
        the :class:`.scoped_session` will produce a new
        :class:`.Session` object.

        """
        if self.registry.has():
            self.registry().close()
        self.registry.clear()

    def configure(self, **kwargs):
        """reconfigure the :class:`.sessionmaker` used by this
        :class:`.scoped_session`.

        See :meth:`.sessionmaker.configure`.

        """
        if self.registry.has():
            # Configuration only affects Sessions created after this call.
            warn(
                "At least one scoped session is already present. "
                " configure() can not affect sessions that have "
                "already been created."
            )

        self.session_factory.configure(**kwargs)

    def query_property(self, query_cls=None):
        """return a class property which produces a :class:`_query.Query`
        object
        against the class and the current :class:`.Session` when called.

        e.g.::

            Session = scoped_session(sessionmaker())

            class MyClass(object):
                query = Session.query_property()

            # after mappers are defined
            result = MyClass.query.filter(MyClass.name=='foo').all()

        Produces instances of the session's configured query class by
        default.  To override and use a custom implementation, provide
        a ``query_cls`` callable.  The callable will be invoked with
        the class's mapper as a positional argument and a session
        keyword argument.

        There is no limit to the number of query properties placed on
        a class.

        """

        # The descriptor below closes over ``self`` so each attribute
        # access resolves against the *current* scoped Session.
        class query(object):
            def __get__(s, instance, owner):
                try:
                    mapper = class_mapper(owner)
                    if mapper:
                        if query_cls:
                            # custom query class
                            return query_cls(mapper, session=self.registry())
                        else:
                            # session's configured query class
                            return self.registry().query(mapper)
                except orm_exc.UnmappedClassError:
                    # Non-mapped owner classes simply yield no query.
                    return None

        return query()
# Deprecated alias kept so code written against the old name keeps working.
ScopedSession = scoped_session
"""Old name for backwards compatibility."""
def instrument(name):
    """Build a proxy method that forwards *name* to the current scope's Session."""
    def proxy(self, *args, **kwargs):
        session = self.registry()
        return getattr(session, name)(*args, **kwargs)
    return proxy
# Attach a proxy for every public Session method so calls made on the
# scoped_session object are forwarded to the current scope's Session.
for meth in Session.public_methods:
    setattr(scoped_session, meth, instrument(meth))
def makeprop(name):
    """Build a read/write property that proxies attribute *name* on the
    current scope's Session."""
    def getter(self):
        return getattr(self.registry(), name)

    def setter(self, attr):
        setattr(self.registry(), name, attr)

    return property(getter, setter)
# Mirror commonly used Session attributes as read/write properties that
# delegate to the current scope's Session instance.
for prop in (
    "bind",
    "dirty",
    "deleted",
    "new",
    "identity_map",
    "is_active",
    "autoflush",
    "no_autoflush",
    "info",
    "autocommit",
):
    setattr(scoped_session, prop, makeprop(prop))
def clslevel(name):
    """Build a classmethod that forwards *name* to the Session class itself."""
    def forward(cls, *args, **kwargs):
        target = getattr(Session, name)
        return target(*args, **kwargs)
    return classmethod(forward)
# Expose selected Session class-level helpers directly on scoped_session.
for prop in ("close_all", "object_session", "identity_key"):
    setattr(scoped_session, prop, clslevel(prop))
| 32.131068 | 80 | 0.59163 |
31be9724809cc6eb8f12c3ce4a0ac2dc8e080fed | 5,346 | py | Python | bak_spectrgram_analysis.py | Accioy/voice-singal-classification | b6744af9732fd38c41cf3cbf11c170a962cee6c7 | [
"MIT"
] | null | null | null | bak_spectrgram_analysis.py | Accioy/voice-singal-classification | b6744af9732fd38c41cf3cbf11c170a962cee6c7 | [
"MIT"
] | null | null | null | bak_spectrgram_analysis.py | Accioy/voice-singal-classification | b6744af9732fd38c41cf3cbf11c170a962cee6c7 | [
"MIT"
] | null | null | null | import numpy as np
from scipy import signal
import scipy.io.wavfile as wav
import os
import time
import sys
#import matplotlib.pyplot as plt
#import skimage.io
# Paths are resolved relative to the current working directory; the training
# WAV files are expected under ./hd_signal_keras_train.
ROOT_DIR=os.path.abspath('.')
wav_path = os.path.join(ROOT_DIR,"hd_signal_keras_train")
def get_wav_files(wav_path, min_size=240000):
    """Recursively collect usable WAV files under *wav_path*.

    Files smaller than *min_size* bytes are skipped (previously a
    hard-coded 240000-byte threshold used to weed out files too short to
    be useful).

    Args:
        wav_path: Root directory to walk.
        min_size: Minimum file size in bytes; smaller files are ignored.

    Returns:
        List of paths (rooted at *wav_path*) of the matching WAV files,
        in ``os.walk`` order.
    """
    wav_files = []
    for dirpath, _dirnames, filenames in os.walk(wav_path):
        for filename in filenames:
            # Accept both lower- and upper-case extensions in one check.
            if not filename.endswith(('.wav', '.WAV')):
                continue
            filename_path = os.path.join(dirpath, filename)
            if os.stat(filename_path).st_size < min_size:
                continue  # skip files too small to be useful
            wav_files.append(filename_path)
    return wav_files
wav_files = get_wav_files(wav_path)

# Per-sample features and labels collected from every WAV file.
train_x = []
train_y = []
sample_frequencies=[]
segment_times=[]

begin_time = time.time()
for i, onewav in enumerate(wav_files):
    if i % 5 == 4:  # refresh the console progress line every 5 files
        gaptime = time.time() - begin_time
        percent = float(i) * 100 / len(wav_files)
        eta_time = gaptime * 100 / (percent + 0.01) - gaptime
        strprogress = "[" + "=" * int(percent // 2) + ">" + "-" * int(50 - percent // 2) + "]"
        str_log = ("%.2f %% %s %s/%s \t used:%d s eta:%d s" % (percent, strprogress, i, len(wav_files), gaptime, eta_time))
        sys.stdout.write('\r' + str_log)

    # Derive the class label from the Windows-style path components.
    # NOTE(review): if a path contains neither 'diode' nor 'metalnode',
    # ``label`` silently keeps the previous file's value (and is undefined
    # for the very first file) -- confirm every path contains one of the two.
    elements = onewav.split("\\")
    for x in elements:
        if x == 'diode':
            label = 0
        elif x == 'metalnode':
            label = 1

    (rate, data) = wav.read(onewav)
    # NOTE: only the left channel is used; in this dataset the left channel
    # signal is clear while the right channel is too weak to be useful.
    data=np.transpose(data)[0]
    sample_frequency,segment_time,spectrogram=signal.spectrogram(data)
    sample_frequencies.append(sample_frequency)
    segment_times.append(segment_time)
    train_x.append(spectrogram)
    train_y.append(label)

# Collect the frequency-axis and time-axis lengths of every spectrogram.
len_freq=[]
len_time=[]
for i in sample_frequencies:
    len_freq.append(len(i))
for i in segment_times:
    len_time.append(len(i))
# print("\n")
# print(max(len_freq),min(len_freq),max(len_time),min(len_time))
# observed result: 129 129 1429 833

from keras.utils.np_utils import to_categorical
train_y=to_categorical(train_y, num_classes=2) # (201,2)
train_y = np.asarray(train_y)

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(train_x, train_y, test_size=0.3, random_state=0,shuffle=True,stratify=train_y)
#print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
# NOTE(review): the split above happens BEFORE the zero-padding below, so
# x_train/x_test still reference the unpadded, variable-length spectrograms
# while only ``train_x`` gets padded; the later model.fit(x_train, ...)
# therefore receives unpadded data -- verify this is intentional.

max_time=max(len_time)
max_freq=max(len_freq) # the frequency-axis length is in fact identical for all samples
# Zero-pad along the time axis so every sample shares the same shape.
#train_x = [np.concatenate([i, np.zeros(( max_freq, max_time - i.shape[1]))],axis=1) for i in train_x]
# Fix max_time at 2000 for compatibility with other data.
max_time=2000
train_x = [np.concatenate([i, np.zeros(( max_freq, max_time - i.shape[1]))],axis=1) for i in train_x]
train_x = np.asarray(train_x)
# train_x = train_x[:,:,:,np.newaxis]
train_x = np.transpose(train_x,axes=(0,2,1))
from keras.models import Sequential,load_model
from keras.layers import Conv1D,MaxPool1D,Conv2D,MaxPool2D,Flatten,Dense,BatchNormalization
from keras.callbacks import EarlyStopping
from keras.optimizers import RMSprop
from keras.metrics import categorical_accuracy

task='evaluate' #train or evaluate or predict
if task=='train':
    # Alternative: 2-D convolution model (kept for reference, disabled).
    # model = Sequential()
    # model.add(Conv2D(filters=64, kernel_size=[5,5], input_shape=(max_freq, max_time,1)))
    # model.add(MaxPool2D([3,3]))
    # model.add(Conv2D(32, [3,3]))
    # model.add(MaxPool2D([3,3]))
    # model.add(Conv2D(16, [3,3]))
    # model.add(Flatten())
    # model.add(Dense(32, activation='relu'))
    # model.add(Dense(2, activation='softmax'))
    # model.compile(loss="categorical_crossentropy", optimizer='adam', metrics=[categorical_accuracy])

    # Active model: 1-D convolution along the time axis.
    model = Sequential()
    model.add(Conv1D(max_freq, 10, input_shape=(max_time,max_freq),activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPool1D(4))
    model.add(Conv1D(max_freq, 4,activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPool1D(4))
    model.add(Flatten())
    model.add(Dense(max_freq, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    model.compile(loss="categorical_crossentropy", optimizer='adam', metrics=[categorical_accuracy])

    # Alternative: 1-D convolution along the frequency axis (kept for reference, disabled).
    # model = Sequential()
    # model.add(Conv1D(700, 3, input_shape=(max_freq,max_time),activation='relu'))
    # model.add(BatchNormalization())
    # model.add(MaxPool1D(3))
    # model.add(Conv1D(350, 3,activation='relu'))
    # model.add(BatchNormalization())
    # model.add(MaxPool1D(3))
    # model.add(Flatten())
    # model.add(Dense(128, activation='relu'))
    # model.add(Dense(2, activation='softmax'))
    # model.compile(loss="categorical_crossentropy", optimizer='adam', metrics=[categorical_accuracy])

    model.fit(x_train, y_train,batch_size=10, epochs=20)
    # Save the trained model to disk.
    model.save('voice_recog_spectrogram.h5')
    print(model.summary())
elif task=='evaluate':
    model=load_model('voice_recog_spectrogram.h5')
    accuracy = model.evaluate(x_test, y_test, batch_size=1)
    print('test loss and accuracy:',accuracy)
elif task=='predict':
    model=load_model('voice_recog_spectrogram.h5')
    result=model.predict_on_batch(x_test)
    print(result)

# from keras.utils.vis_utils import plot_model
# plot_model(model,to_file="model_1.png",show_shapes=True)
f0f1f403d3276b25e851ffe401070c7307b237d1 | 10,583 | py | Python | sdk/python/pulumi_azure_nextgen/media/v20180701/live_event.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/media/v20180701/live_event.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/media/v20180701/live_event.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['LiveEvent']
# Generated resource class (see the "generated by the Pulumi SDK Generator"
# warning at the top of this file); edit with care.
class LiveEvent(pulumi.CustomResource):
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 auto_start: Optional[pulumi.Input[bool]] = None,
                 cross_site_access_policies: Optional[pulumi.Input[pulumi.InputType['CrossSiteAccessPoliciesArgs']]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 encoding: Optional[pulumi.Input[pulumi.InputType['LiveEventEncodingArgs']]] = None,
                 input: Optional[pulumi.Input[pulumi.InputType['LiveEventInputArgs']]] = None,
                 live_event_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 preview: Optional[pulumi.Input[pulumi.InputType['LiveEventPreviewArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 stream_options: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 vanity_url: Optional[pulumi.Input[bool]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        The Live Event.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: The Media Services account name.
        :param pulumi.Input[bool] auto_start: The flag indicates if the resource should be automatically started on creation.
        :param pulumi.Input[pulumi.InputType['CrossSiteAccessPoliciesArgs']] cross_site_access_policies: The Live Event access policies.
        :param pulumi.Input[str] description: The Live Event description.
        :param pulumi.Input[pulumi.InputType['LiveEventEncodingArgs']] encoding: The Live Event encoding.
        :param pulumi.Input[pulumi.InputType['LiveEventInputArgs']] input: The Live Event input.
        :param pulumi.Input[str] live_event_name: The name of the Live Event.
        :param pulumi.Input[str] location: The Azure Region of the resource.
        :param pulumi.Input[pulumi.InputType['LiveEventPreviewArgs']] preview: The Live Event preview.
        :param pulumi.Input[str] resource_group_name: The name of the resource group within the Azure subscription.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] stream_options: The options to use for the LiveEvent. This value is specified at creation time and cannot be updated.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[bool] vanity_url: Specifies whether to use a vanity url with the Live Event. This value is specified at creation time and cannot be updated.
        """
        # Legacy keyword aliases, retained for backwards compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            # Required inputs are validated explicitly; everything else is
            # passed through as-is (possibly None).
            if account_name is None:
                raise TypeError("Missing required property 'account_name'")
            __props__['account_name'] = account_name
            __props__['auto_start'] = auto_start
            __props__['cross_site_access_policies'] = cross_site_access_policies
            __props__['description'] = description
            __props__['encoding'] = encoding
            if input is None:
                raise TypeError("Missing required property 'input'")
            __props__['input'] = input
            if live_event_name is None:
                raise TypeError("Missing required property 'live_event_name'")
            __props__['live_event_name'] = live_event_name
            __props__['location'] = location
            __props__['preview'] = preview
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['stream_options'] = stream_options
            __props__['tags'] = tags
            __props__['vanity_url'] = vanity_url
            # Output-only properties start as None and are filled in by the
            # provider.
            __props__['created'] = None
            __props__['last_modified'] = None
            __props__['name'] = None
            __props__['provisioning_state'] = None
            __props__['resource_state'] = None
            __props__['type'] = None
        # Register aliases for the other API-version type tokens of this
        # resource so existing state under those names is matched.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:media/latest:LiveEvent"), pulumi.Alias(type_="azure-nextgen:media/v20180330preview:LiveEvent"), pulumi.Alias(type_="azure-nextgen:media/v20180601preview:LiveEvent"), pulumi.Alias(type_="azure-nextgen:media/v20190501preview:LiveEvent"), pulumi.Alias(type_="azure-nextgen:media/v20200501:LiveEvent")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(LiveEvent, __self__).__init__(
            'azure-nextgen:media/v20180701:LiveEvent',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'LiveEvent':
        """
        Get an existing LiveEvent resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # NOTE(review): __props__ starts empty here; presumably property
        # values are resolved from the live resource via the id in opts --
        # confirm against the Pulumi SDK behaviour.
        __props__ = dict()

        return LiveEvent(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def created(self) -> pulumi.Output[str]:
        """
        The exact time the Live Event was created.
        """
        return pulumi.get(self, "created")

    @property
    @pulumi.getter(name="crossSiteAccessPolicies")
    def cross_site_access_policies(self) -> pulumi.Output[Optional['outputs.CrossSiteAccessPoliciesResponse']]:
        """
        The Live Event access policies.
        """
        return pulumi.get(self, "cross_site_access_policies")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The Live Event description.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def encoding(self) -> pulumi.Output[Optional['outputs.LiveEventEncodingResponse']]:
        """
        The Live Event encoding.
        """
        return pulumi.get(self, "encoding")

    @property
    @pulumi.getter
    def input(self) -> pulumi.Output['outputs.LiveEventInputResponse']:
        """
        The Live Event input.
        """
        return pulumi.get(self, "input")

    @property
    @pulumi.getter(name="lastModified")
    def last_modified(self) -> pulumi.Output[str]:
        """
        The exact time the Live Event was last modified.
        """
        return pulumi.get(self, "last_modified")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        The Azure Region of the resource.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def preview(self) -> pulumi.Output[Optional['outputs.LiveEventPreviewResponse']]:
        """
        The Live Event preview.
        """
        return pulumi.get(self, "preview")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the Live Event.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceState")
    def resource_state(self) -> pulumi.Output[str]:
        """
        The resource state of the Live Event.
        """
        return pulumi.get(self, "resource_state")

    @property
    @pulumi.getter(name="streamOptions")
    def stream_options(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        The options to use for the LiveEvent. This value is specified at creation time and cannot be updated.
        """
        return pulumi.get(self, "stream_options")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="vanityUrl")
    def vanity_url(self) -> pulumi.Output[Optional[bool]]:
        """
        Specifies whether to use a vanity url with the Live Event. This value is specified at creation time and cannot be updated.
        """
        return pulumi.get(self, "vanity_url")

    # Name translation between the provider's camelCase and Python's
    # snake_case, driven by the generated lookup tables in _tables.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 42.163347 | 386 | 0.641123 |
6bfa93b6ea96b192fe3f31cf455e8a239c249193 | 10,193 | py | Python | resources/usr/local/lib/python2.7/dist-packages/sklearn/utils/tests/test_extmath.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | null | null | null | resources/usr/local/lib/python2.7/dist-packages/sklearn/utils/tests/test_extmath.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | null | null | null | resources/usr/local/lib/python2.7/dist-packages/sklearn/utils/tests/test_extmath.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | 1 | 2020-05-28T23:01:44.000Z | 2020-05-28T23:01:44.000Z | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import logistic_sigmoid
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
    """density() must agree between a dense array and its sparse variants."""
    rng = np.random.RandomState(0)
    dense = rng.randint(10, size=(10, 5))
    # Introduce explicit zeros so the matrix is not completely full.
    dense[1, 2] = 0
    dense[5, 3] = 0
    expected = density(dense)
    converters = (sparse.csr_matrix, sparse.csc_matrix,
                  sparse.coo_matrix, sparse.lil_matrix)
    for to_sparse in converters:
        assert_equal(density(to_sparse(dense)), expected)
def test_uniform_weights():
    """With all-ones weights, weighted_mode must reproduce scipy.stats.mode."""
    rng = np.random.RandomState(0)
    values = rng.randint(10, size=(10, 5))
    weights = np.ones(values.shape)
    for axis in (None, 0, 1):
        expected_mode, expected_score = stats.mode(values, axis)
        actual_mode, actual_score = weighted_mode(values, weights, axis)
        assert_true(np.all(expected_mode == actual_mode))
        assert_true(np.all(expected_score == actual_score))
def test_random_weights():
    """Rows are rigged so the weighted mode is always 6 with a known score."""
    expected_mode = 6
    rng = np.random.RandomState(0)
    x = rng.randint(expected_mode, size=(100, 10))
    w = rng.random_sample(x.shape)
    # Pin the first five columns to the target value and boost their
    # weights so that value dominates every row.
    x[:, :5] = expected_mode
    w[:, :5] += 1
    expected_score = w[:, :5].sum(1)
    mode, score = weighted_mode(x, w, axis=1)
    assert_true(np.all(mode == expected_mode))
    assert_true(np.all(score.ravel() == expected_score))
def test_logsumexp():
    """Summing tiny numbers in log-space must match the direct sum."""
    values = np.array([1e-40] * 1000000)
    log_values = np.log(values)
    assert_almost_equal(np.exp(logsumexp(log_values)), values.sum())
    stacked = np.vstack([values, values])
    log_stacked = np.vstack([log_values, log_values])
    for axis in (0, 1):
        assert_array_almost_equal(np.exp(logsumexp(log_stacked, axis=axis)),
                                  stacked.sum(axis=axis))
def test_randomized_svd_low_rank():
    """Check that extmath.randomized_svd is consistent with linalg.svd
    on a noiseless low-rank matrix, for both dense and CSR input."""
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10

    # generate a matrix X of approximate effective rank `rank` and no noise
    # component (very structured signal):
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.0,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))

    # compute the singular values of X using the slow exact method
    U, s, V = linalg.svd(X, full_matrices=False)

    # compute the singular values of X using the fast approximate method
    Ua, sa, Va = randomized_svd(X, k)
    assert_equal(Ua.shape, (n_samples, k))
    assert_equal(sa.shape, (k,))
    assert_equal(Va.shape, (k, n_features))

    # ensure that the singular values of both methods are equal up to the real
    # rank of the matrix
    assert_almost_equal(s[:k], sa)

    # check the singular vectors too (while not checking the sign)
    assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))

    # check the sparse matrix representation
    X = sparse.csr_matrix(X)

    # compute the singular values of X using the fast approximate method
    Ua, sa, Va = randomized_svd(X, k)
    assert_almost_equal(s[:rank], sa[:rank])
def test_randomized_svd_low_rank_with_noise():
    """Check that extmath.randomized_svd can handle noisy matrices:
    the iterated power method must tighten the approximation."""
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10

    # generate a matrix X with structure of approximate rank `rank` and an
    # important noisy component
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.5,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))

    # compute the singular values of X using the slow exact method
    _, s, _ = linalg.svd(X, full_matrices=False)

    # compute the singular values of X using the fast approximate method
    # without the iterated power method
    _, sa, _ = randomized_svd(X, k, n_iter=0)

    # the approximation does not tolerate the noise:
    assert_greater(np.abs(s[:k] - sa).max(), 0.05)

    # compute the singular values of X using the fast approximate method with
    # iterated power method
    _, sap, _ = randomized_svd(X, k, n_iter=5)

    # the iterated power method is helping getting rid of the noise:
    assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
    """Check that extmath.randomized_svd works for matrices with slowly
    decaying spectrum, i.e. effectively infinite rank.

    (The original docstring was copy-pasted from the noisy-matrix test.)
    """
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10
    # let us try again without 'low_rank component': just regularly but slowly
    # decreasing singular values: the rank of the data matrix is infinite
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=1.0,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    # compute the singular values of X using the slow exact method
    _, s, _ = linalg.svd(X, full_matrices=False)
    # compute the singular values of X using the fast approximate method
    # without the iterated power method
    _, sa, _ = randomized_svd(X, k, n_iter=0)
    # the approximation does not tolerate the noise:
    assert_greater(np.abs(s[:k] - sa).max(), 0.1)
    # compute the singular values of X using the fast approximate method with
    # iterated power method
    _, sap, _ = randomized_svd(X, k, n_iter=5)
    # the iterated power method is still managing to get most of the structure
    # at the requested rank
    assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    """Check that transposing the design matrix has limited impact on the
    randomized SVD result, for transpose=False/True/'auto'."""
    n_samples = 100
    n_features = 500
    rank = 4
    k = 10
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.5,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
                                random_state=0)
    U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
                                random_state=0)
    U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
                                random_state=0)
    # Exact decomposition as the reference.
    U4, s4, V4 = linalg.svd(X, full_matrices=False)
    # All three variants recover the top-k singular values.
    assert_equal(s1, s4[:k], decimal=3) if False else None  # placeholder removed
    assert_almost_equal(s1, s4[:k], decimal=3)
    assert_almost_equal(s2, s4[:k], decimal=3)
    assert_almost_equal(s3, s4[:k], decimal=3)
    # The rank-k reconstructions also agree (sign-insensitive check).
    assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
                        decimal=2)
    assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
                        decimal=2)
    # in this case 'auto' is equivalent to transpose
    assert_almost_equal(s2, s3)
def test_randomized_svd_sign_flip():
    """With flip_sign=True the factorization is deterministic across seeds."""
    mat = np.array([[2.0, 0.0], [0.0, 1.0]])
    u_ref, s_ref, v_ref = randomized_svd(mat, 2, flip_sign=True, random_state=41)
    for seed in range(10):
        u, s, v = randomized_svd(mat, 2, flip_sign=True, random_state=seed)
        # Same factors regardless of the random state.
        assert_almost_equal(u_ref, u)
        assert_almost_equal(v_ref, v)
        # The factors reconstruct the input and are orthonormal.
        assert_almost_equal(np.dot(u * s, v), mat)
        assert_almost_equal(np.dot(u.T, u), np.eye(2))
        assert_almost_equal(np.dot(v.T, v), np.eye(2))
def test_cartesian():
    """Check if cartesian product delivers the right results"""
    first = np.array([1, 2, 3])
    second = np.array([4, 5])
    third = np.array([6, 7])
    # Expected product built with nested comprehensions (row-major order).
    expected = np.array([[a, b, c]
                         for a in (1, 2, 3)
                         for b in (4, 5)
                         for c in (6, 7)])
    result = cartesian((first, second, third))
    assert_array_equal(expected, result)
    # A single axis should come back as a column vector.
    single = np.arange(3)
    assert_array_equal(single[:, np.newaxis], cartesian((single,)))
def test_logistic_sigmoid():
    """Check correctness and robustness of logistic sigmoid implementation"""
    # Plain defs instead of lambda assignments (PEP 8 E731).
    def naive_logsig(x):
        return 1 / (1 + np.exp(-x))

    def naive_log_logsig(x):
        return np.log(naive_logsig(x))

    # Simulate the previous Cython implementations of logistic_sigmoid based on
    # http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
    def stable_logsig(x):
        out = np.zeros_like(x)
        positive = x > 0
        negative = x <= 0
        out[positive] = 1. / (1 + np.exp(-x[positive]))
        out[negative] = np.exp(x[negative]) / (1. + np.exp(x[negative]))
        return out

    x = np.linspace(-2, 2, 50)
    assert_array_almost_equal(logistic_sigmoid(x), naive_logsig(x))
    assert_array_almost_equal(logistic_sigmoid(x, log=True),
                              naive_log_logsig(x))
    assert_array_almost_equal(logistic_sigmoid(x), stable_logsig(x),
                              decimal=16)

    # np.float64 instead of the np.float alias (removed in NumPy 1.24);
    # it denotes the same dtype the original code produced.
    extreme_x = np.array([-100, 100], dtype=np.float64)
    assert_array_almost_equal(logistic_sigmoid(extreme_x), [0, 1])
    assert_array_almost_equal(logistic_sigmoid(extreme_x, log=True), [-100, 0])
    assert_array_almost_equal(logistic_sigmoid(extreme_x),
                              stable_logsig(extreme_x),
                              decimal=16)
| 35.764912 | 79 | 0.636123 |
1cac68cf5116dee21b9c3caf8776911821ccdc2b | 688 | py | Python | tests/test_smoke/test_english.py | imanearaf/rasa-nlu-examples | 3e3cfc4e743715c9104d5771b14ecfab2a02da11 | [
"Apache-2.0"
] | null | null | null | tests/test_smoke/test_english.py | imanearaf/rasa-nlu-examples | 3e3cfc4e743715c9104d5771b14ecfab2a02da11 | [
"Apache-2.0"
] | null | null | null | tests/test_smoke/test_english.py | imanearaf/rasa-nlu-examples | 3e3cfc4e743715c9104d5771b14ecfab2a02da11 | [
"Apache-2.0"
] | null | null | null | import pytest
from rasa.train import train_nlu
# Take heed! Pytest fails if you use a function that starts with "test"
from rasa.test import test_nlu as run_nlu
# Pipeline config files (resolved under tests/configs/) exercised against
# the English NLU data by the parametrized smoke test below.
english_yml_files = [
    "stanza-tokenizer-config.yml",
    "fasttext-config.yml",
    "printer-config.yml",
    "bytepair-config.yml",
    "gensim-config.yml",
    "lang-detect-ft-config.yml",
]
@pytest.mark.fasttext
@pytest.mark.parametrize("fp", english_yml_files)
def test_run_train_test_command_english(fp):
    """Smoke test: train an NLU model with each English config, then run
    evaluation on the same data (only checks the pipeline doesn't crash)."""
    mod = train_nlu(
        nlu_data="tests/data/nlu/en/nlu.md",
        config=f"tests/configs/{fp}",
        output="models",
    )
    run_nlu(model=f"models/{mod}", nlu_data="tests/data/nlu/en/nlu.md")
7d31e844aa623373ab69846be6dc1b9e03094477 | 1,030 | py | Python | gevent_zeromq/__init__.py | ContinuumIO/gevent-zeromq | b6d4e4ec802dbd477e2fe4793a4ab9079174f94b | [
"BSD-3-Clause"
] | 2 | 2017-09-17T18:32:38.000Z | 2019-08-26T03:10:07.000Z | gevent_zeromq/__init__.py | ContinuumIO/gevent-zeromq | b6d4e4ec802dbd477e2fe4793a4ab9079174f94b | [
"BSD-3-Clause"
] | null | null | null | gevent_zeromq/__init__.py | ContinuumIO/gevent-zeromq | b6d4e4ec802dbd477e2fe4793a4ab9079174f94b | [
"BSD-3-Clause"
] | 3 | 2021-02-23T09:24:52.000Z | 2021-02-23T09:25:04.000Z | # -*- coding: utf-8 -*-
"""gevent_zmq - gevent compatibility with zeromq.
Usage
-----
Instead of importing zmq directly, do so in the following manner:
..
from gevent_zeromq import zmq
Any calls that would have blocked the current thread will now only block the
current green thread.
This compatibility is accomplished by ensuring the nonblocking flag is set
before any blocking operation and the ØMQ file descriptor is polled internally
to trigger needed events.
"""
import gevent_zeromq.core as zmq
# Expose the gevent-aware subclasses under the canonical pyzmq names on the
# compatibility module, so `from gevent_zeromq import zmq` behaves like pyzmq.
zmq.Context = zmq._Context
zmq.Socket = zmq._Socket
zmq.Poller = zmq._Poller
def monkey_patch(test_suite=False):
    """Patch the real ``zmq`` package in place.

    Replaces ``zmq.Context``, ``zmq.Socket`` and ``zmq.Poller`` with the
    gevent-aware versions defined by this package.  If *test_suite* is
    true, the pyzmq test suite is patched for compatibility as well.
    """
    original_zmq = __import__('zmq')
    original_zmq.Context = zmq.Context
    original_zmq.Socket = zmq.Socket
    original_zmq.Poller = zmq.Poller
    if test_suite:
        from gevent_zeromq.tests import monkey_patch_test_suite
        monkey_patch_test_suite()
3dd97d9efa44300d52a3c37d77c90c2b6a252bcd | 21,198 | py | Python | gui_db.py | Astray909/lab_lot_tracking | 1ce5929d53aca325f8f2540e35683129191d65d3 | [
"MIT"
] | null | null | null | gui_db.py | Astray909/lab_lot_tracking | 1ce5929d53aca325f8f2540e35683129191d65d3 | [
"MIT"
] | null | null | null | gui_db.py | Astray909/lab_lot_tracking | 1ce5929d53aca325f8f2540e35683129191d65d3 | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter import messagebox
from tkinter.ttk import Treeview
from tkinter.filedialog import asksaveasfile
import sqlite3, csv
class Database:
    """Thin CRUD wrapper around the SQLite ``entries`` table.

    The table has an autoincrementing ``id`` primary key followed by the 25
    payload columns listed in ``COLUMNS``.  The original CREATE TABLE used a
    backslash-continued string literal (which embedded stray whitespace in
    the SQL and was error-prone to edit); the SQL is now built from one
    authoritative column table.
    """

    # Payload column definitions, in table order (everything except ``id``).
    COLUMNS = (
        "UID text", "YEAR integer", "WEEK integer", "DEPT text",
        "TESTER text", "PROGRAM text", "BOX text", "PRODUCT text",
        "DATECODE text", "LOT text", "TEST text", "PACKAGE text",
        "HOUR text", "STACK_TRAY text", "DEVICE_NUM text", "QTY integer",
        "RECEIVED_FROM text", "WOR_FORM integer", "RECEIVED_ORDER_DATE text",
        "TEST_START_DATE text", "TOTAL_TIME_CONSUMED text", "DATE_OUT text",
        "STATUS text", "COMMENTS text", "PRINT_LABEL text",
    )

    def __init__(self, db):
        """Open (or create) the database at path *db* and ensure the table exists."""
        self.conn = sqlite3.connect(db)
        self.cur = self.conn.cursor()
        self.cur.execute(
            "CREATE TABLE IF NOT EXISTS entries (id INTEGER PRIMARY KEY, "
            + ", ".join(self.COLUMNS) + ")")
        self.conn.commit()

    def fetch(self, UID=''):
        """Return all rows whose UID contains *UID* (every row by default)."""
        self.cur.execute(
            "SELECT * FROM entries WHERE UID LIKE ?", ('%' + UID + '%',))
        return self.cur.fetchall()

    def fetch2(self, query):
        """Run an arbitrary SQL *query* and return its result rows.

        NOTE: executed verbatim to back the power-user search box, so this
        is intentionally open to SQL injection -- never feed it untrusted
        input.
        """
        self.cur.execute(query)
        return self.cur.fetchall()

    def insert(self, UID, YEAR, WEEK, DEPT, TESTER, PROGRAM, BOX, PRODUCT, DATECODE, LOT, TEST, PACKAGE, HOUR, STACK_TRAY, DEVICE_NUM, QTY, RECEIVED_FROM, WOR_FORM, RECEIVED_ORDER_DATE, TEST_START_DATE, TOTAL_TIME_CONSUMED, DATE_OUT, STATUS, COMMENTS, PRINT_LABEL):
        """Insert one row; the id is assigned by SQLite."""
        placeholders = ", ".join("?" * len(self.COLUMNS))
        self.cur.execute(
            "INSERT INTO entries VALUES (NULL, " + placeholders + ")",
            (UID, YEAR, WEEK, DEPT, TESTER, PROGRAM, BOX, PRODUCT, DATECODE,
             LOT, TEST, PACKAGE, HOUR, STACK_TRAY, DEVICE_NUM, QTY,
             RECEIVED_FROM, WOR_FORM, RECEIVED_ORDER_DATE, TEST_START_DATE,
             TOTAL_TIME_CONSUMED, DATE_OUT, STATUS, COMMENTS, PRINT_LABEL))
        self.conn.commit()

    def remove(self, id):
        """Delete the row with primary key *id* (no-op if absent)."""
        self.cur.execute("DELETE FROM entries WHERE id=?", (id,))
        self.conn.commit()

    def update(self, id, UID, YEAR, WEEK, DEPT, TESTER, PROGRAM, BOX, PRODUCT, DATECODE, LOT, TEST, PACKAGE, HOUR, STACK_TRAY, DEVICE_NUM, QTY, RECEIVED_FROM, WOR_FORM, RECEIVED_ORDER_DATE, TEST_START_DATE, TOTAL_TIME_CONSUMED, DATE_OUT, STATUS, COMMENTS, PRINT_LABEL):
        """Overwrite every payload column of the row with primary key *id*."""
        assignments = ", ".join(
            col.split()[0] + " = ?" for col in self.COLUMNS)
        self.cur.execute(
            "UPDATE entries SET " + assignments + " WHERE id = ?",
            (UID, YEAR, WEEK, DEPT, TESTER, PROGRAM, BOX, PRODUCT, DATECODE,
             LOT, TEST, PACKAGE, HOUR, STACK_TRAY, DEVICE_NUM, QTY,
             RECEIVED_FROM, WOR_FORM, RECEIVED_ORDER_DATE, TEST_START_DATE,
             TOTAL_TIME_CONSUMED, DATE_OUT, STATUS, COMMENTS, PRINT_LABEL, id))
        self.conn.commit()

    def __del__(self):
        # Best-effort close; conn may be missing if __init__ failed early.
        try:
            self.conn.close()
        except Exception:
            pass
# Module-level connection; a Windows network-share path (adjust per machine).
db = Database("X:\\PLC\\Prod Docs\\Qual\\qrw_script\\dataAnalysis\\dailylist.db")
def populate_list(UID=''):
    """Refresh the tree view with every row whose UID contains *UID*."""
    for child in entry_tree_view.get_children():
        entry_tree_view.delete(child)
    for record in db.fetch(UID):
        entry_tree_view.insert('', 'end', values=record)
def populate_list2(query='select * from entries'):
    """Refresh the tree view with the rows returned by an arbitrary query."""
    for child in entry_tree_view.get_children():
        entry_tree_view.delete(child)
    for record in db.fetch2(query):
        entry_tree_view.insert('', 'end', values=record)
def add_entry():
    """Validate the form and insert a new row into the database.

    Replaces the original 25-clause ``or`` chain with a single readable
    check over the field variables, listed in the column order that
    ``db.insert`` expects.
    """
    field_vars = (UID_text, YEAR_text, WEEK_text, DEPT_text, TESTER_text,
                  PROGRAM_text, BOX_text, PRODUCT_text, DATECODE_text,
                  LOT_text, TEST_text, PACKAGE_text, HOUR_text,
                  STACK_TRAY_text, DEVICE_NUM_text, QTY_text,
                  RECEIVED_FROM_text, WOR_FORM_text, RECEIVED_ORDER_DATE_text,
                  TEST_START_DATE_text, TOTAL_TIME_CONSUMED_text,
                  DATE_OUT_text, STATUS_text, COMMENTS_text, PRINT_LABEL_text)
    if any(var.get() == '' for var in field_vars):
        messagebox.showerror('Required Fields', 'Please include all fields')
        return
    db.insert(*(var.get() for var in field_vars))
    clear_text()
    populate_list()
def select_entry(event):
    """Copy the values of the clicked tree-view row into the form widgets.

    Replaces 25 copy-pasted delete/insert pairs with a single zip loop over
    the widgets, listed in the same order as the row's columns (after id).
    """
    try:
        global selected_item
        index = entry_tree_view.selection()[0]
        selected_item = entry_tree_view.item(index)['values']
        widgets = (UID_entry, YEAR_entry, WEEK_entry, DEPT_entry, TESTER_entry,
                   PROGRAM_entry, BOX_entry, PRODUCT_entry, DATECODE_entry,
                   LOT_entry, TEST_entry, PACKAGE_entry, HOUR_entry,
                   STACK_TRAY_entry, DEVICE_NUM_entry, QTY_entry,
                   RECEIVED_FROM_entry, WOR_FORM_entry,
                   RECEIVED_ORDER_DATE_entry, TEST_START_DATE_entry,
                   TOTAL_TIME_CONSUMED_entry, DATE_OUT_entry, STATUS_entry,
                   COMMENTS_entry, PRINT_LABEL_entry)
        for widget, value in zip(widgets, selected_item[1:]):
            widget.delete(0, END)
            widget.insert(END, value)
    except IndexError:
        # Nothing selected (e.g. selection was cleared) -- keep the form as-is.
        pass
def remove_entry():
    # Delete the row currently selected in the tree view, then reset the
    # form and the list.  Relies on select_entry having set selected_item.
    db.remove(selected_item[0])
    clear_text()
    populate_list()
def update_entry():
    # Overwrite the selected row (id preserved) with the form's current
    # values; argument order must match Database.update's column order.
    db.update(selected_item[0], UID_text.get(), YEAR_text.get(), WEEK_text.get(), DEPT_text.get(), TESTER_text.get(), PROGRAM_text.get(), BOX_text.get(), PRODUCT_text.get(), DATECODE_text.get(), LOT_text.get(), TEST_text.get(), PACKAGE_text.get(), HOUR_text.get(), STACK_TRAY_text.get(), DEVICE_NUM_text.get(), QTY_text.get(), RECEIVED_FROM_text.get(), WOR_FORM_text.get(), RECEIVED_ORDER_DATE_text.get(), TEST_START_DATE_text.get(), TOTAL_TIME_CONSUMED_text.get(), DATE_OUT_text.get(), STATUS_text.get(), COMMENTS_text.get(), PRINT_LABEL_text.get())
    populate_list()
def clear_text():
    """Empty every entry widget on the form.

    Data-driven replacement for 25 copy-pasted ``delete`` calls.
    """
    for widget in (UID_entry, YEAR_entry, WEEK_entry, DEPT_entry, TESTER_entry,
                   PROGRAM_entry, BOX_entry, PRODUCT_entry, DATECODE_entry,
                   LOT_entry, TEST_entry, PACKAGE_entry, HOUR_entry,
                   STACK_TRAY_entry, DEVICE_NUM_entry, QTY_entry,
                   RECEIVED_FROM_entry, WOR_FORM_entry,
                   RECEIVED_ORDER_DATE_entry, TEST_START_DATE_entry,
                   TOTAL_TIME_CONSUMED_entry, DATE_OUT_entry, STATUS_entry,
                   COMMENTS_entry, PRINT_LABEL_entry):
        widget.delete(0, END)
def search_hostname():
    # Filter the tree view by the contents of the UID search box.
    UID = UID_search.get()
    populate_list(UID)
def execute_query():
    # Run the raw SQL typed into the query box and show its rows.
    query = query_search.get()
    populate_list2(query)
def save_csv():
    """Export the rows currently shown in the tree view to a CSV file.

    Bug fix: the original parsed the repr() of the file object returned by
    asksaveasfile to recover a path (broken on any platform whose default
    encoding is not cp1252) and crashed when the dialog was cancelled.  We
    now write to the returned file object directly and bail out on cancel.
    NOTE: asksaveasfile cannot pass newline='' to open(), so on Windows the
    csv module may emit \r\r\n line endings.
    """
    files = [('CSV File', '*.csv')]
    csv_file = asksaveasfile(mode='w', filetypes=files, defaultextension=files)
    if csv_file is None:  # user cancelled the save dialog
        return
    with csv_file:
        csvwriter = csv.writer(csv_file, delimiter=',')
        csvwriter.writerow(['id', 'UID', 'YEAR', 'WEEK', 'DEPT', 'TESTER',
                            'PROGRAM', 'BOX', 'PRODUCT', 'DATECODE', 'LOT',
                            'TEST', 'PACKAGE', 'HOUR', 'STACK_TRAY',
                            'DEVICE_NUM', 'QTY', 'RECEIVED_FROM', 'WOR_FORM',
                            'RECEIVED_ORDER_DATE', 'TEST_START_DATE',
                            'TOTAL_TIME_CONSUMED', 'DATE_OUT', 'STATUS',
                            'COMMENTS', 'PRINT_LABEL'])
        for row_id in entry_tree_view.get_children():
            csvwriter.writerow(entry_tree_view.item(row_id)['values'])
# Root window plus the search bar (by UID substring or by raw SQL query).
app = Tk()
frame_search = Frame(app)
frame_search.grid(row=0, column=0)
# NOTE(review): lbl_search is rebound for the second label below; harmless,
# since the first widget stays gridded -- only the name is reused.
lbl_search = Label(frame_search, text='Search by UID',
                   font=('bold', 12), pady=20)
lbl_search.grid(row=0, column=0, sticky=W)
UID_search = StringVar()
UID_search_entry = Entry(frame_search, textvariable=UID_search)
UID_search_entry.grid(row=0, column=1)
lbl_search = Label(frame_search, text='Search by Query',
                   font=('bold', 12), pady=20)
lbl_search.grid(row=1, column=0, sticky=W)
query_search = StringVar()
query_search.set("Select * from entries where YEAR>1")
query_search_entry = Entry(frame_search, textvariable=query_search, width=40)
query_search_entry.grid(row=1, column=1)
# Container for the 25 data-entry fields created below.
frame_fields = Frame(app)
frame_fields.grid(row=1, column=0)
# Build the 25 label/entry pairs of the data-entry form, five per row.
# Replaces ~150 copy-pasted lines with a data-driven loop.  For every field
# FOO it publishes the same module-level names the rest of the script
# (add_entry, select_entry, clear_text, update_entry) expects:
# FOO_text (StringVar), FOO_label (Label) and FOO_entry (Entry).
FIELD_NAMES = (
    'UID', 'YEAR', 'WEEK', 'DEPT', 'TESTER',
    'PROGRAM', 'BOX', 'PRODUCT', 'DATECODE', 'LOT',
    'TEST', 'PACKAGE', 'HOUR', 'STACK_TRAY', 'DEVICE_NUM',
    'QTY', 'RECEIVED_FROM', 'WOR_FORM', 'RECEIVED_ORDER_DATE',
    'TEST_START_DATE',
    'TOTAL_TIME_CONSUMED', 'DATE_OUT', 'STATUS', 'COMMENTS', 'PRINT_LABEL',
)
for _i, _field in enumerate(FIELD_NAMES):
    _row, _col = divmod(_i, 5)
    _var = StringVar()
    _label = Label(frame_fields, text=_field, font=('bold', 12))
    _label.grid(row=_row, column=_col * 2, sticky=E)
    _entry = Entry(frame_fields, textvariable=_var)
    _entry.grid(row=_row, column=_col * 2 + 1, sticky=W)
    # Publish under the historical per-field global names.
    globals()[_field + '_text'] = _var
    globals()[_field + '_label'] = _label
    globals()[_field + '_entry'] = _entry
frame_entry = Frame(app)
frame_entry.grid(row=4, column=0, columnspan=4, rowspan=6, pady=20, padx=20)
# One tree-view column per database column (id first, narrow).
columns = ['id', 'UID', 'YEAR', 'WEEK', 'DEPT', 'TESTER', 'PROGRAM', 'BOX',
           'PRODUCT', 'DATECODE', 'LOT', 'TEST', 'PACKAGE', 'HOUR',
           'STACK_TRAY', 'DEVICE_NUM', 'QTY', 'RECEIVED_FROM', 'WOR_FORM',
           'RECEIVED_ORDER_DATE', 'TEST_START_DATE', 'TOTAL_TIME_CONSUMED',
           'DATE_OUT', 'STATUS', 'COMMENTS', 'PRINT_LABEL']
entry_tree_view = Treeview(frame_entry, columns=columns, show="headings")
entry_tree_view.column("id", width=30)
for col in columns[1:]:
    entry_tree_view.column(col, width=55)
    entry_tree_view.heading(col, text=col)
entry_tree_view.bind('<<TreeviewSelect>>', select_entry)
entry_tree_view.pack(side="left", fill="y")
# Vertical scrollbar.
scrollbar = Scrollbar(frame_entry, orient='vertical')
scrollbar.configure(command=entry_tree_view.yview)
scrollbar.pack(side="right", fill="y")
entry_tree_view.config(yscrollcommand=scrollbar.set)
# Horizontal scrollbar.  BUG FIX: the original wired this to yscrollcommand,
# which both broke horizontal scrolling and clobbered the vertical
# scrollbar's binding set just above; it also packed with fill="y" instead
# of fill="x".
scrollbar_x = Scrollbar(frame_entry, orient='horizontal')
scrollbar_x.configure(command=entry_tree_view.xview)
scrollbar_x.pack(side="bottom", fill="x")
entry_tree_view.config(xscrollcommand=scrollbar_x.set)
frame_btns = Frame(app)
frame_btns.grid(row=3, column=0)
# CRUD buttons operating on the form / the selected row.
add_btn = Button(frame_btns, text='Add Entry', width=12, command=add_entry)
add_btn.grid(row=0, column=0, pady=20)
remove_btn = Button(frame_btns, text='Remove Entry',
                    width=12, command=remove_entry)
remove_btn.grid(row=0, column=1)
update_btn = Button(frame_btns, text='Update Entry',
                    width=12, command=update_entry)
update_btn.grid(row=0, column=2)
clear_btn = Button(frame_btns, text='Clear Input',
                   width=12, command=clear_text)
clear_btn.grid(row=0, column=3)
# Search buttons live in the search frame next to their input boxes.
search_btn = Button(frame_search, text='Search',
                    width=12, command=search_hostname)
search_btn.grid(row=0, column=2)
search_query_btn = Button(frame_search, text='Search Query',
                          width=12, command=execute_query)
search_query_btn.grid(row=1, column=2)
# Renamed: the original reused 'search_query_btn' for this button,
# shadowing the query button created just above.
save_csv_btn = Button(frame_btns, text='Save CSV',
                      width=12, command=save_csv)
save_csv_btn.grid(row=0, column=4)
app.title('Test Database')
app.geometry('1500x800')
# Populate data
populate_list()
# Start program
app.mainloop()
b1a7ee6d0041500dad3f6b462bd8a6019ca59af1 | 13,461 | py | Python | gym/envs/robotics/hand/move_stepped.py | carlo-/gym | 7e7575601a0df5476ab9b15072c8b65693ce3071 | [
"Python-2.0",
"OLDAP-2.7"
] | 1 | 2021-01-08T18:18:43.000Z | 2021-01-08T18:18:43.000Z | gym/envs/robotics/hand/move_stepped.py | carlo-/gym | 7e7575601a0df5476ab9b15072c8b65693ce3071 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | gym/envs/robotics/hand/move_stepped.py | carlo-/gym | 7e7575601a0df5476ab9b15072c8b65693ce3071 | [
"Python-2.0",
"OLDAP-2.7"
] | 1 | 2019-07-31T18:40:26.000Z | 2019-07-31T18:40:26.000Z | import copy
from enum import Enum
import numpy as np
import gym
from gym import spaces
from gym.envs.robotics import hand_env
from gym.envs.robotics.hand.reach import FINGERTIP_SITE_NAMES
from gym.envs.robotics.hand.move import HandPickAndPlaceEnv, FINGERTIP_BODY_NAMES
from gym.envs.robotics.utils import reset_mocap2body_xpos
from gym.utils import transformations as tf
from gym.utils import kinematics as kin
class HandSteppedTask(Enum):
    """Task variants for the stepped hand environment below."""
    PICK_AND_PLACE = 1
    LIFT_ABOVE_TABLE = 2
def _smooth_step(x):
# https://www.desmos.com/calculator/oygnwcuvaz
return 1 / (1 + np.exp(-20 * (x - 0.20)))
class HandSteppedEnv(gym.GoalEnv):
    def __init__(self, *, task: HandSteppedTask=None, render_substeps=False):
        """Wrap a HandPickAndPlaceEnv behind a high-level stepped interface.

        task: which HandSteppedTask to run (default LIFT_ABOVE_TABLE).
        render_substeps: if True, render intermediate sim steps and debug
            markers while servoing the arm/fingers.
        """
        super(HandSteppedEnv, self).__init__()
        self.metadata = {
            'render.modes': ['human'],
        }
        self._qp_solver = None
        self.task = task or HandSteppedTask.LIFT_ABOVE_TABLE
        self.render_substeps = render_substeps
        # Underlying low-level simulation environment (dense reward, goal
        # always in the air, fingers free, box object).
        self.sim_env = HandPickAndPlaceEnv(reward_type='dense', target_in_the_air_p=1.0,
                                           weld_fingers=False, object_id='box')
        self.goal = self._sample_goal()
        obs = self._get_obs()
        # Action layout: 5 fingertip offsets (3 values each), plus a 3D arm
        # target for PICK_AND_PLACE only.
        if self.task == HandSteppedTask.PICK_AND_PLACE:
            n_actions = 3 * 6
        elif self.task == HandSteppedTask.LIFT_ABOVE_TABLE:
            n_actions = 3 * 5
        else:
            raise NotImplementedError
        self.action_space = spaces.Box(-1., 1., shape=(n_actions,), dtype='float32')
        self.observation_space = spaces.Dict(dict(
            desired_goal=spaces.Box(-np.inf, np.inf, shape=obs['achieved_goal'].shape, dtype='float32'),
            achieved_goal=spaces.Box(-np.inf, np.inf, shape=obs['achieved_goal'].shape, dtype='float32'),
            observation=spaces.Box(-np.inf, np.inf, shape=obs['observation'].shape, dtype='float32'),
        ))
# Shortcuts
# ----------------------------
    @property
    def viewer(self):
        # Delegate to the wrapped sim env's viewer.
        return self.sim_env.viewer
    @property
    def sim(self):
        # Delegate to the wrapped sim env's MuJoCo simulation.
        return self.sim_env.sim
    def get_fingertips_pos(self):
        # World-frame positions of the 5 fingertip sites, shape (5, 3).
        return np.array([self.sim.data.get_site_xpos(name) for name in FINGERTIP_SITE_NAMES])
# Env methods
# ----------------------------
    def seed(self, seed=None):
        # Forward seeding to the wrapped environment (returns None).
        self.sim_env.seed(seed)
    def step(self, action: np.ndarray):
        """Run one high-level step, recovering from MuJoCo instabilities.

        If the simulation diverges (MujocoException), the episode is
        terminated with zero reward and a fresh reset observation.
        """
        import mujoco_py
        try:
            res = self._step(action)
            return res
        except mujoco_py.MujocoException:
            print("Warning: Environment is recovering from a MujocoException.")
            return self.reset(), 0.0, True, dict()
    def _step(self, action: np.ndarray):
        """Execute one high-level grasp-and-move step.

        *action* packs 5 fingertip offsets (3 values each, scaled to a
        +/-3 cm cube around the object) and, for PICK_AND_PLACE, a
        normalized 3D arm target mapped into the forearm bounds.
        """
        action = np.clip(action, self.action_space.low, self.action_space.high)
        # Fingertip targets relative to the object, +/-0.03 m per axis.
        fingers_pos_wrt_obj = action[:(3*5)].reshape(-1, 3) * np.r_[0.03, 0.03, 0.03]
        if self.task == HandSteppedTask.LIFT_ABOVE_TABLE:
            arm_pos_wrt_world = np.zeros(3)
        elif self.task == HandSteppedTask.PICK_AND_PLACE:
            arm_pos_wrt_world = action[(3*5):]
        else:
            raise NotImplementedError
        # Map the normalized [-1, 1] arm target into the forearm workspace box.
        arm_bounds = np.array(self.sim_env.forearm_bounds).T
        if self.render_substeps:
            tf.render_box(self.viewer, bounds=arm_bounds)
        arm_pos_wrt_world *= np.abs(arm_bounds[:, 1] - arm_bounds[:, 0]) / 2.0
        arm_pos_wrt_world += arm_bounds.mean(axis=1)
        obj_pose = self.sim_env._get_object_pose()
        # presumably 0.37 is just above the tabletop height -- TODO confirm
        obj_on_ground = obj_pose[2] < 0.37
        # Fingertip targets in world frame (identity orientation appended).
        fingers_pos_targets = np.array([
            tf.apply_tf(np.r_[transf, 1., 0., 0., 0.], obj_pose) for transf in fingers_pos_wrt_obj
        ])
        # Disable the finger weld constraints (equality constraints 1..n).
        self.sim.model.eq_active[1:] = 0
        # self.sim.data.mocap_pos[1:] = fingers_pos_targets[:, :3]
        if self.render_substeps:
            tf.render_pose(arm_pos_wrt_world.copy(), self.sim_env.viewer, label='arm_t')
            # for i, f in enumerate(fingers_pos_targets):
            #     tf.render_pose(f, self.sim_env.viewer, label=f'f_{i}')
        # Pre-grasp palm target: slightly offset from the fingertip centroid.
        pregrasp_palm_target = fingers_pos_targets[:, :3].mean(axis=0)
        pregrasp_palm_target = np.r_[pregrasp_palm_target, 1., 0., 0., 0.]
        pregrasp_palm_target = tf.apply_tf(np.r_[-0.01, 0., 0.015, 1., 0., 0., 0.], pregrasp_palm_target)[:3]
        # Phase 1: approach with an open-hand posture, only if the hand is
        # not already touching the object.
        if len(self.sim_env.get_object_contact_points(other_body='robot')) == 0:
            hand_action = np.r_[0., -.5, -np.ones(18)]
            hand_action[15:] = (-1., -0.5, 1., -1., 0)
            self._move_arm(pregrasp_palm_target, hand_action=hand_action)
        # Phase 2: close the fingers onto their per-finger targets.
        self._move_fingers(fingers_pos_targets, max_steps=30)
        # Phase 3: move the arm to its target, scoring grasp stability.
        stable_steps = self._move_arm(arm_pos_wrt_world, count_stable_steps=True)
        done = obj_on_ground
        obs = self._get_obs()
        if self.task == HandSteppedTask.LIFT_ABOVE_TABLE:
            info = dict()
            reward = stable_steps
        elif self.task == HandSteppedTask.PICK_AND_PLACE:
            info = {'is_success': self.sim_env._is_success(obs['achieved_goal'], self.goal)}
            dist_to_goal = np.linalg.norm(self.goal[:3] - self.sim_env._get_object_pose()[:3])
            dist_reward = 100 / (1 + dist_to_goal*_smooth_step(dist_to_goal) * 100) # range: [0, 100]
            alpha = 0.25
            # Blend closeness-to-goal with grasp stability.
            reward = dist_reward * alpha + stable_steps * (1 - alpha)
        else:
            raise NotImplementedError
        return obs, reward, done, info
    def render(self, mode='human', keep_markers=False):
        """Render via the wrapped env; optionally re-add the debug markers
        that the wrapped render call would otherwise clear."""
        markers = []
        if keep_markers:
            # Snapshot the viewer's markers before they are flushed.
            markers = copy.deepcopy(self.viewer._markers)
        self.sim_env.render()
        for m in markers:
            self.viewer.add_marker(**m)
    def reset(self):
        """Reset the wrapped sim, draw a new elevated goal, return the obs."""
        # Disable finger weld constraints before resetting the simulation.
        self.sim.model.eq_active[1:] = 0
        self.sim_env.reset()
        self.goal = self._sample_goal()
        return self._get_obs()
# GoalEnv methods
# ----------------------------
    @property
    def goal(self):
        # The goal is stored on the wrapped sim env, not on this wrapper.
        return self.sim_env.goal
    @goal.setter
    def goal(self, value):
        self.sim_env.goal = value
    def compute_reward(self, *args, **kwargs):
        # Rewards are computed inside _step (they depend on sim rollouts),
        # so the standard GoalEnv recomputation API is unsupported here.
        raise NotImplementedError
def _sample_goal(self):
new_goal = None
while new_goal is None or new_goal[2] < 0.60:
new_goal = self.sim_env._sample_goal()
return new_goal
    def _get_obs(self):
        """Return the wrapped env's obs dict with 'observation' replaced by
        object position (3) + flattened fingertip positions (15)."""
        sim_obs = self.sim_env._get_obs()
        obj_pos = self.sim_env._get_object_pose()[:3]
        fingertips_pos = self.get_fingertips_pos()
        obs = np.r_[obj_pos, fingertips_pos.ravel()]
        return {
            **sim_obs,
            'observation': obs,
        }
# Hand and arm control
# ----------------------------
def _move_arm(self, grasp_center_target: np.ndarray, hand_action: np.ndarray=None,
              threshold=0.01, k=0.1, max_steps=100, count_stable_steps=False):
    """Servo the mocap-driven arm so the grasp center reaches a target.

    Args:
        grasp_center_target: desired world position for the grasp center.
        hand_action: optional hand action re-applied on every step.
        threshold: early-exit distance (ignored while counting stable steps).
        k: proportional gain applied to the position error each step.
        max_steps: simulation-step budget.
        count_stable_steps: when True, run the full budget and count steps in
            which the object stays still relative to the grasp center while
            not touching the table.

    Returns:
        The stable-step count when ``count_stable_steps`` is True, else None.
    """
    if hand_action is not None:
        hand_action = hand_action.copy()  # avoid mutating the caller's array
    stable_steps = 0
    prev_rel_pos = np.zeros(3)
    reset_mocap2body_xpos(self.sim)
    for i in range(max_steps):
        grasp_center_pos = self.sim_env._get_grasp_center_pose(no_rot=True)[:3]
        d = grasp_center_target - grasp_center_pos
        if np.linalg.norm(d) < threshold and not count_stable_steps:
            break
        # set hand action
        if hand_action is not None:
            hand_env.HandEnv._set_action(self.sim_env, hand_action)
        # Proportional control: nudge the mocap target a fraction of the error.
        self.sim.data.mocap_pos[0] += d * k
        self.sim.step()
        if count_stable_steps:
            obj_pos = self.sim_env._get_object_pose()[:3]
            rel_pos = obj_pos - grasp_center_pos
            # NOTE(review): prev_rel_pos is initialized to zeros above, so the
            # 'is not None' guard can never be False here.
            still = prev_rel_pos is not None and np.all(np.abs(rel_pos - prev_rel_pos) < 0.002)
            obj_above_table = len(self.sim_env.get_object_contact_points(other_body='table')) == 0
            if still and obj_above_table:
                stable_steps += 1
            elif i > 10:
                # Past the grace period, stop once the object slips or
                # contacts the table.
                break
            prev_rel_pos = rel_pos
        if self.render_substeps:
            self.render(keep_markers=True)
    if count_stable_steps:
        return stable_steps
def _move_fingers(self, targets: np.ndarray, threshold=0.01, k=0.1, max_steps=100, multiobjective_solver=False):
    """Drive the fingertips toward per-finger Cartesian targets via velocity IK.

    Args:
        targets: per-fingertip target poses; only the xyz columns are used.
        threshold: stop once the aggregate fingertip position error is below this.
        k: proportional gain converting position error to commanded velocity.
        max_steps: simulation-step budget.
        multiobjective_solver: solve one stacked QP for all fingertips at once
            instead of an independent QP per fingertip.
    """
    for _ in range(max_steps):
        fingers_pos_curr = self.get_fingertips_pos()
        err = np.linalg.norm(fingers_pos_curr[:, :3] - targets[:, :3])
        if err < threshold:
            break
        if multiobjective_solver:
            # One joint solve over all fingertips.
            vels = []
            for t_pose, c_pos, body in zip(targets, fingers_pos_curr, FINGERTIP_BODY_NAMES):
                cart_vel = np.zeros(6)
                cart_vel[:3] = (t_pose[:3] - c_pos) * k
                vels.append(cart_vel[:3])
            sol, opt, ctrl_idx = self._solve_hand_ik_multiobjective_vel(FINGERTIP_BODY_NAMES, vels, no_wrist=False)
            if opt:
                # Clip the control increment to keep the motion small and stable.
                self.sim.data.ctrl[:] += np.clip(sol, -.5, .5)
        else:
            # Independent per-fingertip solves (wrist joints excluded).
            for t_pose, c_pos, body in zip(targets, fingers_pos_curr, FINGERTIP_BODY_NAMES):
                cart_vel = np.zeros(6)
                cart_vel[:3] = (t_pose[:3] - c_pos) * k
                sol, opt, ctrl_idx = self._solve_hand_ik_vel(body, cart_vel, no_wrist=True, check_joint_lims=False)
                if opt:
                    self.sim.data.ctrl[ctrl_idx] += np.clip(sol, -.5, .5)
        self.sim.step()
        if self.render_substeps:
            self.render(keep_markers=True)
# IK solvers
# ----------------------------
def _solve_hand_ik_vel(self, ee_body: str, cart_vel: np.ndarray, no_wrist=False,
                       check_joint_lims=True, no_rot=True):
    """Solve a QP for joint velocities realizing a Cartesian velocity at a body.

    Args:
        ee_body: name of the end-effector body (e.g. 'robot0:FFdistal').
        cart_vel: 6D (or 3D when no_rot) desired Cartesian velocity.
        no_wrist: restrict the solve to this finger's joints only.
        check_joint_lims: pass joint limits to the QP solver.
        no_rot: drop the rotational rows of the Jacobian and velocity.

    Returns:
        (sol, opt, ctrl_idx): joint-velocity solution, solver success flag,
        and the actuator indices the solution maps onto.
    """
    jac = kin.get_jacobian(self.sim.model, self.sim.data, self.sim.model.body_name2id(ee_body))
    # e.g. 'robot0:FFdistal' -> 'FF'; used to match this finger's joint names.
    ee_initials = ee_body.replace('robot0:', '').replace('distal', '').upper()
    jac_idx = []
    qpos = []
    ctrl_idx = []
    joint_limits = []
    for i in range(jac.shape[1]):
        jnt_id = self.sim.model.dof_jntid[i]
        if self.sim.model.jnt_type[jnt_id] != 3:
            # only rotational joints
            continue
        if True:
            pass
        jnt_name = self.sim.model.joint_id2name(jnt_id)
        if ee_initials not in jnt_name and no_wrist:
            # Skip joints belonging to other fingers/the wrist.
            continue
        act_name = jnt_name.replace('robot0:', 'robot0:A_')
        try:
            act_id = self.sim.model.actuator_name2id(act_name)
        except ValueError:
            # Unactuated joint: no matching actuator name.
            continue
        jac_idx.append(i)
        qpos_addr = self.sim.model.jnt_qposadr[jnt_id]
        qpos.append(self.sim.data.qpos[qpos_addr])
        ctrl_idx.append(act_id)
        joint_limits.append(self.sim.model.jnt_range[jnt_id])
    # Keep only the selected joint columns of the Jacobian.
    jac = jac[:, jac_idx]
    qpos = np.array(qpos)
    joint_limits = np.array(joint_limits)
    assert qpos.shape[0] == jac.shape[1]
    if not check_joint_lims:
        joint_limits = None
    if no_rot:
        # Positional rows only.
        cart_vel = cart_vel[:3]
        jac = jac[:3]
    sol, opt = kin.solve_qp_ik_vel(cart_vel, jac, qpos, joint_lims=joint_limits,
                                   duration=0.1, margin=0.1, solver=self._qp_solver)
    return sol, opt, ctrl_idx
def _solve_hand_ik_multiobjective_vel(self, bodies: list, velocities: list, no_wrist=False):
    """Solve one stacked QP for joint velocities realizing per-body velocities.

    Args:
        bodies: end-effector body names.
        velocities: one 3D Cartesian velocity per body.
        no_wrist: restrict each body's joints to its own finger.

    Returns:
        (sol, opt, all_ctrl_idx): solution vector, solver success flag and
        the actuator indices the solution maps onto.

    NOTE(review): all_qpos/all_ctrl_idx are overwritten on every loop
    iteration, so they reflect only the LAST body's joint selection. That is
    consistent only when every body selects the same joint set -- true for
    no_wrist=False (the only call site in this file), but wrong for
    no_wrist=True where each finger selects different joints. Confirm before
    using with no_wrist=True.
    """
    n_end_effectors = len(bodies)
    velocities = np.concatenate(velocities)
    assert velocities.shape == (n_end_effectors * 3,)
    jacobians = []
    all_qpos = None
    all_ctrl_idx = None
    for ee_body in bodies:
        jac = kin.get_jacobian(self.sim.model, self.sim.data, self.sim.model.body_name2id(ee_body))
        # e.g. 'robot0:FFdistal' -> 'FF'; used to match this finger's joints.
        ee_initials = ee_body.replace('robot0:', '').replace('distal', '').upper()
        jac_idx = []
        qpos = []
        ctrl_idx = []
        for i in range(jac.shape[1]):
            jnt_id = self.sim.model.dof_jntid[i]
            if self.sim.model.jnt_type[jnt_id] != 3:
                # only rotational joints
                continue
            jnt_name = self.sim.model.joint_id2name(jnt_id)
            if ee_initials not in jnt_name and no_wrist:
                continue
            act_name = jnt_name.replace('robot0:', 'robot0:A_')
            try:
                act_id = self.sim.model.actuator_name2id(act_name)
            except ValueError:
                # Unactuated joint: no matching actuator name.
                continue
            jac_idx.append(i)
            qpos_addr = self.sim.model.jnt_qposadr[jnt_id]
            qpos.append(self.sim.data.qpos[qpos_addr])
            ctrl_idx.append(act_id)
        jac = jac[:, jac_idx]
        qpos = np.array(qpos)
        assert qpos.shape[0] == jac.shape[1]
        # Positional rows only; stacked across bodies below.
        jacobians.append(jac[:3])
        all_qpos = qpos
        all_ctrl_idx = ctrl_idx
    jacobians = np.concatenate(jacobians)
    sol, opt = kin.solve_qp_ik_vel(velocities, jacobians, all_qpos, solver=self._qp_solver)
    return sol, opt, all_ctrl_idx
class HandPickAndPlaceSteppedEnv(HandSteppedEnv):
    """Stepped hand environment preconfigured for the pick-and-place task."""

    def __init__(self, **kwargs):
        super().__init__(task=HandSteppedTask.PICK_AND_PLACE, **kwargs)
| 37.811798 | 119 | 0.585469 |
dc535f995fa5248e9183279163addaab677e98e6 | 7,736 | py | Python | cold_posterior_bnn/core/diagnostics_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | cold_posterior_bnn/core/diagnostics_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | cold_posterior_bnn/core/diagnostics_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for google_research.google_research.cold_posterior_bnn.core.diagnostics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
from absl import logging
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from cold_posterior_bnn.core import diagnostics
from cold_posterior_bnn.core import sgmcmc_testlib
def _generate_symmetric_alpha_stable_variates(stability, shape):
  """Generate Symmetric-alpha-Stable variates.

  Uses the (Chambers, Mallows and Stuck, 1976) method; the formula below is
  undefined at stability == 1, hence the explicit validation.

  Args:
    stability: >0, <= 2.0, stability parameter. We must have stability != 1.
    shape: shape of the Tensor to generate.

  Returns:
    sample: tf.Tensor of given shape containing SaS(stability,0,0,0) variates.

  Raises:
    ValueError: if stability is outside (0, 2] or equal to 1.
  """
  # Enforce the documented precondition instead of silently producing
  # NaN/garbage samples.
  if not 0.0 < stability <= 2.0 or stability == 1.0:
    raise ValueError(
        'stability must be in (0, 2] and != 1, got %r' % (stability,))
  # Algorithm of (Chambers et al., 1976)
  # https://en.wikipedia.org/wiki/Stable_distribution#Simulation_of_stable_variables
  u = tf.random.uniform(shape, minval=-0.5*math.pi, maxval=0.5*math.pi)
  w = -tf.math.log(tf.random.uniform(shape))  # ~ Exponential(1)
  x1 = tf.math.sin(stability*u) / (tf.math.cos(u)**(1.0/stability))
  x2 = (tf.math.cos(u - stability*u)/w)**((1.0-stability)/stability)
  sample = x1*x2
  return sample
class DiagnosticsTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for stability-index, gradient-noise and autocorrelation estimators."""

  @parameterized.parameters(
      [0.25, 0.4, 0.6, 0.75, 0.9, 1.1, 1.25, 1.5, 1.75, 2.0])
  def test_stability_estimation(self, stability):
    # Large sample keeps the Monte Carlo error of the estimator small enough
    # for the tight delta below.
    nsamples = 1048576
    samples = _generate_symmetric_alpha_stable_variates(stability, (nsamples,))
    stability_estimate = 1.0 / (
        diagnostics.symmetric_alpha_stable_invstability_estimator(
            samples, 0, 16))
    self.assertAlmostEqual(stability, stability_estimate, delta=0.025,
                           msg='Inaccurate stability index estimate: '
                           'true stability %.3f, estimate %.3f.' % (
                               stability, stability_estimate))

  def test_variable_gradient_stability(self):
    # Small MLP over random data/labels; only the gradient statistics matter.
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(32, input_dim=100))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.Dense(10))
    nsamples = 131072
    data = tf.random.normal((nsamples, 100))
    labels = tf.reshape(tf.random.categorical(tf.zeros((nsamples, 10)), 1),
                        (nsamples,))
    with tf.GradientTape(persistent=True) as tape:
      logits = model(data, training=True)
      ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=logits)
    batchsize = 64
    # 1. Aggregate estimates
    stability_estimates = diagnostics.variable_gradient_stability_estimate(
        model, tape, ce, batchsize, nelem_per_piece=8)
    self.assertLen(stability_estimates, len(model.trainable_variables))
    for stability_est, parameter in zip(stability_estimates,
                                        model.trainable_variables):
      logging.info('Parameter "%s" has estimated stability %.5f',
                   parameter.name, float(stability_est))
      self.assertEqual(int(tf.size(stability_est)), 1,
                       msg='Stability estimate is not scalar.')
      # Loose sanity range around the theoretical [0, 2] stability interval.
      self.assertGreaterEqual(float(stability_est), -0.1)
      self.assertLessEqual(float(stability_est), 2.1)
    # 2. Per-parameter estimates
    stability_estimates = diagnostics.variable_gradient_stability_estimate(
        model, tape, ce, batchsize, nelem_per_piece=8,
        aggregate_variable_estimates=False)
    for stability_est, parameter in zip(stability_estimates,
                                        model.trainable_variables):
      self.assertEqual(stability_est.shape, parameter.shape)
      self.assertAllInRange(stability_est, -1.5, 3.5)

  @parameterized.parameters([0.0, 0.25, 2.5])
  def test_gradient_noise_estimate(self, noise_sigma):
    pmodel = sgmcmc_testlib.Normal2D(noise_sigma=noise_sigma)
    model = tf.keras.Sequential([pmodel])
    grad_est = diagnostics.GradientNoiseEstimator()

    @tf.function
    def step_model(count):
      # Feed many gradient observations into the estimator.
      for _ in range(count):
        with tf.GradientTape() as tape:
          nll = model(tf.zeros(1, 1), tf.zeros(1, 1))
        gradients = tape.gradient(nll, model.trainable_variables)
        grad_est.apply_gradients(zip(gradients, model.trainable_variables))

    for _ in range(200):
      step_model(50)
    precond_dict = grad_est.estimate_fixed_preconditioner(
        model, scale_to_min=False)
    # Check that the estimated mass closely matches the noise stddev
    for name in precond_dict:
      mass = precond_dict[name]
      logging.info('Variable "%s" estimated mass %.5f, true stddev %.5f',
                   name, mass, noise_sigma)
      self.assertAlmostEqual(mass, noise_sigma, delta=0.02,
                             msg='Estimates mass %.5f differs from true '
                             'stddev %.5f' % (mass, noise_sigma))

  def _generate_autoregressive_data(self, ar_rate, shape, nitems):
    """Generate an example AR(1) data set with known autocorrelation."""
    data = list()
    data.append(tf.random.normal(shape))
    for t in range(1, nitems):
      data.append(ar_rate * data[t - 1] + tf.random.normal(shape))
    return data

  _AR_RATES = [0.75, 0.9, 0.99, 0.999]

  @parameterized.parameters(itertools.product(_AR_RATES))
  def test_autocorr_estimation(self, ar_rate):
    tf.set_random_seed(1)
    shape = (8, 16)
    data = self._generate_autoregressive_data(ar_rate, shape, 10000)
    acorr = diagnostics.AutoCorrelationEstimator(
        shape, nlevels=3, nsteps_per_level=16)
    for data_item in data:
      # TODO(nowozin): try to @tf.function this
      acorr.update(data_item)
    test_points = [1, 2, 3, 4, 10, 20, 30]
    for tp in test_points:
      # AR(1) has exact autocorrelation rate**lag at each lag.
      autocorr_exact = ar_rate**tp
      autocorr_estimate = float(tf.reduce_mean(acorr(tp)))
      logging.info('AR @ %d (rate=%.4f), exact %.5f, estimate %.5f', tp,
                   ar_rate, autocorr_exact, autocorr_estimate)
      abs_diff = math.fabs(autocorr_exact - autocorr_estimate)
      ratio = autocorr_estimate / autocorr_exact
      # Accept either small absolute error or a bounded multiplicative error.
      self.assertTrue(
          abs_diff < 0.05 or (1.0 / ratio <= 1.4 and ratio <= 1.4),
          msg='Autocorrelation error, exact %.5f, estimate %.5f, '
          'abs_diff %.5f, ratio %.3f' %
          (autocorr_exact, autocorr_estimate, abs_diff, ratio))
    # Test the time-to-one-sample (TT1) estimates
    # TT1 is difficult to estimate and we only do a small number of samples
    # here, therefore the tolerances are quite generous
    tt1_exact = 1.0 / (1.0 - ar_rate)
    tt1_estimate = acorr.time_to_one_sample()
    logging.info('TT1 (rate=%.4f), exact %.5f, estimate %.5f', ar_rate,
                 tt1_exact, tt1_estimate)
    self.assertLess(
        tt1_estimate,
        4.0 * tt1_exact,
        msg='Estimated TT1 %.5f too large, true TT1 %.5f' %
        (tt1_estimate, tt1_exact))
    self.assertGreater(
        tt1_estimate,
        tt1_exact / 4.0,
        msg='Estimated TT1 %.5f too small, true TT1 %.5f' %
        (tt1_estimate, tt1_exact))
if __name__ == '__main__':
  # Run the suite with TF1 eager execution enabled.
  tf.enable_eager_execution()
  tf.test.main()
| 38.487562 | 84 | 0.676577 |
1abbebd87a16786edaffcc84ecf8b61850f1e30a | 1,959 | py | Python | src/satisfy/objective.py | simone-campagna/satisfy | b5327e937e32c5324c05f6288f59cfaac4a316dc | [
"Apache-2.0"
] | null | null | null | src/satisfy/objective.py | simone-campagna/satisfy | b5327e937e32c5324c05f6288f59cfaac4a316dc | [
"Apache-2.0"
] | null | null | null | src/satisfy/objective.py | simone-campagna/satisfy | b5327e937e32c5324c05f6288f59cfaac4a316dc | [
"Apache-2.0"
] | null | null | null | import abc
import operator
from .constraint import ExpressionConstraint
from .expression import Expression
__all__ = [
'Objective',
'Maximize',
'Minimize',
]
class Objective(abc.ABC):
    """Abstract optimization objective.

    Subclasses provide a constraint factory and record improving solutions.
    ``evaluate`` and ``__repr__`` rely on subclasses defining
    ``self._expression`` (see MinMax in this module).
    """

    @abc.abstractmethod
    def make_constraint(self, model):
        raise NotImplementedError()

    @abc.abstractmethod
    def add_solution(self, substitution):
        # NOTE(review): the concrete override in MinMax takes
        # (constraint, substitution) -- this abstract signature is narrower;
        # confirm which call contract is intended.
        raise NotImplementedError()

    def evaluate(self, substitution):
        # Requires self._expression to be set by the subclass.
        return self._expression.evaluate(substitution)

    def __repr__(self):
        return "{}({!r})".format(type(self).__name__, self._expression)
class MinMaxConstraint(ExpressionConstraint):
    """Bound constraint backing the Minimize/Maximize objectives.

    The constraint expression is ``not_set | op(expression, bound)``: while
    the ``not_set`` parameter is 1 the disjunction holds regardless of the
    bound, so the constraint is inactive until ``set_bound`` is first called.
    """

    def __init__(self, model, expression, op):
        self._model = model
        # 1 == "no bound recorded yet"; flipped to 0 by set_bound.
        self._not_set = model.add_parameter(1)
        self._bound = model.add_parameter(0)
        super().__init__(self._not_set | op(expression, self._bound))

    def set_bound(self, value):
        """Activate the constraint with a new bound, recompiling if needed."""
        self._not_set.value = 0
        self._bound.value = value
        if self.is_compiled():
            self.compile()
class MinConstraint(MinMaxConstraint):
    """MinMaxConstraint specialized with strict less-than, for minimization."""

    def __init__(self, model, expression):
        MinMaxConstraint.__init__(self, model, expression, op=operator.lt)
class MaxConstraint(MinMaxConstraint):
    """MinMaxConstraint specialized with strict greater-than, for maximization."""

    def __init__(self, model, expression):
        MinMaxConstraint.__init__(self, model, expression, op=operator.gt)
class MinMax(Objective):
    """Shared base of Minimize/Maximize: validates and stores the expression."""

    def __init__(self, expression):
        if not isinstance(expression, Expression):
            raise TypeError("{} is not an Expression".format(expression))
        self._expression = expression
        super().__init__()

    def add_solution(self, constraint, substitution):
        # Evaluate the objective on the new solution and tighten the bound so
        # the next solution must improve on it.
        # NOTE(review): signature differs from Objective.add_solution (extra
        # 'constraint' argument) -- confirm the intended contract.
        value = self._expression.evaluate(substitution)
        constraint.set_bound(value)
class Minimize(MinMax):
    """Objective that drives the expression toward ever-smaller values."""

    def make_constraint(self, model):
        constraint = MinConstraint(model, self._expression)
        return constraint
class Maximize(MinMax):
    """Objective that drives the expression toward ever-larger values."""

    def make_constraint(self, model):
        constraint = MaxConstraint(model, self._expression)
        return constraint
| 26.472973 | 73 | 0.685043 |
762c6d765c189b1afe3a16ab1a6cf53d69959caa | 16,560 | py | Python | S4/S4 Library/simulation/sims/baby/baby.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Library/simulation/sims/baby/baby.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Library/simulation/sims/baby/baby.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | from protocolbuffers import Commodities_pb2
from protocolbuffers.Consts_pb2 import MSG_SIM_MOOD_UPDATE
from buffs.tunable import TunableBuffReference
from event_testing.resolver import SingleSimResolver
from interactions.interaction_finisher import FinishingType
from interactions.utils.death import DeathTracker
from objects.components.state import TunableStateValueReference, ObjectState
from objects.game_object import GameObject
from objects.system import create_object
from sims.baby.baby_tuning import BabySkinTone, BabyTuning
from sims.baby.baby_utils import replace_bassinet
from sims.genealogy_tracker import genealogy_caching
from sims.sim_info_lod import SimInfoLODLevel
from sims.sim_info_mixin import HasSimInfoBasicMixin
from sims.sim_info_types import Gender
from sims4.tuning.tunable import TunableReference, TunableList, TunableMapping, TunableEnumEntry, TunableTuple, AutoFactoryInit, HasTunableSingletonFactory, OptionalTunable
from statistics.mood import Mood
from tag import Tag
from ui.ui_dialog_notification import UiDialogNotification, TunableUiDialogNotificationSnippet
from vfx import PlayEffect
import build_buy
import camera
import distributor
import placement
import services
import sims4
import tag
logger = sims4.log.Logger('Baby')
class _BabyRemovalMoment(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'notification': OptionalTunable(description='\n If enabled, specify a notification to show when this moment is\n executed.\n ', tunable=TunableUiDialogNotificationSnippet(description='\n The notification to show when this moment is executed.\n ')), 'vfx': OptionalTunable(description='\n If enabled, play a visual effect when this moment is executed.\n ', tunable=PlayEffect.TunableFactory(description='\n The visual effect to play when this moment is executed.\n ')), 'buff': OptionalTunable(description="\n If enabled, specify a buff to apply to the baby's immediate family\n when this moment is executed.\n ", tunable=TunableBuffReference(description="\n The buff to be applied to the baby's immediate family when this\n moment is executed.\n ")), 'empty_state': TunableStateValueReference(description='\n The state to set on the empty bassinet after this moment is\n executed. This should control any reaction broadcasters that we\n might want to happen when this baby is no longer present.\n ', allow_none=True)}
def execute_removal_moment(self, baby):
    """Remove the baby from the world according to this tuned moment.

    Plays the tuned notification/VFX, applies the tuned buff to the immediate
    family, swaps in an empty bassinet, and ships the sim_info off to a
    hidden household at minimum LOD.
    """
    baby.is_being_removed = True
    sim_info = baby.sim_info
    if self.notification is not None:
        dialog = self.notification(sim_info, SingleSimResolver(sim_info))
        dialog.show_dialog()
    if self.vfx is not None:
        vfx = self.vfx(baby)
        vfx.start()
    camera.focus_on_sim(baby, follow=False)
    sim_info_manager = services.sim_info_manager()
    if self.buff is not None:
        with genealogy_caching():
            for member_id in sim_info.genealogy.get_immediate_family_sim_ids_gen():
                member_info = sim_info_manager.get(member_id)
                # Minimum-LOD sims cannot carry buffs.
                if member_info.lod != SimInfoLODLevel.MINIMUM:
                    member_info.add_buff_from_op(self.buff.buff_type, self.buff.buff_reason)
    baby.cancel_interactions_running_on_object(FinishingType.TARGET_DELETED, cancel_reason_msg='Baby is being removed.')
    empty_bassinet = replace_bassinet(sim_info, safe_destroy=True)
    # Drive any tuned reaction broadcasters on the now-empty bassinet.
    if self.empty_state is not None:
        empty_bassinet.set_state(self.empty_state.state, self.empty_state)
    client = sim_info.client
    if client is not None:
        # Deselect the baby before removing it from the selectable sims.
        client.set_next_sim_or_none(only_if_this_active_sim_info=sim_info)
        client.selectable_sims.remove_selectable_sim_info(sim_info)
    sim_info.inject_into_inactive_zone(DeathTracker.DEATH_ZONE_ID, start_away_actions=False, skip_instanced_check=True, skip_daycare=True)
    sim_info.household.remove_sim_info(sim_info, destroy_if_empty_household=True)
    sim_info.transfer_to_hidden_household()
    sim_info.request_lod(SimInfoLODLevel.MINIMUM)
class Baby(GameObject, HasSimInfoBasicMixin):
MAX_PLACEMENT_ATTEMPTS = 8
BASSINET_EMPTY_STATE = TunableStateValueReference(description='\n The state value for an empty bassinet.\n ')
BASSINET_BABY_STATE = TunableStateValueReference(description='\n The state value for a non-empty bassinet.\n ')
BASSINET_BABY_TRAIT_STATES = TunableMapping(description="\n Specify any object states that are determined by the baby's traits. For\n example, tune a state with a geometry state override to handle Alien\n babies having their own geometry state.\n ", key_type=TunableReference(description='\n A trait that would cause babies to have a specific state..\n ', manager=services.trait_manager(), pack_safe=True), value_type=TunableStateValueReference(description='\n The state associated with this trait.\n ', pack_safe=True))
STATUS_STATE = ObjectState.TunableReference(description='\n The state defining the overall status of the baby (e.g. happy, crying,\n sleeping). We use this because we need to reapply this state to restart\n animations after a load.\n ')
BABY_SKIN_TONE_STATE_MAPPING = TunableMapping(description='\n From baby skin tone enum to skin tone state mapping.\n ', key_type=TunableEnumEntry(tunable_type=BabySkinTone, default=BabySkinTone.MEDIUM), value_type=TunableTuple(boy=TunableStateValueReference(), girl=TunableStateValueReference()))
BABY_MOOD_MAPPING = TunableMapping(description='\n From baby state (happy, crying, sleep) to in game mood.\n ', key_type=TunableStateValueReference(), value_type=Mood.TunableReference())
BABY_AGE_UP = TunableTuple(description='\n Multiple settings for baby age up moment.\n ', age_up_affordance=TunableReference(description='\n The affordance to run when baby age up to kid.\n ', manager=services.affordance_manager(), class_restrictions='SuperInteraction'), copy_states=TunableList(description='\n The list of the state we want to copy from the original baby\n bassinet to the new bassinet to play idle.\n ', tunable=TunableReference(manager=services.object_state_manager(), class_restrictions='ObjectState')), idle_state_value=TunableReference(description='\n The state value to apply on the new baby bassinet with the age up\n special idle animation/vfx linked.\n ', manager=services.object_state_manager(), class_restrictions='ObjectStateValue'))
BABY_PLACEMENT_TAGS = TunableList(description='\n When trying to place a baby bassinet on the lot, we attempt to place it\n near other objects on the lot. Those objects are determined in priority\n order by this tuned list. It will try to place next to all objects of\n the matching types, before trying to place the baby in the middle of the\n lot, and then finally trying the mailbox. If all FGL placements fail, we\n put the baby into the household inventory.\n ', tunable=TunableEnumEntry(description='\n Attempt to place the baby near objects with this tag set.\n ', tunable_type=Tag, default=Tag.INVALID))
REMOVAL_MOMENT_STATES = TunableMapping(description='\n A mapping of states to removal moments. When the baby is set to\n specified state, then the removal moment will execute and the object\n (and Sim) will be destroyed.\n ', key_type=TunableStateValueReference(description='\n The state that triggers the removal moment.\n ', pack_safe=True), value_type=_BabyRemovalMoment.TunableFactory(description='\n The moment that will execute when the specified state is triggered.\n '))
FAILED_PLACEMENT_NOTIFICATION = UiDialogNotification.TunableFactory(description='\n The notification to show if a baby could not be spawned into the world\n because FGL failed. This is usually due to the player cluttering their\n lot with objects. Token 0 is the baby.\n ')
@classmethod
def get_baby_skin_tone_state(cls, sim_info):
    """Return the tuned skin-tone state for this baby, or None.

    Resolves the baby's skin-tone enum, looks up the tuned state pair and
    picks the gendered variant; returns None whenever any step has no match.
    """
    baby_skin_enum = BabyTuning.get_baby_skin_tone_enum(sim_info)
    if baby_skin_enum is None:
        return None
    skin_state_tuple = cls.BABY_SKIN_TONE_STATE_MAPPING.get(baby_skin_enum)
    if skin_state_tuple is None:
        return None
    if sim_info.gender == Gender.FEMALE:
        return skin_state_tuple.girl
    if sim_info.gender == Gender.MALE:
        return skin_state_tuple.boy
    return None
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._sim_info = None
    # Triggers stay off until a sim_info is bound (see set_sim_info).
    self.state_component.state_trigger_enabled = False
    self.is_being_removed = False
    # When set, get_delete_op emits an ObjectReplace instead of a delete.
    self.replacement_bassinet = None
    # Removal moment seen before sim_info was available; executed on
    # finalize load (see on_state_changed / on_finalize_load).
    self._pending_removal_moment = None
def get_delete_op(self, *args, **kwargs):
    """Return the delete op, or an object-replace op when a replacement
    bassinet has been staged (as done during age-up)."""
    replacement = self.replacement_bassinet
    if replacement is None:
        return super().get_delete_op(*args, **kwargs)
    return distributor.ops.ObjectReplace(replacement_obj=replacement)
def may_reserve(self, *args, **kwargs):
    """Disallow reservations while the baby is being removed from the world."""
    return not self.is_being_removed and super().may_reserve(*args, **kwargs)
def set_sim_info(self, sim_info):
    # Binding a sim_info turns the bassinet from an empty prop into a live
    # baby: state triggers come back on and the baby states are applied.
    self._sim_info = sim_info
    if self._sim_info is not None:
        self.state_component.state_trigger_enabled = True
        self.enable_baby_state()

@property
def sim_info(self):
    return self._sim_info

@property
def sim_id(self):
    # Fall back to the object id while no sim_info is attached.
    if self._sim_info is not None:
        return self._sim_info.sim_id
    return self.id
def get_age_up_addordance(self):
    # NOTE(review): method name contains a typo ('addordance' for
    # 'affordance') but is part of the public interface, so it is kept.
    return Baby.BABY_AGE_UP.age_up_affordance
def replace_for_age_up(self, interaction=None):
    """Swap this bassinet for a fresh one playing the age-up idle.

    Copies the tuned age-up states onto the replacement, retargets the
    running interaction (if any), destroys this object and returns the new
    bassinet.
    """
    if interaction is not None:
        # Detach the interaction first -- presumably so destroying this
        # object below does not cancel it (TODO confirm).
        interaction.set_target(None)
    new_bassinet = create_object(self.definition)
    new_bassinet.location = self.location
    # Staging the replacement makes get_delete_op emit an ObjectReplace.
    self.replacement_bassinet = new_bassinet
    new_bassinet.set_sim_info(self.sim_info)
    new_bassinet.copy_state_values(self, state_list=Baby.BABY_AGE_UP.copy_states)
    idle_state_value = Baby.BABY_AGE_UP.idle_state_value
    new_bassinet.set_state(idle_state_value.state, idle_state_value)
    if interaction is not None:
        interaction.set_target(new_bassinet)
    self.destroy(source=self.sim_info, cause='Replacing bassinet for age up.')
    return new_bassinet
def place_in_good_location(self, position=None, routing_surface=None):
    """Find a valid spot for the bassinet on the active lot.

    Tries, in order: the suggested position, positions near on-lot objects
    tagged in BABY_PLACEMENT_TAGS, the lot's default position, and finally
    the household inventory (showing a notification if the sim is
    selectable).
    """
    plex_service = services.get_plex_service()
    is_active_zone_a_plex = plex_service.is_active_zone_a_plex()

    def try_to_place_bassinet(position, routing_surface=None, **kwargs):
        # Run find-good-location from the candidate; on success also require
        # the result to lie inside the active plex when relevant.
        starting_location = placement.create_starting_location(position=position, routing_surface=routing_surface)
        fgl_context = placement.create_fgl_context_for_object(starting_location, self, **kwargs)
        (translation, orientation) = placement.find_good_location(fgl_context)
        if translation is not None and orientation is not None:
            if is_active_zone_a_plex and (routing_surface is None or plex_service.get_plex_zone_at_position(translation, routing_surface.secondary_id) is None):
                return False
            else:
                self.move_to(translation=translation, orientation=orientation)
                if routing_surface is not None:
                    self.move_to(routing_surface=routing_surface)
                return True
        return False

    if position is not None and try_to_place_bassinet(position, routing_surface=routing_surface):
        return True
    lot = services.active_lot()
    for tag in Baby.BABY_PLACEMENT_TAGS:
        for (attempt, obj) in enumerate(services.object_manager().get_objects_with_tag_gen(tag)):
            position = obj.position
            routing_surface = obj.routing_surface
            if lot.is_position_on_lot(position) and try_to_place_bassinet(position, routing_surface=routing_surface, max_distance=10):
                # NOTE(review): returns None here but True above -- callers
                # should not rely on the return value.
                return
            if attempt >= Baby.MAX_PLACEMENT_ATTEMPTS:
                break
    position = lot.get_default_position()
    if not try_to_place_bassinet(position):
        # All placements failed: move the bassinet to household inventory.
        self.update_ownership(self.sim_info, make_sim_owner=False)
        if not build_buy.move_object_to_household_inventory(self):
            logger.error('Failed to place bassinet in household inventory.', owner='rmccord')
        if self.is_selectable:
            failed_placement_notification = Baby.FAILED_PLACEMENT_NOTIFICATION(self.sim_info, SingleSimResolver(self.sim_info))
            failed_placement_notification.show_dialog()
def populate_localization_token(self, *args, **kwargs):
    """Populate a localization token from the attached sim_info, falling
    back (with a warning) to the GameObject implementation when unset."""
    if self.sim_info is None:
        logger.warn('self.sim_info is None in baby.populate_localization_token', owner='epanero', trigger_breakpoint=True)
        return super().populate_localization_token(*args, **kwargs)
    return self.sim_info.populate_localization_token(*args, **kwargs)
def enable_baby_state(self):
    """Apply all states for an occupied bassinet (no-op without a sim_info)."""
    if self._sim_info is None:
        return
    self.set_state(self.BASSINET_BABY_STATE.state, self.BASSINET_BABY_STATE)
    # Force-update the status state so its animations restart (needed after
    # a load, per the STATUS_STATE tuning description).
    status_state = self.get_state(self.STATUS_STATE)
    self.set_state(status_state.state, status_state, force_update=True)
    skin_tone_state = self.get_baby_skin_tone_state(self._sim_info)
    if skin_tone_state is not None:
        self.set_state(skin_tone_state.state, skin_tone_state)
    # Trait-driven states (e.g. alternate geometry for special babies).
    for (trait, trait_state) in self.BASSINET_BABY_TRAIT_STATES.items():
        if self._sim_info.has_trait(trait):
            self.set_state(trait_state.state, trait_state)
def empty_baby_state(self):
    """Switch the bassinet into its empty (no occupant) state."""
    empty_state_value = self.BASSINET_EMPTY_STATE
    self.set_state(empty_state_value.state, empty_state_value)
def on_state_changed(self, state, old_value, new_value, from_init):
    """React to state changes: run removal moments and broadcast baby moods."""
    super().on_state_changed(state, old_value, new_value, from_init)
    removal_moment = self.REMOVAL_MOMENT_STATES.get(new_value)
    if removal_moment is not None:
        if self._sim_info is not None:
            removal_moment.execute_removal_moment(self)
        else:
            # sim_info not bound yet (e.g. mid-load); deferred until
            # on_finalize_load runs it.
            self._pending_removal_moment = removal_moment
        return
    if self.manager is not None and new_value in self.BABY_MOOD_MAPPING:
        # Mirror the baby state as a sim mood update for the client UI.
        mood = self.BABY_MOOD_MAPPING[new_value]
        mood_msg = Commodities_pb2.MoodUpdate()
        mood_msg.sim_id = self.id
        mood_msg.mood_key = mood.guid64
        mood_msg.mood_intensity = 1
        distributor.shared_messages.add_object_message(self, MSG_SIM_MOOD_UPDATE, mood_msg, False)
def load_object(self, object_data, **kwargs):
    # Resolve the occupant sim_info from the persisted sim id before the
    # base-class load runs.
    self._sim_info = services.sim_info_manager().get(self.sim_id)
    super().load_object(object_data, **kwargs)
    if self._sim_info is not None:
        services.daycare_service().refresh_daycare_status(self._sim_info)
def _validate_location(self):
    """Relocate the bassinet if it sits outside the active plex footprint."""
    plex_service = services.get_plex_service()
    if (plex_service.is_active_zone_a_plex()
            and plex_service.get_plex_zone_at_position(self.position, self.level) is None):
        self.place_in_good_location()
def on_finalize_load(self):
    """Finish loading: bind or replace the bassinet, validate its placement,
    and run any removal moment deferred from on_state_changed."""
    sim_info = services.sim_info_manager().get(self.sim_id)
    if sim_info is None or sim_info.household is not services.active_lot().get_household():
        # Occupant is gone or does not belong to this lot's household: swap
        # this bassinet out.
        replace_bassinet(sim_info, bassinet=self)
    else:
        self.set_sim_info(sim_info)
        self._validate_location()
        if self._pending_removal_moment is not None:
            self._pending_removal_moment.execute_removal_moment(self)
            self._pending_removal_moment = None
    super().on_finalize_load()
| 68.148148 | 1,278 | 0.703684 |
65a315eb450e39f249ce470dd6f5266e50bdf78e | 755 | py | Python | profiles_api/migrations/0005_auto_20220108_2217.py | MohamedAboubasha/profiles-rest-api | 0e5ede6b4771396c9f3399d05be06c19b9ac1530 | [
"MIT"
] | null | null | null | profiles_api/migrations/0005_auto_20220108_2217.py | MohamedAboubasha/profiles-rest-api | 0e5ede6b4771396c9f3399d05be06c19b9ac1530 | [
"MIT"
] | null | null | null | profiles_api/migrations/0005_auto_20220108_2217.py | MohamedAboubasha/profiles-rest-api | 0e5ede6b4771396c9f3399d05be06c19b9ac1530 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2022-01-08 22:17
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated schema migration: effectively renames Image.images_path
    # to image_path (expressed as remove+add, so existing column data is
    # dropped) and re-declares Video.video_path as CharField(max_length=255).

    dependencies = [
        ('profiles_api', '0004_auto_20220108_2213'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='image',
            name='images_path',
        ),
        migrations.AddField(
            model_name='image',
            name='image_path',
            # The default only backfills existing rows during the migration;
            # preserve_default=False drops it afterwards.
            # NOTE(review): timezone.now as a CharField default looks like an
            # auto-generated placeholder -- verify this is intended.
            field=models.CharField(default=django.utils.timezone.now, max_length=255),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='video',
            name='video_path',
            field=models.CharField(max_length=255),
        ),
    ]
| 25.166667 | 86 | 0.589404 |
bba02f26cd6b14c4c938d7cdb4895e086b36d398 | 967 | py | Python | virtual/lib/python3.6/site-packages/pip/_internal/distributions/__init__.py | Ruterana/clone_instagram | a068587ef1d1a93ec8d1c08086bf11c0fb274b83 | [
"MIT"
] | 102 | 2016-10-09T01:33:00.000Z | 2022-01-28T01:03:23.000Z | virtual/lib/python3.6/site-packages/pip/_internal/distributions/__init__.py | Ruterana/clone_instagram | a068587ef1d1a93ec8d1c08086bf11c0fb274b83 | [
"MIT"
] | 88 | 2020-03-09T01:20:59.000Z | 2021-11-15T17:49:34.000Z | virtual/lib/python3.6/site-packages/pip/_internal/distributions/__init__.py | Krasivaya/Tracks | c18d1c9222dff39e4678d44495a8a7d9434339ff | [
"MIT"
] | 50 | 2017-05-10T06:25:36.000Z | 2021-08-02T20:28:54.000Z | from pip._internal.distributions.source.legacy import SourceDistribution
from pip._internal.distributions.wheel import WheelDistribution
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from pip._internal.distributions.base import AbstractDistribution
from pip._internal.req.req_install import InstallRequirement
def make_distribution_for_install_requirement(install_req):
    # type: (InstallRequirement) -> AbstractDistribution
    """Returns a Distribution for the given InstallRequirement
    """
    # Editable requirements always go through the legacy source-distribution
    # path; only non-editable wheels get WheelDistribution.
    if install_req.editable or not install_req.is_wheel:
        return SourceDistribution(install_req)
    return WheelDistribution(install_req)
| 38.68 | 77 | 0.791107 |
a776e5ba58684c4c1e1d45b6eebff4b09e5e4cb0 | 7,157 | py | Python | config_builder.py | joshuabaird/docker-dd-agent | 7a9bcdf8589091d569c2eadb7722ccab2b8b4271 | [
"MIT"
] | null | null | null | config_builder.py | joshuabaird/docker-dd-agent | 7a9bcdf8589091d569c2eadb7722ccab2b8b4271 | [
"MIT"
] | null | null | null | config_builder.py | joshuabaird/docker-dd-agent | 7a9bcdf8589091d569c2eadb7722ccab2b8b4271 | [
"MIT"
] | null | null | null | #!/opt/datadog-agent/embedded/bin/python
'''
This script is used to generate the configuration of the datadog agent.
'''
from os import getenv, environ
import logging
from urllib2 import urlopen, URLError, HTTPError
from socket import getdefaulttimeout, setdefaulttimeout
from ConfigParser import ConfigParser
class ConfBuilder(object):
    '''
    Builds the Datadog agent configuration (datadog.conf) from environment
    variables by loading the existing file, overriding properties, and
    saving it back in place.
    '''
    def __init__(self):
        # excludes from the generic variables parsing the ones that have a
        # certain logic wrapped around them
        self.exclude_from_generic = [
            'DD_API_KEY', 'DD_API_KEY_FILE', 'DD_HOME',
            'DD_START_AGENT', 'DD_LOGS_STDOUT'
        ]
        # Agent config lives under /etc/dd-agent by default, or under
        # $DD_HOME/agent when DD_HOME is set.
        dd_agent_root = '/etc/dd-agent'
        dd_home = getenv('DD_HOME')
        if dd_home is not None:
            dd_agent_root = '{}/agent'.format(dd_home)
        self.datadog_conf_file = '{}/datadog.conf'.format(dd_agent_root)
        # This will store the config parser object that is used in the different functions
        self.config = None

    def load_config(self, config_file):
        '''
        Loads a config file using ConfigParser into self.config
        '''
        self.config = ConfigParser()
        # import existing config from file
        with open(config_file, 'rb') as cfd:
            self.config.readfp(cfd)

    def save_config(self, config_file):
        '''
        Saves a ConfigParser object (self.config) to the given file
        '''
        # Guard: load_config() (or manual assignment) must have run first.
        if self.config is None:
            logging.error('config object needs to be created before saving anything')
            exit(1)
        with open(config_file, 'wb') as cfd:
            self.config.write(cfd)

    def build_datadog_conf(self):
        '''
        Builds the datadog.conf based on the environment variables
        '''
        self.load_config(self.datadog_conf_file)

        ##### Core config #####
        self.set_api_key()
        self.set_from_env_mapping('DD_HOSTNAME', 'hostname')
        self.set_from_env_mapping('EC2_TAGS', 'collect_ec2_tags')
        # The TAGS env variable supersedes DD_TAGS: each call overwrites the
        # same property, so the later call wins when both are set.
        self.set_from_env_mapping('DD_TAGS', 'tags')
        self.set_from_env_mapping('TAGS', 'tags')
        self.set_from_env_mapping('DD_COLLECT_LABELS_AS_TAGS', 'docker_labels_as_tags')
        # The LOG_LEVEL env variable supersedes DD_LOG_LEVEL (same mechanism).
        self.set_from_env_mapping('DD_LOG_LEVEL', 'log_level')
        self.set_from_env_mapping('LOG_LEVEL', 'log_level')
        self.set_from_env_mapping('NON_LOCAL_TRAFFIC', 'non_local_traffic')
        self.set_from_env_mapping('DD_URL', 'dd_url')
        self.set_from_env_mapping('STATSD_METRIC_NAMESPACE', 'statsd_metric_namespace')
        self.set_from_env_mapping('USE_DOGSTATSD', 'use_dogstatsd')
        self.set_from_env_mapping('DD_HISTOGRAM_PERCENTILES', 'histogram_percentiles')

        ##### Proxy config #####
        self.set_from_env_mapping('PROXY_HOST', 'proxy_host')
        self.set_from_env_mapping('PROXY_PORT', 'proxy_port')
        self.set_from_env_mapping('PROXY_USER', 'proxy_user')
        self.set_from_env_mapping('PROXY_PASSWORD', 'proxy_password')

        ##### Service discovery #####
        self.set_from_env_mapping('SD_BACKEND', 'service_discovery_backend')
        self.set_sd_backend_host()
        self.set_from_env_mapping('SD_BACKEND_PORT', 'sd_backend_port')
        self.set_from_env_mapping('SD_TEMPLATE_DIR', 'sd_template_dir')
        self.set_from_env_mapping('SD_CONSUL_TOKEN', 'consul_token')
        self.set_from_env_mapping('SD_BACKEND_USER', 'sd_backend_username')
        self.set_from_env_mapping('SD_BACKEND_PASSWORD', 'sd_backend_password')

        # Magic trick to automatically add properties not yet defined in the doc:
        # any DD_CONF_<NAME> env var becomes a <name> property (see set_generics).
        self.set_generics('DD_CONF_')

        self.save_config(self.datadog_conf_file)

    def set_api_key(self):
        '''
        Used for building datadog.conf
        Gets the API key from the environment (DD_API_KEY, falling back to
        API_KEY) or from the file named by DD_API_KEY_FILE, and sets it in
        the configuration. Exits with an error when no key is found.
        '''
        api_key = getenv('DD_API_KEY', getenv('API_KEY', ''))
        keyfile = getenv('DD_API_KEY_FILE')
        # NOTE(review): when DD_API_KEY_FILE is set and readable, its content
        # (including any trailing newline) replaces the env-provided key.
        if keyfile is not None:
            try:
                with open(keyfile, 'r') as kfile:
                    api_key = kfile.read()
            except IOError:
                logging.warning('Unable to read the content of they key file specified in DD_API_KEY_FILE')
        if len(api_key) <= 0:
            logging.error('You must set API_KEY environment variable or include a DD_API_KEY_FILE to run the Datadog Agent container')
            exit(1)
        self.set_property('api_key', api_key)

    def set_from_env_mapping(self, env_var_name, property_name, section='Main', action=None):
        '''
        Sets a property using the corresponding environment variable if it exists
        It also returns the value in case you want to play with it
        If action is specified to 'store_true', whatever the content of the
        env variable is (if exists), the value of the property will be true
        Returns None when the environment variable is unset.
        '''
        _val = getenv(env_var_name)
        if _val is not None:
            if action == 'store_true':
                _val = 'true'
            self.set_property(property_name, _val, section)
            return _val
        return None

    def set_sd_backend_host(self):
        '''
        Used for building datadog.conf
        Sets sd_config_backend and sd_backend_host depending on the environment.
        When SD_BACKEND_HOST is unset, falls back to the EC2 instance-metadata
        endpoint (best effort, 1 second timeout).
        '''
        _config_backend = getenv('SD_CONFIG_BACKEND')
        if _config_backend is not None:
            self.set_property('sd_config_backend', _config_backend)
        _backend_host = getenv('SD_BACKEND_HOST')
        if _backend_host is not None:
            self.set_property('sd_backend_host', _backend_host)
        else:
            # Temporarily shrink the global socket timeout so a non-EC2 host
            # doesn't hang; always restore the previous value afterwards.
            _timeout = getdefaulttimeout()
            try:
                setdefaulttimeout(1)
                _ec2_ip = urlopen('http://169.254.169.254/latest/meta-data/local-ipv4')
                self.set_property('sd_backend_host', _ec2_ip.read())
            except (URLError, HTTPError):
                pass  # silent fail on purpose
            setdefaulttimeout(_timeout)

    def set_generics(self, prefix='DD_CONF_'):
        '''
        Looks for environment variables starting by the given prefix and consider that the
        rest of the variable name is the name of the property to set
        (lower-cased). Variables listed in self.exclude_from_generic are skipped.
        '''
        for dd_var in environ:
            if dd_var.startswith(prefix) and dd_var.upper() not in self.exclude_from_generic:
                if len(dd_var) > 0:
                    self.set_property(dd_var[len(prefix):].lower(), environ[dd_var])

    def set_property(self, property_name, property_value, section='Main'):
        '''
        Sets the given property to the given value in the configuration
        (defaults to the [Main] section). Requires load_config() to have run.
        '''
        if self.config is None:
            logging.error('config object needs to be created before setting properties')
            exit(1)
        self.config.set(section, property_name, property_value)
if __name__ == '__main__':
    # Entry point: rebuild datadog.conf from the current environment.
    builder = ConfBuilder()
    builder.build_datadog_conf()
| 42.60119 | 134 | 0.644125 |
88e8ca636236f2395a4cf3d896876395e0ac6ca5 | 9,607 | py | Python | galois/_fields/_linalg.py | iyanmv/galois | a5e6386a684e3e0b47af608217002795dc25c702 | [
"MIT"
] | 65 | 2021-02-20T04:07:59.000Z | 2022-03-13T10:14:58.000Z | galois/_fields/_linalg.py | iyanmv/galois | a5e6386a684e3e0b47af608217002795dc25c702 | [
"MIT"
] | 303 | 2021-02-22T19:36:25.000Z | 2022-03-31T14:48:15.000Z | galois/_fields/_linalg.py | iyanmv/galois | a5e6386a684e3e0b47af608217002795dc25c702 | [
"MIT"
] | 9 | 2021-03-11T07:40:51.000Z | 2022-03-06T20:13:17.000Z | """
A module that contains linear algebra routines over Galois fields.
"""
import numpy as np
from ._dtypes import DTYPES
def _lapack_linalg(a, b, function, out=None, n_sum=None):
    """
    In prime fields GF(p), it's much more efficient to use LAPACK/BLAS implementations of linear algebra
    and then reduce modulo p rather than compute manually.

    `function` is the numpy routine to delegate to (np.dot, np.inner, ...).
    `n_sum` is the number of products accumulated per output element; it is
    used to bound the largest intermediate value so an overflow-safe dtype
    can be chosen. Defaults to the longest axis of `a`.
    """
    assert type(a).is_prime_field
    field = type(a)
    characteristic = field.characteristic

    # Determine the return data-type which is the minimum of the two inputs' data-types
    if np.object_ in [a.dtype, b.dtype]:
        return_dtype = np.object_
    else:
        return_dtype = a.dtype if np.iinfo(a.dtype).max < np.iinfo(b.dtype).max else b.dtype

    # Drop the field wrapper so numpy's native kernels are used.
    a = a.view(np.ndarray)
    b = b.view(np.ndarray)

    # Determine the minimum dtype to hold the entire product and summation without overflowing
    if n_sum is None:
        n_sum = 1 if len(a.shape) == 0 else max(a.shape)
    max_value = n_sum * (characteristic - 1)**2
    dtypes = [dtype for dtype in DTYPES if np.iinfo(dtype).max >= max_value]
    # Fall back to arbitrary-precision Python ints when no fixed dtype is big enough.
    dtype = np.object_ if len(dtypes) == 0 else dtypes[0]
    a = a.astype(dtype)
    b = b.astype(dtype)

    # Compute result using native numpy LAPACK/BLAS implementation
    if function in [np.inner, np.vdot]:
        # These functions don't have an `out` keyword argument
        c = function(a, b)
    else:
        c = function(a, b, out=out)
    c = c % characteristic  # Reduce the result mod p

    if np.isscalar(c):
        # TODO: Sometimes the scalar c is a float?
        c = field(int(c), dtype=return_dtype)
    else:
        c = c.astype(return_dtype).view(field)

    return c
###############################################################################
# Matrix/vector products
###############################################################################
def dot(a, b, out=None):  # pylint: disable=unused-argument
    """
    https://numpy.org/doc/stable/reference/generated/numpy.dot.html
    """
    if type(a) is not type(b):
        raise TypeError(f"Operation 'dot' requires both arrays be in the same Galois field, not {type(a)} and {type(b)}.")

    # Prime fields can delegate straight to the BLAS-backed helper.
    if type(a).is_prime_field:
        return _lapack_linalg(a, b, np.dot, out=out)

    ndims = (a.ndim, b.ndim)
    if 0 in ndims:                      # scalar times anything
        return a * b
    if ndims == (1, 1):                 # vector-vector inner product
        return np.sum(a * b)
    if ndims == (2, 2):                 # matrix-matrix product
        return np.matmul(a, b, out=out)
    if a.ndim >= 2 and b.ndim == 1:     # matrix-vector product
        return np.sum(a * b, axis=-1, out=out)
    raise NotImplementedError("Currently 'dot' is only supported up to 2-D matrices. Please open a GitHub issue at https://github.com/mhostetter/galois/issues.")
def vdot(a, b):
    """
    https://numpy.org/doc/stable/reference/generated/numpy.vdot.html
    """
    if type(a) is not type(b):
        raise TypeError(f"Operation 'vdot' requires both arrays be in the same Galois field, not {type(a)} and {type(b)}.")

    if type(a).is_prime_field:
        return _lapack_linalg(a, b, np.vdot)

    a_flat = a.flatten()
    # Reshaping b to a's shape reproduces numpy's own error behavior for
    # mismatched sizes.
    b_flat = b.flatten().reshape(a_flat.shape)
    return np.sum(a_flat * b_flat)
def inner(a, b):
    """
    https://numpy.org/doc/stable/reference/generated/numpy.inner.html#numpy.inner
    """
    if type(a) is not type(b):
        raise TypeError(f"Operation 'inner' requires both arrays be in the same Galois field, not {type(a)} and {type(b)}.")

    if type(a).is_prime_field:
        return _lapack_linalg(a, b, np.inner)

    # Scalar operand: inner product degenerates to elementwise multiply.
    if a.ndim == 0 or b.ndim == 0:
        return a * b
    if a.shape[-1] != b.shape[-1]:
        raise ValueError(f"Operation 'inner' requires `a` and `b` to have the same last dimension, not {a.shape} and {b.shape}.")
    return np.sum(a * b, axis=-1)
def outer(a, b, out=None):  # pylint: disable=unused-argument
    """
    https://numpy.org/doc/stable/reference/generated/numpy.outer.html#numpy.outer
    """
    if type(a) is not type(b):
        raise TypeError(f"Operation 'outer' requires both arrays be in the same Galois field, not {type(a)} and {type(b)}.")

    if type(a).is_prime_field:
        # Each output element is a single product, so the overflow bound
        # only needs n_sum=1.
        return _lapack_linalg(a, b, np.outer, out=out, n_sum=1)
    return np.multiply.outer(a.ravel(), b.ravel(), out=out)
###############################################################################
# Matrix decomposition routines
###############################################################################
def row_reduce(A, ncols=None):
    """
    Return the (partial) reduced row echelon form of `A` via Gauss-Jordan
    elimination, considering only the first `ncols` columns for pivots.
    """
    if not A.ndim == 2:
        raise ValueError(f"Only 2-D matrices can be converted to reduced row echelon form, not {A.ndim}-D.")

    if ncols is None:
        ncols = A.shape[1]
    reduced = A.copy()
    pivot_row = 0

    for col in range(ncols):
        # Look for a usable pivot in this column, at or below `pivot_row`.
        candidates = np.nonzero(reduced[pivot_row:, col])[0]
        if candidates.size == 0:
            continue
        found = pivot_row + candidates[0]

        # Move the pivot row into place and normalize the pivot to 1.
        reduced[[pivot_row, found], :] = reduced[[found, pivot_row], :]
        reduced[pivot_row, :] /= reduced[pivot_row, col]

        # Eliminate every other non-zero entry in this column.
        others = np.nonzero(reduced[:, col])[0].tolist()
        others.remove(pivot_row)
        reduced[others, :] -= np.multiply.outer(reduced[others, col], reduced[pivot_row, :])

        pivot_row += 1
        if pivot_row == reduced.shape[0]:
            break

    return reduced
def lu_decompose(A):
    """
    Factor the square matrix `A` as `A = L @ U` with unit lower-triangular
    `L` (no pivoting; raises when a zero pivot is encountered).
    """
    if not (A.ndim == 2 and A.shape[0] == A.shape[1]):
        raise ValueError(f"Argument `A` must be a square matrix, not {A.shape}.")

    field = type(A)
    n = A.shape[0]
    U = A.copy()
    L = field.Identity(n)

    for col in range(0, n - 1):
        if U[col, col] == 0:
            raise ValueError("The LU decomposition of `A` does not exist. Use the LUP decomposition instead.")
        # Multipliers that zero out the entries below the pivot.
        multipliers = U[col+1:, col] / U[col, col]
        U[col+1:, :] -= np.multiply.outer(multipliers, U[col, :])
        L[col+1:, col] = multipliers

    return L, U
def lup_decompose(A):
    """
    Factor the square matrix `A` as `P @ A = L @ U` with partial (row)
    pivoting. Returns (L, U, P) where L is unit lower-triangular, U is
    upper-triangular, and P is the permutation matrix.
    """
    if not (A.ndim == 2 and A.shape[0] == A.shape[1]):
        raise ValueError(f"Argument `A` must be a square matrix, not {A.shape}.")

    field = type(A)
    n = A.shape[0]
    Ai = A.copy()
    L = field.Zeros((n,n))
    P = field.Identity(n)

    for i in range(0, n-1):
        if Ai[i,i] == 0:
            idxs = np.nonzero(Ai[i:,i])[0]  # The first non-zero entry in column `i` below row `i`
            if idxs.size == 0:
                # Whole column is zero: no elimination needed, just mark the
                # unit diagonal and move on.
                L[i,i] = 1
                continue
            j = i + idxs[0]

            # Swap rows `i` and `j` in all three matrices so the L entries
            # already computed stay consistent with the permutation.
            P[[i,j],:] = P[[j,i],:]
            Ai[[i,j],:] = Ai[[j,i],:]
            L[[i,j],:] = L[[j,i],:]

        l = Ai[i+1:,i] / Ai[i,i]
        Ai[i+1:,:] -= np.multiply.outer(l, Ai[i,:])  # Zero out rows below row `i`
        L[i,i] = 1  # Set 1 on the diagonal
        L[i+1:,i] = l

    L[-1,-1] = 1  # Set the final diagonal to 1
    U = Ai
    return L, U, P
###############################################################################
# Matrix inversions, solutions, rank, etc
###############################################################################
def matrix_rank(A):
    """
    Rank of `A`: the number of non-zero rows in its reduced row echelon form.
    """
    nonzero_rows = ~np.all(row_reduce(A) == 0, axis=1)
    return np.sum(nonzero_rows)
def inv(A):
    """
    Multiplicative inverse of the square matrix `A` via Gauss-Jordan
    elimination on the augmented matrix [A | I].
    """
    if not (A.ndim == 2 and A.shape[0] == A.shape[1]):
        raise np.linalg.LinAlgError(f"Argument `A` must be square, not {A.shape}.")

    field = type(A)
    n = A.shape[0]
    # Row reduce [A | I]; if A is invertible the left half becomes I and the
    # right half becomes A^-1.
    augmented = np.concatenate((A, field.Identity(n, dtype=A.dtype)), axis=-1)
    augmented_rre = row_reduce(augmented, ncols=n)

    # Full rank in the left half is required for invertibility.
    rank = np.sum(~np.all(augmented_rre[:,0:n] == 0, axis=1))
    if not rank == n:
        raise np.linalg.LinAlgError(f"Argument `A` is singular and not invertible because it does not have full rank of {n}, but rank of {rank}.")

    return augmented_rre[:,-n:]
def triangular_det(A):
    """
    Determinant of a triangular matrix: the product of its diagonal entries.
    """
    if not (A.ndim == 2 and A.shape[0] == A.shape[1]):
        raise np.linalg.LinAlgError(f"Argument `A` must be square, not {A.shape}.")
    indices = np.arange(0, A.shape[0])
    diagonal = A[indices, indices]
    return np.multiply.reduce(diagonal)
def det(A):
    """
    Determinant of the square matrix `A`. Uses hard-coded cofactor
    expansions for 2x2 and 3x3, and an LUP factorization otherwise.
    """
    if not (A.ndim == 2 and A.shape[0] == A.shape[1]):
        raise np.linalg.LinAlgError(f"Argument `A` must be square, not {A.shape}.")

    n = A.shape[0]
    if n == 2:
        return A[0,0]*A[1,1] - A[0,1]*A[1,0]
    if n == 3:
        return A[0,0]*(A[1,1]*A[2,2] - A[1,2]*A[2,1]) - A[0,1]*(A[1,0]*A[2,2] - A[1,2]*A[2,0]) + A[0,2]*(A[1,0]*A[2,1] - A[1,1]*A[2,0])

    # det(A) = (-1)^S * det(L) * det(U), where S is the number of row swaps
    # recorded in the permutation matrix P.
    L, U, P = lup_decompose(A)
    diag = np.arange(0, n)
    moved_rows = n - np.count_nonzero(P[diag, diag])
    swaps = max(moved_rows - 1, 0)
    return (-1)**swaps * triangular_det(L) * triangular_det(U)
def solve(A, b):
    """
    Solve the linear system `A @ x = b` for `x` by computing `A^-1 @ b`.
    `b` may be a vector or a matrix of stacked right-hand sides.
    """
    if type(A) is not type(b):
        raise TypeError(f"Arguments `A` and `b` must be of the same Galois field array class, not {type(A)} and {type(b)}.")
    if not (A.ndim == 2 and A.shape[0] == A.shape[1]):
        raise np.linalg.LinAlgError(f"Argument `A` must be square, not {A.shape}.")
    if b.ndim not in [1, 2]:
        raise ValueError(f"Argument `b` must have dimension equal to A or one less, not {b.ndim}.")
    if A.shape[-1] != b.shape[0]:
        raise ValueError(f"The last dimension of `A` must equal the first dimension of `b`, not {A.shape} and {b.shape}.")

    return inv(A) @ b
| 33.357639 | 165 | 0.554387 |
b9dce65fd2034c0985878a1a51c636acc2fa0dec | 12,201 | py | Python | jbossply/jbossparser.py | EnigmaBridge/jbossply | 44b30b15982cae781f0c356fab7263751b20b4d0 | [
"MIT"
] | 3 | 2017-02-23T15:31:25.000Z | 2018-07-05T13:41:57.000Z | jbossply/jbossparser.py | EnigmaBridge/jbossply | 44b30b15982cae781f0c356fab7263751b20b4d0 | [
"MIT"
] | 1 | 2017-07-13T09:14:23.000Z | 2017-07-13T09:14:23.000Z | jbossply/jbossparser.py | EnigmaBridge/jbossply | 44b30b15982cae781f0c356fab7263751b20b4d0 | [
"MIT"
] | 7 | 2017-03-07T07:16:27.000Z | 2021-01-21T03:00:58.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 DeWitt Clinton All Rights Reserved.
# Copyright 2009 DeWitt Clinton All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'ph4r05@gmail.com'
import ply
import ply.lex
import ply.yacc
# The list of tokens to be extracted by the JbossLexer and parsed by
# the JbossParser. These tokens form the contract between the
# JbossLexer and the JbossParser and any changes here will need to
# be synchronized among those classes.
# The full set of token names produced by JbossLexer and consumed by
# JbossParser; they form the contract between the two classes.
JBOSS_TOKENS = [
    # Initial state tokens
    'BEGIN_ARRAY',
    'BEGIN_OBJECT',
    'END_ARRAY',
    'END_OBJECT',
    'NAME_SEPARATOR',
    'VALUE_SEPARATOR',
    'QUOTATION_MARK',
    'FALSE',
    'TRUE',
    'NULL',
    'UNDEFINED',
    'DECIMAL_POINT',
    'DIGITS',
    'E',
    'MINUS',
    'PLUS',
    'ZERO',

    # String state tokens
    'UNESCAPED',
    'ESCAPE',

    # Escaped state tokens
    'REVERSE_SOLIDUS',
    'SOLIDUS',
    'BACKSPACE_CHAR',
    'FORM_FEED_CHAR',
    'LINE_FEED_CHAR',
    'CARRIAGE_RETURN_CHAR',
    'TAB_CHAR',
    'UNICODE_HEX'
]
# noinspection PyPep8Naming,PyMethodMayBeStatic,PySingleQuotedDocstring
class JbossLexer(object):
    """A class-based wrapper around the ply.lex instance.

    The JbossLexer tokenizes an input string (JBoss CLI output, a
    JSON-like syntax that uses '=>' as the name separator) and produces
    LexToken instances corresponding to the JBOSS_TOKENS values.

    NOTE: the docstrings of the t_* rule methods below are the token
    regular expressions consumed by ply.lex — they are functional, not
    documentation, and must not be edited casually.
    """

    def __init__(self, **kwargs):
        """Constructs the JbossLexer based on the tokenization rules herein.

        Successful construction builds the ply.lex instance and sets
        self.lexer.
        """
        self.lexer = ply.lex.lex(module=self, **kwargs)

    # The JbossLexer uses the JBOSS_TOKENS values as a contract between
    # the lexer and the parser.
    tokens = JBOSS_TOKENS

    # The JbossLexer has three exclusive states:
    #
    #   default:
    #     The default context, tokenizing objects, arrays, numbers, etc.
    #   string:
    #     Within quote-delimited strings.
    #   escaped:
    #     A single-use state that treats the next character literally.
    states = (
        ('string', 'exclusive'),
        ('escaped', 'exclusive')
    )

    def t_ANY_error(self, t):
        # Report the offending character with its line/column, then skip it
        # so lexing can continue.
        last_cr = self.lexer.lexdata.rfind('\n', 0, t.lexpos)
        if last_cr < 0:
            last_cr = 0
        column = (t.lexpos - last_cr) + 1
        print "Illegal character '%s' at line %d pos %d" % (
            t.value[0], t.lineno, column)
        t.lexer.skip(1)

    # Skips over '\s', '\t', '\n', and '\r' characters in the default state
    t_ignore = '\x20\x09\x0A\x0D'

    # Default state tokens
    t_BEGIN_ARRAY = r'\x5B'            # '['
    t_BEGIN_OBJECT = r'\x7B'           # '{'
    t_END_ARRAY = r'\x5D'              # ']'
    t_END_OBJECT = r'\x7D'             # '}'
    t_NAME_SEPARATOR = r'=>'           # '=>' (JBoss CLI style, not JSON's ':')
    t_VALUE_SEPARATOR = r'\x2C'        # ','
    t_FALSE = r'\x66\x61\x6c\x73\x65'  # 'false'
    t_TRUE = r'\x74\x72\x75\x65'       # 'true'
    t_NULL = r'\x6e\x75\x6c\x6c'       # 'null'
    t_UNDEFINED = r'undefined'         # 'undefined' (JBoss extension)
    t_DECIMAL_POINT = r'\x2E'          # '.'
    t_DIGITS = r'[\x30-\x39]+'         # '0'..'9'
    t_E = r'[\x45\x65]'                # 'e' or 'E'
    t_MINUS = r'\x2D'                  # '-'
    t_PLUS = r'\x2B'                   # '+'
    t_ZERO = r'\x30'                   # '0'

    # Enters the string state on an opening quotation mark
    def t_QUOTATION_MARK(self, t):
        r'\x22'  # '"'
        t.lexer.push_state('string')
        return t

    # Don't skip over any tokens inside the string state
    t_string_ignore = ''

    # TODO(dewitt): Verify that this matches the correct range, the spec
    # says '%x5D-10FFFF' but most pythons by default will not handle that
    def t_string_UNESCAPED(self, t):
        r'[\x20-\x21,\x23-\x5B,\x5D-\xFF]+'
        # Normalize raw byte strings to unicode (assumed UTF-8 input).
        t.value = t.value if isinstance(t.value, unicode) else unicode(t.value, encoding='utf8')
        return t

    # Exits the string state on an unescaped closing quotation mark
    def t_string_QUOTATION_MARK(self, t):
        r'\x22'  # '"'
        t.lexer.pop_state()
        return t

    # Enter the escaped state on a '\' character
    def t_string_ESCAPE(self, t):
        r'\x5C'  # '\'
        t.lexer.push_state('escaped')
        return t

    # Don't skip over any tokens inside the escaped state
    t_escaped_ignore = ''

    # Each escaped-state rule consumes exactly one escape target, replaces
    # t.value with the decoded character where needed, and pops back to the
    # string state.
    def t_escaped_QUOTATION_MARK(self, t):
        r'\x22'  # '"'
        t.lexer.pop_state()
        return t

    def t_escaped_REVERSE_SOLIDUS(self, t):
        r'\x5C'  # '\'
        t.lexer.pop_state()
        return t

    def t_escaped_SOLIDUS(self, t):
        r'\x2F'  # '/'
        t.lexer.pop_state()
        return t

    def t_escaped_BACKSPACE_CHAR(self, t):
        r'\x62'  # 'b'
        t.lexer.pop_state()
        t.value = unichr(0x0008)
        return t

    def t_escaped_FORM_FEED_CHAR(self, t):
        r'\x66'  # 'f'
        t.lexer.pop_state()
        t.value = unichr(0x000c)
        return t

    def t_escaped_CARRIAGE_RETURN_CHAR(self, t):
        r'\x72'  # 'r'
        t.lexer.pop_state()
        t.value = unichr(0x000d)
        return t

    def t_escaped_LINE_FEED_CHAR(self, t):
        r'\x6E'  # 'n'
        t.lexer.pop_state()
        t.value = unichr(0x000a)
        return t

    def t_escaped_TAB_CHAR(self, t):
        r'\x74'  # 't'
        t.lexer.pop_state()
        t.value = unichr(0x0009)
        return t

    def t_escaped_UNICODE_HEX(self, t):
        r'\x75[\x30-\x39,\x41-\x46,\x61-\x66]{4}'  # 'uXXXX'
        t.lexer.pop_state()
        return t

    def tokenize(self, data, *args, **kwargs):
        """Invoke the lexer on an input string an return the list of tokens.

        This is relatively inefficient and should only be used for
        testing/debugging as it slurps up all tokens into one list.

        Args:
            data: The input to be tokenized.
        Returns:
            A list of LexTokens
        """
        self.lexer.input(data)
        tokens = list()
        while True:
            token = self.lexer.token()
            if not token:
                break
            tokens.append(token)
        return tokens
class JbossParser(object):
    """A class-based wrapper around the ply.yacc instance.

    The JbossParser takes the tokenized output from the JbossLexer and
    parses it according to the JBoss-CLI-flavored JSON grammar rules.
    The output is a python data structure that represents the input data.

    NOTE: the docstrings of the p_* rule methods below are the grammar
    productions consumed by ply.yacc — they are functional, not
    documentation, and must not be edited casually.
    """

    def __init__(self, lexer=None, **kwargs):
        """Constructs the JbossParser based on the grammar contained herein.

        Successful construction builds the ply.yacc instance and sets
        self.parser.

        Args:
            lexer: A ply.lex or JbossLexer instance that will produce
                JBOSS_TOKENS.
        """
        if lexer is not None:
            if isinstance(lexer, JbossLexer):
                self.lexer = lexer.lexer
            else:
                # Assume that the lexer is a ply.lex instance or similar
                self.lexer = lexer
        else:
            self.lexer = JbossLexer().lexer
        # Quiet yacc by default and don't write parser.out/parsetab files.
        kwargs.setdefault('debug', False)
        kwargs.setdefault('write_tables', False)
        self.parser = ply.yacc.yacc(module=self, **kwargs)

    # The JbossParser uses the JBOSS_TOKENS values as a contract between
    # the lexer and the parser.
    tokens = JBOSS_TOKENS

    # Define the parser

    def p_text(self, p):
        """text : object
                | array"""
        p[0] = p[1]

    def p_value(self, p):
        """value : object
                 | array
                 | number
                 | string"""
        p[0] = p[1]

    def p_value_false(self, p):
        """value : FALSE"""
        p[0] = False

    def p_value_true(self, p):
        """value : TRUE"""
        p[0] = True

    def p_value_null(self, p):
        """value : NULL"""
        p[0] = None

    def p_value_undefined(self, p):
        """value : UNDEFINED"""
        # JBoss 'undefined' is mapped to Python None, same as 'null'.
        p[0] = None

    def p_object(self, p):
        """object : BEGIN_OBJECT members END_OBJECT"""
        p[0] = dict(p[2])

    def p_members(self, p):
        """members :
                   | members member VALUE_SEPARATOR
                   | members member"""
        # Accumulate (key, value) pairs; trailing separators are optional.
        if len(p) == 1:
            p[0] = list()
        else:
            p[1].append(p[2])
            p[0] = p[1]

    def p_member(self, p):
        """member : string NAME_SEPARATOR value"""
        p[0] = (p[1], p[3])

    def p_values(self, p):
        """values :
                  | values value VALUE_SEPARATOR
                  | values value"""
        if len(p) == 1:
            p[0] = list()
        else:
            p[1].append(p[2])
            p[0] = p[1]

    def p_array(self, p):
        """array : BEGIN_ARRAY values END_ARRAY"""
        p[0] = p[2]

    def p_number_positive(self, p):
        """number : integer
                  | float"""
        p[0] = p[1]

    def p_number_negative(self, p):
        """number : MINUS integer
                  | MINUS float"""
        p[0] = -p[2]

    def p_integer(self, p):
        """integer : int"""
        p[0] = p[1]

    def p_integer_exp(self, p):
        """integer : int exp"""
        p[0] = p[1] * (10 ** p[2])

    def p_number_float(self, p):
        """float : int frac"""
        p[0] = p[1] + p[2]

    def p_number_float_exp(self, p):
        """float : int frac exp"""
        p[0] = (p[1] + p[2]) * (10 ** p[3])

    def p_exp_negative(self, p):
        """exp : E MINUS DIGITS"""
        p[0] = -int(p[3])

    def p_exp(self, p):
        """exp : E DIGITS"""
        p[0] = int(p[2])

    def p_exp_positive(self, p):
        """exp : E PLUS DIGITS"""
        p[0] = int(p[3])

    def p_frac(self, p):
        """frac : DECIMAL_POINT DIGITS"""
        p[0] = float('.' + p[2])

    def p_int_zero(self, p):
        """int : ZERO"""
        p[0] = int(0)

    def p_int_non_zero(self, p):
        """int : DIGITS"""
        if p[1].startswith('0'):
            raise SyntaxError('Leading zeroes are not allowed.')
        p[0] = int(p[1])

    def p_string(self, p):
        """string : QUOTATION_MARK chars QUOTATION_MARK"""
        p[0] = p[2]

    def p_chars(self, p):
        """chars :
                 | chars char"""
        if len(p) == 1:
            p[0] = unicode()
        else:
            p[0] = p[1] + p[2]

    def p_char(self, p):
        """char : UNESCAPED
                | ESCAPE QUOTATION_MARK
                | ESCAPE REVERSE_SOLIDUS
                | ESCAPE SOLIDUS
                | ESCAPE BACKSPACE_CHAR
                | ESCAPE FORM_FEED_CHAR
                | ESCAPE LINE_FEED_CHAR
                | ESCAPE CARRIAGE_RETURN_CHAR
                | ESCAPE TAB_CHAR"""
        # Because the subscript [-1] has special meaning for YaccProduction
        # slices we use [len(p) - 1] to always take the last value.
        p[0] = p[len(p) - 1]

    def p_char_unicode_hex(self, p):
        """char : ESCAPE UNICODE_HEX"""
        # This looks more complicated than it is. The escaped string is of
        # the form \uXXXX and is assigned to p[2]. We take the trailing
        # XXXX string via p[2][1:], parse it as a radix 16 (hex) integer,
        # and convert that to the corresponding unicode character.
        p[0] = unichr(int(p[2][1:], 16))

    def p_error(self, p):
        print "Syntax error at '%s'" % p

    # Invoke the parser
    def parse(self, data, lexer=None, *args, **kwargs):
        """Parse the input data string into a python data structure.

        Args:
            data: An input data string
            lexer: An optional ply.lex instance that overrides the default lexer.
        Returns:
            A python dict or list representing the input data.
        """
        if lexer is None:
            lexer = self.lexer
        return self.parser.parse(data, lexer=lexer, *args, **kwargs)
| 29.05 | 96 | 0.560036 |
57245f3d51d5c79595f06bc7837bdd981650055b | 126 | py | Python | sandbox/lib/pythonbin/Crypto/__init__.py | Jumpscale/sandbox_linux | 2aacd36b467ef30ac83718abfa82c6883b67a02f | [
"Apache-2.0"
] | 1 | 2018-10-30T07:19:27.000Z | 2018-10-30T07:19:27.000Z | sandbox/lib/pythonbin/Crypto/__init__.py | Jumpscale/sandbox_linux | 2aacd36b467ef30ac83718abfa82c6883b67a02f | [
"Apache-2.0"
] | 1 | 2018-04-04T12:13:40.000Z | 2018-05-03T07:57:52.000Z | venv/Lib/site-packages/Crypto/__init__.py | vrian/orsen | 9c10148aba62868fad4b679a4b9b717829586e96 | [
"Apache-2.0"
# Public subpackages re-exported by the top-level Crypto package.
__all__ = ['Cipher', 'Hash', 'Protocol', 'PublicKey', 'Util', 'Signature',
           'IO', 'Math']

# Library version as a (major, minor, micro) tuple.
version_info = (3, 5, 1)
| 25.2 | 74 | 0.539683 |
993c3ff27c79f8b391179c8fc8a8bf7bb0a444f2 | 9,266 | py | Python | src/run.py | molumitu/implementation-matters | c45f9ad76516c27468fe135f268546df38f9829c | [
"MIT"
] | 1 | 2022-02-25T01:34:01.000Z | 2022-02-25T01:34:01.000Z | src/run.py | molumitu/implementation-matters | c45f9ad76516c27468fe135f268546df38f9829c | [
"MIT"
] | null | null | null | src/run.py | molumitu/implementation-matters | c45f9ad76516c27468fe135f268546df38f9829c | [
"MIT"
] | null | null | null |
from src.policy_gradients.agent import Trainer
import git
import numpy as np
import os
import argparse
from src.policy_gradients import models
import sys
import json
import torch
from cox.store import Store, schema_from_dict
# Tee object allows for logging to both stdout and to file
class Tee(object):
    """Duplicate writes to stdout or stderr into a log file.

    Instantiating a Tee globally replaces sys.stdout or sys.stderr with
    the Tee object itself, so every subsequent write goes both to the
    original stream and to `file_path` (opened in append mode by default).
    """
    def __init__(self, file_path, stream_type, mode='a'):
        assert stream_type in ['stdout', 'stderr']
        self.file = open(file_path, mode)
        self.stream_type = stream_type
        # presumably set so callers probing the stream's `errors` attribute
        # (file-like API) don't crash — TODO confirm
        self.errors = 'chill'

        if stream_type == 'stdout':
            self.stream = sys.stdout
            sys.stdout = self  # global side effect: stdout now routes through this Tee
        else:
            self.stream = sys.stderr
            sys.stderr = self  # global side effect: stderr now routes through this Tee

    def write(self, data):
        # Mirror the data to both the file and the original stream.
        self.file.write(data)
        self.stream.write(data)

    def flush(self):
        self.file.flush()
        self.stream.flush()
def main(params):
    """Run one PPO/TRPO training job described by `params`, logging to a cox Store.

    Expects every value in `params` to be non-None (argparse leaves unset
    options as None, which are filled in from the JSON config before this
    is called). Writes metadata, optional checkpoints, and final results
    into a cox Store under params['out_dir'].
    """
    # Fail fast on partially-specified configs.
    for k, v in zip(params.keys(), params.values()):
        assert v is not None, f"Value for {k} is None"

    # #
    # Setup logging
    # #
    metadata_schema = schema_from_dict(params)
    base_directory = params['out_dir']
    store = Store(base_directory)

    # redirect stderr, stdout to file
    """
    def make_err_redirector(stream_name):
        tee = Tee(os.path.join(store.path, stream_name + '.txt'), stream_name)
        return tee

    stderr_tee = make_err_redirector('stderr')
    stdout_tee = make_err_redirector('stdout')
    """

    # Store the experiment path and the git commit for this experiment
    metadata_schema.update({
        'store_path':str,
        'git_commit':str
    })

    repo = git.Repo(path=os.path.dirname(os.path.realpath(__file__)),
                    search_parent_directories=True)

    metadata_table = store.add_table('metadata', metadata_schema)
    metadata_table.update_row(params)
    metadata_table.update_row({
        'store_path':store.path,
        'git_commit':repo.head.object.hexsha
    })

    metadata_table.flush_row()

    # Table for checkpointing models and envs
    if params['save_iters'] > 0:
        store.add_table('checkpoints', {
            'val_model':store.PYTORCH_STATE,
            'policy_model':store.PYTORCH_STATE,
            'envs':store.PICKLE,
            'policy_opt': store.PYTORCH_STATE,
            'val_opt': store.PYTORCH_STATE,
            'iteration':int
        })

    # The trainer object is in charge of sampling trajectories and
    # taking PPO/TRPO optimization steps
    p = Trainer.agent_from_params(params, store=store)
    rewards = []

    # Table for final results
    final_table = store.add_table('final_results', {
        'iteration':int,
        '5_rewards':float,
        'terminated_early':bool
    })

    def finalize_table(iteration, terminated_early, rewards):
        # Record the mean reward over the last five training iterations.
        final_5_rewards = np.array(rewards)[-5:].mean()
        final_table.append_row({
            'iteration':iteration,
            '5_rewards':final_5_rewards,
            'terminated_early':terminated_early
        })

    # Try-except so that we save if the user interrupts the process
    try:
        for i in range(params['train_steps']):
            print('Step %d' % (i,))
            if params['save_iters'] > 0 and i % params['save_iters'] == 0:
                store['checkpoints'].append_row({
                    'iteration':i,
                    'val_model': p.val_model.state_dict(),
                    'policy_model': p.policy_model.state_dict(),
                    'policy_opt': p.POLICY_ADAM.state_dict(),
                    'val_opt': p.val_opt.state_dict(),
                    'envs':p.envs
                })
            mean_reward = p.train_step()
            rewards.append(mean_reward)
        # Record final results once training completes normally.
        finalize_table(i, False, rewards)
    except KeyboardInterrupt:
        # On Ctrl-C, snapshot the current models as "experts" before recording.
        torch.save(p.val_model, 'saved_experts/%s-expert-vf' % (params['game'],))
        torch.save(p.policy_model, 'saved_experts/%s-expert-pol' % (params['game'],))
        # NOTE(review): `i` is unbound here if the interrupt lands before the
        # first loop iteration starts — confirm whether that path matters.
        finalize_table(i, True, rewards)
    store.close()
if __name__ == '__main__':
    # Every hyperparameter defaults to None here; the JSON config supplies
    # the actual values and the two assertion loops below enforce that the
    # argparse and JSON key sets match exactly.
    parser = argparse.ArgumentParser(description='Generate experiments to be run.')

    # Basic setup
    parser.add_argument('--config-path', type=str, required=True,
                        help='json for this config')
    parser.add_argument('--game', type=str, help='gym game')
    parser.add_argument('--mode', type=str, choices=['ppo', 'trpo'],
                        help='pg alg')
    parser.add_argument('--out-dir', type=str,
                        help='out dir for store + logging')
    parser.add_argument('--advanced-logging', type=bool, const=True, nargs='?')
    parser.add_argument('--kl-approximation-iters', type=int,
                        help='how often to do kl approx exps')
    parser.add_argument('--log-every', type=int)
    parser.add_argument('--policy-net-type', type=str,
                        choices=models.POLICY_NETS.keys())
    parser.add_argument('--value-net-type', type=str,
                        choices=models.VALUE_NETS.keys())
    parser.add_argument('--train-steps', type=int,
                        help='num agent training steps')
    parser.add_argument('--cpu', type=bool, const=True, nargs='?')

    # Which value loss to use
    parser.add_argument('--value-calc', type=str,
                        help='which value calculation to use')
    parser.add_argument('--initialization', type=str)

    # General Policy Gradient parameters
    parser.add_argument('--num-actors', type=int, help='num actors (serial)',
                        choices=[1])
    parser.add_argument('--t', type=int,
                        help='num timesteps to run each actor for')
    parser.add_argument('--gamma', type=float, help='discount on reward')
    parser.add_argument('--lambda', type=float, help='GAE hyperparameter')
    parser.add_argument('--val-lr', type=float, help='value fn learning rate')
    parser.add_argument('--val-epochs', type=int, help='value fn epochs')

    # PPO parameters
    parser.add_argument('--adam-eps', type=float, choices=[0, 1e-5], help='adam eps parameter')
    parser.add_argument('--num-minibatches', type=int,
                        help='num minibatches in ppo per epoch')
    parser.add_argument('--ppo-epochs', type=int)
    parser.add_argument('--ppo-lr', type=float,
                        help='if nonzero, use gradient descent w this lr')
    parser.add_argument('--ppo-lr-adam', type=float,
                        help='if nonzero, use adam with this lr')
    parser.add_argument('--anneal-lr', type=bool,
                        help='if we should anneal lr linearly from start to finish')
    parser.add_argument('--clip-eps', type=float, help='ppo clipping')
    parser.add_argument('--entropy-coeff', type=float,
                        help='entropy weight hyperparam')
    parser.add_argument('--value-clipping', type=bool,
                        help='should clip values (w/ ppo eps)')
    parser.add_argument('--value-multiplier', type=float,
                        help='coeff for value loss in combined step ppo loss')
    parser.add_argument('--share-weights', type=bool,
                        help='share weights in valnet and polnet')
    parser.add_argument('--clip-grad-norm', type=float,
                        help='gradient norm clipping (-1 for no clipping)')

    # TRPO parameters
    parser.add_argument('--max-kl', type=float, help='trpo max kl hparam')
    parser.add_argument('--max-kl-final', type=float, help='trpo max kl final')
    parser.add_argument('--fisher-frac-samples', type=float,
                        help='frac samples to use in fisher vp estimate')
    parser.add_argument('--cg-steps', type=int,
                        help='num cg steps in fisher vp estimate')
    parser.add_argument('--damping', type=float, help='damping to use in cg')
    parser.add_argument('--max-backtrack', type=int, help='max bt steps in fvp')

    # Normalization parameters
    parser.add_argument('--norm-rewards', type=str, help='type of rewards normalization',
                        choices=['rewards', 'returns', 'none'])
    parser.add_argument('--norm-states', type=bool, help='should norm states')
    parser.add_argument('--clip-rewards', type=float, help='clip rews eps')
    parser.add_argument('--clip-observations', type=float, help='clips obs eps')

    # Saving
    parser.add_argument('--save-iters', type=int, help='how often to save model (0 = no saving)')

    # For grid searches only
    # parser.add_argument('--cox-experiment-path', type=str, default='')
    args = parser.parse_args()

    json_params = json.load(open(args.config_path))

    # Override the JSON config with the argparse config
    params = vars(args)
    # Sanity-check that the JSON config and the argparse options describe
    # the same parameter set (in both directions).
    missing_keys = []
    for key in json_params:
        if key not in params:
            missing_keys.append(key)
    assert not missing_keys, "Following keys not in args: " + str(missing_keys)

    missing_keys = []
    for key in params:
        if key not in json_params and key != "config_path":
            missing_keys.append(key)
    assert not missing_keys, "Following keys not in JSON: " + str(missing_keys)

    # Command-line values (when given) take precedence over the JSON file.
    json_params.update({k: params[k] for k in params if params[k] is not None})
    params = json_params

    main(params)
| 38.608333 | 97 | 0.616555 |
47baa926366232c167a9daa6a3fa98d8dbebfe29 | 6,071 | py | Python | tests/scipy_ndimage_test.py | joshuagornall/jax | c97cd0a526c12ad81988fd58c1c66df4ddd71813 | [
"ECL-2.0",
"Apache-2.0"
] | 14 | 2021-04-24T03:26:39.000Z | 2022-01-28T14:25:13.000Z | tests/scipy_ndimage_test.py | joshuagornall/jax | c97cd0a526c12ad81988fd58c1c66df4ddd71813 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2022-01-31T13:20:35.000Z | 2022-02-14T13:20:49.000Z | tests/scipy_ndimage_test.py | joshuagornall/jax | c97cd0a526c12ad81988fd58c1c66df4ddd71813 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-08-04T13:36:09.000Z | 2020-11-04T02:58:55.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
import scipy.ndimage as osp_ndimage
from jax import grad
from jax import test_util as jtu
from jax import dtypes
from jax.scipy import ndimage as lsp_ndimage
from jax._src.util import prod
from jax.config import config
config.parse_flags_with_absl()
float_dtypes = jtu.dtypes.floating
int_dtypes = jtu.dtypes.integer
def _fixed_ref_map_coordinates(input, coordinates, order, mode, cval=0.0):
# SciPy's implementation of map_coordinates handles boundaries incorrectly,
# unless mode='reflect'. For order=1, this only affects interpolation outside
# the bounds of the original array.
# https://github.com/scipy/scipy/issues/2640
assert order <= 1
padding = [(max(-np.floor(c.min()).astype(int) + 1, 0),
max(np.ceil(c.max()).astype(int) + 1 - size, 0))
for c, size in zip(coordinates, input.shape)]
shifted_coords = [c + p[0] for p, c in zip(padding, coordinates)]
pad_mode = {
'nearest': 'edge', 'mirror': 'reflect', 'reflect': 'symmetric'
}.get(mode, mode)
if mode == 'constant':
padded = np.pad(input, padding, mode=pad_mode, constant_values=cval)
else:
padded = np.pad(input, padding, mode=pad_mode)
result = osp_ndimage.map_coordinates(
padded, shifted_coords, order=order, mode=mode, cval=cval)
return result
class NdimageTest(jtu.JaxTestCase):
  """Tests jax.scipy.ndimage.map_coordinates against the SciPy reference."""

  # Cross-product of shapes/dtypes/orders/modes; the "fixed" impl samples
  # coordinates outside [0, size-1] to exercise boundary handling, which is
  # where raw scipy is known to be wrong (see _fixed_ref_map_coordinates).
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_coordinates={}_order={}_mode={}_cval={}_impl={}_round={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          jtu.format_shape_dtype_string(coords_shape, coords_dtype),
          order, mode, cval, impl, round_),
       "rng_factory": rng_factory, "shape": shape,
       "coords_shape": coords_shape, "dtype": dtype,
       "coords_dtype": coords_dtype, "order": order, "mode": mode,
       "cval": cval, "impl": impl, "round_": round_}
      for shape in [(5,), (3, 4), (3, 4, 5)]
      for coords_shape in [(7,), (2, 3, 4)]
      for dtype in float_dtypes + int_dtypes
      for coords_dtype in float_dtypes
      for order in [0, 1]
      for mode in ['wrap', 'constant', 'nearest']
      for cval in ([0, -1] if mode == 'constant' else [0])
      for impl, rng_factory in [
          ("original", partial(jtu.rand_uniform, low=0, high=1)),
          ("fixed", partial(jtu.rand_uniform, low=-0.75, high=1.75)),
      ]
      for round_ in [True, False]))
  def testMapCoordinates(self, shape, dtype, coords_shape, coords_dtype, order,
                         mode, cval, impl, round_, rng_factory):
    """Checks lax-backed map_coordinates matches the scipy reference."""

    def args_maker():
      x = np.arange(prod(shape), dtype=dtype).reshape(shape)
      # Coordinates scaled into (roughly) the index range of each axis.
      coords = [(size - 1) * rng(coords_shape, coords_dtype) for size in shape]
      if round_:
        coords = [c.round().astype(int) for c in coords]
      return x, coords

    rng = rng_factory(self.rng())
    lsp_op = lambda x, c: lsp_ndimage.map_coordinates(
        x, c, order=order, mode=mode, cval=cval)
    impl_fun = (osp_ndimage.map_coordinates if impl == "original"
                else _fixed_ref_map_coordinates)
    osp_op = lambda x, c: impl_fun(x, c, order=order, mode=mode, cval=cval)
    if dtype in float_dtypes:
      # Tolerance scales with the coarser of the two dtypes' precision.
      epsilon = max([dtypes.finfo(dtypes.canonicalize_dtype(d)).eps
                     for d in [dtype, coords_dtype]])
      self._CheckAgainstNumpy(osp_op, lsp_op, args_maker, tol=100*epsilon)
    else:
      self._CheckAgainstNumpy(osp_op, lsp_op, args_maker, tol=0)

  def testMapCoordinatesErrors(self):
    """Unsupported order/mode and malformed coordinates raise clear errors."""
    x = np.arange(5.0)
    c = [np.linspace(0, 5, num=3)]
    with self.assertRaisesRegex(NotImplementedError, 'requires order<=1'):
      lsp_ndimage.map_coordinates(x, c, order=2)
    with self.assertRaisesRegex(
        NotImplementedError, 'does not yet support mode'):
      lsp_ndimage.map_coordinates(x, c, order=1, mode='reflect')
    with self.assertRaisesRegex(ValueError, 'sequence of length'):
      lsp_ndimage.map_coordinates(x, [c, c], order=1)

  def testMapCoordinateDocstring(self):
    """The docstring documents the nearest-neighbor limitation."""
    self.assertIn("Only nearest neighbor",
                  lsp_ndimage.map_coordinates.__doc__)

  # Half-integer coordinates are where round-to-even vs round-half-up
  # differences between implementations would show.
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_order={}".format(np.dtype(dtype), order),
       "dtype": dtype, "order": order}
      for dtype in float_dtypes + int_dtypes
      for order in [0, 1]))
  def testMapCoordinatesRoundHalf(self, dtype, order):
    """Agreement with scipy on exactly-half-way coordinates."""
    x = np.arange(-3, 3, dtype=dtype)
    c = np.array([[.5, 1.5, 2.5, 3.5]])
    def args_maker():
      return x, c

    lsp_op = lambda x, c: lsp_ndimage.map_coordinates(x, c, order=order)
    osp_op = lambda x, c: osp_ndimage.map_coordinates(x, c, order=order)
    self._CheckAgainstNumpy(osp_op, lsp_op, args_maker)

  def testContinuousGradients(self):
    """Gradients through map_coordinates are continuous and correct."""
    # regression test for https://github.com/google/jax/issues/3024

    def loss(delta):
      x = np.arange(100.0)
      border = 10
      indices = np.arange(x.size) + delta
      # linear interpolation of the linear function y=x should be exact
      shifted = lsp_ndimage.map_coordinates(x, [indices], order=1)
      return ((x - shifted) ** 2)[border:-border].mean()

    # analytical gradient of (x - (x - delta)) ** 2 is 2 * delta
    self.assertAllClose(grad(loss)(0.5), 1.0, check_dtypes=False)
    self.assertAllClose(grad(loss)(1.0), 2.0, check_dtypes=False)
if __name__ == "__main__":
  # Entry point: run this module's tests via absltest with JAX's test loader.
  absltest.main(testLoader=jtu.JaxTestLoader())
| 39.422078 | 94 | 0.67633 |
3b7277af8c0889e53f972bccf288df3e0d050aea | 1,778 | py | Python | kostal/InfoVersions.py | DAMEK86/kostal-piko-py | 6594a1cc35fe77a789e3462f2b0762c0f359720a | [
"MIT"
] | null | null | null | kostal/InfoVersions.py | DAMEK86/kostal-piko-py | 6594a1cc35fe77a789e3462f2b0762c0f359720a | [
"MIT"
] | null | null | null | kostal/InfoVersions.py | DAMEK86/kostal-piko-py | 6594a1cc35fe77a789e3462f2b0762c0f359720a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from kostal.DxsApi import DxsEntry
class InfoVersions:
    """Read access to the inverter's version, identity and country settings.

    Every accessor asks the wrapped inverter for one DXS property and
    unwraps the matching entry from the response.
    """

    # DXS property ids passed to the inverter's fetch_props().
    VERSION_UI = 16779267
    VERSION_FW = 16779265
    VERSION_HW = 16779266
    VERSION_PAR = 16779268
    SERIAL_NUMBER = 16777728
    ARTICLE_NUMBER = 16777472
    COUNTRY_SETTINGS_NAME = 16779522
    COUNTRY_SETTINGS_VERSION = 16779521

    def __init__(self, inverter) -> None:
        self.__inverter = inverter

    async def __fetch(self, dxs_id) -> DxsEntry:
        # Shared fetch-then-extract step used by every public accessor.
        response = await self.__inverter.fetch_props(dxs_id)
        return response.get_entry_by_id(dxs_id)

    async def version_ui(self) -> DxsEntry:
        """Return the UI version entry."""
        return await self.__fetch(self.VERSION_UI)

    async def version_fw(self) -> DxsEntry:
        """Return the firmware version entry."""
        return await self.__fetch(self.VERSION_FW)

    async def version_hw(self) -> DxsEntry:
        """Return the hardware version entry."""
        return await self.__fetch(self.VERSION_HW)

    async def version_par(self) -> DxsEntry:
        """Return the parameter version entry."""
        return await self.__fetch(self.VERSION_PAR)

    async def serial_nr(self) -> DxsEntry:
        """Return the serial number entry."""
        return await self.__fetch(self.SERIAL_NUMBER)

    async def article_nr(self) -> DxsEntry:
        """Return the article number entry."""
        return await self.__fetch(self.ARTICLE_NUMBER)

    async def country_settings_name(self) -> DxsEntry:
        """Return the country settings name entry."""
        return await self.__fetch(self.COUNTRY_SETTINGS_NAME)

    async def country_settings_version(self) -> DxsEntry:
        """Return the country settings version entry."""
        return await self.__fetch(self.COUNTRY_SETTINGS_VERSION)
| 36.285714 | 83 | 0.704162 |
d2c3316eb7f9c6f58349f6e4153517175e650bad | 2,525 | py | Python | synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py | V02460/synapse | 782dd72037cf71fb3f9e4922b07c56df2f59de75 | [
"Apache-2.0"
] | 1 | 2020-03-05T12:58:46.000Z | 2020-03-05T12:58:46.000Z | synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py | V02460/synapse | 782dd72037cf71fb3f9e4922b07c56df2f59de75 | [
"Apache-2.0"
] | null | null | null | synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py | V02460/synapse | 782dd72037cf71fb3f9e4922b07c56df2f59de75 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from twisted.internet import defer
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
)
from ._base import client_patterns
logger = logging.getLogger(__name__)
class RoomUpgradeRestServlet(RestServlet):
    """Servlet implementing the client-server room upgrade endpoint.

    Accepts requests of the form:

        POST /_matrix/client/r0/rooms/$roomid/upgrade HTTP/1.1
        Content-Type: application/json

        {
            "new_version": "2",
        }

    A replacement room is created and the old one shut down; the response
    carries the ID of the new room.

    Args:
        hs (synapse.server.HomeServer):
    """

    PATTERNS = client_patterns(
        # /rooms/$roomid/upgrade
        "/rooms/(?P<room_id>[^/]*)/upgrade$"
    )

    def __init__(self, hs):
        super(RoomUpgradeRestServlet, self).__init__()
        self._hs = hs
        self._auth = hs.get_auth()
        self._room_creation_handler = hs.get_room_creation_handler()

    @defer.inlineCallbacks
    def on_POST(self, request, room_id):
        requester = yield self._auth.get_user_by_req(request)

        body = parse_json_object_from_request(request)
        assert_params_in_dict(body, ("new_version",))
        new_version = body["new_version"]

        # Reject versions this server cannot create rooms in.
        if new_version not in KNOWN_ROOM_VERSIONS:
            raise SynapseError(
                400,
                "Your homeserver does not support this room version",
                Codes.UNSUPPORTED_ROOM_VERSION,
            )

        new_room_id = yield self._room_creation_handler.upgrade_room(
            requester, room_id, new_version
        )

        return 200, {"replacement_room": new_room_id}
def register_servlets(hs, http_server):
    """Register this module's servlets with the given HTTP server."""
    servlet = RoomUpgradeRestServlet(hs)
    servlet.register(http_server)
| 28.693182 | 82 | 0.685545 |
116115b3d4e3361435fcd662ff278b5c58ee900e | 9,272 | py | Python | sdk/python/pulumi_aws/rds/event_subscription.py | JakeGinnivan/pulumi-aws | c91ef78932964ac74eda7f5da81f65b0f1798c93 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/rds/event_subscription.py | JakeGinnivan/pulumi-aws | c91ef78932964ac74eda7f5da81f65b0f1798c93 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/rds/event_subscription.py | JakeGinnivan/pulumi-aws | c91ef78932964ac74eda7f5da81f65b0f1798c93 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class EventSubscription(pulumi.CustomResource):
    """An AWS RDS DB event subscription resource.

    NOTE(review): this class is emitted by the Pulumi Terraform Bridge
    (tfgen) generator -- regenerate instead of hand-editing.
    """
    # The Amazon Resource Name of the RDS event notification subscription.
    arn: pulumi.Output[str]
    # The AWS customer account associated with the subscription.
    customer_aws_id: pulumi.Output[str]
    enabled: pulumi.Output[bool]
    """
    A boolean flag to enable/disable the subscription. Defaults to true.
    """
    event_categories: pulumi.Output[list]
    """
    A list of event categories for a SourceType that you want to subscribe to. See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html or run `aws rds describe-event-categories`.
    """
    name: pulumi.Output[str]
    """
    The name of the DB event subscription. By default generated by this provider.
    """
    name_prefix: pulumi.Output[str]
    """
    The name of the DB event subscription. Conflicts with `name`.
    """
    sns_topic: pulumi.Output[str]
    """
    The SNS topic to send events to.
    """
    source_ids: pulumi.Output[list]
    """
    A list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. If specified, a source_type must also be specified.
    """
    source_type: pulumi.Output[str]
    """
    The type of source that will be generating the events. Valid options are `db-instance`, `db-security-group`, `db-parameter-group`, `db-snapshot`, `db-cluster` or `db-cluster-snapshot`. If not set, all sources will be subscribed to.
    """
    tags: pulumi.Output[dict]
    """
    A map of tags to assign to the resource.
    """
    def __init__(__self__, resource_name, opts=None, enabled=None, event_categories=None, name=None, name_prefix=None, sns_topic=None, source_ids=None, source_type=None, tags=None, __props__=None, __name__=None, __opts__=None):
        """
        Provides a DB event subscription resource.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        default_instance = aws.rds.Instance("defaultInstance",
            allocated_storage=10,
            db_subnet_group_name="my_database_subnet_group",
            engine="mysql",
            engine_version="5.6.17",
            instance_class="db.t2.micro",
            name="mydb",
            parameter_group_name="default.mysql5.6",
            password="bar",
            username="foo")
        default_topic = aws.sns.Topic("defaultTopic")
        default_event_subscription = aws.rds.EventSubscription("defaultEventSubscription",
            event_categories=[
                "availability",
                "deletion",
                "failover",
                "failure",
                "low storage",
                "maintenance",
                "notification",
                "read replica",
                "recovery",
                "restoration",
            ],
            sns_topic=default_topic.arn,
            source_ids=[default_instance.id],
            source_type="db-instance")
        ```

        ## Attributes

        The following additional attributes are provided:

        * `id` - The name of the RDS event notification subscription
        * `arn` - The Amazon Resource Name of the RDS event notification subscription
        * `customer_aws_id` - The AWS customer account associated with the RDS event notification subscription

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] enabled: A boolean flag to enable/disable the subscription. Defaults to true.
        :param pulumi.Input[list] event_categories: A list of event categories for a SourceType that you want to subscribe to. See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html or run `aws rds describe-event-categories`.
        :param pulumi.Input[str] name: The name of the DB event subscription. By default generated by this provider.
        :param pulumi.Input[str] name_prefix: The name of the DB event subscription. Conflicts with `name`.
        :param pulumi.Input[str] sns_topic: The SNS topic to send events to.
        :param pulumi.Input[list] source_ids: A list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. If specified, a source_type must also be specified.
        :param pulumi.Input[str] source_type: The type of source that will be generating the events. Valid options are `db-instance`, `db-security-group`, `db-parameter-group`, `db-snapshot`, `db-cluster` or `db-cluster-snapshot`. If not set, all sources will be subscribed to.
        :param pulumi.Input[dict] tags: A map of tags to assign to the resource.
        """
        # Legacy escape hatches for the deprecated __name__/__opts__ kwargs.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # Without an explicit id we are creating a new resource, so build the
        # property bag from the constructor arguments.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['enabled'] = enabled
            __props__['event_categories'] = event_categories
            __props__['name'] = name
            __props__['name_prefix'] = name_prefix
            if sns_topic is None:
                raise TypeError("Missing required property 'sns_topic'")
            __props__['sns_topic'] = sns_topic
            __props__['source_ids'] = source_ids
            __props__['source_type'] = source_type
            __props__['tags'] = tags
            # Output-only properties start as None and are filled in by the engine.
            __props__['arn'] = None
            __props__['customer_aws_id'] = None
        super(EventSubscription, __self__).__init__(
            'aws:rds/eventSubscription:EventSubscription',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, arn=None, customer_aws_id=None, enabled=None, event_categories=None, name=None, name_prefix=None, sns_topic=None, source_ids=None, source_type=None, tags=None):
        """
        Get an existing EventSubscription resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] enabled: A boolean flag to enable/disable the subscription. Defaults to true.
        :param pulumi.Input[list] event_categories: A list of event categories for a SourceType that you want to subscribe to. See http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html or run `aws rds describe-event-categories`.
        :param pulumi.Input[str] name: The name of the DB event subscription. By default generated by this provider.
        :param pulumi.Input[str] name_prefix: The name of the DB event subscription. Conflicts with `name`.
        :param pulumi.Input[str] sns_topic: The SNS topic to send events to.
        :param pulumi.Input[list] source_ids: A list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. If specified, a source_type must also be specified.
        :param pulumi.Input[str] source_type: The type of source that will be generating the events. Valid options are `db-instance`, `db-security-group`, `db-parameter-group`, `db-snapshot`, `db-cluster` or `db-cluster-snapshot`. If not set, all sources will be subscribed to.
        :param pulumi.Input[dict] tags: A map of tags to assign to the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        __props__["arn"] = arn
        __props__["customer_aws_id"] = customer_aws_id
        __props__["enabled"] = enabled
        __props__["event_categories"] = event_categories
        __props__["name"] = name
        __props__["name_prefix"] = name_prefix
        __props__["sns_topic"] = sns_topic
        __props__["source_ids"] = source_ids
        __props__["source_type"] = source_type
        __props__["tags"] = tags
        return EventSubscription(resource_name, opts=opts, __props__=__props__)

    # Map engine-side camelCase property names to Python snake_case and back.
    def translate_output_property(self, prop):
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 51.226519 | 277 | 0.670298 |
39dce7713db07d33eabe9c1a7b892e9f2c162c33 | 5,242 | py | Python | tests/test_func.py | pexip/os-python-cachetools | 2e72da5b861f106df393e25d975a3bccb89b6fb3 | [
"MIT"
] | 1 | 2021-01-04T04:25:14.000Z | 2021-01-04T04:25:14.000Z | tests/test_func.py | pexip/os-python-cachetools | 2e72da5b861f106df393e25d975a3bccb89b6fb3 | [
"MIT"
] | null | null | null | tests/test_func.py | pexip/os-python-cachetools | 2e72da5b861f106df393e25d975a3bccb89b6fb3 | [
"MIT"
] | null | null | null | import unittest
import cachetools.func
class DecoratorTestMixin(object):
    """Shared tests for the functools-style cachetools.func decorators.

    Subclasses set DECORATOR to one of the cachetools.func decorators;
    cache_info() follows the functools.lru_cache convention:
    (hits, misses, maxsize, currsize).
    """

    def decorator(self, maxsize, **kwargs):
        # Indirection so each subclass exercises its own decorator.
        return self.DECORATOR(maxsize, **kwargs)

    def test_decorator(self):
        """Basic hit/miss accounting; untyped caches treat 1 and 1.0 alike."""
        cached = self.decorator(maxsize=2)(lambda n: n)
        self.assertEqual(cached.cache_parameters(), {
            'maxsize': 2, 'typed': False
        })
        self.assertEqual(cached.cache_info(), (0, 0, 2, 0))

        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 1, 2, 1))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (1, 1, 2, 1))
        self.assertEqual(cached(1.0), 1.0)
        self.assertEqual(cached.cache_info(), (2, 1, 2, 1))

    def test_decorator_clear(self):
        """cache_clear() resets both the cache contents and the statistics."""
        cached = self.decorator(maxsize=2)(lambda n: n)
        self.assertEqual(cached.cache_parameters(), {
            'maxsize': 2, 'typed': False
        })
        self.assertEqual(cached.cache_info(), (0, 0, 2, 0))

        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 1, 2, 1))
        cached.cache_clear()
        self.assertEqual(cached.cache_info(), (0, 0, 2, 0))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 1, 2, 1))

    def test_decorator_nocache(self):
        """maxsize=0 disables caching: every call is a miss, nothing stored."""
        cached = self.decorator(maxsize=0)(lambda n: n)
        self.assertEqual(cached.cache_parameters(), {
            'maxsize': 0, 'typed': False
        })
        self.assertEqual(cached.cache_info(), (0, 0, 0, 0))

        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 1, 0, 0))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 2, 0, 0))
        self.assertEqual(cached(1.0), 1.0)
        self.assertEqual(cached.cache_info(), (0, 3, 0, 0))

    def test_decorator_unbound(self):
        """maxsize=None means an unbounded cache."""
        cached = self.decorator(maxsize=None)(lambda n: n)
        self.assertEqual(cached.cache_parameters(), {
            'maxsize': None, 'typed': False
        })
        self.assertEqual(cached.cache_info(), (0, 0, None, 0))

        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 1, None, 1))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (1, 1, None, 1))
        self.assertEqual(cached(1.0), 1.0)
        self.assertEqual(cached.cache_info(), (2, 1, None, 1))

    def test_decorator_typed(self):
        """typed=True caches 1 (int) and 1.0 (float) under separate keys."""
        cached = self.decorator(maxsize=2, typed=True)(lambda n: n)
        self.assertEqual(cached.cache_parameters(), {
            'maxsize': 2, 'typed': True
        })
        self.assertEqual(cached.cache_info(), (0, 0, 2, 0))

        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 1, 2, 1))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (1, 1, 2, 1))
        self.assertEqual(cached(1.0), 1.0)
        self.assertEqual(cached.cache_info(), (1, 2, 2, 2))
        self.assertEqual(cached(1.0), 1.0)
        self.assertEqual(cached.cache_info(), (2, 2, 2, 2))

    def test_decorator_user_function(self):
        """Calling the decorator directly with a function uses the defaults."""
        cached = self.decorator(lambda n: n)
        self.assertEqual(cached.cache_parameters(), {
            'maxsize': 128, 'typed': False
        })
        self.assertEqual(cached.cache_info(), (0, 0, 128, 0))

        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 1, 128, 1))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (1, 1, 128, 1))
        self.assertEqual(cached(1.0), 1.0)
        self.assertEqual(cached.cache_info(), (2, 1, 128, 1))

    def test_decorator_needs_rlock(self):
        """A reentrant lock must guard the cache: __eq__ below re-enters it."""
        cached = self.decorator(lambda n: n)

        class RecursiveEquals:
            def __init__(self, use_cache):
                self._use_cache = use_cache

            def __hash__(self):
                return hash(self._use_cache)

            def __eq__(self, other):
                if self._use_cache:
                    # This call will happen while the cache-lock is held,
                    # requiring a reentrant lock to avoid deadlock.
                    cached(self)
                return self._use_cache == other._use_cache

        # Prime the cache.
        cached(RecursiveEquals(False))
        cached(RecursiveEquals(True))
        # Then do a call which will cause a deadlock with a non-reentrant lock.
        self.assertEqual(cached(RecursiveEquals(True)), RecursiveEquals(True))
class FIFODecoratorTest(unittest.TestCase, DecoratorTestMixin):
    """Run the shared decorator tests against cachetools.func.fifo_cache."""

    DECORATOR = staticmethod(cachetools.func.fifo_cache)
class LFUDecoratorTest(unittest.TestCase, DecoratorTestMixin):
    """Run the shared decorator tests against cachetools.func.lfu_cache."""

    DECORATOR = staticmethod(cachetools.func.lfu_cache)
class LRUDecoratorTest(unittest.TestCase, DecoratorTestMixin):
    """Run the shared decorator tests against cachetools.func.lru_cache."""

    DECORATOR = staticmethod(cachetools.func.lru_cache)
class MRUDecoratorTest(unittest.TestCase, DecoratorTestMixin):
    """Run the shared decorator tests against cachetools.func.mru_cache."""

    DECORATOR = staticmethod(cachetools.func.mru_cache)
class RRDecoratorTest(unittest.TestCase, DecoratorTestMixin):
    """Run the shared decorator tests against cachetools.func.rr_cache."""

    DECORATOR = staticmethod(cachetools.func.rr_cache)
class TTLDecoratorTest(unittest.TestCase, DecoratorTestMixin):
    """Run the shared decorator tests against cachetools.func.ttl_cache."""

    DECORATOR = staticmethod(cachetools.func.ttl_cache)
| 36.657343 | 79 | 0.624571 |
2d834d5f19f6879a4697d16686d5cab4058a1b40 | 157,719 | py | Python | tests/unit/gapic/aiplatform_v1/test_pipeline_service.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | 1 | 2022-03-30T05:23:29.000Z | 2022-03-30T05:23:29.000Z | tests/unit/gapic/aiplatform_v1/test_pipeline_service.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/aiplatform_v1/test_pipeline_service.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1.services.pipeline_service import (
PipelineServiceAsyncClient,
)
from google.cloud.aiplatform_v1.services.pipeline_service import PipelineServiceClient
from google.cloud.aiplatform_v1.services.pipeline_service import pagers
from google.cloud.aiplatform_v1.services.pipeline_service import transports
from google.cloud.aiplatform_v1.types import artifact
from google.cloud.aiplatform_v1.types import context
from google.cloud.aiplatform_v1.types import deployed_model_ref
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import env_var
from google.cloud.aiplatform_v1.types import execution
from google.cloud.aiplatform_v1.types import explanation
from google.cloud.aiplatform_v1.types import explanation_metadata
from google.cloud.aiplatform_v1.types import io
from google.cloud.aiplatform_v1.types import model
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.cloud.aiplatform_v1.types import pipeline_job
from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
from google.cloud.aiplatform_v1.types import pipeline_service
from google.cloud.aiplatform_v1.types import pipeline_state
from google.cloud.aiplatform_v1.types import training_pipeline
from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
from google.cloud.aiplatform_v1.types import value
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Return a dummy (certificate, key) byte pair for mTLS test fixtures."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a distinct endpoint if the client's default is localhost.

    Used to patch DEFAULT_ENDPOINT so that a different mTLS endpoint can be
    produced for endpoint testing purposes.
    """
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint inserts `.mtls` into googleapis hosts.

    None passes through as None, already-mtls hosts are unchanged, and
    non-Google hosts are returned as-is.
    """
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    assert PipelineServiceClient._get_default_mtls_endpoint(None) is None
    assert (
        PipelineServiceClient._get_default_mtls_endpoint(api_endpoint)
        == api_mtls_endpoint
    )
    assert (
        PipelineServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
        == api_mtls_endpoint
    )
    assert (
        PipelineServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        PipelineServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        PipelineServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
    )
@pytest.mark.parametrize(
    "client_class", [PipelineServiceClient, PipelineServiceAsyncClient,]
)
def test_pipeline_service_client_from_service_account_info(client_class):
    """from_service_account_info builds a client that carries the credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.PipelineServiceGrpcTransport, "grpc"),
        (transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_pipeline_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """Transports toggle self-signed JWT usage per always_use_jwt_access."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class", [PipelineServiceClient, PipelineServiceAsyncClient,]
)
def test_pipeline_service_client_from_service_account_file(client_class):
    """Both file-based constructors produce clients carrying the credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        # from_service_account_json is the alias spelling of the same factory.
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "aiplatform.googleapis.com:443"
def test_pipeline_service_client_get_transport_class():
    """get_transport_class resolves both the default and the named transport."""
    transport = PipelineServiceClient.get_transport_class()
    available_transports = [
        transports.PipelineServiceGrpcTransport,
    ]
    assert transport in available_transports

    transport = PipelineServiceClient.get_transport_class("grpc")
    assert transport == transports.PipelineServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"),
        (
            PipelineServiceAsyncClient,
            transports.PipelineServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    PipelineServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(PipelineServiceClient),
)
@mock.patch.object(
    PipelineServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(PipelineServiceAsyncClient),
)
def test_pipeline_service_client_client_options(
    client_class, transport_class, transport_name
):
    """Client options (endpoint, mTLS env vars, quota project) are honored at construction."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(PipelineServiceClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(PipelineServiceClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            PipelineServiceClient,
            transports.PipelineServiceGrpcTransport,
            "grpc",
            "true",
        ),
        (
            PipelineServiceAsyncClient,
            transports.PipelineServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            PipelineServiceClient,
            transports.PipelineServiceGrpcTransport,
            "grpc",
            "false",
        ),
        (
            PipelineServiceAsyncClient,
            transports.PipelineServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    PipelineServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(PipelineServiceClient),
)
@mock.patch.object(
    PipelineServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(PipelineServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_pipeline_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, endpoint/cert choice follows
    GOOGLE_API_USE_CLIENT_CERTIFICATE and whether a client cert is available."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # NOTE(review): `client` below is still the instance left over
                    # from the previous with-block; the new client is only
                    # constructed a few lines later.  This works because
                    # DEFAULT_ENDPOINT is a class attribute — presumably
                    # intentional in this generated code; confirm before editing.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class", [PipelineServiceClient, PipelineServiceAsyncClient]
)
@mock.patch.object(
    PipelineServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(PipelineServiceClient),
)
@mock.patch.object(
    PipelineServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(PipelineServiceAsyncClient),
)
def test_pipeline_service_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source resolves (endpoint, cert) for every env combination."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"),
        (
            PipelineServiceAsyncClient,
            transports.PipelineServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_pipeline_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes given via client options are forwarded verbatim to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            PipelineServiceClient,
            transports.PipelineServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            PipelineServiceAsyncClient,
            transports.PipelineServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_pipeline_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file client option is passed through to the transport untouched."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_pipeline_service_client_client_options_from_dict():
    """A plain dict of client options is accepted and forwarded to the transport."""
    transport_path = "google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceGrpcTransport.__init__"
    with mock.patch(transport_path) as transport_init:
        transport_init.return_value = None
        PipelineServiceClient(client_options={"api_endpoint": "squid.clam.whelk"})
        # The dict's api_endpoint must surface as the transport host.
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            PipelineServiceClient,
            transports.PipelineServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            PipelineServiceAsyncClient,
            transports.PipelineServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_pipeline_service_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a file are the ones used to create the gRPC channel."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        # The channel must be created with the file credentials, not the ADC ones.
        create_channel.assert_called_with(
            "aiplatform.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=None,
            default_host="aiplatform.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "request_type", [pipeline_service.CreateTrainingPipelineRequest, dict,]
)
def test_create_training_pipeline(request_type, transport: str = "grpc"):
    """CreateTrainingPipeline forwards the request and surfaces the response fields."""
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_training_pipeline.TrainingPipeline(
            name="name_value",
            display_name="display_name_value",
            training_task_definition="training_task_definition_value",
            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
        )
        response = client.create_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.CreateTrainingPipelineRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_training_pipeline.TrainingPipeline)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.training_task_definition == "training_task_definition_value"
    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
def test_create_training_pipeline_empty_call():
    """Calling the RPC with no request at all still sends a default request object."""
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    stub = type(client.transport.create_training_pipeline)
    with mock.patch.object(stub, "__call__") as rpc:
        client.create_training_pipeline()
        rpc.assert_called()
        # The empty call must be translated into a default-constructed request.
        first_call_args = rpc.mock_calls[0][1]
        assert first_call_args[0] == pipeline_service.CreateTrainingPipelineRequest()
@pytest.mark.asyncio
async def test_create_training_pipeline_async(
    transport: str = "grpc_asyncio",
    request_type=pipeline_service.CreateTrainingPipelineRequest,
):
    """Async CreateTrainingPipeline forwards the request and surfaces response fields."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_training_pipeline.TrainingPipeline(
                name="name_value",
                display_name="display_name_value",
                training_task_definition="training_task_definition_value",
                state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
            )
        )
        response = await client.create_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.CreateTrainingPipelineRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_training_pipeline.TrainingPipeline)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.training_task_definition == "training_task_definition_value"
    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
@pytest.mark.asyncio
async def test_create_training_pipeline_async_from_dict():
    """Re-run the async create test with a plain dict standing in for the request proto."""
    await test_create_training_pipeline_async(request_type=dict)
def test_create_training_pipeline_field_headers():
    """The request's `parent` is sent as an x-goog-request-params routing header."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.CreateTrainingPipelineRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_training_pipeline), "__call__"
    ) as call:
        call.return_value = gca_training_pipeline.TrainingPipeline()
        client.create_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_training_pipeline_field_headers_async():
    """Async variant: the request's `parent` is sent as a routing header."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.CreateTrainingPipelineRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_training_pipeline), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_training_pipeline.TrainingPipeline()
        )
        await client.create_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_training_pipeline_flattened():
    """Flattened keyword arguments populate the corresponding request fields."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_training_pipeline.TrainingPipeline()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_training_pipeline(
            parent="parent_value",
            training_pipeline=gca_training_pipeline.TrainingPipeline(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].training_pipeline
        mock_val = gca_training_pipeline.TrainingPipeline(name="name_value")
        assert arg == mock_val
def test_create_training_pipeline_flattened_error():
    """Passing a request object together with flattened fields must raise ValueError."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    request = pipeline_service.CreateTrainingPipelineRequest()
    pipeline = gca_training_pipeline.TrainingPipeline(name="name_value")
    # Mixing the two calling conventions is rejected before any RPC is made.
    with pytest.raises(ValueError):
        client.create_training_pipeline(
            request, parent="parent_value", training_pipeline=pipeline,
        )
@pytest.mark.asyncio
async def test_create_training_pipeline_flattened_async():
    """Flattened kwargs on the async client populate the request fields."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A redundant
        # plain `call.return_value = gca_training_pipeline.TrainingPipeline()`
        # assignment was removed: it was immediately overwritten below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_training_pipeline.TrainingPipeline()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_training_pipeline(
            parent="parent_value",
            training_pipeline=gca_training_pipeline.TrainingPipeline(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].training_pipeline
        mock_val = gca_training_pipeline.TrainingPipeline(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_training_pipeline_flattened_error_async():
    """The async client also rejects mixing a request object with flattened fields."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = pipeline_service.CreateTrainingPipelineRequest()
    pipeline = gca_training_pipeline.TrainingPipeline(name="name_value")
    # Mixing the two calling conventions is rejected before any RPC is made.
    with pytest.raises(ValueError):
        await client.create_training_pipeline(
            request, parent="parent_value", training_pipeline=pipeline,
        )
@pytest.mark.parametrize(
    "request_type", [pipeline_service.GetTrainingPipelineRequest, dict,]
)
def test_get_training_pipeline(request_type, transport: str = "grpc"):
    """GetTrainingPipeline forwards the request and surfaces the response fields."""
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = training_pipeline.TrainingPipeline(
            name="name_value",
            display_name="display_name_value",
            training_task_definition="training_task_definition_value",
            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
        )
        response = client.get_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.GetTrainingPipelineRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, training_pipeline.TrainingPipeline)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.training_task_definition == "training_task_definition_value"
    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
def test_get_training_pipeline_empty_call():
    """Calling the RPC with no request at all still sends a default request object."""
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    stub = type(client.transport.get_training_pipeline)
    with mock.patch.object(stub, "__call__") as rpc:
        client.get_training_pipeline()
        rpc.assert_called()
        # The empty call must be translated into a default-constructed request.
        first_call_args = rpc.mock_calls[0][1]
        assert first_call_args[0] == pipeline_service.GetTrainingPipelineRequest()
@pytest.mark.asyncio
async def test_get_training_pipeline_async(
    transport: str = "grpc_asyncio",
    request_type=pipeline_service.GetTrainingPipelineRequest,
):
    """Async GetTrainingPipeline forwards the request and surfaces response fields."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            training_pipeline.TrainingPipeline(
                name="name_value",
                display_name="display_name_value",
                training_task_definition="training_task_definition_value",
                state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
            )
        )
        response = await client.get_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.GetTrainingPipelineRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, training_pipeline.TrainingPipeline)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.training_task_definition == "training_task_definition_value"
    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
@pytest.mark.asyncio
async def test_get_training_pipeline_async_from_dict():
    """Re-run the async get test with a plain dict standing in for the request proto."""
    await test_get_training_pipeline_async(request_type=dict)
def test_get_training_pipeline_field_headers():
    """The request's `name` is sent as an x-goog-request-params routing header."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.GetTrainingPipelineRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_training_pipeline), "__call__"
    ) as call:
        call.return_value = training_pipeline.TrainingPipeline()
        client.get_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_training_pipeline_field_headers_async():
    """Async variant: the request's `name` is sent as a routing header."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.GetTrainingPipelineRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_training_pipeline), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            training_pipeline.TrainingPipeline()
        )
        await client.get_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_training_pipeline_flattened():
    """A flattened `name` keyword argument populates request.name."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = training_pipeline.TrainingPipeline()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_training_pipeline(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_training_pipeline_flattened_error():
    """Passing a request object together with a flattened field must raise ValueError."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    request = pipeline_service.GetTrainingPipelineRequest()
    # Mixing the two calling conventions is rejected before any RPC is made.
    with pytest.raises(ValueError):
        client.get_training_pipeline(request, name="name_value",)
@pytest.mark.asyncio
async def test_get_training_pipeline_flattened_async():
    """A flattened `name` kwarg on the async client populates request.name."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A redundant
        # plain `call.return_value = training_pipeline.TrainingPipeline()`
        # assignment was removed: it was immediately overwritten below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            training_pipeline.TrainingPipeline()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_training_pipeline(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_training_pipeline_flattened_error_async():
    """The async client also rejects mixing a request object with a flattened field."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = pipeline_service.GetTrainingPipelineRequest()
    # Mixing the two calling conventions is rejected before any RPC is made.
    with pytest.raises(ValueError):
        await client.get_training_pipeline(request, name="name_value",)
@pytest.mark.parametrize(
    "request_type", [pipeline_service.ListTrainingPipelinesRequest, dict,]
)
def test_list_training_pipelines(request_type, transport: str = "grpc"):
    """ListTrainingPipelines returns a pager exposing next_page_token."""
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_training_pipelines), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = pipeline_service.ListTrainingPipelinesResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_training_pipelines(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.ListTrainingPipelinesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListTrainingPipelinesPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_training_pipelines_empty_call():
    """Calling with no arguments at all still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_training_pipelines), "__call__"
    ) as call:
        client.list_training_pipelines()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.ListTrainingPipelinesRequest()
@pytest.mark.asyncio
async def test_list_training_pipelines_async(
    transport: str = "grpc_asyncio",
    request_type=pipeline_service.ListTrainingPipelinesRequest,
):
    """Async list_training_pipelines sends the request and returns an async pager."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_training_pipelines), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            pipeline_service.ListTrainingPipelinesResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_training_pipelines(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.ListTrainingPipelinesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListTrainingPipelinesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_training_pipelines_async_from_dict():
    """Re-run the async test with a plain dict request body."""
    await test_list_training_pipelines_async(request_type=dict)
def test_list_training_pipelines_field_headers():
    """URI path fields must be propagated as x-goog-request-params metadata."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.ListTrainingPipelinesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_training_pipelines), "__call__"
    ) as call:
        call.return_value = pipeline_service.ListTrainingPipelinesResponse()
        client.list_training_pipelines(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_training_pipelines_field_headers_async():
    """Async variant: URI path fields become x-goog-request-params metadata."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.ListTrainingPipelinesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_training_pipelines), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            pipeline_service.ListTrainingPipelinesResponse()
        )
        await client.list_training_pipelines(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_training_pipelines_flattened():
    """Flattened keyword args must be copied into the request object."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_training_pipelines), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = pipeline_service.ListTrainingPipelinesResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_training_pipelines(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_training_pipelines_flattened_error():
    """Mixing a request object with flattened args must raise ValueError."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_training_pipelines(
            pipeline_service.ListTrainingPipelinesRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_training_pipelines_flattened_async():
    """Async flattened call must copy ``parent`` into the request object."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_training_pipelines), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A plain
        # synchronous response previously assigned here was dead code — it was
        # immediately overwritten by this awaitable, so it has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            pipeline_service.ListTrainingPipelinesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_training_pipelines(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_training_pipelines_flattened_error_async():
    """Async client must reject mixing a request object with flattened args."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_training_pipelines(
            pipeline_service.ListTrainingPipelinesRequest(), parent="parent_value",
        )
def test_list_training_pipelines_pager(transport_name: str = "grpc"):
    """The sync pager iterates every item across all pages and keeps metadata."""
    client = PipelineServiceClient(
        # Fix: instantiate the credentials; the class itself was being passed.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_training_pipelines), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[
                    training_pipeline.TrainingPipeline(),
                    training_pipeline.TrainingPipeline(),
                    training_pipeline.TrainingPipeline(),
                ],
                next_page_token="abc",
            ),
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[], next_page_token="def",
            ),
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[training_pipeline.TrainingPipeline(),],
                next_page_token="ghi",
            ),
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[
                    training_pipeline.TrainingPipeline(),
                    training_pipeline.TrainingPipeline(),
                ],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_training_pipelines(request={})
        assert pager._metadata == metadata
        # list() consumes the pager; idiomatic copy instead of [i for i in ...].
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, training_pipeline.TrainingPipeline) for i in results)
def test_list_training_pipelines_pages(transport_name: str = "grpc"):
    """Page-level iteration yields raw pages with the expected next_page_token."""
    client = PipelineServiceClient(
        # Fix: instantiate the credentials; the class itself was being passed.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_training_pipelines), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[
                    training_pipeline.TrainingPipeline(),
                    training_pipeline.TrainingPipeline(),
                    training_pipeline.TrainingPipeline(),
                ],
                next_page_token="abc",
            ),
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[], next_page_token="def",
            ),
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[training_pipeline.TrainingPipeline(),],
                next_page_token="ghi",
            ),
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[
                    training_pipeline.TrainingPipeline(),
                    training_pipeline.TrainingPipeline(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_training_pipelines(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_training_pipelines_async_pager():
    """The async pager iterates every item across all pages."""
    client = PipelineServiceAsyncClient(
        # Fix: instantiate the credentials; the class itself was being passed.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_training_pipelines),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[
                    training_pipeline.TrainingPipeline(),
                    training_pipeline.TrainingPipeline(),
                    training_pipeline.TrainingPipeline(),
                ],
                next_page_token="abc",
            ),
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[], next_page_token="def",
            ),
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[training_pipeline.TrainingPipeline(),],
                next_page_token="ghi",
            ),
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[
                    training_pipeline.TrainingPipeline(),
                    training_pipeline.TrainingPipeline(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_training_pipelines(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, training_pipeline.TrainingPipeline) for i in responses)
@pytest.mark.asyncio
async def test_list_training_pipelines_async_pages():
    """Async page-level iteration yields pages with the expected tokens."""
    client = PipelineServiceAsyncClient(
        # Fix: instantiate the credentials; the class itself was being passed.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_training_pipelines),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[
                    training_pipeline.TrainingPipeline(),
                    training_pipeline.TrainingPipeline(),
                    training_pipeline.TrainingPipeline(),
                ],
                next_page_token="abc",
            ),
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[], next_page_token="def",
            ),
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[training_pipeline.TrainingPipeline(),],
                next_page_token="ghi",
            ),
            pipeline_service.ListTrainingPipelinesResponse(
                training_pipelines=[
                    training_pipeline.TrainingPipeline(),
                    training_pipeline.TrainingPipeline(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_training_pipelines(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [pipeline_service.DeleteTrainingPipelineRequest, dict,]
)
def test_delete_training_pipeline(request_type, transport: str = "grpc"):
    """delete_training_pipeline sends the request and returns an LRO future."""
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.delete_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.DeleteTrainingPipelineRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_delete_training_pipeline_empty_call():
    """Calling with no arguments at all still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_training_pipeline), "__call__"
    ) as call:
        client.delete_training_pipeline()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.DeleteTrainingPipelineRequest()
@pytest.mark.asyncio
async def test_delete_training_pipeline_async(
    transport: str = "grpc_asyncio",
    request_type=pipeline_service.DeleteTrainingPipelineRequest,
):
    """Async delete_training_pipeline sends the request and returns a future."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.delete_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.DeleteTrainingPipelineRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_training_pipeline_async_from_dict():
    """Re-run the async test with a plain dict request body."""
    await test_delete_training_pipeline_async(request_type=dict)
def test_delete_training_pipeline_field_headers():
    """URI path fields must be propagated as x-goog-request-params metadata."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.DeleteTrainingPipelineRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_training_pipeline), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.delete_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_training_pipeline_field_headers_async():
    """Async variant: URI path fields become x-goog-request-params metadata."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.DeleteTrainingPipelineRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_training_pipeline), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_training_pipeline_flattened():
    """Flattened keyword args must be copied into the request object."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_training_pipeline(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_training_pipeline_flattened_error():
    """Mixing a request object with flattened args must raise ValueError."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_training_pipeline(
            pipeline_service.DeleteTrainingPipelineRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_training_pipeline_flattened_async():
    """Async flattened call must copy ``name`` into the request object."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A plain
        # Operation previously assigned here was dead code — it was
        # immediately overwritten by this awaitable, so it has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_training_pipeline(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_training_pipeline_flattened_error_async():
    """Async client must reject mixing a request object with flattened args."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_training_pipeline(
            pipeline_service.DeleteTrainingPipelineRequest(), name="name_value",
        )
@pytest.mark.parametrize(
    "request_type", [pipeline_service.CancelTrainingPipelineRequest, dict,]
)
def test_cancel_training_pipeline(request_type, transport: str = "grpc"):
    """cancel_training_pipeline sends the request and returns None."""
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.cancel_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.cancel_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.CancelTrainingPipelineRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_cancel_training_pipeline_empty_call():
    """Calling with no arguments at all still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.cancel_training_pipeline), "__call__"
    ) as call:
        client.cancel_training_pipeline()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.CancelTrainingPipelineRequest()
@pytest.mark.asyncio
async def test_cancel_training_pipeline_async(
    transport: str = "grpc_asyncio",
    request_type=pipeline_service.CancelTrainingPipelineRequest,
):
    """Async cancel_training_pipeline sends the request and returns None."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.cancel_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.cancel_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.CancelTrainingPipelineRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_cancel_training_pipeline_async_from_dict():
    """Re-run the async test with a plain dict request body."""
    await test_cancel_training_pipeline_async(request_type=dict)
def test_cancel_training_pipeline_field_headers():
    """URI path fields must be propagated as x-goog-request-params metadata."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.CancelTrainingPipelineRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.cancel_training_pipeline), "__call__"
    ) as call:
        call.return_value = None
        client.cancel_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_cancel_training_pipeline_field_headers_async():
    """Async variant: URI path fields become x-goog-request-params metadata."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.CancelTrainingPipelineRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.cancel_training_pipeline), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.cancel_training_pipeline(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_cancel_training_pipeline_flattened():
    """Flattened keyword args must be copied into the request object."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.cancel_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.cancel_training_pipeline(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_cancel_training_pipeline_flattened_error():
    """Mixing a request object with flattened args must raise ValueError."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.cancel_training_pipeline(
            pipeline_service.CancelTrainingPipelineRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_cancel_training_pipeline_flattened_async():
    """Async flattened call must copy ``name`` into the request object."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.cancel_training_pipeline), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (A bare
        # ``call.return_value = None`` previously assigned here was dead code —
        # it was immediately overwritten by this awaitable, so it was removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.cancel_training_pipeline(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_cancel_training_pipeline_flattened_error_async():
    """Async client must reject mixing a request object with flattened args."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.cancel_training_pipeline(
            pipeline_service.CancelTrainingPipelineRequest(), name="name_value",
        )
@pytest.mark.parametrize(
    "request_type", [pipeline_service.CreatePipelineJobRequest, dict,]
)
def test_create_pipeline_job(request_type, transport: str = "grpc"):
    """create_pipeline_job sends the request and returns a populated PipelineJob."""
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_pipeline_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_pipeline_job.PipelineJob(
            name="name_value",
            display_name="display_name_value",
            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
            service_account="service_account_value",
            network="network_value",
        )
        response = client.create_pipeline_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.CreatePipelineJobRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_pipeline_job.PipelineJob)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
    assert response.service_account == "service_account_value"
    assert response.network == "network_value"
def test_create_pipeline_job_empty_call():
    """Calling with no arguments at all still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_pipeline_job), "__call__"
    ) as call:
        client.create_pipeline_job()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.CreatePipelineJobRequest()
@pytest.mark.asyncio
async def test_create_pipeline_job_async(
    transport: str = "grpc_asyncio",
    request_type=pipeline_service.CreatePipelineJobRequest,
):
    """Async create_pipeline_job sends the request and returns a PipelineJob."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_pipeline_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_pipeline_job.PipelineJob(
                name="name_value",
                display_name="display_name_value",
                state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
                service_account="service_account_value",
                network="network_value",
            )
        )
        response = await client.create_pipeline_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.CreatePipelineJobRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_pipeline_job.PipelineJob)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
    assert response.service_account == "service_account_value"
    assert response.network == "network_value"
@pytest.mark.asyncio
async def test_create_pipeline_job_async_from_dict():
    """Re-run the async test with a plain dict request body."""
    await test_create_pipeline_job_async(request_type=dict)
def test_create_pipeline_job_field_headers():
    """URI path fields must be propagated as x-goog-request-params metadata."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.CreatePipelineJobRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_pipeline_job), "__call__"
    ) as call:
        call.return_value = gca_pipeline_job.PipelineJob()
        client.create_pipeline_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_pipeline_job_field_headers_async():
    """Async variant: URI path fields become x-goog-request-params metadata."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.CreatePipelineJobRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_pipeline_job), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_pipeline_job.PipelineJob()
        )
        await client.create_pipeline_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_pipeline_job_flattened():
    """Verify flattened kwargs are folded into the CreatePipelineJob request."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_pipeline_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_pipeline_job.PipelineJob()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_pipeline_job(
            parent="parent_value",
            pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"),
            pipeline_job_id="pipeline_job_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].pipeline_job
        mock_val = gca_pipeline_job.PipelineJob(name="name_value")
        assert arg == mock_val
        arg = args[0].pipeline_job_id
        mock_val = "pipeline_job_id_value"
        assert arg == mock_val
def test_create_pipeline_job_flattened_error():
    """Passing a request object together with flattened fields raises ValueError."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # A populated request object and individual flattened fields are
    # mutually exclusive; the client must refuse the ambiguous call.
    conflicting_request = pipeline_service.CreatePipelineJobRequest()
    with pytest.raises(ValueError):
        client.create_pipeline_job(
            conflicting_request,
            parent="parent_value",
            pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"),
            pipeline_job_id="pipeline_job_id_value",
        )
@pytest.mark.asyncio
async def test_create_pipeline_job_flattened_async():
    """Verify flattened kwargs populate the request on the async create method."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_pipeline_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  The async
        # surface awaits the stub, so the message must be wrapped in a fake
        # awaitable call; a preceding plain-message assignment that was
        # immediately overwritten (dead code) has been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_pipeline_job.PipelineJob()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_pipeline_job(
            parent="parent_value",
            pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"),
            pipeline_job_id="pipeline_job_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].pipeline_job
        mock_val = gca_pipeline_job.PipelineJob(name="name_value")
        assert arg == mock_val
        arg = args[0].pipeline_job_id
        mock_val = "pipeline_job_id_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_pipeline_job_flattened_error_async():
    """Async client: request object plus flattened fields must raise ValueError."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request message and flattened keyword fields is
    # ambiguous, so the call must be rejected before hitting the transport.
    conflicting_request = pipeline_service.CreatePipelineJobRequest()
    with pytest.raises(ValueError):
        await client.create_pipeline_job(
            conflicting_request,
            parent="parent_value",
            pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"),
            pipeline_job_id="pipeline_job_id_value",
        )
@pytest.mark.parametrize(
    "request_type", [pipeline_service.GetPipelineJobRequest, dict,]
)
def test_get_pipeline_job(request_type, transport: str = "grpc"):
    """GetPipelineJob sends the right request type and surfaces all response fields."""
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = pipeline_job.PipelineJob(
            name="name_value",
            display_name="display_name_value",
            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
            service_account="service_account_value",
            network="network_value",
        )
        response = client.get_pipeline_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.GetPipelineJobRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pipeline_job.PipelineJob)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
    assert response.service_account == "service_account_value"
    assert response.network == "network_value"
def test_get_pipeline_job_empty_call():
    """Calling get_pipeline_job with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
        client.get_pipeline_job()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.GetPipelineJobRequest()
@pytest.mark.asyncio
async def test_get_pipeline_job_async(
    transport: str = "grpc_asyncio", request_type=pipeline_service.GetPipelineJobRequest
):
    """Async variant: GetPipelineJob round-trips the request and response fields."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            pipeline_job.PipelineJob(
                name="name_value",
                display_name="display_name_value",
                state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
                service_account="service_account_value",
                network="network_value",
            )
        )
        response = await client.get_pipeline_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.GetPipelineJobRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pipeline_job.PipelineJob)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
    assert response.service_account == "service_account_value"
    assert response.network == "network_value"
@pytest.mark.asyncio
async def test_get_pipeline_job_async_from_dict():
    """Re-run the async get test with a plain dict request to verify coercion."""
    await test_get_pipeline_job_async(request_type=dict)
def test_get_pipeline_job_field_headers():
    """Verify GetPipelineJob sends ``name`` as an x-goog-request-params header."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.GetPipelineJobRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
        call.return_value = pipeline_job.PipelineJob()
        client.get_pipeline_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_pipeline_job_field_headers_async():
    """Async variant: GetPipelineJob must send ``name`` as a routing header."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.GetPipelineJobRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            pipeline_job.PipelineJob()
        )
        await client.get_pipeline_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_pipeline_job_flattened():
    """Verify the flattened ``name`` kwarg is folded into the GetPipelineJob request."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = pipeline_job.PipelineJob()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_pipeline_job(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_pipeline_job_flattened_error():
    """Passing a request object together with a flattened field raises ValueError."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # A populated request object and a flattened field are mutually
    # exclusive; the client must refuse the ambiguous call.
    conflicting_request = pipeline_service.GetPipelineJobRequest()
    with pytest.raises(ValueError):
        client.get_pipeline_job(conflicting_request, name="name_value",)
@pytest.mark.asyncio
async def test_get_pipeline_job_flattened_async():
    """Verify the flattened ``name`` kwarg populates the async GetPipelineJob request."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
        # Designate an appropriate return value for the call.  The async
        # surface awaits the stub, so wrap the message in a fake awaitable;
        # a preceding plain-message assignment that was immediately
        # overwritten (dead code) has been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            pipeline_job.PipelineJob()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_pipeline_job(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_pipeline_job_flattened_error_async():
    """Async client: request object plus flattened ``name`` must raise ValueError."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request message and a flattened keyword field is
    # ambiguous, so the call must be rejected before hitting the transport.
    conflicting_request = pipeline_service.GetPipelineJobRequest()
    with pytest.raises(ValueError):
        await client.get_pipeline_job(conflicting_request, name="name_value",)
@pytest.mark.parametrize(
    "request_type", [pipeline_service.ListPipelineJobsRequest, dict,]
)
def test_list_pipeline_jobs(request_type, transport: str = "grpc"):
    """ListPipelineJobs returns a pager exposing the response's next_page_token."""
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pipeline_jobs), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = pipeline_service.ListPipelineJobsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_pipeline_jobs(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.ListPipelineJobsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListPipelineJobsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_pipeline_jobs_empty_call():
    """Calling list_pipeline_jobs with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pipeline_jobs), "__call__"
    ) as call:
        client.list_pipeline_jobs()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.ListPipelineJobsRequest()
@pytest.mark.asyncio
async def test_list_pipeline_jobs_async(
    transport: str = "grpc_asyncio",
    request_type=pipeline_service.ListPipelineJobsRequest,
):
    """Async variant: ListPipelineJobs returns an async pager with the page token."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pipeline_jobs), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            pipeline_service.ListPipelineJobsResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_pipeline_jobs(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.ListPipelineJobsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListPipelineJobsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_pipeline_jobs_async_from_dict():
    """Re-run the async list test with a plain dict request to verify coercion."""
    await test_list_pipeline_jobs_async(request_type=dict)
def test_list_pipeline_jobs_field_headers():
    """Verify ListPipelineJobs sends ``parent`` as an x-goog-request-params header."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.ListPipelineJobsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pipeline_jobs), "__call__"
    ) as call:
        call.return_value = pipeline_service.ListPipelineJobsResponse()
        client.list_pipeline_jobs(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_pipeline_jobs_field_headers_async():
    """Async variant: ListPipelineJobs must send ``parent`` as a routing header."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.ListPipelineJobsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pipeline_jobs), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            pipeline_service.ListPipelineJobsResponse()
        )
        await client.list_pipeline_jobs(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_pipeline_jobs_flattened():
    """Verify the flattened ``parent`` kwarg is folded into the list request."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pipeline_jobs), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = pipeline_service.ListPipelineJobsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_pipeline_jobs(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_pipeline_jobs_flattened_error():
    """Passing a request object together with a flattened field raises ValueError."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # A populated request object and a flattened field are mutually
    # exclusive; the client must refuse the ambiguous call.
    conflicting_request = pipeline_service.ListPipelineJobsRequest()
    with pytest.raises(ValueError):
        client.list_pipeline_jobs(conflicting_request, parent="parent_value",)
@pytest.mark.asyncio
async def test_list_pipeline_jobs_flattened_async():
    """Verify the flattened ``parent`` kwarg populates the async list request."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pipeline_jobs), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  The async
        # surface awaits the stub, so wrap the response in a fake awaitable;
        # a preceding plain-response assignment that was immediately
        # overwritten (dead code) has been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            pipeline_service.ListPipelineJobsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_pipeline_jobs(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_pipeline_jobs_flattened_error_async():
    """Async client: request object plus flattened ``parent`` must raise ValueError."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request message and a flattened keyword field is
    # ambiguous, so the call must be rejected before hitting the transport.
    conflicting_request = pipeline_service.ListPipelineJobsRequest()
    with pytest.raises(ValueError):
        await client.list_pipeline_jobs(conflicting_request, parent="parent_value",)
def test_list_pipeline_jobs_pager(transport_name: str = "grpc"):
    """The pager transparently iterates items across pages (3 + 0 + 1 + 2 = 6)."""
    client = PipelineServiceClient(
        # BUG FIX: AnonymousCredentials was passed as the class itself
        # (missing parentheses); instantiate it like every other test here.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pipeline_jobs), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[
                    pipeline_job.PipelineJob(),
                    pipeline_job.PipelineJob(),
                    pipeline_job.PipelineJob(),
                ],
                next_page_token="abc",
            ),
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[], next_page_token="def",
            ),
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi",
            ),
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),],
            ),
            RuntimeError,
        )
        # The pager must carry routing metadata derived from the (empty)
        # parent field on every page request it issues.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_pipeline_jobs(request={})
        assert pager._metadata == metadata
        # Iterating the pager flattens all pages into a single stream.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, pipeline_job.PipelineJob) for i in results)
def test_list_pipeline_jobs_pages(transport_name: str = "grpc"):
    """Page-level iteration exposes each raw page's next_page_token in order."""
    client = PipelineServiceClient(
        # BUG FIX: AnonymousCredentials was passed as the class itself
        # (missing parentheses); instantiate it like every other test here.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pipeline_jobs), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[
                    pipeline_job.PipelineJob(),
                    pipeline_job.PipelineJob(),
                    pipeline_job.PipelineJob(),
                ],
                next_page_token="abc",
            ),
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[], next_page_token="def",
            ),
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi",
            ),
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_pipeline_jobs(request={}).pages)
        # The last page has no next_page_token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_pipeline_jobs_async_pager():
    """The async pager flattens items across all pages (3 + 0 + 1 + 2 = 6)."""
    client = PipelineServiceAsyncClient(
        # BUG FIX: AnonymousCredentials was passed as the class itself
        # (missing parentheses); instantiate it like every other test here.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pipeline_jobs),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[
                    pipeline_job.PipelineJob(),
                    pipeline_job.PipelineJob(),
                    pipeline_job.PipelineJob(),
                ],
                next_page_token="abc",
            ),
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[], next_page_token="def",
            ),
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi",
            ),
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_pipeline_jobs(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # All items from every page are surfaced through a single iteration.
        assert len(responses) == 6
        assert all(isinstance(i, pipeline_job.PipelineJob) for i in responses)
@pytest.mark.asyncio
async def test_list_pipeline_jobs_async_pages():
    """Async page-level iteration yields each raw page's next_page_token in order."""
    client = PipelineServiceAsyncClient(
        # BUG FIX: AnonymousCredentials was passed as the class itself
        # (missing parentheses); instantiate it like every other test here.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pipeline_jobs),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[
                    pipeline_job.PipelineJob(),
                    pipeline_job.PipelineJob(),
                    pipeline_job.PipelineJob(),
                ],
                next_page_token="abc",
            ),
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[], next_page_token="def",
            ),
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi",
            ),
            pipeline_service.ListPipelineJobsResponse(
                pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_pipeline_jobs(request={})).pages:
            pages.append(page_)
        # The last page has no next_page_token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [pipeline_service.DeletePipelineJobRequest, dict,]
)
def test_delete_pipeline_job(request_type, transport: str = "grpc"):
    """DeletePipelineJob issues the right request and returns a long-running future."""
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_pipeline_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.delete_pipeline_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.DeletePipelineJobRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_delete_pipeline_job_empty_call():
    """Calling delete_pipeline_job with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_pipeline_job), "__call__"
    ) as call:
        client.delete_pipeline_job()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.DeletePipelineJobRequest()
@pytest.mark.asyncio
async def test_delete_pipeline_job_async(
    transport: str = "grpc_asyncio",
    request_type=pipeline_service.DeletePipelineJobRequest,
):
    """Async variant: DeletePipelineJob returns a long-running operation future."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_pipeline_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.delete_pipeline_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.DeletePipelineJobRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_pipeline_job_async_from_dict():
    """Re-run the async delete test with a plain dict request to verify coercion."""
    await test_delete_pipeline_job_async(request_type=dict)
def test_delete_pipeline_job_field_headers():
    """Verify DeletePipelineJob sends ``name`` as an x-goog-request-params header."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.DeletePipelineJobRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_pipeline_job), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.delete_pipeline_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_pipeline_job_field_headers_async():
    """Async variant: DeletePipelineJob must send ``name`` as a routing header."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.DeletePipelineJobRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_pipeline_job), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.delete_pipeline_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_pipeline_job_flattened():
    """Verify the flattened ``name`` kwarg is folded into the delete request."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_pipeline_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_pipeline_job(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_pipeline_job_flattened_error():
    """Passing a request object together with a flattened field raises ValueError."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # A populated request object and a flattened field are mutually
    # exclusive; the client must refuse the ambiguous call.
    conflicting_request = pipeline_service.DeletePipelineJobRequest()
    with pytest.raises(ValueError):
        client.delete_pipeline_job(conflicting_request, name="name_value",)
@pytest.mark.asyncio
async def test_delete_pipeline_job_flattened_async():
    """Verify the flattened ``name`` kwarg populates the async delete request."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_pipeline_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  The async
        # surface awaits the stub, so wrap the operation in a fake awaitable;
        # a preceding plain-operation assignment that was immediately
        # overwritten (dead code) has been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_pipeline_job(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_pipeline_job_flattened_error_async():
    """Async client: request object plus flattened ``name`` must raise ValueError."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying both a request message and a flattened keyword field is
    # ambiguous, so the call must be rejected before hitting the transport.
    conflicting_request = pipeline_service.DeletePipelineJobRequest()
    with pytest.raises(ValueError):
        await client.delete_pipeline_job(conflicting_request, name="name_value",)
@pytest.mark.parametrize(
    "request_type", [pipeline_service.CancelPipelineJobRequest, dict,]
)
def test_cancel_pipeline_job(request_type, transport: str = "grpc"):
    """CancelPipelineJob issues the right request and returns None (empty response)."""
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.cancel_pipeline_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.cancel_pipeline_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.CancelPipelineJobRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_cancel_pipeline_job_empty_call():
    """Calling cancel_pipeline_job with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.cancel_pipeline_job), "__call__"
    ) as call:
        client.cancel_pipeline_job()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.CancelPipelineJobRequest()
@pytest.mark.asyncio
async def test_cancel_pipeline_job_async(
    transport: str = "grpc_asyncio",
    request_type=pipeline_service.CancelPipelineJobRequest,
):
    """Async variant: CancelPipelineJob returns None (empty response)."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.cancel_pipeline_job), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.cancel_pipeline_job(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.CancelPipelineJobRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_cancel_pipeline_job_async_from_dict():
await test_cancel_pipeline_job_async(request_type=dict)
def test_cancel_pipeline_job_field_headers():
    """The routed resource name is mirrored into x-goog-request-params."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI must be sent as a field
    # header, so give the routed field a non-empty value.
    request = pipeline_service.CancelPipelineJobRequest()
    request.name = "name/value"
    with mock.patch.object(
        type(client.transport.cancel_pipeline_job), "__call__"
    ) as mocked_rpc:
        mocked_rpc.return_value = None
        client.cancel_pipeline_job(request)
        # Exactly one call, carrying the original request object.
        assert len(mocked_rpc.mock_calls) == 1
        _, call_args, call_kwargs = mocked_rpc.mock_calls[0]
        assert call_args[0] == request
        # The routing header was attached to the call metadata.
        assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]


@pytest.mark.asyncio
async def test_cancel_pipeline_job_field_headers_async():
    """Async flavor: routed field also lands in x-goog-request-params."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = pipeline_service.CancelPipelineJobRequest()
    request.name = "name/value"
    with mock.patch.object(
        type(client.transport.cancel_pipeline_job), "__call__"
    ) as mocked_rpc:
        mocked_rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.cancel_pipeline_job(request)
        assert len(mocked_rpc.mock_calls)
        _, call_args, call_kwargs = mocked_rpc.mock_calls[0]
        assert call_args[0] == request
        assert ("x-goog-request-params", "name=name/value",) in call_kwargs["metadata"]
def test_cancel_pipeline_job_flattened():
    """Flattened keyword arguments are folded into the outgoing request."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.cancel_pipeline_job), "__call__"
    ) as call:
        call.return_value = None
        # Call the method with a truthy value for each flattened field.
        client.cancel_pipeline_job(name="name_value",)
        # The keyword argument must appear on the request that went out.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val


def test_cancel_pipeline_job_flattened_error():
    """Request object plus flattened fields is ambiguous and must raise."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.cancel_pipeline_job(
            pipeline_service.CancelPipelineJobRequest(), name="name_value",
        )


@pytest.mark.asyncio
async def test_cancel_pipeline_job_flattened_async():
    """Async flavor of the flattened-argument test."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.cancel_pipeline_job), "__call__"
    ) as call:
        # Fix: the original assigned ``call.return_value = None`` and then
        # immediately overwrote it on the next line; the dead assignment is
        # removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field.
        await client.cancel_pipeline_job(name="name_value",)
        # The keyword argument must appear on the request that went out.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val


@pytest.mark.asyncio
async def test_cancel_pipeline_job_flattened_error_async():
    """Async: request object plus flattened fields must raise ValueError."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.cancel_pipeline_job(
            pipeline_service.CancelPipelineJobRequest(), name="name_value",
        )
def test_credentials_transport_error():
    """Mutually exclusive constructor arguments must raise ValueError."""
    # credentials + transport instance
    channel_transport = transports.PipelineServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        PipelineServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=channel_transport,
        )
    # credentials file + transport instance
    channel_transport = transports.PipelineServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        PipelineServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=channel_transport,
        )
    # api_key + transport instance
    channel_transport = transports.PipelineServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    opts = client_options.ClientOptions()
    opts.api_key = "api_key"
    with pytest.raises(ValueError):
        PipelineServiceClient(client_options=opts, transport=channel_transport,)
    # api_key + credentials
    opts = mock.Mock()
    opts.api_key = "api_key"
    with pytest.raises(ValueError):
        PipelineServiceClient(
            client_options=opts, credentials=ga_credentials.AnonymousCredentials()
        )
    # scopes + transport instance
    channel_transport = transports.PipelineServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        PipelineServiceClient(
            client_options={"scopes": ["1", "2"]}, transport=channel_transport,
        )
def test_transport_instance():
    """A client accepts and keeps a pre-built transport instance."""
    custom_transport = transports.PipelineServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = PipelineServiceClient(transport=custom_transport)
    assert client.transport is custom_transport


def test_transport_get_channel():
    """Both sync and async gRPC transports expose a usable channel."""
    for transport_cls in (
        transports.PipelineServiceGrpcTransport,
        transports.PipelineServiceGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.PipelineServiceGrpcTransport,
        transports.PipelineServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials."""
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()


def test_transport_grpc_default():
    """gRPC is the default transport for the sync client."""
    client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    assert isinstance(client.transport, transports.PipelineServiceGrpcTransport,)
def test_pipeline_service_base_transport_error():
    """Supplying credentials and credentials_file together is rejected."""
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.PipelineServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )


def test_pipeline_service_base_transport():
    """Every RPC on the abstract base transport raises NotImplementedError."""
    with mock.patch(
        "google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.PipelineServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every generated method should blindly raise NotImplementedError.
    rpc_names = (
        "create_training_pipeline",
        "get_training_pipeline",
        "list_training_pipelines",
        "delete_training_pipeline",
        "cancel_training_pipeline",
        "create_pipeline_job",
        "get_pipeline_job",
        "list_pipeline_jobs",
        "delete_pipeline_job",
        "cancel_pipeline_job",
    )
    for rpc_name in rpc_names:
        with pytest.raises(NotImplementedError):
            getattr(transport, rpc_name)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
    # The LRO operations client property is likewise abstract.
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_pipeline_service_base_transport_with_credentials_file():
    """A credentials file is loaded with the default cloud-platform scope."""
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.PipelineServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )


def test_pipeline_service_base_transport_with_adc():
    """Without credentials or a file, ADC is consulted."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.PipelineServiceTransport()
        adc.assert_called_once()


def test_pipeline_service_auth_adc():
    """The client itself also falls back to ADC with the default scope."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        PipelineServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.PipelineServiceGrpcTransport,
        transports.PipelineServiceGrpcAsyncIOTransport,
    ],
)
def test_pipeline_service_transport_auth_adc(transport_class):
    """Concrete transports forward scopes and quota project through to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # Caller-supplied scopes/quota project must be forwarded verbatim,
        # with the service's cloud-platform default scope attached.
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.PipelineServiceGrpcTransport, grpc_helpers),
        (transports.PipelineServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_pipeline_service_transport_create_channel(transport_class, grpc_helpers):
    """create_channel dials the service host with all parameters threaded through."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        # Every dialing parameter -- host, credentials, scopes, message-size
        # options -- must reach grpc_helpers.create_channel unchanged.
        create_channel.assert_called_with(
            "aiplatform.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="aiplatform.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.PipelineServiceGrpcTransport,
        transports.PipelineServiceGrpcAsyncIOTransport,
    ],
)
def test_pipeline_service_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS channel setup honors both credential-provisioning styles."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        # The explicit SSL credentials object must be what dials the channel.
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            # The cert/key pair produced by the callback must feed grpc's SSL
            # credential factory.
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_pipeline_service_host_no_port():
    """An endpoint without a port gets the default :443 appended."""
    opts = client_options.ClientOptions(api_endpoint="aiplatform.googleapis.com")
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=opts,
    )
    assert client.transport._host == "aiplatform.googleapis.com:443"


def test_pipeline_service_host_with_port():
    """An explicit port in the endpoint is preserved as-is."""
    opts = client_options.ClientOptions(api_endpoint="aiplatform.googleapis.com:8000")
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=opts,
    )
    assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_pipeline_service_grpc_transport_channel():
    """A pre-built sync channel supplied by the caller is used verbatim."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.PipelineServiceGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare against None with ``is`` (PEP 8), not ``==``.
    assert transport._ssl_channel_credentials is None


def test_pipeline_service_grpc_asyncio_transport_channel():
    """A pre-built asyncio channel supplied by the caller is used verbatim."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.PipelineServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: ``is None`` instead of ``== None``.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.PipelineServiceGrpcTransport,
        transports.PipelineServiceGrpcAsyncIOTransport,
    ],
)
def test_pipeline_service_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated mTLS args: an explicit client_cert_source builds the SSL
    credentials and dials the mTLS endpoint."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # Using the deprecated arguments must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # SSL credentials are built from the callback's cert/key pair
            # (presumably client_cert_source_callback, defined earlier in this
            # test module, returns b"cert bytes"/b"key bytes" -- see assertion).
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            # The channel is dialed against the mTLS endpoint on port 443.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.PipelineServiceGrpcTransport,
        transports.PipelineServiceGrpcAsyncIOTransport,
    ],
)
def test_pipeline_service_transport_channel_mtls_with_adc(transport_class):
    """Deprecated mTLS path: without a cert callback, ADC-provided SSL
    credentials are used to dial the mTLS endpoint."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # The deprecated api_mtls_endpoint argument must warn.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            # The channel is dialed against the mTLS endpoint with the SSL
            # credentials supplied by google.auth's SslCredentials helper.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_pipeline_service_grpc_lro_client():
    """The sync transport lazily builds and caches an operations client."""
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client.transport
    # First access yields an api-core operations client ...
    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
    # ... and subsequent accesses return the exact same cached object.
    assert transport.operations_client is transport.operations_client


def test_pipeline_service_grpc_lro_async_client():
    """The async transport exposes the async operations client, also cached."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client.transport
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
    assert transport.operations_client is transport.operations_client
def test_artifact_path():
    """artifact_path renders the canonical artifact resource name."""
    rendered = PipelineServiceClient.artifact_path("squid", "clam", "whelk", "octopus")
    assert rendered == (
        "projects/squid/locations/clam/metadataStores/whelk/artifacts/octopus"
    )


def test_parse_artifact_path():
    """parse_artifact_path is the inverse of artifact_path."""
    fields = {
        "project": "oyster",
        "location": "nudibranch",
        "metadata_store": "cuttlefish",
        "artifact": "mussel",
    }
    path = PipelineServiceClient.artifact_path(**fields)
    # Round-trip: parsing the rendered path recovers the original fields.
    assert PipelineServiceClient.parse_artifact_path(path) == fields
def test_context_path():
    """context_path renders the canonical context resource name."""
    rendered = PipelineServiceClient.context_path(
        "winkle", "nautilus", "scallop", "abalone"
    )
    assert rendered == (
        "projects/winkle/locations/nautilus/metadataStores/scallop/contexts/abalone"
    )


def test_parse_context_path():
    """parse_context_path is the inverse of context_path."""
    fields = {
        "project": "squid",
        "location": "clam",
        "metadata_store": "whelk",
        "context": "octopus",
    }
    path = PipelineServiceClient.context_path(**fields)
    assert PipelineServiceClient.parse_context_path(path) == fields
def test_custom_job_path():
    """custom_job_path renders the canonical custom job resource name."""
    rendered = PipelineServiceClient.custom_job_path("oyster", "nudibranch", "cuttlefish")
    assert rendered == "projects/oyster/locations/nudibranch/customJobs/cuttlefish"


def test_parse_custom_job_path():
    """parse_custom_job_path is the inverse of custom_job_path."""
    fields = {
        "project": "mussel",
        "location": "winkle",
        "custom_job": "nautilus",
    }
    path = PipelineServiceClient.custom_job_path(**fields)
    assert PipelineServiceClient.parse_custom_job_path(path) == fields
def test_endpoint_path():
    """endpoint_path renders the canonical endpoint resource name."""
    rendered = PipelineServiceClient.endpoint_path("scallop", "abalone", "squid")
    assert rendered == "projects/scallop/locations/abalone/endpoints/squid"


def test_parse_endpoint_path():
    """parse_endpoint_path is the inverse of endpoint_path."""
    fields = {
        "project": "clam",
        "location": "whelk",
        "endpoint": "octopus",
    }
    path = PipelineServiceClient.endpoint_path(**fields)
    assert PipelineServiceClient.parse_endpoint_path(path) == fields
def test_execution_path():
    """execution_path renders the canonical execution resource name."""
    rendered = PipelineServiceClient.execution_path(
        "oyster", "nudibranch", "cuttlefish", "mussel"
    )
    assert rendered == (
        "projects/oyster/locations/nudibranch/metadataStores/cuttlefish/executions/mussel"
    )


def test_parse_execution_path():
    """parse_execution_path is the inverse of execution_path."""
    fields = {
        "project": "winkle",
        "location": "nautilus",
        "metadata_store": "scallop",
        "execution": "abalone",
    }
    path = PipelineServiceClient.execution_path(**fields)
    assert PipelineServiceClient.parse_execution_path(path) == fields
def test_model_path():
    """model_path renders the canonical model resource name."""
    rendered = PipelineServiceClient.model_path("squid", "clam", "whelk")
    assert rendered == "projects/squid/locations/clam/models/whelk"


def test_parse_model_path():
    """parse_model_path is the inverse of model_path."""
    fields = {
        "project": "octopus",
        "location": "oyster",
        "model": "nudibranch",
    }
    path = PipelineServiceClient.model_path(**fields)
    assert PipelineServiceClient.parse_model_path(path) == fields
def test_network_path():
    """network_path renders the global network resource name."""
    rendered = PipelineServiceClient.network_path("cuttlefish", "mussel")
    assert rendered == "projects/cuttlefish/global/networks/mussel"


def test_parse_network_path():
    """parse_network_path is the inverse of network_path."""
    fields = {
        "project": "winkle",
        "network": "nautilus",
    }
    path = PipelineServiceClient.network_path(**fields)
    assert PipelineServiceClient.parse_network_path(path) == fields
def test_pipeline_job_path():
    """pipeline_job_path renders the canonical pipeline job resource name."""
    rendered = PipelineServiceClient.pipeline_job_path("scallop", "abalone", "squid")
    assert rendered == "projects/scallop/locations/abalone/pipelineJobs/squid"


def test_parse_pipeline_job_path():
    """parse_pipeline_job_path is the inverse of pipeline_job_path."""
    fields = {
        "project": "clam",
        "location": "whelk",
        "pipeline_job": "octopus",
    }
    path = PipelineServiceClient.pipeline_job_path(**fields)
    assert PipelineServiceClient.parse_pipeline_job_path(path) == fields
def test_training_pipeline_path():
    """training_pipeline_path renders the training pipeline resource name."""
    rendered = PipelineServiceClient.training_pipeline_path(
        "oyster", "nudibranch", "cuttlefish"
    )
    assert rendered == (
        "projects/oyster/locations/nudibranch/trainingPipelines/cuttlefish"
    )


def test_parse_training_pipeline_path():
    """parse_training_pipeline_path is the inverse of training_pipeline_path."""
    fields = {
        "project": "mussel",
        "location": "winkle",
        "training_pipeline": "nautilus",
    }
    path = PipelineServiceClient.training_pipeline_path(**fields)
    assert PipelineServiceClient.parse_training_pipeline_path(path) == fields
def test_common_billing_account_path():
    """common_billing_account_path renders billingAccounts/{id}."""
    rendered = PipelineServiceClient.common_billing_account_path("scallop")
    assert rendered == "billingAccounts/scallop"


def test_parse_common_billing_account_path():
    """parse_common_billing_account_path inverts the render helper."""
    fields = {
        "billing_account": "abalone",
    }
    path = PipelineServiceClient.common_billing_account_path(**fields)
    assert PipelineServiceClient.parse_common_billing_account_path(path) == fields
def test_common_folder_path():
    """common_folder_path renders folders/{id}."""
    rendered = PipelineServiceClient.common_folder_path("squid")
    assert rendered == "folders/squid"


def test_parse_common_folder_path():
    """parse_common_folder_path inverts the render helper."""
    fields = {
        "folder": "clam",
    }
    path = PipelineServiceClient.common_folder_path(**fields)
    assert PipelineServiceClient.parse_common_folder_path(path) == fields
def test_common_organization_path():
    """common_organization_path renders organizations/{id}."""
    rendered = PipelineServiceClient.common_organization_path("whelk")
    assert rendered == "organizations/whelk"


def test_parse_common_organization_path():
    """parse_common_organization_path inverts the render helper."""
    fields = {
        "organization": "octopus",
    }
    path = PipelineServiceClient.common_organization_path(**fields)
    assert PipelineServiceClient.parse_common_organization_path(path) == fields
def test_common_project_path():
    """common_project_path renders projects/{id}."""
    rendered = PipelineServiceClient.common_project_path("oyster")
    assert rendered == "projects/oyster"


def test_parse_common_project_path():
    """parse_common_project_path inverts the render helper."""
    fields = {
        "project": "nudibranch",
    }
    path = PipelineServiceClient.common_project_path(**fields)
    assert PipelineServiceClient.parse_common_project_path(path) == fields
def test_common_location_path():
    """common_location_path renders projects/{p}/locations/{l}."""
    rendered = PipelineServiceClient.common_location_path("cuttlefish", "mussel")
    assert rendered == "projects/cuttlefish/locations/mussel"


def test_parse_common_location_path():
    """parse_common_location_path inverts the render helper."""
    fields = {
        "project": "winkle",
        "location": "nautilus",
    }
    path = PipelineServiceClient.common_location_path(**fields)
    assert PipelineServiceClient.parse_common_location_path(path) == fields
def test_client_with_default_client_info():
    """A custom client_info reaches _prep_wrapped_messages both ways."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Via the client constructor ...
    with mock.patch.object(
        transports.PipelineServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        PipelineServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # ... and when constructing the transport class directly.
    with mock.patch.object(
        transports.PipelineServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = PipelineServiceClient.get_transport_class()
        transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """``async with client`` closes the underlying gRPC channel on exit."""
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(client.transport.grpc_channel), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Closing the sync client closes the underlying channel.

    Fix: the local mapping was named ``transports``, shadowing the
    module-level ``transports`` import; renamed to ``close_attrs``.
    """
    # Map transport name -> attribute on the transport holding the channel.
    close_attrs = {
        "grpc": "_grpc_channel",
    }
    for transport_name, close_name in close_attrs.items():
        client = PipelineServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()


def test_client_ctx():
    """The client context manager delegates close() to its transport.

    Fix: renamed the shadowing local ``transports`` list to
    ``transport_names``.
    """
    transport_names = [
        "grpc",
    ]
    for transport_name in transport_names:
        client = PipelineServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (PipelineServiceClient, transports.PipelineServiceGrpcTransport),
        (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key in client options is exchanged for API-key credentials."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        api_key_cred = mock.Mock()
        get_api_key_credentials.return_value = api_key_cred
        opts = client_options.ClientOptions()
        opts.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=opts)
            # The transport must be constructed with the exchanged credentials
            # and otherwise default settings.
            patched.assert_called_once_with(
                credentials=api_key_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| 37.922337 | 121 | 0.690348 |
54fde86faecd4cf5ef8ce222076bfffd944b657b | 12,631 | py | Python | pythontools/sockets/server.py | CrawlerCode/PythonTools | 927893b80dd803eb73edc98d905191d90eb1aede | [
"MIT"
] | null | null | null | pythontools/sockets/server.py | CrawlerCode/PythonTools | 927893b80dd803eb73edc98d905191d90eb1aede | [
"MIT"
] | null | null | null | pythontools/sockets/server.py | CrawlerCode/PythonTools | 927893b80dd803eb73edc98d905191d90eb1aede | [
"MIT"
] | null | null | null | from pythontools.core import logger, events
import socket, json, base64, traceback, math
from threading import Thread
from pythontools.dev import crypthography, dev
class Server:
    def __init__(self, password):
        """TCP socket server speaking a password-delimited JSON protocol.

        :param password: shared secret; its base64 form terminates every packet.
        """
        self.serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow fast restarts on the same address/port.
        self.serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.password = password
        self.clientSocks = []  # raw sockets of connected clients
        self.clients = []  # per-client metadata records
        # End-of-message marker: the base64-encoded password is appended to
        # every JSON packet (see the "}" + self.seq checks in start()).
        self.seq = base64.b64encode(self.password.encode('ascii')).decode("utf-8")
        self.packagePrintBlacklist = []
        # Keep-alive chatter is not worth logging.
        self.packagePrintBlacklist.append("ALIVE")
        self.packagePrintBlacklist.append("ALIVE_OK")
        self.maxClients = 10  # listen() backlog
        self.printUnsignedData = True  # log packets arriving without the marker
        self.uploadError = False
        self.eventScope = "global"
        # Optional symmetric encryption of traffic (see enableEncrypt()).
        self.enabled_encrypt = False
        self.secret_key = b''
        # Optional IP/MAC whitelisting (see enableWhitelistIp/enableWhitelistMac).
        self.enabled_whitelist_mac = False
        self.enabled_whitelist_ip = False
        self.whitelisted_ips = []
        self.whitelisted_macs = []
def enableEncrypt(self, secret_key):
self.enabled_encrypt = True
if type(secret_key) == str: secret_key = bytes(secret_key, "utf-8")
if type(secret_key) != bytes: secret_key = b''
self.secret_key = secret_key
def enableWhitelistIp(self, ips:list):
self.enabled_whitelist_ip = True
self.whitelisted_ips = ips
def enableWhitelistMac(self, macs:list):
self.enabled_whitelist_mac = True
self.whitelisted_macs = macs
def start(self, host, port):
if self.enabled_encrypt is True:
if self.secret_key == b'':
self.secret_key = crypthography.generateSecretKey()
logger.log("§8[§eSERVER§8] §aSecret-Key generated: " + self.secret_key.decode("utf-8"))
return
logger.log("§8[§eSERVER§8] §6Starting...")
try:
self.serverSocket.bind((host, port))
self.serverSocket.listen(self.maxClients)
logger.log("§8[§eSERVER§8] §aListening on §6" + str((host, port)))
except Exception as e:
logger.log("§8[§eSERVER§8] §8[§cERROR§8] §cFailed: " + str(e))
return
def clientTask(clientSocket, address):
logger.log("§8[§eSERVER§8] §aClient connected from §6" + str(address))
error = False
if self.enabled_whitelist_ip is True:
if address[0] not in self.whitelisted_ips:
error = True
logger.log("§8[§eSERVER§8] §8[§cWARNING§8] §cIp-Address §6'" + str(address[0]) + "'§c not whitelisted!")
lastData = ""
while error is False:
try:
recvData = clientSocket.recv(32768)
recvData = str(recvData, "utf-8")
if recvData != "":
if not recvData.startswith("{") and (recvData.endswith("}" + self.seq) or (lastData + recvData).endswith("}" + self.seq)):
if lastData != "":
recvData = lastData + recvData
if self.printUnsignedData:
logger.log("§8[§eSERVER§8] §8[§cWARNING§8] §cUnsigned data repaired")
elif not recvData.endswith("}" + self.seq):
lastData += recvData
if self.printUnsignedData:
logger.log("§8[§eSERVER§8] §8[§cWARNING§8] §cReceiving unsigned data: §r" + recvData)
continue
if "}" + self.seq + "{" in recvData:
recvDataList = recvData.split("}" + self.seq + "{")
recvData = "["
for i in range(len(recvDataList)):
if self.enabled_encrypt is True:
recvData += crypthography.decrypt(self.secret_key, base64.b64decode((recvDataList[i][1:] if i == 0 else recvDataList[i]).replace("}" + self.seq, "").encode('ascii'))).decode("utf-8")
if i + 1 < len(recvDataList):
recvData += ", "
else:
recvData += recvDataList[i].replace(self.seq, "")
if i + 1 < len(recvDataList):
recvData += "}, {"
recvData += "]"
lastData = ""
elif "}" + self.seq in recvData:
if self.enabled_encrypt is True:
recvData = "[" + crypthography.decrypt(self.secret_key, base64.b64decode(recvData.replace("}" + self.seq, "")[1:].encode('ascii'))).decode("utf-8") + "]"
else:
recvData = "[" + recvData.replace(self.seq, "") + "]"
lastData = ""
try:
recvData = json.loads(recvData)
except:
logger.log("§8[§eSERVER§8] §8[§cWARNING§8] §cReceiving broken data: §r" + str(recvData))
continue
for data in recvData:
if data["METHOD"] == "ALIVE":
self.sendTo(clientSocket, {"METHOD": "ALIVE_OK"})
elif data["METHOD"] == "AUTHENTICATION":
logger.log("§8[§eSERVER§8] §r[IN] " + data["METHOD"])
if data["PASSWORD"] == self.password:
if self.enabled_whitelist_mac is True:
if "MAC" not in data:
error = True
self.sendTo(clientSocket, {"METHOD": "AUTHENTICATION_FAILED"})
logger.log("§8[§eSERVER§8] §8[§cWARNING§8] §cNo MAC are given!")
elif data["MAC"] not in self.whitelisted_macs:
error = True
self.sendTo(clientSocket, {"METHOD": "AUTHENTICATION_FAILED"})
logger.log("§8[§eSERVER§8] §8[§cWARNING§8] §cMAC §6'" + str(data["MAC"]) + "'§c not whitelisted!")
if error is False:
for c in self.clients:
if c["clientID"] == data["CLIENT_ID"]:
self.sendTo(clientSocket, {"METHOD": "AUTHENTICATION_FAILED"})
error = True
break
if error is False:
client = {"clientSocket": clientSocket, "clientID": data["CLIENT_ID"], "clientType": data["CLIENT_TYPE"]}
self.clients.append(client)
self.sendTo(clientSocket, {"METHOD": "AUTHENTICATION_OK"})
logger.log("§8[§eSERVER§8] §aClient '" + data["CLIENT_ID"] + "' authenticated §6('" + data["MAC"] + "')")
events.call("ON_CLIENT_CONNECT", client, scope=self.eventScope)
else:
self.sendTo(clientSocket, {"METHOD": "AUTHENTICATION_FAILED"})
error = True
break
else:
client = self.getClient(clientSocket)
if client is not None:
if data["METHOD"] not in self.packagePrintBlacklist:
logger.log("§8[§eSERVER§8] §r[IN] " + data["METHOD"])
events.call("ON_RECEIVE", client, data, scope=self.eventScope)
else:
logger.log("§8[§eSERVER§8] §8[§cWARNING§8] §cReceiving not authenticated package: §r" + data["METHOD"])
except Exception as e:
if "Connection reset by peer" in str(e) or "Connection timed out" in str(e) or "Bad file descriptor" in str(e): break
if self.uploadError is True:
try:
link = dev.uploadToHastebin(traceback.format_exc())
logger.log("§8[§eSERVER§8] §8[§cWARNING§8] §cException: §4" + str(e) + " §r" + str(link))
except: logger.log("§8[§eSERVER§8] §8[§cWARNING§8] §cException: §4" + str(e))
else: logger.log("§8[§eSERVER§8] §8[§cWARNING§8] §cException: §4" + str(e))
break
p = True
for client in self.clients:
if client["clientSocket"] == clientSocket:
events.call("ON_CLIENT_DISCONNECT", client, scope=self.eventScope)
logger.log("§8[§eSERVER§8] §6Client '" + client["clientID"] + "' disconnected")
self.clients.remove(client)
p = False
try: self.clientSocks.remove(clientSocket)
except: pass
try: clientSocket.close()
except: pass
if p is True: logger.log("§8[§eSERVER§8] §6Client " + str(address) + " disconnected")
while True:
(client, clientAddress) = self.serverSocket.accept()
client.settimeout(120)
self.clientSocks.append(client)
Thread(target=clientTask, args=[client, clientAddress]).start()
def ON_CLIENT_CONNECT(self, function):
events.registerEvent(events.Event("ON_CLIENT_CONNECT", function, scope=self.eventScope))
def ON_CLIENT_DISCONNECT(self, function):
events.registerEvent(events.Event("ON_CLIENT_DISCONNECT", function, scope=self.eventScope))
def ON_RECEIVE(self, function):
events.registerEvent(events.Event("ON_RECEIVE", function, scope=self.eventScope))
def addPackageToPrintBlacklist(self, package):
self.packagePrintBlacklist.append(package)
def getClient(self, clientSocket):
for client in self.clients:
if client["clientSocket"] == clientSocket:
return client
return None
def sendToAll(self, message):
for sSock in self.clientSocks:
self.sendTo(sSock, message)
def sendTo(self, sock, data):
try:
send_data = json.dumps(data)
if self.enabled_encrypt is True:
send_data = "{" + base64.b64encode(crypthography.encrypt(self.secret_key, send_data)).decode('utf-8') + "}"
send_data = bytes(send_data + self.seq, "utf-8")
#if len(send_data) > 65536:
# for i in range(math.ceil(len(send_data)/65536)):
# sock.send(send_data[65536*i:][:65536])
#else:
# sock.send(send_data)
sock.send(send_data)
if data["METHOD"] not in self.packagePrintBlacklist:
logger.log("§8[§eSERVER§8] §r[OUT] " + data["METHOD"])
except Exception as e:
logger.log("§8[§eSERVER§8] §8[§cWARNING§8] §cFailed to send data: " + str(e))
if e == BrokenPipeError or "Broken pipe" in str(e):
p = True
for client in self.clients:
if client["clientSocket"] == sock:
events.call("ON_CLIENT_DISCONNECT", client, scope=self.eventScope)
logger.log("§8[§eSERVER§8] §6Client '" + client["clientID"] + "' disconnected")
self.clients.remove(client)
p = False
try: self.clientSocks.remove(sock)
except: pass
try: sock.close()
except: pass
if p is True: logger.log("§8[§eSERVER§8] §6Client disconnected")
def sendToClientID(self, clientID, data):
for client in self.clients:
if client["clientID"] == clientID:
self.sendTo(clientID["clientSocket"], data)
def close(self):
self.serverSocket.close()
logger.log("§8[§eSERVER§8] §6Closed")
| 54.2103 | 218 | 0.486976 |
e8e320fe532bd4396ce03be775051e20c8723bd1 | 2,076 | py | Python | assignment 7/matrix-machine_learning_lab/triangular.py | dhruvgairola/linearAlgebra-coursera | 20109133b9e53a7a38cbd17d8ca1fa1316bbf0d3 | [
"MIT"
] | 6 | 2015-09-18T02:07:21.000Z | 2020-04-22T17:05:11.000Z | triangular.py | tri2sing/LinearAlgebraPython | f3dde94f02f146089607eb520ebd4467becb5f9e | [
"Apache-2.0"
] | null | null | null | triangular.py | tri2sing/LinearAlgebraPython | f3dde94f02f146089607eb520ebd4467becb5f9e | [
"Apache-2.0"
] | 10 | 2015-09-05T03:54:00.000Z | 2020-04-21T12:56:40.000Z | from vec import Vec
from vecutil import zero_vec
def triangular_solve_n(rowlist, b):
'''
Solves an upper-triangular linear system.
rowlist is a nonempty list of Vecs. Let n = len(rowlist).
The domain D of all these Vecs is {0,1, ..., n-1}.
b is an n-element list or a Vec whose domain is {0,1, ..., n-1}.
The linear equations are:
rowlist[0] * x = b[0]
...
rowlist[n-1] * x = b[n-1]
The system is triangular. That means rowlist[i][j] is zero
for all i, j in {0,1, ..., n-1} such that i >j.
This procedure assumes that rowlist[j][j] != 0 for j=0,1, ..., n-1.
The procedure returns the Vec x that is the unique solution
to the linear system.
'''
D = rowlist[0].D
n = len(D)
assert D == set(range(n))
x = zero_vec(D)
for j in reversed(range(n)):
x[j] = (b[j] - rowlist[j] * x)/rowlist[j][j]
return x
def triangular_solve(rowlist, label_list, b):
    '''
    Solves an upper-triangular linear system with arbitrary column labels.

    rowlist is a nonempty list of Vecs over a common domain D; b is an
    n-element list or a Vec over {0, 1, ..., n-1} with n = len(rowlist).
    label_list orders the elements of D; the system is triangular with
    respect to that ordering (row n-1 has at most one nonzero entry, at
    the last label, row n-2 at most two, and so on).  Each pivot
    rowlist[j][label_list[j]] is assumed nonzero.

    Returns the unique solution Vec x.
    '''
    domain = rowlist[0].D
    result = zero_vec(domain)
    # Back substitution in the order given by label_list, last label first.
    for idx in range(len(domain) - 1, -1, -1):
        label = label_list[idx]
        current = rowlist[idx]
        result[label] = (b[idx] - result * current) / current[label]
    return result
| 34.6 | 71 | 0.595857 |
95a730d9baab4f457b7bbc10d9a23dea10c98802 | 3,751 | py | Python | main/binanceapi/binance_interface.py | 4RCAN3/CryptoTrader | df1f88552489fd85627bb2cd7c7f6e0ba4665ade | [
"MIT"
] | null | null | null | main/binanceapi/binance_interface.py | 4RCAN3/CryptoTrader | df1f88552489fd85627bb2cd7c7f6e0ba4665ade | [
"MIT"
] | null | null | null | main/binanceapi/binance_interface.py | 4RCAN3/CryptoTrader | df1f88552489fd85627bb2cd7c7f6e0ba4665ade | [
"MIT"
] | null | null | null | import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from tkinter import *
from config import Binance, API_LIST
from binance_script import Work
# Module-level Binance config/API object created at import time; it is never
# referenced in the visible code below — presumably used by importers or kept
# for side effects. TODO(review): confirm it is still needed.
obj = Binance()
class BinanceAPI(Tk):
    """Tkinter GUI for entering Binance API credentials and choosing an
    action (symbol enquiry, account balance, historical plot).

    Flow: __init__ -> api_input_page (credentials) -> options_menu.
    """

    def __init__(self):
        Tk.__init__(self)
        # NOTE(review): PhotoImage("binwp.png") passes the filename as the
        # image *name*, not file=..., and no reference to the image is kept,
        # so Tk may show nothing here — confirm intended.
        l = Label(self, image = PhotoImage("binwp.png"))
        l.place(x=0, y=0)
        # Tk variables holding the user's selections/credentials.
        self.apichoice = IntVar()
        self.api_key = StringVar()
        self.secret_key = StringVar()
        self.optionchoice = IntVar()
        self.dropchoice = StringVar()
        self.api_input_page()

    def api_input_page(self):
        """Build the credentials form; on Submit, validate the keys by
        constructing a Work client, then advance to options_menu."""
        apiframe = Frame(self)
        l = Label(apiframe, image = PhotoImage("binwp.png"))
        l.place(x = 0, y = 0, relwidth=1, relheight=1)
        # NOTE(review): e1/e2 are created as StringVars but rebound to Entry
        # widgets below; submit() then calls Entry.get() — works, but the
        # StringVars are dead weight. Confirm intended.
        e1 = StringVar()
        e2 = StringVar()

        def submit():
            # Read the typed credentials (Entry.get() returns the text).
            self.api_key = e1.get()
            self.secret_key = e2.get()
            try:
                # Validate credentials by constructing the API client.
                w = Work(self.api_key, self.secret_key)
            except:
                # Invalid credentials: rebuild the form and ask again.
                self.api_input_page()

        l1 = Label(apiframe, height = 1, relief = "groove", text = "API KEY", font = ("helvetica", 10, "bold"))
        l1.grid(row = 0, sticky = "NESW", padx = 5)
        l2 = Label(apiframe, height = 1, relief = "groove", text = "SECRET KEY", font = ("helvetica", 10, "bold"))
        l2.grid(row = 1, sticky = "NESW", padx = 5)
        e1 = Entry(apiframe, textvariable = self.api_key, bd = 2)
        e1.grid(row = 0, column = 2, padx = 5)
        # show="*" masks the secret key while typing.
        e2 = Entry(apiframe, textvariable = self.secret_key, bd = 2, show = "*")
        e2.grid(row = 1, column = 2, padx = 5)
        b = Button(apiframe, text = "Submit", command = lambda: [submit(), apiframe.destroy(), self.geometry("700x500"), self.options_menu()])
        b.grid(row = 2, column = 1)
        apiframe.grid(row = 0, column = 0, sticky = "NESW")
        apiframe.grid_rowconfigure(0, weight=1)
        apiframe.grid_columnconfigure(0, weight=1)

    def options_menu(self):
        """Build the action-selection page: a symbol dropdown (from API_LIST)
        plus three radio-button actions stored in optionchoice/dropchoice."""
        optsmenu = Frame(self)
        var = IntVar()       # selected radio action (1..3)
        var1 = StringVar()   # selected symbol from the dropdown
        var1.set("Choose")

        def check_radio():
            # Persist the chosen action on the instance.
            self.optionchoice = var.get()
            print(self.optionchoice)

        def check_drop():
            # Persist the chosen symbol on the instance.
            self.dropchoice = (var1.get())
            print(self.dropchoice)

        OPTIONS = API_LIST
        l = Label(optsmenu, text = "Choose a symbol you want to perform the following functions for:", relief = "groove", font = ("helvetica", 12, "bold"))
        l.grid(row = 0, sticky = W)
        w = OptionMenu(optsmenu, var1, *OPTIONS)
        w.grid(row = 1, pady = 5)
        l1 = Label(optsmenu, text = "Choose any of the following options for your selected symbol:", relief = "groove", font = ("helvetica", 12, "bold"))
        l1.grid(row = 2, sticky = W)
        o1 = Radiobutton(optsmenu, text = "Enquire about a specific cryptocurrency", variable=var, value = 1, command = check_radio, anchor="w")
        o1.grid(row = 3, sticky = W)
        o2 = Radiobutton(optsmenu, text = "Enquire about your account balance", variable = var, value = 2, command = check_radio, anchor = "w")
        o2.grid(row = 4, sticky = W)
        o3 = Radiobutton(optsmenu, text = "Plot a graph and analyse the historical data of the chosen symbol", variable = var, value = 3, command = check_radio, anchor = "w")
        o3.grid(row = 5, sticky = W)
        b = Button(optsmenu, text = "Next", command = lambda: [check_drop(), optsmenu.destroy()], anchor = "center")
        b.grid(row = 7)
        optsmenu.grid(row = 0, column = 0, sticky = "NESW")
        optsmenu.grid_rowconfigure(0, weight=1)
        optsmenu.grid_columnconfigure(0, weight=1)
aeccc9c9599e0cc21b024a729d89193620d3dbe2 | 7,248 | py | Python | numpyro/distributions/directional.py | ahoho/numpyro | 64e94e346c51a6c0c1ba51aa7b608e73513f158f | [
"Apache-2.0"
] | null | null | null | numpyro/distributions/directional.py | ahoho/numpyro | 64e94e346c51a6c0c1ba51aa7b608e73513f158f | [
"Apache-2.0"
] | null | null | null | numpyro/distributions/directional.py | ahoho/numpyro | 64e94e346c51a6c0c1ba51aa7b608e73513f158f | [
"Apache-2.0"
] | null | null | null | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import math
from jax import lax
import jax.numpy as jnp
import jax.random as random
from jax.scipy.special import erf, i0e, i1e
from numpyro.distributions import constraints
from numpyro.distributions.distribution import Distribution
from numpyro.distributions.util import is_prng_key, promote_shapes, safe_normalize, validate_sample, von_mises_centered
class VonMises(Distribution):
    # Distribution over angles on the circle, supported on [-pi, pi].
    arg_constraints = {'loc': constraints.real, 'concentration': constraints.positive}
    reparametrized_params = ['loc']
    support = constraints.interval(-math.pi, math.pi)

    def __init__(self, loc, concentration, validate_args=None):
        """ von Mises distribution for sampling directions.

        :param loc: center of distribution
        :param concentration: concentration of distribution
        """
        self.loc, self.concentration = promote_shapes(loc, concentration)
        batch_shape = lax.broadcast_shapes(jnp.shape(concentration), jnp.shape(loc))
        super(VonMises, self).__init__(batch_shape=batch_shape,
                                       validate_args=validate_args)

    def sample(self, key, sample_shape=()):
        """ Generate sample from von Mises distribution

        :param key: random number generator key
        :param sample_shape: shape of samples
        :return: samples from von Mises, wrapped into [-pi, pi)
        """
        assert is_prng_key(key)
        samples = von_mises_centered(key, self.concentration, sample_shape + self.shape())
        samples = samples + self.loc  # VM(0, concentration) -> VM(loc,concentration)
        # Wrap back onto the support interval [-pi, pi).
        samples = (samples + jnp.pi) % (2. * jnp.pi) - jnp.pi
        return samples

    @validate_sample
    def log_prob(self, value):
        # Density: exp(k*cos(value-loc)) / (2*pi*I0(k)).  Since
        # log I0(k) = log i0e(k) + k, the +k folds into the k*(cos(.) - 1)
        # term below; using the scaled Bessel i0e avoids overflow for large k.
        return -(jnp.log(2 * jnp.pi) + jnp.log(i0e(self.concentration))) + \
            self.concentration * (jnp.cos((value - self.loc) % (2 * jnp.pi)) - 1)

    @property
    def mean(self):
        """ Computes circular mean of distribution. NOTE: same as location when mapped to support [-pi, pi] """
        return jnp.broadcast_to((self.loc + jnp.pi) % (2. * jnp.pi) - jnp.pi, self.batch_shape)

    @property
    def variance(self):
        """ Computes circular variance of distribution """
        # Circular variance 1 - I1(k)/I0(k); the exponential scaling of
        # i1e/i0e cancels in the ratio.
        return jnp.broadcast_to(1. - i1e(self.concentration) / i0e(self.concentration),
                                self.batch_shape)
class ProjectedNormal(Distribution):
    """
    Projected isotropic normal distribution of arbitrary dimension.

    This distribution over directional data is qualitatively similar to the von
    Mises and von Mises-Fisher distributions, but permits tractable variational
    inference via reparametrized gradients.

    To use this distribution with autoguides and HMC, use ``handlers.reparam``
    with a :class:`~numpyro.infer.reparam.ProjectedNormalReparam`
    reparametrizer in the model, e.g.::

        @handlers.reparam(config={"direction": ProjectedNormalReparam()})
        def model():
            direction = numpyro.sample("direction",
                                       ProjectedNormal(zeros(3)))
            ...

    .. note:: This implements :meth:`log_prob` only for dimensions {2,3}.

    [1] D. Hernandez-Stumpfhauser, F.J. Breidt, M.J. van der Woerd (2017)
        "The General Projected Normal Distribution of Arbitrary Dimension:
        Modeling and Bayesian Inference"
        https://projecteuclid.org/euclid.ba/1453211962
    """
    arg_constraints = {"concentration": constraints.real_vector}
    reparametrized_params = ["concentration"]
    support = constraints.sphere

    def __init__(self, concentration, *, validate_args=None):
        assert jnp.ndim(concentration) >= 1
        self.concentration = concentration
        # Last axis of `concentration` is the event dimension (the sphere's
        # ambient dimension); everything before it is batch.
        batch_shape = concentration.shape[:-1]
        event_shape = concentration.shape[-1:]
        super().__init__(batch_shape, event_shape, validate_args=validate_args)

    @property
    def mean(self):
        """
        Note this is the mean in the sense of a centroid in the submanifold
        that minimizes expected squared geodesic distance.
        """
        return safe_normalize(self.concentration)

    @property
    def mode(self):
        # Mode and centroid-mean coincide: the normalized concentration.
        return safe_normalize(self.concentration)

    def sample(self, key, sample_shape=()):
        # Sample x ~ Normal(concentration, I) and project onto the sphere.
        shape = sample_shape + self.batch_shape + self.event_shape
        eps = random.normal(key, shape=shape)
        return safe_normalize(self.concentration + eps)

    def log_prob(self, value):
        if self._validate_args:
            event_shape = value.shape[-1:]
            if event_shape != self.event_shape:
                raise ValueError(f"Expected event shape {self.event_shape}, "
                                 f"but got {event_shape}")
            self._validate_sample(value)
        # Closed-form densities exist only on the circle (dim 2) and the
        # ordinary sphere (dim 3); other dimensions must be reparametrized.
        dim = int(self.concentration.shape[-1])
        if dim == 2:
            return _projected_normal_log_prob_2(self.concentration, value)
        if dim == 3:
            return _projected_normal_log_prob_3(self.concentration, value)
        raise NotImplementedError(
            f"ProjectedNormal.log_prob() is not implemented for dim = {dim}. "
            "Consider using handlers.reparam with ProjectedNormalReparam."
        )

    @staticmethod
    def infer_shapes(concentration):
        # `concentration` here is a *shape tuple*, split as in __init__.
        batch_shape = concentration[:-1]
        event_shape = concentration[-1:]
        return batch_shape, event_shape
def _projected_normal_log_prob_2(concentration, value):
def _dot(x, y):
return (x[..., None, :] @ y[..., None])[..., 0, 0]
# We integrate along a ray, factorizing the integrand as a product of:
# a truncated normal distribution over coordinate t parallel to the ray, and
# a univariate normal distribution over coordinate r perpendicular to the ray.
t = _dot(concentration, value)
t2 = t * t
r2 = _dot(concentration, concentration) - t2
perp_part = (-0.5) * r2 - 0.5 * math.log(2 * math.pi)
# This is the log of a definite integral, computed by mathematica:
# Integrate[x/(E^((x-t)^2/2) Sqrt[2 Pi]), {x, 0, Infinity}]
# = (t + Sqrt[2/Pi]/E^(t^2/2) + t Erf[t/Sqrt[2]])/2
para_part = jnp.log((jnp.exp((-0.5) * t2) * ((2 / math.pi) ** 0.5)
+ t * (1 + erf(t * 0.5 ** 0.5))) / 2)
return para_part + perp_part
def _projected_normal_log_prob_3(concentration, value):
def _dot(x, y):
return (x[..., None, :] @ y[..., None])[..., 0, 0]
# We integrate along a ray, factorizing the integrand as a product of:
# a truncated normal distribution over coordinate t parallel to the ray, and
# a bivariate normal distribution over coordinate r perpendicular to the ray.
t = _dot(concentration, value)
t2 = t * t
r2 = _dot(concentration, concentration) - t2
perp_part = (-0.5) * r2 - math.log(2 * math.pi)
# This is the log of a definite integral, computed by mathematica:
# Integrate[x^2/(E^((x-t)^2/2) Sqrt[2 Pi]), {x, 0, Infinity}]
# = t/(E^(t^2/2) Sqrt[2 Pi]) + ((1 + t^2) (1 + Erf[t/Sqrt[2]]))/2
para_part = jnp.log(t * jnp.exp((-0.5) * t2) / (2 * math.pi) ** 0.5
+ (1 + t2) * (1 + erf(t * 0.5 ** 0.5)) / 2)
return para_part + perp_part
| 39.824176 | 119 | 0.642936 |
78f1e26c617296b226b4e1b2302633a597f969ff | 86 | py | Python | src/_version.py | Eastern-Skill7173/SharedSnippets | 399c9d61a938788e229bc6e6f6dcd38bf5ad9c0e | [
"MIT"
] | 1 | 2022-03-18T20:29:40.000Z | 2022-03-18T20:29:40.000Z | src/_version.py | Eastern-Skill7173/SharedSnippets | 399c9d61a938788e229bc6e6f6dcd38bf5ad9c0e | [
"MIT"
] | null | null | null | src/_version.py | Eastern-Skill7173/SharedSnippets | 399c9d61a938788e229bc6e6f6dcd38bf5ad9c0e | [
"MIT"
] | null | null | null | from typing import Final
__version__: Final = "0.0.1"
__date__: Final = "2022-04-09"
| 17.2 | 30 | 0.709302 |
ed3589a865baf1c80d2914ab8d8756c340139773 | 6,485 | py | Python | pythonx/ncm2.py | roxma/cm2 | 24d77cd5206438a6670ff1cf8d1628532910e14e | [
"MIT"
] | 1,448 | 2018-06-29T02:29:20.000Z | 2022-03-29T04:16:43.000Z | pythonx/ncm2.py | roxma/cm2 | 24d77cd5206438a6670ff1cf8d1628532910e14e | [
"MIT"
] | 188 | 2018-06-29T06:30:58.000Z | 2022-03-17T11:00:57.000Z | pythonx/ncm2.py | roxma/cm2 | 24d77cd5206438a6670ff1cf8d1628532910e14e | [
"MIT"
] | 55 | 2018-07-11T13:54:21.000Z | 2022-03-16T23:32:11.000Z | import sys
import os
from importlib import import_module
import logging
import platform
import subprocess
from subprocess import Popen
from os import path
import unicodedata
from copy import deepcopy
import json
import time
__all__ = ['Ncm2Base', 'Ncm2Source', 'Popen']
if platform.system() == 'Windows':
    # Keep a reference to the original subprocess.Popen so the subclass can
    # delegate to it after the name `Popen` is rebound below.
    cls = Popen

    # redefine popen
    # On Windows, supply a STARTUPINFO with STARTF_USESHOWWINDOW so spawned
    # child processes do not flash a console window.  Callers that pass their
    # own `startupinfo` keyword are left untouched.
    class Popen(cls):

        def __init__(self, *args, **keys):
            if 'startupinfo' not in keys:
                si = subprocess.STARTUPINFO()
                si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                keys['startupinfo'] = si
            cls.__init__(self, *args, **keys)
def getLogger(name):
    """Return a logger named *name* with its level taken from the
    NVIM_PYTHON_LOG_LEVEL / NVIM_NCM2_LOG_LEVEL environment variables
    (the NCM2-specific variable wins when both are set and valid).

    Unrecognized level names fall back to logging.INFO.
    """

    def get_loglevel():
        # logging setup: map the env-var value (e.g. "DEBUG") onto the
        # corresponding logging constant; keep the previous level if the
        # attribute lookup does not yield an int.
        level = logging.INFO
        if 'NVIM_PYTHON_LOG_LEVEL' in os.environ:
            l = getattr(logging,
                        os.environ['NVIM_PYTHON_LOG_LEVEL'].strip(),
                        level)
            if isinstance(l, int):
                level = l
        if 'NVIM_NCM2_LOG_LEVEL' in os.environ:
            l = getattr(logging,
                        os.environ['NVIM_NCM2_LOG_LEVEL'].strip(),
                        level)
            if isinstance(l, int):
                level = l
        return level

    # BUG FIX: previously logging.getLogger(__name__), which ignored the
    # `name` argument and handed every caller this module's logger.
    logger = logging.getLogger(name)
    logger.setLevel(get_loglevel())
    return logger


logger = getLogger(__name__)
def matcher_get(context, opt=None):
    """Instantiate the matcher configured for *context*.

    The matcher options come from context['matcher'] unless an explicit
    *opt* dict is given; the matcher class is loaded dynamically from the
    ncm2_matcher.<name> module.
    """
    if 'matcher' in context:
        opt = opt if opt is not None else context['matcher']
    elif opt is None:
        # FIXME This is only for backword compability: older callers passed
        # the matcher options dict itself as `context`.
        opt, context = context, {}

    module = import_module('ncm2_matcher.' + opt['name'])
    # `context` is forwarded so matchers (e.g. the equal matcher) can read
    # per-context flags such as inc_match from it.
    return module.Matcher(context=context, **opt)
def matcher_opt_formalize(opt):
    """Normalize a matcher option into dict form.

    A bare string becomes {'name': <string>}; a dict (or anything else)
    is deep-copied so callers may mutate the result freely.
    """
    return {'name': opt} if type(opt) is str else deepcopy(opt)
class Ncm2Base:
    """Shared helpers for NCM2 components: matcher lookup, completion-item
    normalization, position conversion and display-width computation."""

    def __init__(self, nvim):
        # nvim: the pynvim client handle used for RPC calls by subclasses.
        self.nvim = nvim

    def matcher_opt_formalize(self, opt):
        """Delegate to the module-level matcher_opt_formalize."""
        return matcher_opt_formalize(opt)

    def matcher_get(self, context):
        """Delegate to the module-level matcher_get."""
        return matcher_get(context)

    def match_formalize(self, ctx, item):
        """Normalize a completion item (str or dict) into the full vim
        complete-item dict, tagging it with the source from *ctx*."""
        e = {}
        if type(item) is str:
            e['word'] = item
        else:
            e = deepcopy(item)
        # Force case-insensitive and exact-equal matching flags.
        e['icase'] = 1
        e['equal'] = 1
        # Ensure the optional display fields are always strings.
        if 'menu' not in e or type(e['menu']) != str:
            e['menu'] = ''
        if 'info' not in e or type(e['info']) != str:
            e['info'] = ''
        if 'abbr' not in e or type(e['abbr']) != str:
            e['abbr'] = e['word']
        if 'kind' not in e or type(e['kind']) != str:
            e['kind'] = ''
        # LanguageClient-neovim sends json-encoded user_data
        if type(e.get('user_data', None)) is str:
            try:
                e['user_data'] = json.loads(item['user_data'])
            except:
                pass
        if 'user_data' not in e or type(e['user_data']) != dict:
            e['user_data'] = {}
        ud = e['user_data']
        # Tag the item so NCM2 knows which source produced it.
        ud['source'] = ctx['source']
        ud['ncm2'] = 1
        return e

    def matches_formalize(self, ctx, matches):
        """Apply match_formalize to every element of *matches*."""
        formalized = []
        for e in matches:
            formalized.append(self.match_formalize(ctx, e))
        return formalized

    def lccol2pos(self, lnum, ccol, src):
        """
        convert lnum, ccol (1-based line/column) into a 0-based character
        offset within *src*; the +1 per line accounts for the newline.
        """
        lines = src.splitlines() or [""]
        pos = 0
        for i in range(lnum - 1):
            pos += len(lines[i]) + 1
        pos += ccol - 1
        return pos

    def pos2lccol(self, pos, src):
        """
        convert a 0-based character offset *pos* into (lnum, ccol),
        both 1-based.  Returns None implicitly if *pos* is past the end.
        """
        lines = src.splitlines() or [""]
        p = 0
        for idx, line in enumerate(lines):
            if p <= pos and p + len(line) >= pos:
                return (idx + 1, pos - p + 1)
            p += len(line) + 1

    def get_src(self, src, ctx):
        """
        Get the source code of current scope identified by the ctx object.
        """
        # NOTE(review): bufnr and changedtick are read but never used here —
        # presumably kept for interface symmetry; confirm before removing.
        bufnr = ctx['bufnr']
        changedtick = ctx['changedtick']
        scope_offset = ctx.get('scope_offset', 0)
        scope_len = ctx.get('scope_len', len(src))
        return src[scope_offset: scope_offset + scope_len]

    def update_rtp(self, rtp):
        """Append each runtimepath entry's pythonx/ and python3/ folders to
        sys.path so source plugins can be imported."""
        for ele in rtp.split(','):
            pyx = path.join(ele, 'pythonx')
            if pyx not in sys.path:
                sys.path.append(pyx)
            py3 = path.join(ele, 'python3')
            if py3 not in sys.path:
                sys.path.append(py3)

    def strdisplaywidth(self, s):
        """Return the display width of *s* in screen cells, using Unicode
        east-asian-width classes (NFC-normalized first)."""

        def get_char_display_width(unicode_str):
            r = unicodedata.east_asian_width(unicode_str)
            # NOTE(review): 'F' (Fullwidth) maps to 1 here although fullwidth
            # characters conventionally occupy 2 cells, and the 'A' branch's
            # comment says "go with 2" but returns 1 — confirm intended.
            if r == "F":    # Fullwidth
                return 1
            elif r == "H":  # Half-width
                return 1
            elif r == "W":  # Wide
                return 2
            elif r == "Na":  # Narrow
                return 1
            elif r == "A":  # Ambiguous, go with 2
                return 1
            elif r == "N":  # Neutral
                return 1
            else:
                return 1

        s = unicodedata.normalize('NFC', s)
        w = 0
        for c in s:
            w += get_char_display_width(c)
        return w
class Ncm2Source(Ncm2Base):
    """Base class for NCM2 completion sources.  Wraps the subclass's
    on_complete handler so stale completion contexts are dropped early."""

    def __init__(self, nvim):
        Ncm2Base.__init__(self, nvim)

        # add lazy_check_context to on_complete method:
        # capture the subclass's bound on_complete and replace the attribute
        # with a guard that skips dated contexts before delegating.
        on_complete_impl = self.on_complete

        def on_complete(context, *args):
            if not self.lazy_check_context(context):
                logger.info('on_complete lazy_check_context failed')
                return
            on_complete_impl(context, *args)

        self.on_complete = on_complete
        logger.debug('on_complete is wrapped')

    def lazy_check_context(self, context):
        """Return False when *context* is already known or found to be stale."""
        if context.get('dated', 0):
            return False
        # only checks when we receives a context that seems old
        # (older than 0.5s); otherwise trust it without an RPC round-trip.
        now = time.time()
        if now >= context['time'] + 0.5:
            return not self.nvim.call('ncm2#complete_context_dated', context)
        else:
            return True

    def complete(self, ctx, startccol, matches, refresh=False):
        """Send completion *matches* starting at *startccol* back to vim
        (asynchronously via ncm2#complete)."""
        self.nvim.call('ncm2#complete', ctx, startccol,
                       matches, refresh, async_=True)
18c8944509a5fea2d53f20be05ca278db2b0f884 | 7,973 | py | Python | docsrc/conf.py | kaschbacher/bac | 858ebe1bcd61dd6a73431bb651d1b3efa447a8c6 | [
"RSA-MD"
] | 1 | 2021-02-14T04:11:58.000Z | 2021-02-14T04:11:58.000Z | docsrc/conf.py | kaschbacher/bac | 858ebe1bcd61dd6a73431bb651d1b3efa447a8c6 | [
"RSA-MD"
] | 1 | 2021-07-12T10:37:55.000Z | 2021-07-12T10:37:55.000Z | docsrc/conf.py | kaschbacher/bac | 858ebe1bcd61dd6a73431bb651d1b3efa447a8c6 | [
"RSA-MD"
] | null | null | null | # -*- coding: utf-8 -*-
#
# bac documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Make the project root importable so autodoc can find the package.
sys.path.insert(0, os.path.abspath(".."))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.coverage", "sphinx.ext.napoleon", "sphinx.ext.mathjax"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix of source filenames.
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
# NOTE(review): Sphinx conventionally also defines `copyright` (and `author`)
# here; neither is set in this file — confirm the omission is intended.
project = "bac"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]

# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = "nature"

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = "bacdoc"


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    (
        "index",
        "bac.tex",
        "bac Documentation",
        "Kirstin Aschbacher",
        "manual",
    )
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        "index",
        "bac",
        "bac Documentation",
        ["Kirstin Aschbacher"],
        1,
    )
]

# If true, show URL addresses after external links.
# man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        "index",
        "bac",
        "bac Documentation",
        "Kirstin Aschbacher",
        "bac",
        "Machine Learning Prediction of High BAC Levels with Smartbreathlyzer Data",
        "Miscellaneous",
    )
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.023346 | 103 | 0.695598 |
00bb4ab2f8969b133c74b6bed6d3f54a256ea2bd | 842 | py | Python | Masters/urls.py | adithyanps/netprofit-django | 7ba87f054d09a201352635bb6cf8d0112208609e | [
"MIT"
] | null | null | null | Masters/urls.py | adithyanps/netprofit-django | 7ba87f054d09a201352635bb6cf8d0112208609e | [
"MIT"
] | null | null | null | Masters/urls.py | adithyanps/netprofit-django | 7ba87f054d09a201352635bb6cf8d0112208609e | [
"MIT"
] | null | null | null | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import (
PartnerViewSet,
BranchViewset,
AreaViewSet,
ProductViewset,
ProductCategoryViewSet,
AccountViewset,
AccountDefaultViewSet,
SerialNumberViewSet
)
# URL prefix -> viewset pairs for the masters app. Registration order is
# preserved exactly as before, since it determines the router's registry order.
_ROUTE_TABLE = (
    ('partner', PartnerViewSet),
    ('branch', BranchViewset),
    ('area', AreaViewSet),
    ('product-category', ProductCategoryViewSet),
    ('product', ProductViewset),
    ('account', AccountViewset),
    ('accountDefault', AccountDefaultViewSet),
    ('serial-number', SerialNumberViewSet),
)

router = DefaultRouter()
for prefix, viewset in _ROUTE_TABLE:
    router.register(prefix, viewset)

app_name = 'masters'

urlpatterns = [
    path('', include(router.urls)),
]
| 28.066667 | 59 | 0.707838 |
f3026111cdcbfe252742f0b23389d194587a2c95 | 1,872 | py | Python | contrib/PyTorch/Official/cv/image_classification/Xception_ID1777_for_PyTorch/pthtar2onx.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | contrib/PyTorch/Official/cv/image_classification/Xception_ID1777_for_PyTorch/pthtar2onx.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | contrib/PyTorch/Official/cv/image_classification/Xception_ID1777_for_PyTorch/pthtar2onx.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import torch
from xception import xception
import torch.onnx
import argparse
from collections import OrderedDict
# Command-line interface: the only option is the checkpoint (.pth/.pth.tar)
# to convert to ONNX.
parser = argparse.ArgumentParser(description='xception')
parser.add_argument('--model-path', default='', type=str, metavar='PATH',
                    help='model path')
def proc_node_module(checkpoint, AttrName):
    """Strip the ``module.`` prefix from every key of ``checkpoint[AttrName]``.

    Checkpoints saved from a ``DataParallel``-wrapped model carry keys like
    ``module.conv.weight``; loading them into a bare model requires the
    prefix to be removed.

    Args:
        checkpoint: dict-like checkpoint (e.g. the result of ``torch.load``).
        AttrName: key whose value is the state dict to clean
            (typically ``'state_dict'``).

    Returns:
        collections.OrderedDict: the cleaned state dict, preserving the
        original key order.
    """
    prefix = "module."
    new_state_dict = OrderedDict()
    for k, v in checkpoint[AttrName].items():
        # Drop the wrapper prefix when present; keep other keys untouched.
        name = k[len(prefix):] if k.startswith(prefix) else k
        new_state_dict[name] = v
    return new_state_dict
def convert():
    """Load an Xception checkpoint named on the CLI and export it to ONNX.

    Reads ``--model-path``, strips any DataParallel ``module.`` prefixes,
    and writes ``xception_npu_16.onnx`` for a fixed batch of 16 images.
    """
    opts = parser.parse_args()
    ckpt = torch.load(opts.model_path, map_location='cpu')
    ckpt['state_dict'] = proc_node_module(ckpt, 'state_dict')

    net = xception()
    net.load_state_dict(ckpt['state_dict'])
    net.eval()
    print(net)

    # Fixed export shape: batch 16 of 3x299x299 images (Xception input size).
    dummy = torch.randn(16, 3, 299, 299)
    torch.onnx.export(
        net,
        dummy,
        "xception_npu_16.onnx",
        input_names=["actual_input_1"],
        output_names=["output1"],
        opset_version=11,
    )


if __name__ == "__main__":
    convert()
| 35.320755 | 117 | 0.67094 |
85efcbb8ef82ce1c7765e3da0806725c00faefed | 136 | py | Python | tests/unit/helpers_test.py | sebastianbasierski/dvdd | ae71addf512fb06e0ce06149970997fea804c0e4 | [
"BSD-3-Clause"
] | 1 | 2019-07-09T07:29:00.000Z | 2019-07-09T07:29:00.000Z | tests/unit/helpers_test.py | sebastianbasierski/dvdd | ae71addf512fb06e0ce06149970997fea804c0e4 | [
"BSD-3-Clause"
] | 21 | 2019-07-09T19:44:33.000Z | 2019-07-17T16:40:56.000Z | tests/unit/helpers_test.py | sebastianbasierski/dvdd | ae71addf512fb06e0ce06149970997fea804c0e4 | [
"BSD-3-Clause"
] | 1 | 2019-07-10T09:25:41.000Z | 2019-07-10T09:25:41.000Z | from libs.helpers import get_platform
def test_get_platform():
    """get_platform() must always report some platform value."""
    platform = get_platform()
    assert platform is not None
    # assert platform == "arm"
| 17 | 37 | 0.676471 |
e88f3e5da8cd66a2dff279694d72574f01ec3ea4 | 22,156 | py | Python | nox/sessions.py | ajcerejeira/nox | 4deea887faf00634bd87d6e594c8eca4d8612991 | [
"Apache-2.0"
] | null | null | null | nox/sessions.py | ajcerejeira/nox | 4deea887faf00634bd87d6e594c8eca4d8612991 | [
"Apache-2.0"
] | 1 | 2021-07-24T15:38:33.000Z | 2021-07-24T15:38:33.000Z | nox/sessions.py | sitedata/nox | 00775b470129787b14c45f2853441f9f1cad006d | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Alethea Katherine Flowers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import enum
import hashlib
import os
import re
import sys
import unicodedata
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Union,
)
import nox.command
import py
from nox import _typing
from nox._decorators import Func
from nox.logger import logger
from nox.virtualenv import CondaEnv, PassthroughEnv, ProcessEnv, VirtualEnv
if _typing.TYPE_CHECKING:
from nox.manifest import Manifest
def _normalize_path(envdir: str, path: Union[str, bytes]) -> str:
"""Normalizes a string to be a "safe" filesystem path for a virtualenv."""
if isinstance(path, bytes):
path = path.decode("utf-8")
path = unicodedata.normalize("NFKD", path).encode("ascii", "ignore")
path = path.decode("ascii")
path = re.sub(r"[^\w\s-]", "-", path).strip().lower()
path = re.sub(r"[-\s]+", "-", path)
path = path.strip("-")
full_path = os.path.join(envdir, path)
if len(full_path) > 100 - len("bin/pythonX.Y"):
if len(envdir) < 100 - 9:
path = hashlib.sha1(path.encode("ascii")).hexdigest()[:8]
full_path = os.path.join(envdir, path)
logger.warning("The virtualenv name was hashed to avoid being too long.")
else:
logger.error(
"The virtualenv path {} is too long and will cause issues on "
"some environments. Use the --envdir path to modify where "
"nox stores virtualenvs.".format(full_path)
)
return full_path
def _dblquote_pkg_install_args(args: Tuple[str, ...]) -> Tuple[str, ...]:
"""Double-quote package install arguments in case they contain '>' or '<' symbols"""
# routine used to handle a single arg
def _dblquote_pkg_install_arg(pkg_req_str: str) -> str:
# sanity check: we need an even number of double-quotes
if pkg_req_str.count('"') % 2 != 0:
raise ValueError(
"ill-formated argument with odd number of quotes: %s" % pkg_req_str
)
if "<" in pkg_req_str or ">" in pkg_req_str:
if pkg_req_str[0] == '"' and pkg_req_str[-1] == '"':
# already double-quoted string
return pkg_req_str
else:
# need to double-quote string
if '"' in pkg_req_str:
raise ValueError(
"Cannot escape requirement string: %s" % pkg_req_str
)
return '"%s"' % pkg_req_str
else:
# no dangerous char: no need to double-quote string
return pkg_req_str
# double-quote all args that need to be and return the result
return tuple(_dblquote_pkg_install_arg(a) for a in args)
class _SessionQuit(Exception):
    # Raised by Session.error() to abort the current session immediately.
    pass


class _SessionSkip(Exception):
    # Raised by Session.skip() to mark the current session as skipped.
    pass


class Status(enum.Enum):
    # Outcome of one session run. Values <= 0 count as failures
    # (see Result.__bool__, which tests ``status.value > 0``).
    ABORTED = -1
    FAILED = 0
    SUCCESS = 1
    SKIPPED = 2
class Session:
    """The Session object is passed into each user-defined session function.

    This is your primary means for installing package and running commands in
    your Nox session.
    """

    # Keep instances tiny and prevent accidental attribute creation; the
    # only state is the backing SessionRunner.
    __slots__ = ("_runner",)

    def __init__(self, runner: "SessionRunner") -> None:
        # runner: the SessionRunner that owns the venv, config and manifest.
        self._runner = runner

    @property
    def __dict__(self) -> "Dict[str, SessionRunner]":  # type: ignore
        """Attribute dictionary for object inspection.

        This is needed because ``__slots__`` turns off ``__dict__`` by
        default. Unlike a typical object, modifying the result of this
        dictionary won't allow modification of the instance.
        """
        return {"_runner": self._runner}

    @property
    def env(self) -> dict:
        """A dictionary of environment variables to pass into all commands."""
        return self.virtualenv.env

    @property
    def posargs(self) -> List[str]:
        """This is set to any extra arguments
        passed to ``nox`` on the commandline."""
        return self._runner.global_config.posargs

    @property
    def virtualenv(self) -> ProcessEnv:
        """The virtualenv that all commands are run in."""
        venv = self._runner.venv
        if venv is None:
            # The runner creates the venv lazily in execute(); touching it
            # earlier is a programming error.
            raise ValueError("A virtualenv has not been created for this session")
        return venv

    @property
    def python(self) -> Optional[Union[str, Sequence[str], bool]]:
        """The python version passed into ``@nox.session``."""
        return self._runner.func.python

    @property
    def bin_paths(self) -> Optional[List[str]]:
        """The bin directories for the virtualenv."""
        return self.virtualenv.bin_paths

    @property
    def bin(self) -> Optional[str]:
        """The first bin directory for the virtualenv."""
        paths = self.bin_paths
        return paths[0] if paths is not None else None

    def create_tmp(self) -> str:
        """Create, and return, a temporary directory."""
        tmpdir = os.path.join(self._runner.envdir, "tmp")
        os.makedirs(tmpdir, exist_ok=True)
        # Point TMPDIR at the session-local directory so tools launched via
        # run() place their scratch files there.
        self.env["TMPDIR"] = tmpdir
        return tmpdir

    @property
    def interactive(self) -> bool:
        """Returns True if Nox is being run in an interactive session or False otherwise."""
        return not self._runner.global_config.non_interactive and sys.stdin.isatty()

    def chdir(self, dir: str) -> None:
        """Change the current working directory."""
        self.log("cd {}".format(dir))
        os.chdir(dir)

    cd = chdir
    """An alias for :meth:`chdir`."""

    def _run_func(
        self, func: Callable, args: Iterable[Any], kwargs: Mapping[str, Any]
    ) -> Any:
        """Legacy support for running a function through :func`run`."""
        self.log("{}(args={!r}, kwargs={!r})".format(func, args, kwargs))
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.exception("Function {!r} raised {!r}.".format(func, e))
            # Surface as CommandFailed so callers treat it like a failed command.
            raise nox.command.CommandFailed()

    def run(
        self, *args: str, env: Mapping[str, str] = None, **kwargs: Any
    ) -> Optional[Any]:
        """Run a command.

        Commands must be specified as a list of strings, for example::

            session.run('pytest', '-k', 'fast', 'tests/')
            session.run('flake8', '--import-order-style=google')

        You **can not** just pass everything as one string. For example, this
        **will not work**::

            session.run('pytest -k fast tests/')

        You can set environment variables for the command using ``env``::

            session.run(
                'bash', '-c', 'echo $SOME_ENV',
                env={'SOME_ENV': 'Hello'})

        You can also tell nox to treat non-zero exit codes as success using
        ``success_codes``. For example, if you wanted to treat the ``pytest``
        "tests discovered, but none selected" error as success::

            session.run(
                'pytest', '-k', 'not slow',
                success_codes=[0, 5])

        On Windows, builtin commands like ``del`` cannot be directly invoked,
        but you can use ``cmd /c`` to invoke them::

            session.run('cmd', '/c', 'del', 'docs/modules.rst')

        :param env: A dictionary of environment variables to expose to the
            command. By default, all environment variables are passed.
        :type env: dict or None
        :param bool silent: Silence command output, unless the command fails.
            ``False`` by default.
        :param success_codes: A list of return codes that are considered
            successful. By default, only ``0`` is considered success.
        :type success_codes: list, tuple, or None
        :param external: If False (the default) then programs not in the
            virtualenv path will cause a warning. If True, no warning will be
            emitted. These warnings can be turned into errors using
            ``--error-on-external-run``. This has no effect for sessions that
            do not have a virtualenv.
        :type external: bool
        """
        if not args:
            raise ValueError("At least one argument required to run().")

        # Under --install-only, commands are skipped (only install()/
        # conda_install() calls actually execute).
        if self._runner.global_config.install_only:
            logger.info("Skipping {} run, as --install-only is set.".format(args[0]))
            return None

        return self._run(*args, env=env, **kwargs)

    def run_always(
        self, *args: str, env: Mapping[str, str] = None, **kwargs: Any
    ) -> Optional[Any]:
        """Run a command **always**.

        This is a variant of :meth:`run` that runs in all cases, including in
        the presence of ``--install-only``.

        :param env: A dictionary of environment variables to expose to the
            command. By default, all environment variables are passed.
        :type env: dict or None
        :param bool silent: Silence command output, unless the command fails.
            ``False`` by default.
        :param success_codes: A list of return codes that are considered
            successful. By default, only ``0`` is considered success.
        :type success_codes: list, tuple, or None
        :param external: If False (the default) then programs not in the
            virtualenv path will cause a warning. If True, no warning will be
            emitted. These warnings can be turned into errors using
            ``--error-on-external-run``. This has no effect for sessions that
            do not have a virtualenv.
        :type external: bool
        """
        if not args:
            raise ValueError("At least one argument required to run_always().")

        return self._run(*args, env=env, **kwargs)

    def _run(self, *args: str, env: Mapping[str, str] = None, **kwargs: Any) -> Any:
        """Like run(), except that it runs even if --install-only is provided."""
        # Legacy support - run a function given.
        if callable(args[0]):
            return self._run_func(args[0], args[1:], kwargs)

        # Combine the env argument with our virtualenv's env vars.
        # The caller's entries win over the venv's on key collisions.
        if env is not None:
            overlay_env = env
            env = self.env.copy()
            env.update(overlay_env)
        else:
            env = self.env

        # If --error-on-external-run is specified, error on external programs.
        # setdefault keeps an explicit per-call ``external=`` argument intact.
        if self._runner.global_config.error_on_external_run:
            kwargs.setdefault("external", "error")

        # Allow all external programs when running outside a sandbox.
        if not self.virtualenv.is_sandboxed:
            kwargs["external"] = True

        if args[0] in self.virtualenv.allowed_globals:
            kwargs["external"] = True

        # Run a shell command.
        return nox.command.run(args, env=env, paths=self.bin_paths, **kwargs)

    def conda_install(
        self, *args: str, auto_offline: bool = True, **kwargs: Any
    ) -> None:
        """Install invokes `conda install`_ to install packages inside of the
        session's environment.

        To install packages directly::

            session.conda_install('pandas')
            session.conda_install('numpy', 'scipy')
            session.conda_install('--channel=conda-forge', 'dask==2.1.0')

        To install packages from a ``requirements.txt`` file::

            session.conda_install('--file', 'requirements.txt')
            session.conda_install('--file', 'requirements-dev.txt')

        By default this method will detect when internet connection is not
        available and will add the `--offline` flag automatically in that case.
        To disable this behaviour, set `auto_offline=False`.

        To install the current package without clobbering conda-installed
        dependencies::

            session.install('.', '--no-deps')
            # Install in editable mode.
            session.install('-e', '.', '--no-deps')

        Additional keyword args are the same as for :meth:`run`.

        .. _conda install:
        """
        venv = self._runner.venv

        prefix_args = ()  # type: Tuple[str, ...]
        if isinstance(venv, CondaEnv):
            # Target the session's environment explicitly.
            prefix_args = ("--prefix", venv.location)
        elif not isinstance(venv, PassthroughEnv):  # pragma: no cover
            raise ValueError(
                "A session without a conda environment can not install dependencies from conda."
            )

        if not args:
            raise ValueError("At least one argument required to install().")

        # Escape args that should be (conda-specific; pip install does not need this)
        args = _dblquote_pkg_install_args(args)

        if "silent" not in kwargs:
            kwargs["silent"] = True

        extraopts = ()  # type: Tuple[str, ...]
        if auto_offline and venv.is_offline():
            logger.warning(
                "Automatically setting the `--offline` flag as conda repo seems unreachable."
            )
            extraopts = ("--offline",)

        # external="error" would normally flag conda as an outside program;
        # conda is in allowed_globals, so _run() overrides this to allow it.
        self._run(
            "conda",
            "install",
            "--yes",
            *extraopts,
            *prefix_args,
            *args,
            external="error",
            **kwargs
        )

    def install(self, *args: str, **kwargs: Any) -> None:
        """Install invokes `pip`_ to install packages inside of the session's
        virtualenv.

        To install packages directly::

            session.install('pytest')
            session.install('requests', 'mock')
            session.install('requests[security]==2.9.1')

        To install packages from a ``requirements.txt`` file::

            session.install('-r', 'requirements.txt')
            session.install('-r', 'requirements-dev.txt')

        To install the current package::

            session.install('.')
            # Install in editable mode.
            session.install('-e', '.')

        Additional keyword args are the same as for :meth:`run`.

        .. _pip: https://pip.readthedocs.org
        """
        if not isinstance(
            self._runner.venv, (CondaEnv, VirtualEnv, PassthroughEnv)
        ):  # pragma: no cover
            raise ValueError(
                "A session without a virtualenv can not install dependencies."
            )
        if not args:
            raise ValueError("At least one argument required to install().")

        if "silent" not in kwargs:
            kwargs["silent"] = True

        self._run("pip", "install", *args, external="error", **kwargs)

    def notify(self, target: "Union[str, SessionRunner]") -> None:
        """Place the given session at the end of the queue.

        This method is idempotent; multiple notifications to the same session
        have no effect.

        Args:
            target (Union[str, Callable]): The session to be notified. This
                may be specified as the appropriate string (same as used for
                ``nox -s``) or using the function object.
        """
        self._runner.manifest.notify(target)

    def log(self, *args: Any, **kwargs: Any) -> None:
        """Outputs a log during the session."""
        logger.info(*args, **kwargs)

    def error(self, *args: Any) -> "_typing.NoReturn":
        """Immediately aborts the session and optionally logs an error."""
        raise _SessionQuit(*args)

    def skip(self, *args: Any) -> "_typing.NoReturn":
        """Immediately skips the session and optionally logs a warning."""
        raise _SessionSkip(*args)
class SessionRunner:
    """Orchestrates a single session: owns its virtualenv, runs the user
    function, and converts every outcome (success, skip, abort, failure)
    into a :class:`Result`."""

    def __init__(
        self,
        name: str,
        signatures: List[str],
        func: Func,
        global_config: argparse.Namespace,
        manifest: "Manifest",
    ) -> None:
        # name: the bare session name; signatures: parametrized variants
        # (e.g. "tests-3.9"); func: the decorated user session function.
        self.name = name
        self.signatures = signatures
        self.func = func
        self.global_config = global_config
        self.manifest = manifest
        # Created lazily by _create_venv() during execute().
        self.venv = None  # type: Optional[ProcessEnv]

    @property
    def description(self) -> Optional[str]:
        """First line of the session function's docstring, if any."""
        doc = self.func.__doc__
        if doc:
            first_line = doc.strip().split("\n")[0]
            return first_line
        return None

    def __str__(self) -> str:
        sigs = ", ".join(self.signatures)
        return "Session(name={}, signatures={})".format(self.name, sigs)

    @property
    def friendly_name(self) -> str:
        # Prefer the full signature (includes parametrization) over the
        # bare name for display and for the venv directory name.
        return self.signatures[0] if self.signatures else self.name

    @property
    def envdir(self) -> str:
        """Filesystem path of this session's virtualenv directory."""
        return _normalize_path(self.global_config.envdir, self.friendly_name)

    def _create_venv(self) -> None:
        """Create self.venv according to the configured backend.

        Precedence for the backend choice: --force-venv-backend CLI flag,
        then the @nox.session(venv_backend=...) argument, then the global
        default.
        """
        backend = (
            self.global_config.force_venv_backend
            or self.func.venv_backend
            or self.global_config.default_venv_backend
        )

        # "none" or python=False means: run directly in the host environment.
        if backend == "none" or self.func.python is False:
            self.venv = PassthroughEnv()
            return

        reuse_existing = (
            self.func.reuse_venv or self.global_config.reuse_existing_virtualenvs
        )

        if backend is None or backend == "virtualenv":
            self.venv = VirtualEnv(
                self.envdir,
                interpreter=self.func.python,  # type: ignore
                reuse_existing=reuse_existing,
                venv_params=self.func.venv_params,
            )
        elif backend == "conda":
            self.venv = CondaEnv(
                self.envdir,
                interpreter=self.func.python,  # type: ignore
                reuse_existing=reuse_existing,
                venv_params=self.func.venv_params,
            )
        elif backend == "venv":
            # Same class as "virtualenv", but backed by the stdlib venv module.
            self.venv = VirtualEnv(
                self.envdir,
                interpreter=self.func.python,  # type: ignore
                reuse_existing=reuse_existing,
                venv=True,
                venv_params=self.func.venv_params,
            )
        else:
            raise ValueError(
                "Expected venv_backend one of ('virtualenv', 'conda', 'venv'), but got '{}'.".format(
                    backend
                )
            )

        self.venv.create()

    def execute(self) -> "Result":
        """Run the session function and map its outcome to a Result.

        Never raises except for KeyboardInterrupt, which is logged and
        re-raised so the whole nox run stops.
        """
        logger.warning("Running session {}".format(self.friendly_name))

        try:
            # By default, nox should quietly change to the directory where
            # the noxfile.py file is located.
            cwd = py.path.local(
                os.path.realpath(os.path.dirname(self.global_config.noxfile))
            ).as_cwd()

            with cwd:
                self._create_venv()
                session = Session(self)
                self.func(session)

            # Nothing went wrong; return a success.
            return Result(self, Status.SUCCESS)

        except nox.virtualenv.InterpreterNotFound as exc:
            # Missing interpreters are a failure only if the user opted in.
            if self.global_config.error_on_missing_interpreters:
                return Result(self, Status.FAILED, reason=str(exc))
            else:
                return Result(self, Status.SKIPPED, reason=str(exc))

        except _SessionQuit as exc:
            # Raised by session.error().
            return Result(self, Status.ABORTED, reason=str(exc))

        except _SessionSkip as exc:
            # Raised by session.skip().
            return Result(self, Status.SKIPPED, reason=str(exc))

        except nox.command.CommandFailed:
            return Result(self, Status.FAILED)

        except KeyboardInterrupt:
            logger.error("Session {} interrupted.".format(self.friendly_name))
            raise

        except Exception as exc:
            # Any other exception from the user's session function.
            logger.exception(
                "Session {} raised exception {!r}".format(self.friendly_name, exc)
            )
            return Result(self, Status.FAILED)
class Result:
    """An object representing the result of a session."""

    def __init__(
        self, session: SessionRunner, status: Status, reason: Optional[str] = None
    ) -> None:
        """Initialize the Result object.

        Args:
            session (~nox.sessions.SessionRunner):
                The session runner which ran.
            status (~nox.sessions.Status): The final result status.
            reason (str): Additional info.
        """
        self.session = session
        self.status = status
        self.reason = reason

    def __bool__(self) -> bool:
        # SUCCESS (1) and SKIPPED (2) are truthy; FAILED (0) and
        # ABORTED (-1) are falsy.
        return self.status.value > 0

    def __nonzero__(self) -> bool:
        # Python 2 spelling of __bool__; kept for compatibility.
        return self.__bool__()

    @property
    def imperfect(self) -> str:
        """Return the English imperfect tense for the status.

        Returns:
            str: A word or phrase representing the status.
        """
        if self.status == Status.SUCCESS:
            return "was successful"
        status = self.status.name.lower()
        if self.reason:
            return "{}: {}".format(status, self.reason)
        else:
            return status

    def log(self, message: str) -> None:
        """Log a message using the appropriate log function.

        Args:
            message (str): The message to be logged.
        """
        # Pick severity by status: success -> green, skipped -> warning,
        # failed/aborted (value <= 0) -> error.
        log_function = logger.info
        if self.status == Status.SUCCESS:
            log_function = logger.success
        if self.status == Status.SKIPPED:
            log_function = logger.warning
        if self.status.value <= 0:
            log_function = logger.error
        log_function(message)

    def serialize(self) -> Dict[str, Any]:
        """Return a serialized representation of this result.

        Returns:
            dict: The serialized result.
        """
        return {
            "args": getattr(self.session.func, "call_spec", {}),
            "name": self.session.name,
            "result": self.status.name.lower(),
            "result_code": self.status.value,
            "signatures": self.session.signatures,
        }
| 34.244204 | 101 | 0.590495 |
d66d8a79c10f271268eb7cad3384ff785193e58e | 10,685 | py | Python | lib/datasets/gmu_scene.py | aditya2592/PoseCNN | a763120ce0ceb55cf3432980287ef463728f8052 | [
"MIT"
] | 655 | 2018-03-21T19:55:45.000Z | 2022-03-25T20:41:21.000Z | lib/datasets/gmu_scene.py | SergioRAgostinho/PoseCNN | da9eaae850eed7521a2a48a4d27474d655caab42 | [
"MIT"
] | 122 | 2018-04-04T13:57:49.000Z | 2022-03-18T09:28:44.000Z | lib/datasets/gmu_scene.py | SergioRAgostinho/PoseCNN | da9eaae850eed7521a2a48a4d27474d655caab42 | [
"MIT"
] | 226 | 2018-03-22T01:40:04.000Z | 2022-03-17T11:56:14.000Z | __author__ = 'yuxiang'
import os
import datasets
import datasets.gmu_scene
import datasets.imdb
import cPickle
import numpy as np
import cv2
class gmu_scene(datasets.imdb):
    # Python 2 dataset wrapper for the GMU Kitchen Scenes dataset:
    # maps dataset indices to image/depth/label/metadata paths, builds the
    # ground-truth roidb, and evaluates semantic-segmentation results.

    def __init__(self, image_set, gmu_scene_path = None):
        # image_set: split name (e.g. 'train'); gmu_scene_path: dataset root,
        # defaulting to <ROOT_DIR>/data/GMUScene.
        datasets.imdb.__init__(self, 'gmu_scene_' + image_set)
        self._image_set = image_set
        self._gmu_scene_path = self._get_default_path() if gmu_scene_path is None \
                            else gmu_scene_path
        self._data_path = os.path.join(self._gmu_scene_path, 'data')
        # 11 object categories plus background (index 0).
        self._classes = ('__background__', 'coca_cola_glass_bottle', 'coffee_mate_french_vanilla', \
                         'honey_bunches_of_oats_honey_roasted', 'hunts_sauce', 'mahatma_rice', \
                         'nature_valley_soft_baked_oatmeal_squares_cinnamon_brown_sugar', 'nature_valley_sweet_and_salty_nut_almond', \
                         'palmolive_orange', 'pop_secret_light_butter', 'pringles_bbq', 'red_bull')
        # One RGB color (in [0, 1]) per class, used for label-image encoding.
        self._class_colors = [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 0), (1, 0, 1), (0, 1, 1), \
                              (0.5, 0, 0), (0, 0.5, 0), (0, 0, 0.5), (0.5, 0.5, 0), (0.5, 0, 0.5)]
        self._class_weights = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
        self._image_ext = '.png'
        self._image_index = self._load_image_set_index()
        self._roidb_handler = self.gt_roidb

        assert os.path.exists(self._gmu_scene_path), \
                'gmu_scene path does not exist: {}'.format(self._gmu_scene_path)
        assert os.path.exists(self._data_path), \
                'Data path does not exist: {}'.format(self._data_path)

    # image
    def image_path_at(self, i):
        """
        Return the absolute path to image i in the image sequence.
        """
        return self.image_path_from_index(self.image_index[i])

    def image_path_from_index(self, index):
        """
        Construct an image path from the image's "index" identifier.
        """
        image_path = os.path.join(self._data_path, index + self._image_ext)
        assert os.path.exists(image_path), \
                'Path does not exist: {}'.format(image_path)
        return image_path

    # depth
    def depth_path_at(self, i):
        """
        Return the absolute path to depth i in the image sequence.
        """
        return self.depth_path_from_index(self.image_index[i])

    def depth_path_from_index(self, index):
        """
        Construct an depth path from the image's "index" identifier.
        """
        # Depth files mirror the RGB layout: Images/rgb_* -> Depths/depth_*.
        index_depth = index.replace('Images', 'Depths')
        index_depth = index_depth.replace('rgb', 'depth')
        depth_path = os.path.join(self._data_path, index_depth + self._image_ext)
        assert os.path.exists(depth_path), \
                'Path does not exist: {}'.format(depth_path)
        return depth_path

    # label
    def label_path_at(self, i):
        """
        Return the absolute path to metadata i in the image sequence.
        """
        return self.label_path_from_index(self.image_index[i])

    def label_path_from_index(self, index):
        """
        Construct an metadata path from the image's "index" identifier.
        """
        # Label files mirror the RGB layout: Images/rgb_* -> Labels/label_*.
        index_label = index.replace('Images', 'Labels')
        index_label = index_label.replace('rgb', 'label')
        label_path = os.path.join(self._data_path, index_label + self._image_ext)
        assert os.path.exists(label_path), \
                'Path does not exist: {}'.format(label_path)
        return label_path

    # camera pose
    def metadata_path_at(self, i):
        """
        Return the absolute path to metadata i in the image sequence.
        """
        return self.metadata_path_from_index(self.image_index[i])

    def metadata_path_from_index(self, index):
        """
        Construct an metadata path from the image's "index" identifier.
        """
        # Metadata (.mat) files mirror the RGB layout: Images/rgb_* -> MetaData/meta_*.
        index_meta = index.replace('Images', 'MetaData')
        index_meta = index_meta.replace('rgb', 'meta')
        metadata_path = os.path.join(self._data_path, index_meta + '.mat')
        assert os.path.exists(metadata_path), \
                'Path does not exist: {}'.format(metadata_path)
        return metadata_path

    def _load_image_set_index(self):
        """
        Load the indexes listed in this dataset's image set file.
        """
        image_set_file = os.path.join(self._gmu_scene_path, self._image_set + '.txt')
        assert os.path.exists(image_set_file), \
                'Path does not exist: {}'.format(image_set_file)

        with open(image_set_file) as f:
            image_index = [x.rstrip('\n') for x in f.readlines()]
        return image_index

    def _get_default_path(self):
        """
        Return the default path where GMUScene is expected to be installed.
        """
        return os.path.join(datasets.ROOT_DIR, 'data', 'GMUScene')

    def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_gmu_scene_annotation(index)
                    for index in self.image_index]

        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)

        return gt_roidb

    def _load_gmu_scene_annotation(self, index):
        """
        Load class name and meta data
        """
        # image path
        image_path = self.image_path_from_index(index)

        # depth path
        depth_path = self.depth_path_from_index(index)

        # label path
        label_path = self.label_path_from_index(index)

        # metadata path
        metadata_path = self.metadata_path_from_index(index)

        # parse image name: the leading path component is the video/scene id.
        pos = index.find('/')
        video_id = index[:pos]

        return {'image': image_path,
                'depth': depth_path,
                'label': label_path,
                'meta_data': metadata_path,
                'video_id': video_id,
                'class_colors': self._class_colors,
                'class_weights': self._class_weights,
                'flipped': False}

    def _process_label_image(self, label_image):
        """
        change label image to label index
        """
        class_colors = self._class_colors
        width = label_image.shape[1]
        height = label_image.shape[0]
        label_index = np.zeros((height, width), dtype=np.float32)

        # label image is in BRG order
        # Collapse each BGR pixel to a single integer, then match it against
        # the per-class color codes.
        index = label_image[:,:,2] + 256*label_image[:,:,1] + 256*256*label_image[:,:,0]
        for i in xrange(len(class_colors)):
            color = class_colors[i]
            ind = 255 * (color[0] + 256*color[1] + 256*256*color[2])
            I = np.where(index == ind)
            label_index[I] = i

        return label_index

    def labels_to_image(self, im, labels):
        # Render a per-pixel class-index map as an RGB visualization using
        # the dataset's class colors. `im` is currently unused (the blending
        # code below is commented out).
        class_colors = self._class_colors
        height = labels.shape[0]
        width = labels.shape[1]
        image_r = np.zeros((height, width), dtype=np.float32)
        image_g = np.zeros((height, width), dtype=np.float32)
        image_b = np.zeros((height, width), dtype=np.float32)

        for i in xrange(len(class_colors)):
            color = class_colors[i]
            I = np.where(labels == i)
            image_r[I] = 255 * color[0]
            image_g[I] = 255 * color[1]
            image_b[I] = 255 * color[2]

        image = np.stack((image_r, image_g, image_b), axis=-1)
        # index = np.where(image == 255)
        # image[index] = im[index]
        # image = 0.1*im + 0.9*image

        return image.astype(np.uint8)

    def evaluate_segmentations(self, segmentations, output_dir):
        # Compare predicted label maps against ground truth and report
        # overall / per-class accuracy and IoU; also dumps per-image .mat files.
        print 'evaluating segmentations'
        # compute histogram
        n_cl = self.num_classes
        hist = np.zeros((n_cl, n_cl))

        # make image dir
        image_dir = os.path.join(output_dir, 'images')
        if not os.path.exists(image_dir):
            os.makedirs(image_dir)

        # make matlab result dir
        import scipy.io
        mat_dir = os.path.join(output_dir, 'mat')
        if not os.path.exists(mat_dir):
            os.makedirs(mat_dir)

        # for each image
        for im_ind, index in enumerate(self.image_index):
            # read ground truth labels
            im = cv2.imread(self.label_path_from_index(index), cv2.IMREAD_UNCHANGED)
            gt_labels = im.astype(np.float32)

            # predicated labels
            sg_labels = segmentations[im_ind]['labels']
            hist += self.fast_hist(gt_labels.flatten(), sg_labels.flatten(), n_cl)

            """
            # label image
            rgba = cv2.imread(self.image_path_from_index(index), cv2.IMREAD_UNCHANGED)
            image = rgba[:,:,:3]
            alpha = rgba[:,:,3]
            I = np.where(alpha == 0)
            image[I[0], I[1], :] = 255
            label_image = self.labels_to_image(image, sg_labels)

            # save image
            filename = os.path.join(image_dir, '%04d.png' % im_ind)
            print filename
            cv2.imwrite(filename, label_image)
            """

            # save matlab result
            labels = {'labels': sg_labels}
            filename = os.path.join(mat_dir, '%04d.mat' % im_ind)
            print filename
            scipy.io.savemat(filename, labels)

        # overall accuracy
        acc = np.diag(hist).sum() / hist.sum()
        print 'overall accuracy', acc
        # per-class accuracy
        acc = np.diag(hist) / hist.sum(1)
        print 'mean accuracy', np.nanmean(acc)

        # per-class IU
        print 'per-class IU'
        iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
        for i in range(n_cl):
            print '{} {}'.format(self._classes[i], iu[i])
        print 'mean IU', np.nanmean(iu)
        freq = hist.sum(1) / hist.sum()
        print 'fwavacc', (freq[freq > 0] * iu[freq > 0]).sum()

        filename = os.path.join(output_dir, 'segmentation.txt')
        with open(filename, 'wt') as f:
            for i in range(n_cl):
                f.write('{:f}\n'.format(iu[i]))
if __name__ == '__main__':
    # Quick manual smoke test: build the roidb and drop into IPython.
    d = datasets.gmu_scene('train')
    res = d.roidb
    from IPython import embed; embed()
| 36.220339 | 135 | 0.584745 |
d2140469dfe3a3096d91d8a0f07c235c606543ec | 1,470 | py | Python | bleu.py | toastynews/hong-kong-bleu | 040b70e2e3480d9633f14fd2c7ba6453b5b64c4d | [
"CC-BY-4.0"
] | null | null | null | bleu.py | toastynews/hong-kong-bleu | 040b70e2e3480d9633f14fd2c7ba6453b5b64c4d | [
"CC-BY-4.0"
] | null | null | null | bleu.py | toastynews/hong-kong-bleu | 040b70e2e3480d9633f14fd2c7ba6453b5b64c4d | [
"CC-BY-4.0"
] | null | null | null | import string
from os import listdir
from os.path import isfile, join
from nltk.translate.bleu_score import corpus_bleu
from nltk.translate.nist_score import corpus_nist
reference_directory = 'reference/'
data_directories = [
'aws/',
'azure/',
'google/'
]
references = []
ref_files = [f for f in listdir(reference_directory) if isfile(join(reference_directory, f))]
for file in ref_files:
with open(reference_directory + file, 'r', encoding="utf8") as f:
fileTokens = []
for line in f:
text = line.translate(str.maketrans('', '', string.punctuation)).lower()
token = text.split()
fileTokens.extend(token)
references.append([fileTokens])
for directory in data_directories:
candidates = []
only_files = [f for f in listdir(directory) if isfile(join(directory, f))]
for file in only_files:
fileTokens = []
with open(directory + file, 'r', encoding="utf8") as f:
for line in f:
text = line.translate(str.maketrans('', '', string.punctuation)).lower()
token = text.split()
fileTokens.extend(token)
candidates.append(fileTokens)
bleu_score = corpus_bleu(references, candidates)
nist_score = corpus_nist(references, candidates)
print(directory)
print("bleu {}".format(bleu_score))
print("mist {}".format(nist_score))
| 35 | 93 | 0.621088 |
a4e6814682d03b795f707970242c3d0761bf5f38 | 1,827 | py | Python | src/backend/common/deferred/queues/gcloud_queue.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
] | null | null | null | src/backend/common/deferred/queues/gcloud_queue.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
] | null | null | null | src/backend/common/deferred/queues/gcloud_queue.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
] | null | null | null | from typing import Dict, Optional
from google.cloud import tasks_v2
from backend.common.deferred.queues.task_queue import TaskQueue
from backend.common.deferred.requests.gcloud_http_request import (
GCloudHttpTaskRequestConfiguration,
)
from backend.common.deferred.requests.gcloud_request import GCloudTaskRequest
class GCloudTaskQueue(TaskQueue[GCloudTaskRequest]):
    """Task queue backed by Google Cloud Tasks.

    When an ``http_request_configuration`` is supplied, enqueued work is
    wrapped in HTTP-target task requests; otherwise App Engine service-target
    task requests are built.
    """

    http_request_configuration: Optional[GCloudHttpTaskRequestConfiguration]

    def __init__(
        self,
        name: str,
        *,
        http_request_configuration: Optional[GCloudHttpTaskRequestConfiguration] = None,
        gcloud_client: tasks_v2.CloudTasksClient
    ) -> None:
        self.http_request_configuration = http_request_configuration
        self._gcloud_client = gcloud_client
        super().__init__(name)

    def _task_request(
        self,
        url: str,
        headers: Dict[str, str],
        body: bytes,
        service: Optional[str] = None,
    ) -> GCloudTaskRequest:
        """Build the request object describing one task for this queue."""
        config = self.http_request_configuration
        if config is None:
            # No HTTP configuration - target an App Engine service instead.
            # Imported lazily, mirroring the other branch.
            from backend.common.deferred.requests.gcloud_service_request import (
                GCloudServiceTaskRequest,
            )

            return GCloudServiceTaskRequest(url, headers, body, service)

        from backend.common.deferred.requests.gcloud_http_request import (
            GCloudHttpTaskRequest,
        )

        return GCloudHttpTaskRequest(config, url, headers, body)

    def _enqueue(self, request: GCloudTaskRequest) -> None:
        """Submit one task request to Cloud Tasks under this queue's name."""
        create_request = tasks_v2.CreateTaskRequest()
        create_request.parent = self.name
        create_request.task = request.proto_task
        self._gcloud_client.create_task(request=create_request)
| 33.218182 | 88 | 0.705528 |
1008f58a15e78d3af59cf09c96ca80b9ad27e407 | 8,695 | py | Python | setuptools/_distutils/tests/test_unixccompiler.py | jensecj/setuptools | 41a229bdf7d2f444f6f355042e816a53fbc5a3b2 | [
"MIT"
] | null | null | null | setuptools/_distutils/tests/test_unixccompiler.py | jensecj/setuptools | 41a229bdf7d2f444f6f355042e816a53fbc5a3b2 | [
"MIT"
] | null | null | null | setuptools/_distutils/tests/test_unixccompiler.py | jensecj/setuptools | 41a229bdf7d2f444f6f355042e816a53fbc5a3b2 | [
"MIT"
] | null | null | null | """Tests for distutils.unixccompiler."""
import os
import sys
import unittest
from test.support import run_unittest
from .py38compat import EnvironmentVarGuard
from distutils import sysconfig
from distutils.errors import DistutilsPlatformError
from distutils.unixccompiler import UnixCCompiler
from distutils.util import _clear_cached_macosx_ver
class UnixCCompilerTestCase(unittest.TestCase):
    """Tests for UnixCCompiler's rpath flag generation and LDSHARED handling.

    The tests monkeypatch ``sys.platform`` and ``distutils.sysconfig``
    lookups, so setUp/tearDown save and restore the real values around every
    test method.
    """

    def setUp(self):
        # Save the globals that individual tests monkeypatch.
        self._backup_platform = sys.platform
        self._backup_get_config_var = sysconfig.get_config_var
        self._backup_get_config_vars = sysconfig.get_config_vars
        class CompilerWrapper(UnixCCompiler):
            # Shortcut: the runtime-library-dir flag(s) for a fixed directory.
            def rpath_foo(self):
                return self.runtime_library_dir_option('/foo')
        self.cc = CompilerWrapper()

    def tearDown(self):
        # Undo the monkeypatching performed by the test methods.
        sys.platform = self._backup_platform
        sysconfig.get_config_var = self._backup_get_config_var
        sysconfig.get_config_vars = self._backup_get_config_vars

    @unittest.skipIf(sys.platform == 'win32', "can't test on Windows")
    def test_runtime_libdir_option(self):
        # Issue #5900; GitHub Issue #37
        #
        # Ensure RUNPATH is added to extension modules with RPATH if
        # GNU ld is used

        # darwin
        sys.platform = 'darwin'
        darwin_ver_var = 'MACOSX_DEPLOYMENT_TARGET'
        darwin_rpath_flag = '-Wl,-rpath,/foo'
        darwin_lib_flag = '-L/foo'

        # (macOS version from syscfg, macOS version from env var) -> flag
        # Version value of None generates two tests: as None and as empty string
        # Expected flag value of None means an mismatch exception is expected
        darwin_test_cases = [
            ((None , None ), darwin_lib_flag),
            ((None , '11' ), darwin_rpath_flag),
            (('10' , None ), darwin_lib_flag),
            (('10.3' , None ), darwin_lib_flag),
            (('10.3.1', None ), darwin_lib_flag),
            (('10.5' , None ), darwin_rpath_flag),
            (('10.5.1', None ), darwin_rpath_flag),
            (('10.3' , '10.3' ), darwin_lib_flag),
            (('10.3' , '10.5' ), darwin_rpath_flag),
            (('10.5' , '10.3' ), darwin_lib_flag),
            (('10.5' , '11' ), darwin_rpath_flag),
            (('10.4' , '10' ), None),
        ]

        def make_darwin_gcv(syscfg_macosx_ver):
            # Fake sysconfig.get_config_var reporting the given macOS target;
            # every other config variable resolves to a dummy value.
            def gcv(var):
                if var == darwin_ver_var:
                    return syscfg_macosx_ver
                return "xxx"
            return gcv

        def do_darwin_test(syscfg_macosx_ver, env_macosx_ver, expected_flag):
            # Run one (sysconfig version, env version) combination, restoring
            # the patched state afterwards so cases stay independent.
            env = os.environ
            msg = "macOS version = (sysconfig=%r, env=%r)" % \
                (syscfg_macosx_ver, env_macosx_ver)

            # Save
            old_gcv = sysconfig.get_config_var
            old_env_macosx_ver = env.get(darwin_ver_var)

            # Setup environment
            _clear_cached_macosx_ver()
            sysconfig.get_config_var = make_darwin_gcv(syscfg_macosx_ver)
            if env_macosx_ver is not None:
                env[darwin_ver_var] = env_macosx_ver
            elif darwin_ver_var in env:
                env.pop(darwin_ver_var)

            # Run the test
            if expected_flag is not None:
                self.assertEqual(self.cc.rpath_foo(), expected_flag, msg=msg)
            else:
                # expected_flag None means the version combination is invalid
                # and must raise a mismatch error.
                with self.assertRaisesRegex(DistutilsPlatformError,
                                            darwin_ver_var + r' mismatch', msg=msg):
                    self.cc.rpath_foo()

            # Restore
            if old_env_macosx_ver is not None:
                env[darwin_ver_var] = old_env_macosx_ver
            elif darwin_ver_var in env:
                env.pop(darwin_ver_var)
            sysconfig.get_config_var = old_gcv
            _clear_cached_macosx_ver()

        for macosx_vers, expected_flag in darwin_test_cases:
            syscfg_macosx_ver, env_macosx_ver = macosx_vers
            do_darwin_test(syscfg_macosx_ver, env_macosx_ver, expected_flag)
            # Bonus test cases with None interpreted as empty string
            if syscfg_macosx_ver is None:
                do_darwin_test("", env_macosx_ver, expected_flag)
            if env_macosx_ver is None:
                do_darwin_test(syscfg_macosx_ver, "", expected_flag)
            if syscfg_macosx_ver is None and env_macosx_ver is None:
                do_darwin_test("", "", expected_flag)

        old_gcv = sysconfig.get_config_var

        # hp-ux
        sys.platform = 'hp-ux'
        def gcv(v):
            return 'xxx'
        sysconfig.get_config_var = gcv
        self.assertEqual(self.cc.rpath_foo(), ['+s', '-L/foo'])

        def gcv(v):
            return 'gcc'
        sysconfig.get_config_var = gcv
        self.assertEqual(self.cc.rpath_foo(), ['-Wl,+s', '-L/foo'])

        def gcv(v):
            return 'g++'
        sysconfig.get_config_var = gcv
        self.assertEqual(self.cc.rpath_foo(), ['-Wl,+s', '-L/foo'])

        sysconfig.get_config_var = old_gcv

        # GCC GNULD
        sys.platform = 'bar'
        def gcv(v):
            if v == 'CC':
                return 'gcc'
            elif v == 'GNULD':
                return 'yes'
        sysconfig.get_config_var = gcv
        self.assertEqual(self.cc.rpath_foo(), '-Wl,--enable-new-dtags,-R/foo')

        # compiler command may carry extra flags; still detected as gcc
        def gcv(v):
            if v == 'CC':
                return 'gcc -pthread -B /bar'
            elif v == 'GNULD':
                return 'yes'
        sysconfig.get_config_var = gcv
        self.assertEqual(self.cc.rpath_foo(), '-Wl,--enable-new-dtags,-R/foo')

        # GCC non-GNULD
        sys.platform = 'bar'
        def gcv(v):
            if v == 'CC':
                return 'gcc'
            elif v == 'GNULD':
                return 'no'
        sysconfig.get_config_var = gcv
        self.assertEqual(self.cc.rpath_foo(), '-Wl,-R/foo')

        # GCC GNULD with fully qualified configuration prefix
        # see #7617
        sys.platform = 'bar'
        def gcv(v):
            if v == 'CC':
                return 'x86_64-pc-linux-gnu-gcc-4.4.2'
            elif v == 'GNULD':
                return 'yes'
        sysconfig.get_config_var = gcv
        self.assertEqual(self.cc.rpath_foo(), '-Wl,--enable-new-dtags,-R/foo')

        # non-GCC GNULD
        sys.platform = 'bar'
        def gcv(v):
            if v == 'CC':
                return 'cc'
            elif v == 'GNULD':
                return 'yes'
        sysconfig.get_config_var = gcv
        self.assertEqual(self.cc.rpath_foo(), '-R/foo')

        # non-GCC non-GNULD
        sys.platform = 'bar'
        def gcv(v):
            if v == 'CC':
                return 'cc'
            elif v == 'GNULD':
                return 'no'
        sysconfig.get_config_var = gcv
        self.assertEqual(self.cc.rpath_foo(), '-R/foo')

    @unittest.skipIf(sys.platform == 'win32', "can't test on Windows")
    def test_cc_overrides_ldshared(self):
        # Issue #18080:
        # ensure that setting CC env variable also changes default linker
        def gcv(v):
            if v == 'LDSHARED':
                return 'gcc-4.2 -bundle -undefined dynamic_lookup '
            return 'gcc-4.2'

        # Patched get_config_vars: with args, look each one up via the patched
        # get_config_var; with no args, fall through to the real function.
        def gcvs(*args, _orig=sysconfig.get_config_vars):
            if args:
                return list(map(sysconfig.get_config_var, args))
            return _orig()

        sysconfig.get_config_var = gcv
        sysconfig.get_config_vars = gcvs
        with EnvironmentVarGuard() as env:
            env['CC'] = 'my_cc'
            del env['LDSHARED']
            sysconfig.customize_compiler(self.cc)
        # linker command must have been derived from the CC override
        self.assertEqual(self.cc.linker_so[0], 'my_cc')

    @unittest.skipIf(sys.platform == 'win32', "can't test on Windows")
    def test_explicit_ldshared(self):
        # Issue #18080:
        # ensure that setting CC env variable does not change
        # explicit LDSHARED setting for linker
        def gcv(v):
            if v == 'LDSHARED':
                return 'gcc-4.2 -bundle -undefined dynamic_lookup '
            return 'gcc-4.2'

        def gcvs(*args, _orig=sysconfig.get_config_vars):
            if args:
                return list(map(sysconfig.get_config_var, args))
            return _orig()

        sysconfig.get_config_var = gcv
        sysconfig.get_config_vars = gcvs
        with EnvironmentVarGuard() as env:
            env['CC'] = 'my_cc'
            env['LDSHARED'] = 'my_ld -bundle -dynamic'
            sysconfig.customize_compiler(self.cc)
        # explicit LDSHARED wins over the CC-derived default
        self.assertEqual(self.cc.linker_so[0], 'my_ld')
def test_suite():
    """Return the TestSuite for this module (entry point for the test runner)."""
    # unittest.makeSuite is deprecated since Python 3.11 and removed in 3.13;
    # TestLoader.loadTestsFromTestCase builds the identical suite.
    return unittest.TestLoader().loadTestsFromTestCase(UnixCCompilerTestCase)
# Allow running this test module directly: python test_unixccompiler.py
if __name__ == "__main__":
    run_unittest(test_suite())
| 36.078838 | 80 | 0.568833 |
7839e8bb13e1e51f6353b5ea1e49e7b09a1e9e48 | 5,947 | py | Python | psydac/api/printing/pycode.py | mayuri-dhote/psydac | 01ddbe2d049a599684c45060912d01c2658160a3 | [
"MIT"
] | 20 | 2019-07-30T12:37:57.000Z | 2022-03-09T11:35:04.000Z | psydac/api/printing/pycode.py | mayuri-dhote/psydac | 01ddbe2d049a599684c45060912d01c2658160a3 | [
"MIT"
] | 98 | 2019-04-01T16:32:27.000Z | 2022-03-21T19:30:35.000Z | psydac/api/printing/pycode.py | mayuri-dhote/psydac | 01ddbe2d049a599684c45060912d01c2658160a3 | [
"MIT"
] | 7 | 2019-10-03T03:49:47.000Z | 2022-03-01T09:11:49.000Z | from sympy.core import Symbol
from sympy.core import S
from sympy.printing.precedence import precedence
from psydac.pyccel.codegen.printing.pycode import PythonCodePrinter as PyccelPythonCodePrinter
from sympde.topology.derivatives import _partial_derivatives
from sympde.topology import SymbolicExpr
#==============================================================================
class PythonCodePrinter(PyccelPythonCodePrinter):
    """Python code printer extended with PSYDAC-specific nodes.

    Adds printing rules for SplBasic/Kernel/Interface wrappers (optionally
    emitting their dependencies first) and prints ``sqrt`` instead of
    ``math.sqrt`` for square-root powers.
    """

    def __init__(self, settings=None):
        # BUGFIX: the previous code called settings.pop(...) unconditionally,
        # raising AttributeError when settings was None.  Work on a copy so
        # the caller's dict is not mutated either.
        settings = dict(settings) if settings else {}
        self._enable_dependencies = settings.pop('enable_dependencies', True)
        PyccelPythonCodePrinter.__init__(self, settings=settings)

    # .........................................................
    #               PSYDAC objects
    # .........................................................
    def _print_SplBasic(self, expr):
        # Optionally print the imports and bodies of all dependencies before
        # the function itself.
        code = ''
        if self._enable_dependencies and expr.dependencies:
            imports = []
            for dep in expr.dependencies:
                imports += dep.imports

            code = '\n'.join(self._print(i) for i in imports)
            for dep in expr.dependencies:
                code = '{code}\n{dep}'.format(code=code,
                                              dep=self._print(dep))

        return '{code}\n{func}'.format(code=code, func=self._print(expr.func))

    def _print_Kernel(self, expr):
        # Like _print_SplBasic, but expr.func is a nested sequence of
        # functions; print every non-None entry.
        code = ''
        if self._enable_dependencies and expr.dependencies:
            imports = []
            for dep in expr.dependencies:
                imports += dep.imports

            code = '\n'.join(self._print(i) for i in imports)
            for dep in expr.dependencies:
                code = '{code}\n{dep}'.format(code=code,
                                              dep=self._print(dep))

        funcs = [func for fs in expr.func for func in fs if func is not None]
        funcs = '\n'.join(self._print(func) for func in funcs)
        return '{code}\n{funcs}'.format(code=code, funcs=funcs)

    def _print_Interface(self, expr):
        # Imports first, then the interface function.
        code = '\n'.join(self._print(i) for i in expr.imports)
        return code + '\n' + self._print(expr.func)

    def _print_GltInterface(self, expr):
        code = '\n'.join(self._print(i) for i in expr.imports)
        return code + '\n' + self._print(expr.func)

    def _print_MinusInterfaceOperator(self, expr):
        # Interface-side markers are transparent for code generation.
        return self._print(expr.args[0])

    def _print_PlusInterfaceOperator(self, expr):
        return self._print(expr.args[0])

    def _print_FloorDiv(self, expr):
        return "(({})//({}))".format(self._print(expr.arg1), self._print(expr.arg2))

    # .........................................................
    #               SYMPY objects
    # .........................................................
    def _print_AppliedUndef(self, expr):
        # Print an undefined function application as a plain call.
        args = ','.join(self._print(i) for i in expr.args)
        fname = self._print(expr.func.__name__)
        return '{fname}({args})'.format(fname=fname, args=args)

    def _print_PythonTuple(self, expr):
        args = ', '.join(self._print(i) for i in expr.args)
        return '(' + args + ')'

    def _hprint_Pow(self, expr, rational=False, sqrt='math.sqrt'):
        """Printing helper function for ``Pow``

        Notes
        =====

        This only preprocesses the ``sqrt`` as math formatter

        Examples
        ========

        >>> from sympy.functions import sqrt
        >>> from sympy.printing.pycode import PythonCodePrinter
        >>> from sympy.abc import x

        Python code printer automatically looks up ``math.sqrt``.

        >>> printer = PythonCodePrinter({'standard':'python3'})
        >>> printer._hprint_Pow(sqrt(x), rational=True)
        'x**(1/2)'
        >>> printer._hprint_Pow(sqrt(x), rational=False)
        'math.sqrt(x)'
        >>> printer._hprint_Pow(1/sqrt(x), rational=True)
        'x**(-1/2)'
        >>> printer._hprint_Pow(1/sqrt(x), rational=False)
        '1/math.sqrt(x)'

        Using sqrt from numpy or mpmath

        >>> printer._hprint_Pow(sqrt(x), sqrt='numpy.sqrt')
        'numpy.sqrt(x)'
        >>> printer._hprint_Pow(sqrt(x), sqrt='mpmath.sqrt')
        'mpmath.sqrt(x)'

        See Also
        ========

        sympy.printing.str.StrPrinter._print_Pow
        """
        PREC = precedence(expr)

        # x**(1/2) -> sqrt(x) unless a rational power was requested
        if expr.exp == S.Half and not rational:
            func = self._module_format(sqrt)
            arg = self._print(expr.base)
            return '{func}({arg})'.format(func=func, arg=arg)

        if expr.is_commutative:
            # x**(-1/2) -> 1/sqrt(x)
            if -expr.exp is S.Half and not rational:
                func = self._module_format(sqrt)
                num = self._print(S.One)
                arg = self._print(expr.base)
                return "{num}/{func}({arg})".format(
                    num=num, func=func, arg=arg)

        base_str = self.parenthesize(expr.base, PREC, strict=False)
        exp_str = self.parenthesize(expr.exp, PREC, strict=False)
        return "{}**{}".format(base_str, exp_str)

    def _print_Pow(self, expr, rational=False):
        # Emit a bare 'sqrt' call (assumed in scope in generated code)
        # instead of the default 'math.sqrt'.
        return self._hprint_Pow(expr, rational=rational, sqrt='sqrt')

    def _print_Idx(self, expr):
        return self._print(str(expr))
def pycode(expr, **settings):
    """Convert a SymPy expression into a string of Python code.

    Keyword settings are forwarded to :class:`PythonCodePrinter`.  Notable
    ones are ``fully_qualified_modules`` (print ``math.sin`` vs. ``sin``,
    default ``True``) and ``enable_dependencies`` (also print dependencies
    such as EvalField and Kernel objects).

    Examples
    ========

    >>> from sympy import tan, Symbol
    >>> pycode(tan(Symbol('x')) + 1)
    'math.tan(x) + 1'
    """
    printer = PythonCodePrinter(settings)
    return printer.doprint(expr)
| 37.639241 | 94 | 0.546998 |
c2ae3f3ce1cf9de877169d9dc9b7b78fd490b456 | 4,183 | py | Python | CAIL2020/htfltpu/torch_server.py | ShenDezhou/CAIL | c4cfa98ab4ecedbce34a7a5a186830486047540c | [
"Apache-2.0"
] | 71 | 2020-07-16T01:49:27.000Z | 2022-03-27T16:55:00.000Z | CAIL2020/htfltpu/torch_server.py | ShenDezhou/CAIL | c4cfa98ab4ecedbce34a7a5a186830486047540c | [
"Apache-2.0"
] | 11 | 2020-09-18T14:26:25.000Z | 2022-02-09T23:49:33.000Z | CAIL2020/htfltpu/torch_server.py | ShenDezhou/CAIL | c4cfa98ab4ecedbce34a7a5a186830486047540c | [
"Apache-2.0"
] | 16 | 2020-07-15T07:24:30.000Z | 2022-03-19T05:41:11.000Z | import argparse
import logging
import os
from types import SimpleNamespace
import falcon
import pandas
import torch
from falcon_cors import CORS
import json
import waitress
from data import Data
from torch.utils.data import DataLoader
from utils import load_torch_model
from model import BertForClassification, CharCNN
from evaluate import evaluate
import time
from classmerge import classy_dic
from dataclean import cleanall, shortenlines
# Module-level wiring, executed at import time.
logging.basicConfig(level=logging.INFO, format='%(asctime)-18s %(message)s')
logger = logging.getLogger()

# Allow cross-origin requests from any web front-end.
cors_allow_all = CORS(allow_all_origins=True,
                      allow_origins_list=['*'],
                      allow_all_headers=True,
                      allow_all_methods=True,
                      allow_credentials_all_origins=True
                      )

# NOTE: command-line arguments are parsed at import time, so importing this
# module from another program consumes sys.argv.
parser = argparse.ArgumentParser()
parser.add_argument(
    '-c', '--config_file', default='config/bert_config.json',
    help='model config file')
args = parser.parse_args()
model_config=args.config_file

# Maps the 'model_type' config value to its implementation class.
MODEL_MAP = {
    'bert': BertForClassification,
    'cnn': CharCNN
}
class TorchResource:
    """Falcon resource classifying a (title, content) pair with a trained model.

    GET passes the fields as query parameters '1' and '2'; POST passes a JSON
    body with 'title' and 'content' keys.  Both return {'answer': [...]} with
    the predicted class names.
    """

    def __init__(self):
        logger.info("...")
        # 0. Load config
        with open(model_config) as fin:
            self.config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
        # Prefer the GPU when available, otherwise fall back to CPU.
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')
        # 1. Load data
        self.data = Data(vocab_file=os.path.join(self.config.model_path, 'vocab.txt'),
                         max_seq_len=self.config.max_seq_len,
                         model_type=self.config.model_type, config=self.config)
        # 2. Load model
        self.model = MODEL_MAP[self.config.model_type](self.config)
        self.model = load_torch_model(
            self.model, model_path=os.path.join(self.config.model_path, 'model.bin'))
        self.model.to(self.device)
        logger.info("###")

    def bert_classification(self, title, content):
        """Run the classifier on one (title, content) pair.

        The sample is round-tripped through a CSV file because Data.load_file
        reads from disk.  NOTE(review): the temporary file under data/ is
        never deleted - confirm whether cleanup is wanted.
        """
        logger.info('1:{}, 2:{}'.format(title, content))
        row = {'type1': '/', 'title': title, 'content': content}
        df = pandas.DataFrame().append(row, ignore_index=True)
        filename = "data/{}.csv".format(time.time())
        df.to_csv(filename, index=False, columns=['type1', 'title', 'content'])
        test_set = self.data.load_file(filename, train=False)
        data_loader_test = DataLoader(
            test_set, batch_size=self.config.batch_size, shuffle=False)
        # Evaluate and map numeric predictions back to class names.
        answer_list = evaluate(self.model, data_loader_test, self.device)
        answer_list = [classy_dic[i] for i in answer_list]
        return {"answer": answer_list}

    def on_get(self, req, resp):
        """Handle GET: title in query param '1', content in query param '2'."""
        logger.info("...")
        resp.set_header('Access-Control-Allow-Origin', '*')
        resp.set_header('Access-Control-Allow-Methods', '*')
        resp.set_header('Access-Control-Allow-Headers', '*')
        resp.set_header('Access-Control-Allow-Credentials','true')
        title = req.get_param('1', True)
        content = req.get_param('2', True)
        clean_title = shortenlines(title)
        clean_content = cleanall(content)
        resp.media = self.bert_classification(clean_title, clean_content)
        logger.info("###")

    def on_post(self, req, resp):
        """Handle POST: JSON body with 'title' and 'content' fields."""
        resp.set_header('Access-Control-Allow-Origin', '*')
        resp.set_header('Access-Control-Allow-Methods', '*')
        resp.set_header('Access-Control-Allow-Headers', '*')
        resp.set_header('Access-Control-Allow-Credentials', 'true')
        resp.set_header("Cache-Control", "no-cache")
        data = req.stream.read(req.content_length)
        jsondata = json.loads(data)
        # BUGFIX: json.loads returns a dict, so the previous attribute access
        # (jsondata.title / jsondata.content) raised AttributeError on every
        # POST request; use key access instead.
        clean_title = shortenlines(jsondata['title'])
        clean_content = cleanall(jsondata['content'])
        resp.media = self.bert_classification(clean_title, clean_content)
# Stand up the REST service: route /z to the classifier and serve it with
# waitress (48 worker threads, plain HTTP on port 58080).
if __name__=="__main__":
    api = falcon.API(middleware=[cors_allow_all.middleware])
    api.req_options.auto_parse_form_urlencoded = True
    api.add_route('/z', TorchResource())
    waitress.serve(api, port=58080, threads=48, url_scheme='http')
| 37.348214 | 86 | 0.65551 |
82d6fffc1bf47b27b4640e75bb5ed873ab36bc81 | 586 | py | Python | app/view_conversation.py | rafaelbr/chatbot-clima | 26ecec47b5bce203f011a079645a5e69ae01d991 | [
"MIT"
] | 2 | 2019-06-04T22:52:26.000Z | 2019-06-25T23:15:41.000Z | app/view_conversation.py | rafaelbr/chatbot-clima | 26ecec47b5bce203f011a079645a5e69ae01d991 | [
"MIT"
] | 1 | 2019-07-10T00:23:16.000Z | 2019-07-10T00:23:42.000Z | app/view_conversation.py | rafaelbr/chatbot-clima | 26ecec47b5bce203f011a079645a5e69ae01d991 | [
"MIT"
] | 1 | 2019-06-04T22:52:35.000Z | 2019-06-04T22:52:35.000Z | import flask, json
import os
from utils import post_facebook_message, get_weather
from flask import Blueprint, request, jsonify, make_response
from flask_restful import Api, Resource
from cloudant_server import DatabaseAccess
# Blueprint for conversation endpoints, mounted under the /conversation prefix.
conversationBP = Blueprint('conversation', __name__)
api = Api(conversationBP, prefix='/conversation')
class MessagesEndpoint(Resource):
    """REST endpoint exposing the stored conversations as a JSON array."""

    def __init__(self):
        # Cloudant-backed store holding the conversation records.
        self.database = DatabaseAccess()

    def get(self):
        conversations = list(self.database.listConversations())
        return make_response(json.dumps(conversations))
api.add_resource(MessagesEndpoint, '/messages') | 30.842105 | 76 | 0.776451 |
e41ec5e3aee40a3ff11577c47188d3c8b9a36a24 | 10,508 | py | Python | tests/base_fixtures.py | eggsandbeer/scheduler | 18ad32bd7b824ca334e2c5a1bbd10f599dfc2c82 | [
"BSD-3-Clause"
] | null | null | null | tests/base_fixtures.py | eggsandbeer/scheduler | 18ad32bd7b824ca334e2c5a1bbd10f599dfc2c82 | [
"BSD-3-Clause"
] | null | null | null | tests/base_fixtures.py | eggsandbeer/scheduler | 18ad32bd7b824ca334e2c5a1bbd10f599dfc2c82 | [
"BSD-3-Clause"
] | null | null | null | __author__ = 'Bohdan Mushkevych'
import inspect
import random
from datetime import datetime, timedelta
from db.model import raw_data
from db.model.site_statistics import SiteStatistics
from db.model.single_session import SingleSession
from db.dao.single_session_dao import SingleSessionDao
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.db.model import unit_of_work
from synergy.db.model.unit_of_work import UnitOfWork
from synergy.db.model.synergy_mq_transmission import SynergyMqTransmission
from synergy.db.manager import ds_manager
from synergy.system import time_helper
from synergy.system.time_qualifier import *
from synergy.scheduler.scheduler_constants import TYPE_MANAGED
from synergy.conf import context
from synergy.system.data_logging import get_logger
from tests.ut_context import PROCESS_UNIT_TEST
TOTAL_ENTRIES = 101
class TestConsumer(object):
    """No-op stand-in for the MQ Flopsy consumer; unit tests only."""

    def acknowledge(self, delivery_tag):
        """Pretend to ack the message; nothing to do."""

    def close(self):
        """Pretend to close the channel; nothing to do."""

    def reject(self, delivery_tag):
        """Pretend to reject the message; nothing to do."""

    def cancel(self, delivery_tag):
        """Pretend to cancel the delivery; nothing to do."""
class TestMessage(object):
    """Bare-bones substitute for an MQ message; unit tests only."""

    def __init__(self, process_name=None, uow_id=None):
        # Serialize a transmission record the same way the real broker would.
        transmission = SynergyMqTransmission(process_name=process_name,
                                             unit_of_work_id=uow_id)
        self.body = transmission.to_json()
        self.delivery_tag = None
class TestSiteMembershipDictionary(dict):
    """Stubbed Synergy Construction membership reply; testing use only.

    Maps every given site to a single pseudo-random portfolio name.
    """

    def __init__(self, array_of_sites):
        super(TestSiteMembershipDictionary, self).__init__()
        # Fixed seed keeps the generated mapping reproducible between runs.
        random.seed('RANDOM_SEED_OBJECT')
        for site in array_of_sites:
            self[site] = ['portfolio_%d' % random.randint(0, 20)]
class TestRestClient():
    """Fake REST client returning canned Synergy Construction responses."""

    def __init__(self, logger):
        self.logger = logger

    def get_group_mapping(self, timeperiod, list_of_sites):
        return TestSiteMembershipDictionary(list_of_sites)

    def get_client_mapping(self, timeperiod, list_of_sites):
        return TestSiteMembershipDictionary(list_of_sites)

    def get_portfolio_mapping(self, timeperiod, list_of_sites):
        return TestSiteMembershipDictionary(list_of_sites)

    def get_list_of_sci(self, timeperiod):
        # Four fixed client identifiers.
        return ['client_id_%d' % index for index in range(4)]
def get_field_starting_with(prefix, module):
    """Return values of *module* attributes whose names start with *prefix*.

    :param prefix: attribute-name prefix to match, e.g. EXPECTED_YEARLY_TEMPLATE
    :param module: module (or any object) to inspect; attributes are scanned in
                   the alphabetical order produced by inspect.getmembers
    :return: list of values of the matching attributes
    """
    return [value for name, value in inspect.getmembers(module)
            if name.startswith(prefix)]
def compare_dictionaries(dict_actual, dict_expected):
    """Assert that *dict_actual* contains every entry from *dict_expected*.

    Nested dictionaries are compared recursively; lists (and sets) are
    compared ignoring element order.

    :raise AssertionError: if any expected entry is missing or differs
    """
    for expected_key in dict_expected:
        expected_value = dict_expected[expected_key]
        actual_value = dict_actual.get(expected_key)
        if isinstance(expected_value, dict):
            # BUGFIX: the arguments were previously passed swapped here,
            # comparing nested dicts with actual/expected roles reversed.
            compare_dictionaries(actual_value, expected_value)
        elif isinstance(expected_value, list):
            if isinstance(actual_value, set):
                actual_value = list(actual_value)
            # BUGFIX: list.sort() returns None, so the old comparison
            # `actual.sort() == expected.sort()` was always True; compare
            # sorted copies instead.
            assert sorted(actual_value) == sorted(expected_value), \
                'key %r: actual %r vs expected %r' \
                % (expected_key, actual_value, expected_value)
        elif actual_value != expected_value:
            assert False, 'key %r: actual %r vs expected %r' \
                          % (expected_key, actual_value, expected_value)
def create_unit_of_work(process_name,
                        start_id,
                        end_id,
                        timeperiod='INVALID_TIMEPERIOD',
                        state=unit_of_work.STATE_REQUESTED,
                        creation_at=None,
                        uow_id=None):
    """Create and return a managed unit_of_work record (not persisted).

    :param creation_at: creation timestamp; defaults to the current UTC time.
        (BUGFIX: the old default ``datetime.utcnow()`` was evaluated once at
        module import, stamping every uow with the module-load moment.)
    :param uow_id: optional db id to assign to the record
    """
    if creation_at is None:
        creation_at = datetime.utcnow()

    try:
        source_collection = context.process_context[process_name].source
        sink_collection = context.process_context[process_name].sink
    except KeyError:
        # process is not registered in the context - leave endpoints undefined
        source_collection = None
        sink_collection = None

    uow = UnitOfWork()
    uow.process_name = process_name
    uow.timeperiod = timeperiod
    uow.start_id = start_id
    uow.end_id = end_id
    uow.start_timeperiod = timeperiod
    uow.end_timeperiod = timeperiod
    uow.created_at = creation_at
    uow.source = source_collection
    uow.sink = sink_collection
    uow.state = state
    uow.unit_of_work_type = TYPE_MANAGED
    uow.number_of_retries = 0
    uow.arguments = context.process_context[process_name].arguments

    if uow_id is not None:
        uow.db_id = uow_id
    return uow
def create_and_insert_unit_of_work(process_name, start_id, end_id, state=unit_of_work.STATE_REQUESTED,
                                   timeperiod='INVALID_TIMEPERIOD'):
    """Build a unit_of_work and persist it; return the new record's db id."""
    uow = create_unit_of_work(process_name, start_id, end_id, timeperiod, state)
    uow_dao = UnitOfWorkDao(get_logger(process_name))
    return uow_dao.insert(uow)
def create_session_stats(composite_key_function, seed='RANDOM_SEED_OBJECT'):
    """Insert TOTAL_ENTRIES synthetic SingleSession documents; return their ids.

    :param composite_key_function: callable (index, total) -> composite key;
        its first two elements become the session key prefix
    :param seed: random seed, fixed by default so the fixture is reproducible
    :return: list of db ids of the persisted documents
    """
    logger = get_logger(PROCESS_UNIT_TEST)
    ss_dao = SingleSessionDao(logger)
    time_array = ['20010303102210', '20010303102212', '20010303102215', '20010303102250']
    random.seed(seed)
    object_ids = []
    for i in range(TOTAL_ENTRIES):
        key = composite_key_function(i, TOTAL_ENTRIES)

        session = SingleSession()
        session.key = (key[0], key[1], 'session_id_%s' % str(i))
        session.ip = '192.168.0.2'
        # vary screen resolution across entries: i % 3, i % 5, then the rest
        if i % 3 == 0:
            session.user_profile.screen_res = (240, 360)
        elif i % 5 == 0:
            session.user_profile.screen_res = (360, 480)
        else:
            session.user_profile.screen_res = (760, 980)

        # alternate user environment between two canned profiles
        if i % 2 == 0:
            session.user_profile.os = 'Linux'
            session.user_profile.browser = 'FF %s' % str(i % 4)
            session.user_profile.language = 'en_ca'
            session.user_profile.country = 'ca'
        else:
            session.user_profile.os = 'Windows'
            session.user_profile.browser = 'IE %s' % str(i % 9)
            session.user_profile.language = 'ua_uk'
            session.user_profile.country = 'eu'

        session.browsing_history.total_duration = random.randint(0, 200)
        session.browsing_history.number_of_pageviews = random.randint(1, 5)

        # 1 to 4 page-visit timestamps taken from time_array
        for index in range(random.randint(1, 4)):
            session.browsing_history.number_of_entries = index + 1
            session.browsing_history.set_entry_timestamp(index, time_array[index])

        sess_id = ss_dao.update(session)
        object_ids.append(sess_id)

    return object_ids
def _generate_entries(token, number, value):
items = dict()
for i in range(number):
items[token + str(i)] = value
return items
def generate_site_composite_key(index, time_qualifier):
    """Return a (domain_name, timeperiod) key for the index-th synthetic site."""
    start_time = '20010303101010'  # YYYYMMDDHHmmSS
    # 33 sites per timeperiod: larger than hours in a day or days in a month
    iteration_index = index // 33
    timeperiod = time_helper.cast_to_time_qualifier(time_qualifier, start_time)
    if iteration_index:
        timeperiod = time_helper.increment_timeperiod(time_qualifier,
                                                      timeperiod,
                                                      delta=iteration_index)
    domain_name = 'domain_name_%s' % str(index - iteration_index * 33)
    return domain_name, timeperiod
def create_site_stats(collection_name, time_qualifier, seed='RANDOM_SEED_OBJECT'):
    """Insert TOTAL_ENTRIES synthetic SiteStatistics documents; return their ids.

    :param collection_name: target collection for the generated documents
    :param time_qualifier: qualifier used to build each document's timeperiod
    :param seed: random seed, fixed by default so the fixture is reproducible
    :return: list of ids of the inserted documents
    """
    logger = get_logger(PROCESS_UNIT_TEST)
    ds = ds_manager.ds_factory(logger)
    random.seed(seed)
    object_ids = []
    for i in range(TOTAL_ENTRIES):
        key = generate_site_composite_key(i, time_qualifier)

        site_stat = SiteStatistics()
        site_stat.key = key
        site_stat.stat.number_of_visits = random.randint(1, 1000)
        site_stat.stat.total_duration = random.randint(0, 100)

        items = _generate_entries('os_', 5, i)
        site_stat.stat.os = items

        items = _generate_entries('browser_', 5, i)
        site_stat.stat.browsers = items

        # fixed histogram of screen resolutions
        items = dict()
        items['(320, 240)'] = 3
        items['(640, 480)'] = 5
        items['(1024, 960)'] = 7
        items['(1280, 768)'] = 9
        site_stat.stat.screen_res = items

        # fixed histogram of locales
        items = dict()
        items['ca_en'] = 3
        items['ca_fr'] = 5
        items['ua_uk'] = 7
        items['us_en'] = 9
        site_stat.stat.languages = items

        # fixed histogram of countries
        items = dict()
        items['ca'] = 3
        items['fr'] = 5
        items['uk'] = 7
        items['us'] = 9
        site_stat.stat.countries = items

        stat_id = ds.insert(collection_name, site_stat.document)
        object_ids.append(stat_id)

    return object_ids
def clean_site_entries(collection_name, time_qualifier):
    """Remove every synthetic site document created by create_site_stats."""
    ds = ds_manager.ds_factory(get_logger(PROCESS_UNIT_TEST))
    connection = ds.connection(collection_name)
    for index in range(TOTAL_ENTRIES):
        domain_name, timeperiod = generate_site_composite_key(index, time_qualifier)
        connection.remove({raw_data.DOMAIN_NAME: domain_name,
                           raw_data.TIMEPERIOD: timeperiod})
def wind_actual_timeperiod(new_time):
    """Build a stand-in for time_helper.actual_timeperiod pinned to *new_time*.

    Used by tests to overload the real clock-based helper.
    """
    def actual_timeperiod(time_qualifier):
        # Convert the pinned datetime instead of consulting the system clock.
        return time_helper.datetime_to_synergy(time_qualifier, new_time)

    return actual_timeperiod
def wind_the_time(time_qualifier, timeperiod, delta):
    """Shift *timeperiod* by *delta* units (hours or days) and return it."""
    pattern = time_helper.define_pattern(timeperiod)
    moment = datetime.strptime(timeperiod, pattern)

    if time_qualifier == QUALIFIER_HOURLY:
        return (moment + timedelta(hours=delta)).strftime('%Y%m%d%H')
    if time_qualifier == QUALIFIER_DAILY:
        # daily periods always carry a trailing '00' hour component
        return (moment + timedelta(days=delta)).strftime('%Y%m%d00')
    raise ValueError('unsupported time_qualifier')
# No standalone behaviour: this module only provides fixtures for other tests.
if __name__ == '__main__':
    pass
| 35.5 | 113 | 0.680815 |
fb7df07a146bad0732e974ad2777e37e1c007622 | 748 | py | Python | setup.py | phac-nml/bioCanon | 3797a16d3782f1784105ce1f6a4661aa69d0a150 | [
"MIT"
] | 1 | 2021-01-18T03:39:40.000Z | 2021-01-18T03:39:40.000Z | setup.py | phac-nml/bioCanon | 3797a16d3782f1784105ce1f6a4661aa69d0a150 | [
"MIT"
] | 2 | 2020-06-23T18:44:16.000Z | 2020-06-25T17:03:25.000Z | setup.py | phac-nml/bioCanon | 3797a16d3782f1784105ce1f6a4661aa69d0a150 | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="bioCanon-phac", # Replace with your own username
version="0.0.8",
author="James Robertson, Amanda Saunders, Justin Schonfeld",
author_email="justin.schonfeld@canada.ca",
description="A package for generating potential biohansel schemes",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/phac-nml/bioCanon",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 32.521739 | 71 | 0.685829 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.