hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7f6e89e15a90d8fdaed00abaf8db9749abc5e01 | 14,872 | py | Python | oauth/oidc.py | sferich888/quay | 4672db1df76874238baf134d04e74112ac9f630d | [
"Apache-2.0"
] | null | null | null | oauth/oidc.py | sferich888/quay | 4672db1df76874238baf134d04e74112ac9f630d | [
"Apache-2.0"
] | null | null | null | oauth/oidc.py | sferich888/quay | 4672db1df76874238baf134d04e74112ac9f630d | [
"Apache-2.0"
] | null | null | null | import time
import json
import logging
import urllib.parse
import jwt
from cachetools.func import lru_cache
from cachetools.ttl import TTLCache
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import load_der_public_key
from jwkest.jwk import KEYS
from oauth.base import (
OAuthService,
OAuthExchangeCodeException,
OAuthGetUserInfoException,
OAuthEndpoint,
)
from oauth.login import OAuthLoginException
from util.security.jwtutil import decode, InvalidTokenError
logger = logging.getLogger(__name__)
# Well-known relative path at which OIDC providers publish their discovery document.
OIDC_WELLKNOWN = ".well-known/openid-configuration"
PUBLIC_KEY_CACHE_TTL = 3600  # 1 hour; how long fetched JWKS signing keys are cached
# JWT signature algorithms accepted when validating tokens.
ALLOWED_ALGORITHMS = ["RS256"]
# Tolerated clock drift (seconds) between this host and the token issuer.
JWT_CLOCK_SKEW_SECONDS = 30
class DiscoveryFailureException(Exception):
    """Raised when the OIDC discovery document cannot be retrieved or parsed."""

    pass
class PublicKeyLoadException(Exception):
    """Raised when the OIDC provider's public signing key cannot be loaded."""

    pass
class OIDCLoginService(OAuthService):
    """
    Defines a generic service for all OpenID-Connect compatible login services.

    Configuration (client id/secret, OIDC server, optional claim-name overrides)
    comes from the config block handed to the constructor; endpoint locations are
    resolved lazily via the provider's OIDC discovery document.
    """

    def __init__(self, config, key_name, client=None):
        super(OIDCLoginService, self).__init__(config, key_name)
        # Service id is the lowercased prefix of the config key (e.g. "FOO_LOGIN_CONFIG" -> "foo").
        self._id = key_name[0 : key_name.find("_")].lower()
        self._http_client = client or config.get("HTTPCLIENT")
        self._mailing = config.get("FEATURE_MAILING", False)
        # Single-entry TTL cache for the provider's JWKS signing keys.
        self._public_key_cache = _PublicKeyCache(self, 1, PUBLIC_KEY_CACHE_TTL)

    def service_id(self):
        """Returns the short identifier of this login service."""
        return self._id

    def service_name(self):
        """Returns the human-readable service name, falling back to the service id."""
        return self.config.get("SERVICE_NAME", self.service_id())

    def get_icon(self):
        """Returns the icon CSS class configured for this service."""
        return self.config.get("SERVICE_ICON", "fa-user-circle")

    def get_login_scopes(self):
        """
        Returns the OAuth scopes to request at login: the intersection of the
        configured (or default) scopes with those the provider supports.
        """
        default_scopes = ["openid"]

        if self.user_endpoint() is not None:
            default_scopes.append("profile")

        if self._mailing:
            default_scopes.append("email")

        supported_scopes = self._oidc_config().get("scopes_supported", default_scopes)
        login_scopes = self.config.get("LOGIN_SCOPES") or supported_scopes
        return list(set(login_scopes) & set(supported_scopes))

    def authorize_endpoint(self):
        """Returns the provider's authorization endpoint, forcing the code flow."""
        return self._get_endpoint("authorization_endpoint").with_param("response_type", "code")

    def token_endpoint(self):
        """Returns the provider's token endpoint."""
        return self._get_endpoint("token_endpoint")

    def user_endpoint(self):
        """Returns the provider's userinfo endpoint, or None if not published."""
        return self._get_endpoint("userinfo_endpoint")

    def _get_endpoint(self, endpoint_key, **kwargs):
        """
        Returns the OIDC endpoint with the given key found in the OIDC discovery document, with the
        given kwargs added as query parameters.

        Additionally, any defined parameters found in the OIDC configuration block are also added.
        Returns None when the discovery document does not publish the endpoint.
        """
        endpoint = self._oidc_config().get(endpoint_key, "")
        if not endpoint:
            return None

        (scheme, netloc, path, query, fragment) = urllib.parse.urlsplit(endpoint)

        # Add the query parameters from the kwargs and the config.
        custom_parameters = self.config.get("OIDC_ENDPOINT_CUSTOM_PARAMS", {}).get(endpoint_key, {})

        query_params = urllib.parse.parse_qs(query, keep_blank_values=True)
        query_params.update(kwargs)
        query_params.update(custom_parameters)

        # The query parameters travel separately via OAuthEndpoint, so rebuild the
        # URL without a query string. Note: the query component must be a string
        # (""), not {} — urlunsplit only tolerated the dict because it is falsy.
        return OAuthEndpoint(
            urllib.parse.urlunsplit((scheme, netloc, path, "", fragment)), query_params
        )

    def validate(self):
        """Returns True if the service is usable, i.e. at least one login scope resolves."""
        return bool(self.get_login_scopes())

    def validate_client_id_and_secret(self, http_client, url_scheme_and_hostname):
        """
        Verifies the configured client id by hitting the authorization endpoint.

        Raises an Exception when the endpoint does not answer with a 2xx status.
        """
        # TODO: find a way to verify client secret too.
        check_auth_url = http_client.get(self.get_auth_url(url_scheme_and_hostname, "", "", []))
        if check_auth_url.status_code // 100 != 2:
            raise Exception("Got non-200 status code for authorization endpoint")

    def requires_form_encoding(self):
        """OIDC token exchanges are form-encoded per the spec."""
        return True

    def get_public_config(self):
        """Returns the subset of configuration that is safe to expose to clients."""
        return {
            "CLIENT_ID": self.client_id(),
            "OIDC": True,
        }

    def exchange_code_for_tokens(self, app_config, http_client, code, redirect_suffix):
        """
        Exchanges the authorization code for an (id_token, access_token) pair.

        Raises OAuthLoginException if the exchange fails or either token is missing.
        """
        # Exchange the code for the access token and id_token
        try:
            json_data = self.exchange_code(
                app_config,
                http_client,
                code,
                redirect_suffix=redirect_suffix,
                form_encode=self.requires_form_encoding(),
            )
        except OAuthExchangeCodeException as oce:
            raise OAuthLoginException(str(oce))

        # Make sure we received both.
        access_token = json_data.get("access_token", None)
        if access_token is None:
            logger.debug("Missing access_token in response: %s", json_data)
            raise OAuthLoginException("Missing `access_token` in OIDC response")

        id_token = json_data.get("id_token", None)
        if id_token is None:
            logger.debug("Missing id_token in response: %s", json_data)
            raise OAuthLoginException("Missing `id_token` in OIDC response")

        return id_token, access_token

    def exchange_code_for_login(self, app_config, http_client, code, redirect_suffix):
        """
        Completes the login flow for an authorization code.

        Returns a (sub, username, email_address) tuple; email_address may be None.
        Raises OAuthLoginException on any token, key, or claim validation failure.
        """
        # Exchange the code for the access token and id_token
        id_token, access_token = self.exchange_code_for_tokens(
            app_config, http_client, code, redirect_suffix
        )

        # Decode the id_token.
        try:
            decoded_id_token = self.decode_user_jwt(id_token)
        except InvalidTokenError as ite:
            logger.exception("Got invalid token error on OIDC decode: %s", ite)
            raise OAuthLoginException("Could not decode OIDC token")
        except PublicKeyLoadException as pke:
            logger.exception("Could not load public key during OIDC decode: %s", pke)
            raise OAuthLoginException("Could not find public OIDC key")

        # If there is a user endpoint, use it to retrieve the user's information. Otherwise, we use
        # the decoded ID token.
        if self.user_endpoint():
            # Retrieve the user information.
            try:
                user_info = self.get_user_info(http_client, access_token)
            except OAuthGetUserInfoException as oge:
                raise OAuthLoginException(str(oge))
        else:
            user_info = decoded_id_token

        # Verify subs: the userinfo endpoint must describe the same subject as the id_token.
        if user_info["sub"] != decoded_id_token["sub"]:
            logger.debug(
                "Mismatch in `sub` returned by OIDC user info endpoint: %s vs %s",
                user_info["sub"],
                decoded_id_token["sub"],
            )
            raise OAuthLoginException("Mismatch in `sub` returned by OIDC user info endpoint")

        # Check if we have a verified email address.
        if self.config.get("VERIFIED_EMAIL_CLAIM_NAME"):
            email_address = user_info.get(self.config["VERIFIED_EMAIL_CLAIM_NAME"])
        else:
            email_address = user_info.get("email") if user_info.get("email_verified") else None

        logger.debug("Found e-mail address `%s` for sub `%s`", email_address, user_info["sub"])
        if self._mailing:
            if email_address is None:
                raise OAuthLoginException(
                    "A verified email address is required to login with this service"
                )

        # Check for a preferred username.
        if self.config.get("PREFERRED_USERNAME_CLAIM_NAME"):
            lusername = user_info.get(self.config["PREFERRED_USERNAME_CLAIM_NAME"])
        else:
            lusername = user_info.get("preferred_username")
            if lusername is None:
                # Note: Active Directory provides `unique_name` and `upn`.
                # https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-id-and-access-tokens
                lusername = user_info.get("unique_name", user_info.get("upn"))

        if lusername is None:
            lusername = user_info["sub"]

        # Strip any e-mail style domain suffix from the username.
        if lusername.find("@") >= 0:
            lusername = lusername[0 : lusername.find("@")]

        return decoded_id_token["sub"], lusername, email_address

    @property
    def _issuer(self):
        # Read the issuer from the OIDC config, falling back to the configured OIDC server.
        issuer = self._oidc_config().get("issuer", self.config["OIDC_SERVER"])

        # If specified, use the overridden OIDC issuer.
        return self.config.get("OIDC_ISSUER", issuer)

    # NOTE(review): lru_cache on a method keys on `self` and keeps the instance
    # alive for the cache's lifetime; acceptable if services are long-lived
    # singletons — confirm.
    @lru_cache(maxsize=1)
    def _oidc_config(self):
        """Returns the (cached) OIDC discovery document, or {} when no server is configured."""
        if self.config.get("OIDC_SERVER"):
            return self._load_oidc_config_via_discovery(self.config.get("DEBUGGING", False))
        else:
            return {}

    def _load_oidc_config_via_discovery(self, is_debugging):
        """
        Attempts to load the OIDC config via the OIDC discovery mechanism.

        If is_debugging is True, non-secure connections are allowed. Raises a
        DiscoveryFailureException on failure.
        """
        oidc_server = self.config["OIDC_SERVER"]
        if not oidc_server.startswith("https://") and not is_debugging:
            raise DiscoveryFailureException("OIDC server must be accessed over SSL")

        discovery_url = urllib.parse.urljoin(oidc_server, OIDC_WELLKNOWN)
        discovery = self._http_client.get(discovery_url, timeout=5, verify=is_debugging is False)
        if discovery.status_code // 100 != 2:
            logger.debug(
                "Got %s response for OIDC discovery: %s", discovery.status_code, discovery.text
            )
            raise DiscoveryFailureException("Could not load OIDC discovery information")

        try:
            return json.loads(discovery.text)
        except ValueError:
            logger.exception("Could not parse OIDC discovery for url: %s", discovery_url)
            raise DiscoveryFailureException("Could not parse OIDC discovery information")

    def decode_user_jwt(self, token):
        """
        Decodes the given JWT under the given provider and returns it.

        Raises an InvalidTokenError exception on an invalid token or a PublicKeyLoadException if the
        public key could not be loaded for decoding.
        """
        # Find the key to use.
        headers = jwt.get_unverified_header(token)
        kid = headers.get("kid", None)
        if kid is None:
            raise InvalidTokenError("Missing `kid` header")

        logger.debug(
            "Using key `%s`, attempting to decode token `%s` with aud `%s` and iss `%s`",
            kid,
            token,
            self.client_id(),
            self._issuer,
        )

        try:
            return decode(
                token,
                self._get_public_key(kid),
                algorithms=ALLOWED_ALGORITHMS,
                audience=self.client_id(),
                issuer=self._issuer,
                leeway=JWT_CLOCK_SKEW_SECONDS,
                options=dict(require_nbf=False),
            )
        except InvalidTokenError as ite:
            logger.warning(
                "Could not decode token `%s` for OIDC: %s. Will attempt again after "
                + "retrieving public keys.",
                token,
                ite,
            )

            # Public key may have expired. Try to retrieve an updated public key and use it to decode.
            try:
                return decode(
                    token,
                    self._get_public_key(kid, force_refresh=True),
                    algorithms=ALLOWED_ALGORITHMS,
                    audience=self.client_id(),
                    issuer=self._issuer,
                    leeway=JWT_CLOCK_SKEW_SECONDS,
                    options=dict(require_nbf=False),
                )
            except InvalidTokenError as ite:
                logger.warning(
                    "Could not decode token `%s` for OIDC: %s. Attempted again after "
                    + "retrieving public keys.",
                    token,
                    ite,
                )

                # Decode again with verify=False, and log the decoded token to allow for easier debugging.
                nonverified = decode(
                    token,
                    self._get_public_key(kid, force_refresh=True),
                    algorithms=ALLOWED_ALGORITHMS,
                    audience=self.client_id(),
                    issuer=self._issuer,
                    leeway=JWT_CLOCK_SKEW_SECONDS,
                    options=dict(require_nbf=False, verify=False),
                )
                logger.debug("Got an error when trying to verify OIDC JWT: %s", nonverified)
                raise ite

    def _get_public_key(self, kid, force_refresh=False):
        """
        Retrieves the public key for this handler with the given kid.

        Raises a PublicKeyLoadException on failure.
        """
        # If force_refresh is true, we expire all the items in the cache by setting the time to
        # the current time + the expiration TTL.
        if force_refresh:
            self._public_key_cache.expire(time=time.time() + PUBLIC_KEY_CACHE_TTL)

        # Retrieve the public key from the cache. If the cache does not contain the public key,
        # it will internally call _load_public_key to retrieve it and then save it.
        return self._public_key_cache[kid]
class _PublicKeyCache(TTLCache):
    """TTL cache of JWKS signing keys, populated on demand from the OIDC provider."""

    def __init__(self, login_service, *args, **kwargs):
        super(_PublicKeyCache, self).__init__(*args, **kwargs)
        self._login_service = login_service

    def __missing__(self, kid):
        """
        Loads the public key for this handler from the OIDC service.

        Raises PublicKeyLoadException on failure.
        """
        jwks_uri = self._login_service._oidc_config()["jwks_uri"]

        # Fetch the provider's key set.
        try:
            jwk_set = KEYS()
            verify_tls = not self._login_service.config.get("DEBUGGING", False)
            jwk_set.load_from_url(jwks_uri, verify=verify_tls)
        except Exception as ex:
            logger.exception("Exception loading public key")
            raise PublicKeyLoadException(str(ex))

        # Select the RSA key matching the requested kid.
        candidates = jwk_set.by_kid(kid)
        if not candidates:
            raise PublicKeyLoadException("Public key %s not found" % kid)

        rsa_candidates = [candidate for candidate in candidates if candidate.kty == "RSA"]
        if not rsa_candidates:
            raise PublicKeyLoadException("No RSA form of public key %s not found" % kid)

        selected = rsa_candidates[0]
        selected.deserialize()

        # Reload the key so that we can give a key *instance* to PyJWT to work around its weird parsing
        # issues.
        public_key = load_der_public_key(
            selected.key.exportKey("DER"), backend=default_backend()
        )
        self[kid] = public_key
        return public_key
| 37.555556 | 106 | 0.626412 | import time
import json
import logging
import urllib.parse
import jwt
from cachetools.func import lru_cache
from cachetools.ttl import TTLCache
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import load_der_public_key
from jwkest.jwk import KEYS
from oauth.base import (
OAuthService,
OAuthExchangeCodeException,
OAuthGetUserInfoException,
OAuthEndpoint,
)
from oauth.login import OAuthLoginException
from util.security.jwtutil import decode, InvalidTokenError
logger = logging.getLogger(__name__)
OIDC_WELLKNOWN = ".well-known/openid-configuration"
PUBLIC_KEY_CACHE_TTL = 3600
ALLOWED_ALGORITHMS = ["RS256"]
JWT_CLOCK_SKEW_SECONDS = 30
class DiscoveryFailureException(Exception):
pass
class PublicKeyLoadException(Exception):
pass
class OIDCLoginService(OAuthService):
def __init__(self, config, key_name, client=None):
super(OIDCLoginService, self).__init__(config, key_name)
self._id = key_name[0 : key_name.find("_")].lower()
self._http_client = client or config.get("HTTPCLIENT")
self._mailing = config.get("FEATURE_MAILING", False)
self._public_key_cache = _PublicKeyCache(self, 1, PUBLIC_KEY_CACHE_TTL)
def service_id(self):
return self._id
def service_name(self):
return self.config.get("SERVICE_NAME", self.service_id())
def get_icon(self):
return self.config.get("SERVICE_ICON", "fa-user-circle")
def get_login_scopes(self):
default_scopes = ["openid"]
if self.user_endpoint() is not None:
default_scopes.append("profile")
if self._mailing:
default_scopes.append("email")
supported_scopes = self._oidc_config().get("scopes_supported", default_scopes)
login_scopes = self.config.get("LOGIN_SCOPES") or supported_scopes
return list(set(login_scopes) & set(supported_scopes))
def authorize_endpoint(self):
return self._get_endpoint("authorization_endpoint").with_param("response_type", "code")
def token_endpoint(self):
return self._get_endpoint("token_endpoint")
def user_endpoint(self):
return self._get_endpoint("userinfo_endpoint")
def _get_endpoint(self, endpoint_key, **kwargs):
endpoint = self._oidc_config().get(endpoint_key, "")
if not endpoint:
return None
(scheme, netloc, path, query, fragment) = urllib.parse.urlsplit(endpoint)
custom_parameters = self.config.get("OIDC_ENDPOINT_CUSTOM_PARAMS", {}).get(endpoint_key, {})
query_params = urllib.parse.parse_qs(query, keep_blank_values=True)
query_params.update(kwargs)
query_params.update(custom_parameters)
return OAuthEndpoint(
urllib.parse.urlunsplit((scheme, netloc, path, {}, fragment)), query_params
)
def validate(self):
return bool(self.get_login_scopes())
def validate_client_id_and_secret(self, http_client, url_scheme_and_hostname):
check_auth_url = http_client.get(self.get_auth_url(url_scheme_and_hostname, "", "", []))
if check_auth_url.status_code // 100 != 2:
raise Exception("Got non-200 status code for authorization endpoint")
def requires_form_encoding(self):
return True
def get_public_config(self):
return {
"CLIENT_ID": self.client_id(),
"OIDC": True,
}
def exchange_code_for_tokens(self, app_config, http_client, code, redirect_suffix):
try:
json_data = self.exchange_code(
app_config,
http_client,
code,
redirect_suffix=redirect_suffix,
form_encode=self.requires_form_encoding(),
)
except OAuthExchangeCodeException as oce:
raise OAuthLoginException(str(oce))
access_token = json_data.get("access_token", None)
if access_token is None:
logger.debug("Missing access_token in response: %s", json_data)
raise OAuthLoginException("Missing `access_token` in OIDC response")
id_token = json_data.get("id_token", None)
if id_token is None:
logger.debug("Missing id_token in response: %s", json_data)
raise OAuthLoginException("Missing `id_token` in OIDC response")
return id_token, access_token
def exchange_code_for_login(self, app_config, http_client, code, redirect_suffix):
id_token, access_token = self.exchange_code_for_tokens(
app_config, http_client, code, redirect_suffix
)
try:
decoded_id_token = self.decode_user_jwt(id_token)
except InvalidTokenError as ite:
logger.exception("Got invalid token error on OIDC decode: %s", ite)
raise OAuthLoginException("Could not decode OIDC token")
except PublicKeyLoadException as pke:
logger.exception("Could not load public key during OIDC decode: %s", pke)
raise OAuthLoginException("Could find public OIDC key")
# the decoded ID token.
if self.user_endpoint():
# Retrieve the user information.
try:
user_info = self.get_user_info(http_client, access_token)
except OAuthGetUserInfoException as oge:
raise OAuthLoginException(str(oge))
else:
user_info = decoded_id_token
# Verify subs.
if user_info["sub"] != decoded_id_token["sub"]:
logger.debug(
"Mismatch in `sub` returned by OIDC user info endpoint: %s vs %s",
user_info["sub"],
decoded_id_token["sub"],
)
raise OAuthLoginException("Mismatch in `sub` returned by OIDC user info endpoint")
# Check if we have a verified email address.
if self.config.get("VERIFIED_EMAIL_CLAIM_NAME"):
email_address = user_info.get(self.config["VERIFIED_EMAIL_CLAIM_NAME"])
else:
email_address = user_info.get("email") if user_info.get("email_verified") else None
logger.debug("Found e-mail address `%s` for sub `%s`", email_address, user_info["sub"])
if self._mailing:
if email_address is None:
raise OAuthLoginException(
"A verified email address is required to login with this service"
)
# Check for a preferred username.
if self.config.get("PREFERRED_USERNAME_CLAIM_NAME"):
lusername = user_info.get(self.config["PREFERRED_USERNAME_CLAIM_NAME"])
else:
lusername = user_info.get("preferred_username")
if lusername is None:
# Note: Active Directory provides `unique_name` and `upn`.
# https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-id-and-access-tokens
lusername = user_info.get("unique_name", user_info.get("upn"))
if lusername is None:
lusername = user_info["sub"]
if lusername.find("@") >= 0:
lusername = lusername[0 : lusername.find("@")]
return decoded_id_token["sub"], lusername, email_address
@property
def _issuer(self):
# Read the issuer from the OIDC config, falling back to the configured OIDC server.
issuer = self._oidc_config().get("issuer", self.config["OIDC_SERVER"])
# If specified, use the overridden OIDC issuer.
return self.config.get("OIDC_ISSUER", issuer)
@lru_cache(maxsize=1)
def _oidc_config(self):
if self.config.get("OIDC_SERVER"):
return self._load_oidc_config_via_discovery(self.config.get("DEBUGGING", False))
else:
return {}
def _load_oidc_config_via_discovery(self, is_debugging):
oidc_server = self.config["OIDC_SERVER"]
if not oidc_server.startswith("https://") and not is_debugging:
raise DiscoveryFailureException("OIDC server must be accessed over SSL")
discovery_url = urllib.parse.urljoin(oidc_server, OIDC_WELLKNOWN)
discovery = self._http_client.get(discovery_url, timeout=5, verify=is_debugging is False)
if discovery.status_code // 100 != 2:
logger.debug(
"Got %s response for OIDC discovery: %s", discovery.status_code, discovery.text
)
raise DiscoveryFailureException("Could not load OIDC discovery information")
try:
return json.loads(discovery.text)
except ValueError:
logger.exception("Could not parse OIDC discovery for url: %s", discovery_url)
raise DiscoveryFailureException("Could not parse OIDC discovery information")
def decode_user_jwt(self, token):
# Find the key to use.
headers = jwt.get_unverified_header(token)
kid = headers.get("kid", None)
if kid is None:
raise InvalidTokenError("Missing `kid` header")
logger.debug(
"Using key `%s`, attempting to decode token `%s` with aud `%s` and iss `%s`",
kid,
token,
self.client_id(),
self._issuer,
)
try:
return decode(
token,
self._get_public_key(kid),
algorithms=ALLOWED_ALGORITHMS,
audience=self.client_id(),
issuer=self._issuer,
leeway=JWT_CLOCK_SKEW_SECONDS,
options=dict(require_nbf=False),
)
except InvalidTokenError as ite:
logger.warning(
"Could not decode token `%s` for OIDC: %s. Will attempt again after "
+ "retrieving public keys.",
token,
ite,
)
# Public key may have expired. Try to retrieve an updated public key and use it to decode.
try:
return decode(
token,
self._get_public_key(kid, force_refresh=True),
algorithms=ALLOWED_ALGORITHMS,
audience=self.client_id(),
issuer=self._issuer,
leeway=JWT_CLOCK_SKEW_SECONDS,
options=dict(require_nbf=False),
)
except InvalidTokenError as ite:
logger.warning(
"Could not decode token `%s` for OIDC: %s. Attempted again after "
+ "retrieving public keys.",
token,
ite,
)
# Decode again with verify=False, and log the decoded token to allow for easier debugging.
nonverified = decode(
token,
self._get_public_key(kid, force_refresh=True),
algorithms=ALLOWED_ALGORITHMS,
audience=self.client_id(),
issuer=self._issuer,
leeway=JWT_CLOCK_SKEW_SECONDS,
options=dict(require_nbf=False, verify=False),
)
logger.debug("Got an error when trying to verify OIDC JWT: %s", nonverified)
raise ite
def _get_public_key(self, kid, force_refresh=False):
# If force_refresh is true, we expire all the items in the cache by setting the time to
# the current time + the expiration TTL.
if force_refresh:
self._public_key_cache.expire(time=time.time() + PUBLIC_KEY_CACHE_TTL)
# Retrieve the public key from the cache. If the cache does not contain the public key,
# it will internally call _load_public_key to retrieve it and then save it.
return self._public_key_cache[kid]
class _PublicKeyCache(TTLCache):
def __init__(self, login_service, *args, **kwargs):
super(_PublicKeyCache, self).__init__(*args, **kwargs)
self._login_service = login_service
def __missing__(self, kid):
keys_url = self._login_service._oidc_config()["jwks_uri"]
# Load the keys.
try:
keys = KEYS()
keys.load_from_url(
keys_url, verify=not self._login_service.config.get("DEBUGGING", False)
)
except Exception as ex:
logger.exception("Exception loading public key")
raise PublicKeyLoadException(str(ex))
# Find the matching key.
keys_found = keys.by_kid(kid)
if len(keys_found) == 0:
raise PublicKeyLoadException("Public key %s not found" % kid)
rsa_keys = [key for key in keys_found if key.kty == "RSA"]
if len(rsa_keys) == 0:
raise PublicKeyLoadException("No RSA form of public key %s not found" % kid)
matching_key = rsa_keys[0]
matching_key.deserialize()
# Reload the key so that we can give a key *instance* to PyJWT to work around its weird parsing
# issues.
final_key = load_der_public_key(
matching_key.key.exportKey("DER"), backend=default_backend()
)
self[kid] = final_key
return final_key
| true | true |
f7f6e8d8f60b2a3366b18929b2d59a9f0703a9dd | 771 | py | Python | google/cloud/aiplatform/training_utils/cloud_profiler/cloud_profiler_utils.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | 180 | 2020-09-23T17:21:15.000Z | 2022-03-30T17:25:47.000Z | google/cloud/aiplatform/training_utils/cloud_profiler/cloud_profiler_utils.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | 601 | 2020-09-23T16:23:44.000Z | 2022-03-31T19:08:23.000Z | google/cloud/aiplatform/training_utils/cloud_profiler/cloud_profiler_utils.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | 109 | 2020-09-23T16:22:04.000Z | 2022-03-28T21:18:29.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Message surfaced when the optional cloud-profiler extra is not installed.
import_error_msg = (
    "Could not load the cloud profiler. To use the profiler, "
    "install the SDK using 'pip install google-cloud-aiplatform[cloud-profiler]'"
)
| 35.045455 | 81 | 0.740597 |
import_error_msg = (
"Could not load the cloud profiler. To use the profiler, "
"install the SDK using 'pip install google-cloud-aiplatform[cloud-profiler]'"
)
| true | true |
f7f6e9525f84940691488a6a43f0fbbd4ea19caa | 1,991 | py | Python | experiments/gen_white_table.py | Derek-Wds/MAD-VAE | 267ce6ca98f1b1ecc8ebec22ddeca32e2c502d5b | [
"MIT"
] | 3 | 2019-12-12T14:35:42.000Z | 2021-12-16T03:06:16.000Z | experiments/gen_white_table.py | Derek-Wds/MAD-VAE | 267ce6ca98f1b1ecc8ebec22ddeca32e2c502d5b | [
"MIT"
] | null | null | null | experiments/gen_white_table.py | Derek-Wds/MAD-VAE | 267ce6ca98f1b1ecc8ebec22ddeca32e2c502d5b | [
"MIT"
] | null | null | null | import json
import numpy as np
if __name__ == "__main__":
models = {'vanilla': 0, 'classification': 0, 'proxi_dist': 0, 'combined': 0}
models_list = ['vanilla', 'classification', 'proxi_dist', 'combined'] # for consistency in older versions
for flavor in models_list:
with open(f'./accuracy_{flavor}.txt', 'r') as f:
models[flavor] = json.load(f)
# Models initialized with their base accuracy
acc = {'fgsm': [0.1316], 'r-fgsm': [0.1521], 'cw': [0.0075], 'mi-fgsm': [0.0074], 'pgd': [0.0073], 'single': [0.9977]}
acc_name = {'fgsm': 'FGSM', 'r-fgsm': 'Rand-FGSM', 'cw': 'CW', 'mi-fgsm': 'MI-FGSM', 'pgd': 'PGD', 'single': 'Single Pixel'}
acc_list = list(acc.keys())
for model in acc_list:
for flavor in models_list:
acc[model].append(models[flavor][model])
argmax = np.argmax(acc[model][1:]) + 1
acc[model][argmax] = f'\\textbf{{{acc[model][argmax]}}}'
with open('./whitebox_table.tex', 'w') as f:
c = ['c'] * (len(models_list) + 3)
f.write("\\begin{table}[H]\n\centering\n\\begin{tabular}{")
f.write('|'.join(c))
f.write("}\nAttack & No Attack & No Defense & Vanilla & Classification & Proximity and Distance & Combined \\\\ \\hline\n")
for model in acc_list:
acc[model].insert(0, 0.9931)
acc[model].insert(0, acc_name[model])
f.write(' & '.join(str(x) for x in acc[model]))
f.write('\\\\\n')
f.write('\\end{tabular}\n')
f.write('\\caption{Classification accuracy of different models based on the FGSM, Rand-FGSM, CW, Momentum Iterative FGSM, PGD, and Single Pixel White-Box attack on the classifier with the default parameters. The models are trained on the data generated using the first three attack methods while the other three attacks are not included in the training dataset.}\n')
f.write('\\label{table:whitebox-result}\n')
f.write('\\end{table}\n') | 53.810811 | 374 | 0.602712 | import json
import numpy as np
if __name__ == "__main__":
models = {'vanilla': 0, 'classification': 0, 'proxi_dist': 0, 'combined': 0}
models_list = ['vanilla', 'classification', 'proxi_dist', 'combined']
for flavor in models_list:
with open(f'./accuracy_{flavor}.txt', 'r') as f:
models[flavor] = json.load(f)
acc = {'fgsm': [0.1316], 'r-fgsm': [0.1521], 'cw': [0.0075], 'mi-fgsm': [0.0074], 'pgd': [0.0073], 'single': [0.9977]}
acc_name = {'fgsm': 'FGSM', 'r-fgsm': 'Rand-FGSM', 'cw': 'CW', 'mi-fgsm': 'MI-FGSM', 'pgd': 'PGD', 'single': 'Single Pixel'}
acc_list = list(acc.keys())
for model in acc_list:
for flavor in models_list:
acc[model].append(models[flavor][model])
argmax = np.argmax(acc[model][1:]) + 1
acc[model][argmax] = f'\\textbf{{{acc[model][argmax]}}}'
with open('./whitebox_table.tex', 'w') as f:
c = ['c'] * (len(models_list) + 3)
f.write("\\begin{table}[H]\n\centering\n\\begin{tabular}{")
f.write('|'.join(c))
f.write("}\nAttack & No Attack & No Defense & Vanilla & Classification & Proximity and Distance & Combined \\\\ \\hline\n")
for model in acc_list:
acc[model].insert(0, 0.9931)
acc[model].insert(0, acc_name[model])
f.write(' & '.join(str(x) for x in acc[model]))
f.write('\\\\\n')
f.write('\\end{tabular}\n')
f.write('\\caption{Classification accuracy of different models based on the FGSM, Rand-FGSM, CW, Momentum Iterative FGSM, PGD, and Single Pixel White-Box attack on the classifier with the default parameters. The models are trained on the data generated using the first three attack methods while the other three attacks are not included in the training dataset.}\n')
f.write('\\label{table:whitebox-result}\n')
f.write('\\end{table}\n') | true | true |
f7f6e9843e0892bf302ce20849464558acc7b351 | 11,155 | py | Python | django_auth_adfs/backend.py | samukasmk/django-auth-adfs | e7d0ff27e3610f8412293086c440fc0a6357f1fa | [
"BSD-2-Clause"
] | null | null | null | django_auth_adfs/backend.py | samukasmk/django-auth-adfs | e7d0ff27e3610f8412293086c440fc0a6357f1fa | [
"BSD-2-Clause"
] | null | null | null | django_auth_adfs/backend.py | samukasmk/django-auth-adfs | e7d0ff27e3610f8412293086c440fc0a6357f1fa | [
"BSD-2-Clause"
] | null | null | null | import logging
from pprint import pformat
import jwt
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import Group
from django.core.exceptions import ImproperlyConfigured, PermissionDenied, ObjectDoesNotExist
from django_auth_adfs.config import settings, provider_config
from django_auth_adfs import signals
logger = logging.getLogger("django_auth_adfs")
class AdfsBackend(ModelBackend):
    """
    Authentication backend that authenticates users against a Microsoft ADFS
    server using the OAuth2 authorization-code grant.

    It's based on the ``RemoteUserBackend`` from Django.
    """
def authenticate(self, request, authorization_code=None, **kwargs):
# If loaded data is too old, reload it again
provider_config.load_config()
# If there's no token or code, we pass control to the next authentication backend
if authorization_code is None or authorization_code == '':
logger.debug("django_auth_adfs was called but no authorization code was received")
return
data = {
'grant_type': 'authorization_code',
'client_id': settings.CLIENT_ID,
'redirect_uri': provider_config.redirect_uri(request),
'code': authorization_code,
}
if settings.CLIENT_SECRET:
data['client_secret'] = settings.CLIENT_SECRET
logger.debug("Received authorization code: " + authorization_code)
logger.debug("Getting access token at: " + provider_config.token_endpoint)
response = provider_config.session.post(provider_config.token_endpoint, data, timeout=settings.TIMEOUT)
# 200 = valid token received
# 400 = 'something' is wrong in our request
if response.status_code == 400:
logger.error("ADFS server returned an error: " + response.json()["error_description"])
raise PermissionDenied
if response.status_code != 200:
logger.error("Unexpected ADFS response: " + response.content.decode())
raise PermissionDenied
adfs_response = response.json()
access_token = adfs_response["access_token"]
logger.debug("Received access token: " + access_token)
claims = jwt.decode(access_token, verify=False)
logger.debug("JWT claims:\n" + pformat(claims))
claims = self.verify_access_token(access_token)
if not claims:
logger.error("Access token payload empty, cannot authenticate the request")
raise PermissionDenied
user = self.create_user(claims)
self.update_user_attributes(user, claims)
self.update_user_groups(user, claims)
self.update_user_flags(user, claims)
signals.post_authenticate.send(
sender=self,
user=user,
claims=claims,
adfs_response=adfs_response
)
user.save()
return user
def verify_access_token(self, access_token):
for idx, key in enumerate(provider_config.signing_keys):
try:
# Explicitly define the verification option.
# The list below is the default the jwt module uses.
# Explicit is better then implicit and it protects against
# changes in the defaults the jwt module uses.
options = {
'verify_signature': True,
'verify_exp': True,
'verify_nbf': True,
'verify_iat': True,
'verify_aud': (True if settings.AUDIENCE else False),
'verify_iss': True,
'require_exp': False,
'require_iat': False,
'require_nbf': False
}
# Validate token and extract claims
claims = jwt.decode(
access_token,
key=key,
algorithms=['RS256', 'RS384', 'RS512'],
verify=True,
audience=settings.AUDIENCE,
issuer=provider_config.issuer,
options=options,
)
# Don't try next key if this one is valid
return claims
except jwt.ExpiredSignature as error:
logger.info("Signature has expired: %s" % error)
raise PermissionDenied
except jwt.DecodeError as error:
# If it's not the last certificate in the list, skip to the next one
if idx < len(provider_config.signing_keys) - 1:
continue
else:
logger.info('Error decoding signature: %s' % error)
raise PermissionDenied
except jwt.InvalidTokenError as error:
logger.info(str(error))
raise PermissionDenied
def create_user(self, claims):
"""
Create the user if it doesn't exist yet
Args:
claims (dict): claims from the access token
Returns:
django.contrib.auth.models.User: A Django user
"""
# Create the user
username_claim = settings.USERNAME_CLAIM
usermodel = get_user_model()
user, created = usermodel.objects.get_or_create(**{
usermodel.USERNAME_FIELD: claims[username_claim]
})
if created:
logger.debug("User '{}' has been created.".format(claims[username_claim]))
return user
def update_user_attributes(self, user, claims):
"""
Updates user attributes based on the CLAIM_MAPPING setting.
Args:
user (django.contrib.auth.models.User): User model instance
claims (dict): claims from the access token
"""
required_fields = [field.name for field in user._meta.fields if field.blank is False]
for field, claim in settings.CLAIM_MAPPING.items():
if hasattr(user, field):
if claim in claims:
setattr(user, field, claims[claim])
logger.debug("Attribute '{}' for user '{}' was set to '{}'.".format(user, field, claims[claim]))
else:
if field in required_fields:
msg = "Claim not found in access token: '{}'. Check ADFS claims mapping."
raise ImproperlyConfigured(msg.format(claim))
else:
msg = "Claim '{}' for user field '{}' was not found in the access token for user '{}'. " \
"Field is not required and will be left empty".format(claim, field, user)
logger.warning(msg)
else:
msg = "User model has no field named '{}'. Check ADFS claims mapping."
raise ImproperlyConfigured(msg.format(field))
def update_user_groups(self, user, claims):
"""
Updates user group memberships based on the GROUPS_CLAIM setting.
Args:
user (django.contrib.auth.models.User): User model instance
claims (dict): Claims from the access token
"""
if settings.GROUPS_CLAIM is not None:
# Update the user's group memberships
django_groups = [group.name for group in user.groups.all()]
if settings.GROUPS_CLAIM in claims:
claim_groups = claims[settings.GROUPS_CLAIM]
if not isinstance(claim_groups, list):
claim_groups = [claim_groups, ]
else:
logger.debug(
"The configured groups claim '{}' was not found in the access token".format(settings.GROUPS_CLAIM))
claim_groups = []
# Make a diff of the user's groups.
# Removing a user from all groups and then re-add them would cause
# the autoincrement value for the database table storing the
# user-to-group mappings to increment for no reason.
groups_to_remove = set(django_groups) - set(claim_groups)
groups_to_add = set(claim_groups) - set(django_groups)
# Loop through the groups in the group claim and
# add the user to these groups as needed.
for group_name in groups_to_remove:
group = Group.objects.get(name=group_name)
user.groups.remove(group)
logger.debug("User removed from group '{}'".format(group_name))
for group_name in groups_to_add:
try:
if settings.MIRROR_GROUPS:
group, _ = Group.objects.get_or_create(name=group_name)
logger.debug("Created group '{}'".format(group_name))
else:
group = Group.objects.get(name=group_name)
user.groups.add(group)
logger.debug("User added to group '{}'".format(group_name))
except ObjectDoesNotExist:
# Silently fail for non-existing groups.
pass
def update_user_flags(self, user, claims):
"""
Updates user boolean attributes based on the BOOLEAN_CLAIM_MAPPING setting.
Args:
user (django.contrib.auth.models.User): User model instance
claims (dict): Claims from the access token
"""
if settings.GROUPS_CLAIM is not None:
if settings.GROUPS_CLAIM in claims:
access_token_groups = claims[settings.GROUPS_CLAIM]
if not isinstance(access_token_groups, list):
access_token_groups = [access_token_groups, ]
else:
logger.debug("The configured group claim was not found in the access token")
access_token_groups = []
for flag, group in settings.GROUP_TO_FLAG_MAPPING.items():
if hasattr(user, flag):
if group in access_token_groups:
value = True
else:
value = False
setattr(user, flag, value)
logger.debug('Attribute "{}" for user "{}" was set to "{}".'.format(user, flag, value))
else:
msg = "User model has no field named '{}'. Check ADFS boolean claims mapping."
raise ImproperlyConfigured(msg.format(flag))
for field, claim in settings.BOOLEAN_CLAIM_MAPPING.items():
if hasattr(user, field):
bool_val = False
if claim in claims and str(claims[claim]).lower() in ['y', 'yes', 't', 'true', 'on', '1']:
bool_val = True
setattr(user, field, bool_val)
logger.debug('Attribute "{}" for user "{}" was set to "{}".'.format(user, field, bool_val))
else:
msg = "User model has no field named '{}'. Check ADFS boolean claims mapping."
raise ImproperlyConfigured(msg.format(field))
| 42.576336 | 119 | 0.575885 | import logging
from pprint import pformat
import jwt
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import Group
from django.core.exceptions import ImproperlyConfigured, PermissionDenied, ObjectDoesNotExist
from django_auth_adfs.config import settings, provider_config
from django_auth_adfs import signals
logger = logging.getLogger("django_auth_adfs")
class AdfsBackend(ModelBackend):
def authenticate(self, request, authorization_code=None, **kwargs):
provider_config.load_config()
if authorization_code is None or authorization_code == '':
logger.debug("django_auth_adfs was called but no authorization code was received")
return
data = {
'grant_type': 'authorization_code',
'client_id': settings.CLIENT_ID,
'redirect_uri': provider_config.redirect_uri(request),
'code': authorization_code,
}
if settings.CLIENT_SECRET:
data['client_secret'] = settings.CLIENT_SECRET
logger.debug("Received authorization code: " + authorization_code)
logger.debug("Getting access token at: " + provider_config.token_endpoint)
response = provider_config.session.post(provider_config.token_endpoint, data, timeout=settings.TIMEOUT)
# 200 = valid token received
# 400 = 'something' is wrong in our request
if response.status_code == 400:
logger.error("ADFS server returned an error: " + response.json()["error_description"])
raise PermissionDenied
if response.status_code != 200:
logger.error("Unexpected ADFS response: " + response.content.decode())
raise PermissionDenied
adfs_response = response.json()
access_token = adfs_response["access_token"]
logger.debug("Received access token: " + access_token)
claims = jwt.decode(access_token, verify=False)
logger.debug("JWT claims:\n" + pformat(claims))
claims = self.verify_access_token(access_token)
if not claims:
logger.error("Access token payload empty, cannot authenticate the request")
raise PermissionDenied
user = self.create_user(claims)
self.update_user_attributes(user, claims)
self.update_user_groups(user, claims)
self.update_user_flags(user, claims)
signals.post_authenticate.send(
sender=self,
user=user,
claims=claims,
adfs_response=adfs_response
)
user.save()
return user
def verify_access_token(self, access_token):
for idx, key in enumerate(provider_config.signing_keys):
try:
# Explicitly define the verification option.
# The list below is the default the jwt module uses.
# Explicit is better then implicit and it protects against
# changes in the defaults the jwt module uses.
options = {
'verify_signature': True,
'verify_exp': True,
'verify_nbf': True,
'verify_iat': True,
'verify_aud': (True if settings.AUDIENCE else False),
'verify_iss': True,
'require_exp': False,
'require_iat': False,
'require_nbf': False
}
# Validate token and extract claims
claims = jwt.decode(
access_token,
key=key,
algorithms=['RS256', 'RS384', 'RS512'],
verify=True,
audience=settings.AUDIENCE,
issuer=provider_config.issuer,
options=options,
)
# Don't try next key if this one is valid
return claims
except jwt.ExpiredSignature as error:
logger.info("Signature has expired: %s" % error)
raise PermissionDenied
except jwt.DecodeError as error:
if idx < len(provider_config.signing_keys) - 1:
continue
else:
logger.info('Error decoding signature: %s' % error)
raise PermissionDenied
except jwt.InvalidTokenError as error:
logger.info(str(error))
raise PermissionDenied
def create_user(self, claims):
# Create the user
username_claim = settings.USERNAME_CLAIM
usermodel = get_user_model()
user, created = usermodel.objects.get_or_create(**{
usermodel.USERNAME_FIELD: claims[username_claim]
})
if created:
logger.debug("User '{}' has been created.".format(claims[username_claim]))
return user
def update_user_attributes(self, user, claims):
required_fields = [field.name for field in user._meta.fields if field.blank is False]
for field, claim in settings.CLAIM_MAPPING.items():
if hasattr(user, field):
if claim in claims:
setattr(user, field, claims[claim])
logger.debug("Attribute '{}' for user '{}' was set to '{}'.".format(user, field, claims[claim]))
else:
if field in required_fields:
msg = "Claim not found in access token: '{}'. Check ADFS claims mapping."
raise ImproperlyConfigured(msg.format(claim))
else:
msg = "Claim '{}' for user field '{}' was not found in the access token for user '{}'. " \
"Field is not required and will be left empty".format(claim, field, user)
logger.warning(msg)
else:
msg = "User model has no field named '{}'. Check ADFS claims mapping."
raise ImproperlyConfigured(msg.format(field))
def update_user_groups(self, user, claims):
if settings.GROUPS_CLAIM is not None:
# Update the user's group memberships
django_groups = [group.name for group in user.groups.all()]
if settings.GROUPS_CLAIM in claims:
claim_groups = claims[settings.GROUPS_CLAIM]
if not isinstance(claim_groups, list):
claim_groups = [claim_groups, ]
else:
logger.debug(
"The configured groups claim '{}' was not found in the access token".format(settings.GROUPS_CLAIM))
claim_groups = []
# Removing a user from all groups and then re-add them would cause
# the autoincrement value for the database table storing the
# user-to-group mappings to increment for no reason.
groups_to_remove = set(django_groups) - set(claim_groups)
groups_to_add = set(claim_groups) - set(django_groups)
# Loop through the groups in the group claim and
# add the user to these groups as needed.
for group_name in groups_to_remove:
group = Group.objects.get(name=group_name)
user.groups.remove(group)
logger.debug("User removed from group '{}'".format(group_name))
for group_name in groups_to_add:
try:
if settings.MIRROR_GROUPS:
group, _ = Group.objects.get_or_create(name=group_name)
logger.debug("Created group '{}'".format(group_name))
else:
group = Group.objects.get(name=group_name)
user.groups.add(group)
logger.debug("User added to group '{}'".format(group_name))
except ObjectDoesNotExist:
# Silently fail for non-existing groups.
pass
def update_user_flags(self, user, claims):
if settings.GROUPS_CLAIM is not None:
if settings.GROUPS_CLAIM in claims:
access_token_groups = claims[settings.GROUPS_CLAIM]
if not isinstance(access_token_groups, list):
access_token_groups = [access_token_groups, ]
else:
logger.debug("The configured group claim was not found in the access token")
access_token_groups = []
for flag, group in settings.GROUP_TO_FLAG_MAPPING.items():
if hasattr(user, flag):
if group in access_token_groups:
value = True
else:
value = False
setattr(user, flag, value)
logger.debug('Attribute "{}" for user "{}" was set to "{}".'.format(user, flag, value))
else:
msg = "User model has no field named '{}'. Check ADFS boolean claims mapping."
raise ImproperlyConfigured(msg.format(flag))
for field, claim in settings.BOOLEAN_CLAIM_MAPPING.items():
if hasattr(user, field):
bool_val = False
if claim in claims and str(claims[claim]).lower() in ['y', 'yes', 't', 'true', 'on', '1']:
bool_val = True
setattr(user, field, bool_val)
logger.debug('Attribute "{}" for user "{}" was set to "{}".'.format(user, field, bool_val))
else:
msg = "User model has no field named '{}'. Check ADFS boolean claims mapping."
raise ImproperlyConfigured(msg.format(field))
| true | true |
f7f6e9b68385293a77f39a1cdf629efcf677b67c | 415 | py | Python | saleor/product/migrations/0065_product_code.py | fcopantoja/saleor-ecommerce | d8745de423e1afc7aa60a59dbe05fec43b67ba80 | [
"BSD-3-Clause"
] | null | null | null | saleor/product/migrations/0065_product_code.py | fcopantoja/saleor-ecommerce | d8745de423e1afc7aa60a59dbe05fec43b67ba80 | [
"BSD-3-Clause"
] | 1 | 2022-02-13T22:52:38.000Z | 2022-02-13T22:52:38.000Z | saleor/product/migrations/0065_product_code.py | fcopantoja/saleor-ecommerce | d8745de423e1afc7aa60a59dbe05fec43b67ba80 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.0.3 on 2018-09-18 23:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0064_productvariant_handle_stock'),
]
operations = [
migrations.AddField(
model_name='product',
name='code',
field=models.CharField(blank=True, max_length=40, null=True),
),
]
| 21.842105 | 73 | 0.612048 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0064_productvariant_handle_stock'),
]
operations = [
migrations.AddField(
model_name='product',
name='code',
field=models.CharField(blank=True, max_length=40, null=True),
),
]
| true | true |
f7f6eab3596e65d5440c386cb70b4bdee2535f77 | 1,415 | py | Python | tests/search/test_nodal_surface.py | zx-sdu/NodeFinder | edaeeba8fb5a1ca28222313f6de7a6dfa8253093 | [
"Apache-2.0"
] | null | null | null | tests/search/test_nodal_surface.py | zx-sdu/NodeFinder | edaeeba8fb5a1ca28222313f6de7a6dfa8253093 | [
"Apache-2.0"
] | null | null | null | tests/search/test_nodal_surface.py | zx-sdu/NodeFinder | edaeeba8fb5a1ca28222313f6de7a6dfa8253093 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# © 2017-2019, ETH Zurich, Institut für Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Tests with a nodal line.
"""
import numpy as np
import pytest
from nodefinder.search import run
from nodefinder.search.refinement_stencil import get_mesh_stencil
@pytest.fixture
def nodal_surface_properties():
"""
Fixture which defines the helper functions describing the nodal surface.
"""
def dist_fct(pos):
_, _, dz = (np.array(pos) % 1) - 0.5
return abs(dz)
def gap_fct(pos):
dx, _, dz = (np.array(pos) % 1) - 0.5
return dz**2 * (0.1 + 10 * dx**2)
def parametrization(s, t):
return [s, t, 0.5]
return dist_fct, gap_fct, parametrization
def test_nodal_surface(nodal_surface_properties, score_nodal_surface): # pylint: disable=redefined-outer-name
"""
Test that a nodal surface is found.
"""
dist_fct, gap_fct, parametrization = nodal_surface_properties
result = run(
gap_fct=gap_fct,
gap_threshold=1e-4,
feature_size=1e-1,
refinement_stencil=get_mesh_stencil(mesh_size=(2, 2, 2)),
initial_mesh_size=(3, 3, 3),
use_fake_potential=False,
)
score_nodal_surface(
result=result,
dist_func=dist_fct,
surface_parametrization=parametrization,
cutoff_accuracy=2e-3,
cutoff_coverage=1e-1,
)
| 24.824561 | 110 | 0.650883 |
import numpy as np
import pytest
from nodefinder.search import run
from nodefinder.search.refinement_stencil import get_mesh_stencil
@pytest.fixture
def nodal_surface_properties():
def dist_fct(pos):
_, _, dz = (np.array(pos) % 1) - 0.5
return abs(dz)
def gap_fct(pos):
dx, _, dz = (np.array(pos) % 1) - 0.5
return dz**2 * (0.1 + 10 * dx**2)
def parametrization(s, t):
return [s, t, 0.5]
return dist_fct, gap_fct, parametrization
def test_nodal_surface(nodal_surface_properties, score_nodal_surface):
dist_fct, gap_fct, parametrization = nodal_surface_properties
result = run(
gap_fct=gap_fct,
gap_threshold=1e-4,
feature_size=1e-1,
refinement_stencil=get_mesh_stencil(mesh_size=(2, 2, 2)),
initial_mesh_size=(3, 3, 3),
use_fake_potential=False,
)
score_nodal_surface(
result=result,
dist_func=dist_fct,
surface_parametrization=parametrization,
cutoff_accuracy=2e-3,
cutoff_coverage=1e-1,
)
| true | true |
f7f6ebce3e0c34b4be5f37c8d7b552c6a51e54a1 | 1,170 | py | Python | opsplugins/vlan.py | OpenSwitchNOS/openswitch-ops-vland | 426c825ab4247d1a19d27f3a97d85645a32f8f6d | [
"Apache-2.0"
] | null | null | null | opsplugins/vlan.py | OpenSwitchNOS/openswitch-ops-vland | 426c825ab4247d1a19d27f3a97d85645a32f8f6d | [
"Apache-2.0"
] | null | null | null | opsplugins/vlan.py | OpenSwitchNOS/openswitch-ops-vland | 426c825ab4247d1a19d27f3a97d85645a32f8f6d | [
"Apache-2.0"
] | null | null | null | from opsvalidator.base import BaseValidator
from opsvalidator import error
from opsvalidator.error import ValidationError
from opsrest.utils.utils import get_column_data_from_row
class VlanValidator(BaseValidator):
resource = "vlan"
def validate_deletion(self, validation_args):
vlan_row = validation_args.resource_row
vlan_name = get_column_data_from_row(vlan_row, "name")
if vlan_name == 'DEFAULT_VLAN_1':
details = "DEFAULT_VLAN_1 cannot be deleted"
raise ValidationError(error.VERIFICATION_FAILED, details)
def validate_modification(self, validation_args):
vlan_row = validation_args.resource_row
vlan_id = get_column_data_from_row(vlan_row, "id")
is_new = validation_args.is_new
idl = validation_args.idl
if is_new:
vlan_id_list = []
for ovs_rec in idl.tables["VLAN"].rows.itervalues():
vlan_id_list.append(ovs_rec.id)
if (len(vlan_id_list) != len(set(vlan_id_list))):
details = "VLAN id %d already exists" % (vlan_id)
raise ValidationError(error.VERIFICATION_FAILED, details)
| 40.344828 | 73 | 0.688889 | from opsvalidator.base import BaseValidator
from opsvalidator import error
from opsvalidator.error import ValidationError
from opsrest.utils.utils import get_column_data_from_row
class VlanValidator(BaseValidator):
resource = "vlan"
def validate_deletion(self, validation_args):
vlan_row = validation_args.resource_row
vlan_name = get_column_data_from_row(vlan_row, "name")
if vlan_name == 'DEFAULT_VLAN_1':
details = "DEFAULT_VLAN_1 cannot be deleted"
raise ValidationError(error.VERIFICATION_FAILED, details)
def validate_modification(self, validation_args):
vlan_row = validation_args.resource_row
vlan_id = get_column_data_from_row(vlan_row, "id")
is_new = validation_args.is_new
idl = validation_args.idl
if is_new:
vlan_id_list = []
for ovs_rec in idl.tables["VLAN"].rows.itervalues():
vlan_id_list.append(ovs_rec.id)
if (len(vlan_id_list) != len(set(vlan_id_list))):
details = "VLAN id %d already exists" % (vlan_id)
raise ValidationError(error.VERIFICATION_FAILED, details)
| true | true |
f7f6ed0e62e70df672d08b36fce19754a9ac746b | 1,036 | py | Python | tests/utils/test_helper_ab_test.py | avisionh/abtest | 9b4e640b67b2b4d2c3501f1549b63ddd675f058e | [
"MIT"
] | 1 | 2021-01-13T06:39:08.000Z | 2021-01-13T06:39:08.000Z | tests/utils/test_helper_ab_test.py | avisionh/abtest | 9b4e640b67b2b4d2c3501f1549b63ddd675f058e | [
"MIT"
] | null | null | null | tests/utils/test_helper_ab_test.py | avisionh/abtest | 9b4e640b67b2b4d2c3501f1549b63ddd675f058e | [
"MIT"
] | null | null | null | import src.utils.helper_ab_test as f
def test_report_conversions(df_ab_test, out_report_conversions):
assert (
f.report_conversions(
data=df_ab_test,
group_col="group",
group_filter="control",
convert_col="converted",
page_col="landing_page",
)
== out_report_conversions
)
def test_get_sample_size(baseline_rate, out_sample_size):
assert f.get_sample_size(baseline_rate=baseline_rate) == out_sample_size
def test_get_ab_test_ci(in_ab_test_ci, out_ab_test_ci):
lower_bound, upper_bound = f.get_ab_test_ci(
conversions_control=in_ab_test_ci["control_conv"],
conversions_treatment=in_ab_test_ci["treatment_conv"],
total_users_control=in_ab_test_ci["control_size"],
total_users_treatment=in_ab_test_ci["treatment_size"],
)
lower_bound, upper_bound = round(number=lower_bound, ndigits=4), round(
number=upper_bound, ndigits=2
)
assert lower_bound, upper_bound == out_ab_test_ci
| 31.393939 | 76 | 0.705598 | import src.utils.helper_ab_test as f
def test_report_conversions(df_ab_test, out_report_conversions):
assert (
f.report_conversions(
data=df_ab_test,
group_col="group",
group_filter="control",
convert_col="converted",
page_col="landing_page",
)
== out_report_conversions
)
def test_get_sample_size(baseline_rate, out_sample_size):
assert f.get_sample_size(baseline_rate=baseline_rate) == out_sample_size
def test_get_ab_test_ci(in_ab_test_ci, out_ab_test_ci):
lower_bound, upper_bound = f.get_ab_test_ci(
conversions_control=in_ab_test_ci["control_conv"],
conversions_treatment=in_ab_test_ci["treatment_conv"],
total_users_control=in_ab_test_ci["control_size"],
total_users_treatment=in_ab_test_ci["treatment_size"],
)
lower_bound, upper_bound = round(number=lower_bound, ndigits=4), round(
number=upper_bound, ndigits=2
)
assert lower_bound, upper_bound == out_ab_test_ci
| true | true |
f7f6ed3bd4c9d8bd8935a951ff4dfd52fcbf6d89 | 808 | py | Python | src/context/__init__.py | vanelk/vpp | 09ad7fdd6ed80b0a9a2c3f8fcffd1c24d6e1dfcd | [
"MIT"
] | null | null | null | src/context/__init__.py | vanelk/vpp | 09ad7fdd6ed80b0a9a2c3f8fcffd1c24d6e1dfcd | [
"MIT"
] | null | null | null | src/context/__init__.py | vanelk/vpp | 09ad7fdd6ed80b0a9a2c3f8fcffd1c24d6e1dfcd | [
"MIT"
] | null | null | null | from src.valtypes import Value
class SymbolTable:
def __init__(self, parent = None):
self.symbols = {}
self.parent = parent
def get(self, name)->Value:
value = self.symbols.get(name, None)
if value == None and self.parent:
return self.parent.get(name)
return value
def set(self, name, value):
self.symbols[name] = value
def delete(self, name):
del self.symbols[name]
def copy(self):
tl = SymbolTable(self.parent)
tl.symbols = self.symbols.copy()
return tl
class Context:
def __init__(self, display_name, parent=None, parent_entry_pos=None):
self.display_name = display_name
self.parent = parent
self.parent_entry_pos = parent_entry_pos
self.symbol_table: SymbolTable = None | 29.925926 | 70 | 0.641089 | from src.valtypes import Value
class SymbolTable:
def __init__(self, parent = None):
self.symbols = {}
self.parent = parent
def get(self, name)->Value:
value = self.symbols.get(name, None)
if value == None and self.parent:
return self.parent.get(name)
return value
def set(self, name, value):
self.symbols[name] = value
def delete(self, name):
del self.symbols[name]
def copy(self):
tl = SymbolTable(self.parent)
tl.symbols = self.symbols.copy()
return tl
class Context:
def __init__(self, display_name, parent=None, parent_entry_pos=None):
self.display_name = display_name
self.parent = parent
self.parent_entry_pos = parent_entry_pos
self.symbol_table: SymbolTable = None | true | true |
f7f6ed5cc2dee07926555f7b276346aa758afe9f | 11,292 | py | Python | protobuf/python/content_provider_pb2.py | HsingPeng/ExtendBinderExperiment | 10a8d15e1640396dd7f351a9d146f182432955fb | [
"MIT"
] | null | null | null | protobuf/python/content_provider_pb2.py | HsingPeng/ExtendBinderExperiment | 10a8d15e1640396dd7f351a9d146f182432955fb | [
"MIT"
] | null | null | null | protobuf/python/content_provider_pb2.py | HsingPeng/ExtendBinderExperiment | 10a8d15e1640396dd7f351a9d146f182432955fb | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: content_provider.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='content_provider.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x16\x63ontent_provider.proto\"\xb1\x02\n\x07Request\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x1f\n\x06method\x18\x02 \x01(\x0e\x32\x0f.Request.Method\x12\x0b\n\x03uri\x18\x03 \x01(\t\x12\x11\n\tselection\x18\x04 \x01(\t\x12\x15\n\rselectionArgs\x18\x05 \x03(\t\x12$\n\x06values\x18\x06 \x03(\x0b\x32\x14.Request.ValuesEntry\x12\x12\n\nprojection\x18\x07 \x03(\t\x12\x11\n\tsortOrder\x18\x08 \x01(\t\x1a-\n\x0bValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"D\n\x06Method\x12\n\n\x06\x64\x65lete\x10\x00\x12\x0b\n\x07getType\x10\x01\x12\n\n\x06insert\x10\x02\x12\t\n\x05query\x10\x03\x12\n\n\x06update\x10\x04\"\xc6\x01\n\x08Response\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12 \n\x06status\x18\x02 \x01(\x0e\x32\x10.Response.Status\x12\x11\n\tintResult\x18\x03 \x01(\x05\x12\x14\n\x0cstringResult\x18\x04 \x01(\t\"a\n\x06Status\x12\x06\n\x02Ok\x10\x00\x12\x0f\n\x0bUriNotFound\x10\x01\x12\x12\n\x0eMethodNotFound\x10\x02\x12\x18\n\x14PermissionNotAllowed\x10\x03\x12\x10\n\x0cUnknownError\x10\x04\x42=\n\'com.github.hsingpeng.extendbinder.protoB\x12\x43ontentProviderMsgb\x06proto3')
)
_REQUEST_METHOD = _descriptor.EnumDescriptor(
name='Method',
full_name='Request.Method',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='delete', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='getType', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='insert', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='query', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='update', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=264,
serialized_end=332,
)
_sym_db.RegisterEnumDescriptor(_REQUEST_METHOD)
_RESPONSE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='Response.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Ok', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UriNotFound', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MethodNotFound', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PermissionNotAllowed', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UnknownError', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=436,
serialized_end=533,
)
_sym_db.RegisterEnumDescriptor(_RESPONSE_STATUS)
_REQUEST_VALUESENTRY = _descriptor.Descriptor(
name='ValuesEntry',
full_name='Request.ValuesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='Request.ValuesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='Request.ValuesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=217,
serialized_end=262,
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='Request.uuid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='method', full_name='Request.method', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uri', full_name='Request.uri', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='selection', full_name='Request.selection', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='selectionArgs', full_name='Request.selectionArgs', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='values', full_name='Request.values', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='projection', full_name='Request.projection', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sortOrder', full_name='Request.sortOrder', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_REQUEST_VALUESENTRY, ],
enum_types=[
_REQUEST_METHOD,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=27,
serialized_end=332,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='Response.uuid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='Response.status', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='intResult', full_name='Response.intResult', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stringResult', full_name='Response.stringResult', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_RESPONSE_STATUS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=335,
serialized_end=533,
)
_REQUEST_VALUESENTRY.containing_type = _REQUEST
_REQUEST.fields_by_name['method'].enum_type = _REQUEST_METHOD
_REQUEST.fields_by_name['values'].message_type = _REQUEST_VALUESENTRY
_REQUEST_METHOD.containing_type = _REQUEST
_RESPONSE.fields_by_name['status'].enum_type = _RESPONSE_STATUS
_RESPONSE_STATUS.containing_type = _RESPONSE
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
ValuesEntry = _reflection.GeneratedProtocolMessageType('ValuesEntry', (_message.Message,), dict(
DESCRIPTOR = _REQUEST_VALUESENTRY,
__module__ = 'content_provider_pb2'
# @@protoc_insertion_point(class_scope:Request.ValuesEntry)
))
,
DESCRIPTOR = _REQUEST,
__module__ = 'content_provider_pb2'
# @@protoc_insertion_point(class_scope:Request)
))
_sym_db.RegisterMessage(Request)
_sym_db.RegisterMessage(Request.ValuesEntry)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
DESCRIPTOR = _RESPONSE,
__module__ = 'content_provider_pb2'
# @@protoc_insertion_point(class_scope:Response)
))
_sym_db.RegisterMessage(Response)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\'com.github.hsingpeng.extendbinder.protoB\022ContentProviderMsg'))
_REQUEST_VALUESENTRY.has_options = True
_REQUEST_VALUESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
| 37.144737 | 1,141 | 0.725735 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='content_provider.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x16\x63ontent_provider.proto\"\xb1\x02\n\x07Request\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x1f\n\x06method\x18\x02 \x01(\x0e\x32\x0f.Request.Method\x12\x0b\n\x03uri\x18\x03 \x01(\t\x12\x11\n\tselection\x18\x04 \x01(\t\x12\x15\n\rselectionArgs\x18\x05 \x03(\t\x12$\n\x06values\x18\x06 \x03(\x0b\x32\x14.Request.ValuesEntry\x12\x12\n\nprojection\x18\x07 \x03(\t\x12\x11\n\tsortOrder\x18\x08 \x01(\t\x1a-\n\x0bValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"D\n\x06Method\x12\n\n\x06\x64\x65lete\x10\x00\x12\x0b\n\x07getType\x10\x01\x12\n\n\x06insert\x10\x02\x12\t\n\x05query\x10\x03\x12\n\n\x06update\x10\x04\"\xc6\x01\n\x08Response\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12 \n\x06status\x18\x02 \x01(\x0e\x32\x10.Response.Status\x12\x11\n\tintResult\x18\x03 \x01(\x05\x12\x14\n\x0cstringResult\x18\x04 \x01(\t\"a\n\x06Status\x12\x06\n\x02Ok\x10\x00\x12\x0f\n\x0bUriNotFound\x10\x01\x12\x12\n\x0eMethodNotFound\x10\x02\x12\x18\n\x14PermissionNotAllowed\x10\x03\x12\x10\n\x0cUnknownError\x10\x04\x42=\n\'com.github.hsingpeng.extendbinder.protoB\x12\x43ontentProviderMsgb\x06proto3')
)
_REQUEST_METHOD = _descriptor.EnumDescriptor(
name='Method',
full_name='Request.Method',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='delete', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='getType', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='insert', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='query', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='update', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=264,
serialized_end=332,
)
_sym_db.RegisterEnumDescriptor(_REQUEST_METHOD)
_RESPONSE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='Response.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Ok', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UriNotFound', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MethodNotFound', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PermissionNotAllowed', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UnknownError', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=436,
serialized_end=533,
)
_sym_db.RegisterEnumDescriptor(_RESPONSE_STATUS)
_REQUEST_VALUESENTRY = _descriptor.Descriptor(
name='ValuesEntry',
full_name='Request.ValuesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='Request.ValuesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='Request.ValuesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=217,
serialized_end=262,
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='Request.uuid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='method', full_name='Request.method', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uri', full_name='Request.uri', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='selection', full_name='Request.selection', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='selectionArgs', full_name='Request.selectionArgs', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='values', full_name='Request.values', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='projection', full_name='Request.projection', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sortOrder', full_name='Request.sortOrder', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_REQUEST_VALUESENTRY, ],
enum_types=[
_REQUEST_METHOD,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=27,
serialized_end=332,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='Response.uuid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='Response.status', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='intResult', full_name='Response.intResult', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stringResult', full_name='Response.stringResult', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_RESPONSE_STATUS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=335,
serialized_end=533,
)
_REQUEST_VALUESENTRY.containing_type = _REQUEST
_REQUEST.fields_by_name['method'].enum_type = _REQUEST_METHOD
_REQUEST.fields_by_name['values'].message_type = _REQUEST_VALUESENTRY
_REQUEST_METHOD.containing_type = _REQUEST
_RESPONSE.fields_by_name['status'].enum_type = _RESPONSE_STATUS
_RESPONSE_STATUS.containing_type = _RESPONSE
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
ValuesEntry = _reflection.GeneratedProtocolMessageType('ValuesEntry', (_message.Message,), dict(
DESCRIPTOR = _REQUEST_VALUESENTRY,
__module__ = 'content_provider_pb2'
# @@protoc_insertion_point(class_scope:Request.ValuesEntry)
))
,
DESCRIPTOR = _REQUEST,
__module__ = 'content_provider_pb2'
# @@protoc_insertion_point(class_scope:Request)
))
_sym_db.RegisterMessage(Request)
_sym_db.RegisterMessage(Request.ValuesEntry)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
DESCRIPTOR = _RESPONSE,
__module__ = 'content_provider_pb2'
# @@protoc_insertion_point(class_scope:Response)
))
_sym_db.RegisterMessage(Response)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\'com.github.hsingpeng.extendbinder.protoB\022ContentProviderMsg'))
_REQUEST_VALUESENTRY.has_options = True
_REQUEST_VALUESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
| true | true |
f7f6ef3b9447e120b4323c22f27c9a826fd71637 | 3,988 | py | Python | processing/tasks/delete_items.py | voyagersearch/tasks | 04e6541d757285628cbb1341b2754f41da609d69 | [
"Apache-2.0"
] | 5 | 2017-09-13T12:57:24.000Z | 2021-12-28T19:40:09.000Z | processing/tasks/delete_items.py | voyagersearch/tasks | 04e6541d757285628cbb1341b2754f41da609d69 | [
"Apache-2.0"
] | 11 | 2015-02-12T19:21:42.000Z | 2018-04-10T21:18:14.000Z | processing/tasks/delete_items.py | voyagersearch/tasks | 04e6541d757285628cbb1341b2754f41da609d69 | [
"Apache-2.0"
] | 6 | 2015-02-12T18:48:34.000Z | 2019-07-23T18:32:39.000Z | # -*- coding: utf-8 -*-
# (C) Copyright 2014 Voyager Search
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import requests
from utils import status
from utils import task_utils
import warnings
from requests.packages.urllib3.exceptions import InsecureRequestWarning
warnings.simplefilter('ignore', InsecureRequestWarning)
# Get SSL trust setting.
verify_ssl = task_utils.get_ssl_mode()
status_writer = status.Writer()
errors_reasons = {}
def delete_items(fq_query, q_query, thumbs, metadata, layers, owner):
"""Delete items from the index using the Voyager API."""
try:
voyager_server = sys.argv[2].split('=')[1].split('solr')[0][:-1]
# voyager_server = "http://localhost:8888"
if not q_query and fq_query:
query = fq_query
fq = ""
else:
query = q_query
fq = "&fq={0}".format(fq_query)
url = "{0}/api/rest/index/records?query={1}{2}&items=true&thumbnails={3}&metadata={4}&layers={5}".format(voyager_server, query, fq, thumbs, metadata, layers)
response = requests.delete(url, verify=verify_ssl, headers={'Content-type': 'application/json', 'x-access-token': task_utils.get_security_token(owner)})
if response.status_code == 200:
return True, 'Deleted items: {0}'.format(response.json())
else:
return False, 'Error deleting items: {0}: {1}'.format('delete_items', response.reason)
except requests.HTTPError as http_error:
return False, http_error
except requests.exceptions.InvalidURL as url_error:
return False, url_error
except requests.RequestException as re:
return False, re
def execute(request):
"""Delete items.
:param request: json as a dict.
"""
query = ''
errors = 0
parameters = request['params']
archive_location = request['folder']
if not os.path.exists(archive_location):
os.makedirs(archive_location)
# Parameter values
delete_thumbs = task_utils.get_parameter_value(parameters, 'delete_thumbnails', 'value') or False
delete_metadata = task_utils.get_parameter_value(parameters, 'delete_metadata', 'value') or False
delete_layers = task_utils.get_parameter_value(parameters, 'delete_layers', 'value') or False
request_owner = request['owner']
result_count, response_index = task_utils.get_result_count(parameters)
fq = ''
if 'fq' in parameters[response_index]['query']:
if isinstance(parameters[response_index]['query']['fq'], list):
for q in parameters[response_index]['query']['fq']:
if '{!tag=' in q:
q = q.split('}')[1]
fq += q + ' AND '
fq = fq.strip(' AND ')
else:
# Replace spaces with %20 & remove \\ to avoid HTTP Error 400.
fq = parameters[response_index]['query']['fq'].replace("\\", "")
if 'q' in parameters[response_index]['query']:
query = parameters[response_index]['query']['q']
result = delete_items(fq, query, delete_thumbs, delete_metadata, delete_layers, request_owner)
if not result[0]:
errors += 1
errors_reasons['delete_items'] = result[1]
# Update state if necessary.
if errors > 0:
status_writer.send_state(status.STAT_FAILED)
else:
status_writer.send_status(result[1])
task_utils.report(os.path.join(request['folder'], '__report.json'), 1, 0, errors, errors_details=errors_reasons)
| 38.718447 | 165 | 0.669759 |
import os
import sys
import requests
from utils import status
from utils import task_utils
import warnings
from requests.packages.urllib3.exceptions import InsecureRequestWarning
warnings.simplefilter('ignore', InsecureRequestWarning)
verify_ssl = task_utils.get_ssl_mode()
status_writer = status.Writer()
errors_reasons = {}
def delete_items(fq_query, q_query, thumbs, metadata, layers, owner):
try:
voyager_server = sys.argv[2].split('=')[1].split('solr')[0][:-1]
if not q_query and fq_query:
query = fq_query
fq = ""
else:
query = q_query
fq = "&fq={0}".format(fq_query)
url = "{0}/api/rest/index/records?query={1}{2}&items=true&thumbnails={3}&metadata={4}&layers={5}".format(voyager_server, query, fq, thumbs, metadata, layers)
response = requests.delete(url, verify=verify_ssl, headers={'Content-type': 'application/json', 'x-access-token': task_utils.get_security_token(owner)})
if response.status_code == 200:
return True, 'Deleted items: {0}'.format(response.json())
else:
return False, 'Error deleting items: {0}: {1}'.format('delete_items', response.reason)
except requests.HTTPError as http_error:
return False, http_error
except requests.exceptions.InvalidURL as url_error:
return False, url_error
except requests.RequestException as re:
return False, re
def execute(request):
query = ''
errors = 0
parameters = request['params']
archive_location = request['folder']
if not os.path.exists(archive_location):
os.makedirs(archive_location)
delete_thumbs = task_utils.get_parameter_value(parameters, 'delete_thumbnails', 'value') or False
delete_metadata = task_utils.get_parameter_value(parameters, 'delete_metadata', 'value') or False
delete_layers = task_utils.get_parameter_value(parameters, 'delete_layers', 'value') or False
request_owner = request['owner']
result_count, response_index = task_utils.get_result_count(parameters)
fq = ''
if 'fq' in parameters[response_index]['query']:
if isinstance(parameters[response_index]['query']['fq'], list):
for q in parameters[response_index]['query']['fq']:
if '{!tag=' in q:
q = q.split('}')[1]
fq += q + ' AND '
fq = fq.strip(' AND ')
else:
fq = parameters[response_index]['query']['fq'].replace("\\", "")
if 'q' in parameters[response_index]['query']:
query = parameters[response_index]['query']['q']
result = delete_items(fq, query, delete_thumbs, delete_metadata, delete_layers, request_owner)
if not result[0]:
errors += 1
errors_reasons['delete_items'] = result[1]
if errors > 0:
status_writer.send_state(status.STAT_FAILED)
else:
status_writer.send_status(result[1])
task_utils.report(os.path.join(request['folder'], '__report.json'), 1, 0, errors, errors_details=errors_reasons)
| true | true |
f7f6ef4d563cfc3eb9ca06cff2f6513e07df9a98 | 2,723 | py | Python | experiments/steven-images/discrete_classic_envs.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/steven-images/discrete_classic_envs.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/steven-images/discrete_classic_envs.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | """
Run DQN on grid world.
"""
import gym
import numpy as np
from rlkit.torch.dqn.double_dqn import DoubleDQN
import rlkit.misc.hyperparameter as hyp
import rlkit.torch.pytorch_util as ptu
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.networks import Mlp
from rlkit.torch.networks.experimental import HuberLoss
from rlkit.envs.wrappers import DiscretizeEnv
from rlkit.launchers.launcher_util import setup_logger
def experiment(variant):
env = gym.make(variant['env_id'])
training_env = gym.make(variant['env_id'])
env = DiscretizeEnv(env, variant['bins'])
training_env = DiscretizeEnv(training_env, variant['bins'])
qf = Mlp(
hidden_sizes=[32, 32],
input_size=int(np.prod(env.observation_space.shape)),
output_size=env.action_space.n,
)
qf_criterion = variant['qf_criterion_class']()
algorithm = variant['algo_class'](
env,
training_env=training_env,
qf=qf,
qf_criterion=qf_criterion,
**variant['algo_params']
)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
# noinspection PyTypeChecker
variant = dict(
algo_params=dict(
num_epochs=1000,
num_steps_per_epoch=1000,
num_steps_per_eval=500,
batch_size=128,
max_path_length=200,
discount=0.99,
epsilon=.2,
tau=0.001,
hard_update_period=1000,
replay_buffer_size=10000,
save_environment=True, # Can't serialize CartPole for some reason
),
algo_class=DoubleDQN,#DDPG,#DoubleDQN,
qf_criterion_class=HuberLoss,
bins=9,
env_id='InvertedPendulum-v2',
)
search_space = {
'env_id': [
'Reacher-v2',
],
'bins': [9],
'algo_class': [
DoubleDQN,
],
'learning_rate': [
1e-3,
1e-4
],
'qf_criterion_class': [
HuberLoss,
],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
setup_logger('dqn-images-experiment', variant=variant)
experiment(variant)
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
#for i in range(2):
run_experiment(
experiment,
variant=variant,
exp_id=exp_id,
exp_prefix="dqn-Pusher2D-test",
mode='ec2',
# use_gpu=False,
# exp_prefix="double-vs-dqn-huber-sweep-cartpole",
# mode='local',
# use_gpu=True,
)
| 28.072165 | 78 | 0.594198 |
import gym
import numpy as np
from rlkit.torch.dqn.double_dqn import DoubleDQN
import rlkit.misc.hyperparameter as hyp
import rlkit.torch.pytorch_util as ptu
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.networks import Mlp
from rlkit.torch.networks.experimental import HuberLoss
from rlkit.envs.wrappers import DiscretizeEnv
from rlkit.launchers.launcher_util import setup_logger
def experiment(variant):
env = gym.make(variant['env_id'])
training_env = gym.make(variant['env_id'])
env = DiscretizeEnv(env, variant['bins'])
training_env = DiscretizeEnv(training_env, variant['bins'])
qf = Mlp(
hidden_sizes=[32, 32],
input_size=int(np.prod(env.observation_space.shape)),
output_size=env.action_space.n,
)
qf_criterion = variant['qf_criterion_class']()
algorithm = variant['algo_class'](
env,
training_env=training_env,
qf=qf,
qf_criterion=qf_criterion,
**variant['algo_params']
)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
variant = dict(
algo_params=dict(
num_epochs=1000,
num_steps_per_epoch=1000,
num_steps_per_eval=500,
batch_size=128,
max_path_length=200,
discount=0.99,
epsilon=.2,
tau=0.001,
hard_update_period=1000,
replay_buffer_size=10000,
save_environment=True,
),
algo_class=DoubleDQN,#DDPG,#DoubleDQN,
qf_criterion_class=HuberLoss,
bins=9,
env_id='InvertedPendulum-v2',
)
search_space = {
'env_id': [
'Reacher-v2',
],
'bins': [9],
'algo_class': [
DoubleDQN,
],
'learning_rate': [
1e-3,
1e-4
],
'qf_criterion_class': [
HuberLoss,
],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
setup_logger('dqn-images-experiment', variant=variant)
experiment(variant)
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
#for i in range(2):
run_experiment(
experiment,
variant=variant,
exp_id=exp_id,
exp_prefix="dqn-Pusher2D-test",
mode='ec2',
# use_gpu=False,
# exp_prefix="double-vs-dqn-huber-sweep-cartpole",
# mode='local',
# use_gpu=True,
)
| true | true |
f7f6f2c8b3f7e336ad74f8b08e1c28298b4a4d13 | 1,283 | py | Python | cli/polyaxon/schemas/fields/tensor.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | cli/polyaxon/schemas/fields/tensor.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | cli/polyaxon/schemas/fields/tensor.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from __future__ import absolute_import, division, print_function
import six
from marshmallow import ValidationError, fields
class Tensor(fields.Field):
def _deserialize(self, value, attr, data, **kwargs):
if isinstance(value, six.string_types):
return [value, 0, 0]
if isinstance(value, list) and len(value) == 3:
condition = (
isinstance(value[0], str)
and isinstance(value[1], int)
and isinstance(value[1], int)
)
if condition:
return value
raise ValidationError("This field expects a str or a list of [str, int, int].")
| 33.763158 | 87 | 0.670304 |
from __future__ import absolute_import, division, print_function
import six
from marshmallow import ValidationError, fields
class Tensor(fields.Field):
def _deserialize(self, value, attr, data, **kwargs):
if isinstance(value, six.string_types):
return [value, 0, 0]
if isinstance(value, list) and len(value) == 3:
condition = (
isinstance(value[0], str)
and isinstance(value[1], int)
and isinstance(value[1], int)
)
if condition:
return value
raise ValidationError("This field expects a str or a list of [str, int, int].")
| true | true |
f7f6f2e3841a377ef759e9e7376a0cf572113a16 | 885 | py | Python | web/gatekeeping/api/submission_reason.py | gabegm/Headcount-Planning-Management-System | 6946509e6d8c530b9f0e51b68047c4cb01dedd2e | [
"MIT"
] | 2 | 2020-03-17T10:55:39.000Z | 2022-03-21T00:04:38.000Z | web/gatekeeping/api/submission_reason.py | gabegm/Headcount-Planning-Management-System | 6946509e6d8c530b9f0e51b68047c4cb01dedd2e | [
"MIT"
] | null | null | null | web/gatekeeping/api/submission_reason.py | gabegm/Headcount-Planning-Management-System | 6946509e6d8c530b9f0e51b68047c4cb01dedd2e | [
"MIT"
] | null | null | null | from gatekeeping.db import get_db
from flask import abort
def get_submission_reasons():
submission_reasons = get_db().execute(
'SELECT *'
' FROM submission_reason'
).fetchall()
return submission_reasons
def get_submission_reason(id):
submission_status = get_db().execute(
'SELECT *'
' FROM submission_reason'
' WHERE id = ?',
(id,)
).fetchone()
if submission_status is None:
abort(404, "Submission reason id {0} doesn't exist.".format(id))
return submission_status
def get_submission_reason_by_name(name):
submission_reason = get_db().execute(
'SELECT *'
' FROM submission_reason'
' WHERE name = ?',
(name,)
).fetchone()
if submission_reason is None:
abort(404, "Submission reason {0} doesn't exist.".format(id))
return submission_reason | 24.583333 | 72 | 0.637288 | from gatekeeping.db import get_db
from flask import abort
def get_submission_reasons():
submission_reasons = get_db().execute(
'SELECT *'
' FROM submission_reason'
).fetchall()
return submission_reasons
def get_submission_reason(id):
submission_status = get_db().execute(
'SELECT *'
' FROM submission_reason'
' WHERE id = ?',
(id,)
).fetchone()
if submission_status is None:
abort(404, "Submission reason id {0} doesn't exist.".format(id))
return submission_status
def get_submission_reason_by_name(name):
submission_reason = get_db().execute(
'SELECT *'
' FROM submission_reason'
' WHERE name = ?',
(name,)
).fetchone()
if submission_reason is None:
abort(404, "Submission reason {0} doesn't exist.".format(id))
return submission_reason | true | true |
f7f6f3bacdda0b558edd2631aae48cf8ea97b618 | 4,501 | py | Python | gen_CARAE_con_logP_SAS_TPSA.py | gicsaw/ARAE_SMILES | 05cd508be0450ad0e8944e6280b8fa2863cc8dd0 | [
"BSD-3-Clause"
] | 19 | 2019-03-20T08:26:59.000Z | 2021-11-20T11:29:15.000Z | gen_CARAE_con_logP_SAS_TPSA.py | gicsaw/ARAE_SMILES | 05cd508be0450ad0e8944e6280b8fa2863cc8dd0 | [
"BSD-3-Clause"
] | 1 | 2020-05-31T11:34:55.000Z | 2021-06-20T11:58:09.000Z | gen_CARAE_con_logP_SAS_TPSA.py | gicsaw/ARAE_SMILES | 05cd508be0450ad0e8944e6280b8fa2863cc8dd0 | [
"BSD-3-Clause"
] | 2 | 2019-12-21T06:06:58.000Z | 2021-04-06T19:40:41.000Z | from model.CARAE import ARAE
#from utils.utils import *
import numpy as np
import os, sys
import time
import tensorflow as tf
import collections
import copy
from six.moves import cPickle
#os.environ["CUDA_VISIBLE_DEVICES"] = ""
def convert_to_smiles(vector, char):
smiles=""
for i in vector:
smiles+=char[i]
return smiles
def cal_accuracy(S1, S2, length):
count = 0
for i in range(len(S1)):
if np.array_equal(S1[i][1:length[i]+1],S2[i][:length[i]]):
count+=1
return count
# Vocabulary for the SMILES decoder.  'X' is the start token and 'Y' the
# end/padding token; two-character element symbols (Cl, Br) are single tokens.
char_list= ["H","C","N","O","F","P","S","Cl","Br","I",
            "n","c","o","s",
            "1","2","3","4","5","6","7","8",
            "(",")","[","]",
            "-","=","#","/","\\","+","@","X","Y"]
# Inverse mapping of char_list: symbol -> vocabulary index.
char_dict={'H': 0, 'C': 1, 'N': 2, 'O': 3, 'F': 4, 'P': 5,
           'S': 6, 'Cl': 7, 'Br': 8, 'I': 9,
           'n': 10, 'c': 11, 'o': 12, 's': 13,
           '1': 14, '2': 15, '3': 16, '4': 17, '5': 18, '6': 19, '7': 20, '8': 21,
           '(': 22, ')': 23, '[': 24, ']': 25, '-': 26, '=': 27, '#': 28,
           '/': 29, '\\': 30, '+': 31, '@': 32, 'X': 33, 'Y': 34}
# Model hyper-parameters -- presumably these must match the values used at
# training time; confirm against the training script before changing.
vocab_size = len(char_list)
latent_size = 300
batch_size = 100
sample_size = 100
seq_length = 110
dev = 0.0
# Target properties [logP, SAS, TPSA] are read from the command line.
if len(sys.argv)<=3:
    print("python gen_CARAE_con_logP_SAS_TPSA logP SAS TPSA ")
    sys.exit()
logP_set=float(sys.argv[1])
SAS_set=float(sys.argv[2])
TPSA_set=float(sys.argv[3])
task_val=np.array([logP_set,SAS_set,TPSA_set])
print(task_val)
model_name="CARAE_logP_SAS_TPSA"
save_dir="./save/"+model_name
# The output directory name encodes the requested property values.
out_dir0="out_"+model_name+"G_%d_%d_%d" %(int(logP_set*10),int(SAS_set),int(TPSA_set))
if not os.path.exists(out_dir0):
    os.makedirs(out_dir0)
property_task=3
# Normalisation constants and bounds for the three conditional properties.
task_nor=np.array([10.0,10.0,150.0])
task_low=np.array([-1.0,1.0,0.0])
task_high=np.array([5.0,8.0,150.0])
task_low=task_low/task_nor
task_high=task_high/task_nor
task_val=task_val/task_nor
# Total number of molecules to sample per epoch checkpoint.
Ntest=10000
num_test_batches = int(Ntest/batch_size)
model = ARAE(vocab_size = vocab_size,
             batch_size = batch_size,
             latent_size = latent_size,
             sample_size = sample_size,
             property_task = property_task
             )
total_st=time.time()
# Generate from the checkpoint(s) listed here (epoch numbers).
epochs=[39]
for epoch in epochs:
    out_dir=out_dir0+"/%d" %epoch
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    output_file=out_dir+"/result_"+model_name+"_%d.txt" %epoch
    # NOTE(review): fp0 is opened but never written to or closed -- confirm
    # whether this result file is still needed.
    fp0=open(output_file,"w")
    model.restore(save_dir+"/model.ckpt-%d" %epoch)
    latent_vector_fake=[]
    Y_fake=[]
    P_fake=[]
    smiles_fake=[]
    for itest in range(num_test_batches):
        decoder_state = model.get_decoder_state()
        # Sample generator noise, clipped to [-1, 1].
        s = np.random.normal(0.0, 0.25, [batch_size, sample_size]).clip(-1.0,1.0)
        # Every row of the batch is conditioned on the same target properties.
        p=np.empty([batch_size,property_task])
        p[:,0].fill(task_val[0])
        p[:,1].fill(task_val[1])
        p[:,2].fill(task_val[2])
        P_fake.append(p)
        latent_vector = model.generate_latent_vector(s)
        latent_vector_fake.append(latent_vector)
        # Decode autoregressively, starting from the 'X' start token.
        start_token = np.array([char_list.index('X') for i in range(batch_size)])
        start_token = np.reshape(start_token, [batch_size, 1])
        length = np.array([1 for i in range(batch_size)])
        smiles = ['' for i in range(batch_size)]
        Y=[]
        for i in range(seq_length):
            m, state = model.generate_molecule(start_token, latent_vector, length, p, decoder_state)
            decoder_state = state
            # Greedy decoding: feed the arg-max token back in at each step.
            start_token = np.argmax(m,2)
            Y.append(start_token[:,0])
            smiles = [s + str(char_list[start_token[j][0]]) for j,s in enumerate(smiles)]
        # Transpose Y from (seq_length, batch) to (batch, seq_length).
        Y=list(map(list,zip(*Y)))
        Y_fake.append(Y)
        smiles_fake+=smiles
    latent_vector_fake=np.array(latent_vector_fake,dtype="float32").reshape(-1,latent_size)
    P_fake=np.array(P_fake,dtype="float32").reshape(-1,property_task)
    Y_fake=np.array(Y_fake,dtype="int32").reshape(-1,seq_length)
    # Persist latent vectors, conditioning properties, token ids and SMILES.
    outfile=out_dir+"/Zfake.npy"
    np.save(outfile,latent_vector_fake)
    outfile=out_dir+"/Pfake.npy"
    np.save(outfile,P_fake)
    outfile=out_dir+"/Yfake.npy"
    np.save(outfile,Y_fake)
    outfile=out_dir+"/smiles_fake.txt"
    fp_out=open(outfile,'w')
    for line in smiles_fake:
        line_out=line+"\n"
        fp_out.write(line_out)
    fp_out.close()
total_et=time.time()
print ("total_time : ", total_et-total_st)
| 27.956522 | 100 | 0.610975 | from model.CARAE import ARAE
import numpy as np
import os, sys
import time
import tensorflow as tf
import collections
import copy
from six.moves import cPickle
def convert_to_smiles(vector, char):
smiles=""
for i in vector:
smiles+=char[i]
return smiles
def cal_accuracy(S1, S2, length):
count = 0
for i in range(len(S1)):
if np.array_equal(S1[i][1:length[i]+1],S2[i][:length[i]]):
count+=1
return count
char_list= ["H","C","N","O","F","P","S","Cl","Br","I",
"n","c","o","s",
"1","2","3","4","5","6","7","8",
"(",")","[","]",
"-","=","#","/","\\","+","@","X","Y"]
char_dict={'H': 0, 'C': 1, 'N': 2, 'O': 3, 'F': 4, 'P': 5,
'S': 6, 'Cl': 7, 'Br': 8, 'I': 9,
'n': 10, 'c': 11, 'o': 12, 's': 13,
'1': 14, '2': 15, '3': 16, '4': 17, '5': 18, '6': 19, '7': 20, '8': 21,
'(': 22, ')': 23, '[': 24, ']': 25, '-': 26, '=': 27, '#': 28,
'/': 29, '\\': 30, '+': 31, '@': 32, 'X': 33, 'Y': 34}
vocab_size = len(char_list)
latent_size = 300
batch_size = 100
sample_size = 100
seq_length = 110
dev = 0.0
if len(sys.argv)<=3:
print("python gen_CARAE_con_logP_SAS_TPSA logP SAS TPSA ")
sys.exit()
logP_set=float(sys.argv[1])
SAS_set=float(sys.argv[2])
TPSA_set=float(sys.argv[3])
task_val=np.array([logP_set,SAS_set,TPSA_set])
print(task_val)
model_name="CARAE_logP_SAS_TPSA"
save_dir="./save/"+model_name
out_dir0="out_"+model_name+"G_%d_%d_%d" %(int(logP_set*10),int(SAS_set),int(TPSA_set))
if not os.path.exists(out_dir0):
os.makedirs(out_dir0)
property_task=3
task_nor=np.array([10.0,10.0,150.0])
task_low=np.array([-1.0,1.0,0.0])
task_high=np.array([5.0,8.0,150.0])
task_low=task_low/task_nor
task_high=task_high/task_nor
task_val=task_val/task_nor
Ntest=10000
num_test_batches = int(Ntest/batch_size)
model = ARAE(vocab_size = vocab_size,
batch_size = batch_size,
latent_size = latent_size,
sample_size = sample_size,
property_task = property_task
)
total_st=time.time()
epochs=[39]
for epoch in epochs:
out_dir=out_dir0+"/%d" %epoch
if not os.path.exists(out_dir):
os.makedirs(out_dir)
output_file=out_dir+"/result_"+model_name+"_%d.txt" %epoch
fp0=open(output_file,"w")
model.restore(save_dir+"/model.ckpt-%d" %epoch)
latent_vector_fake=[]
Y_fake=[]
P_fake=[]
smiles_fake=[]
for itest in range(num_test_batches):
decoder_state = model.get_decoder_state()
s = np.random.normal(0.0, 0.25, [batch_size, sample_size]).clip(-1.0,1.0)
p=np.empty([batch_size,property_task])
p[:,0].fill(task_val[0])
p[:,1].fill(task_val[1])
p[:,2].fill(task_val[2])
P_fake.append(p)
latent_vector = model.generate_latent_vector(s)
latent_vector_fake.append(latent_vector)
start_token = np.array([char_list.index('X') for i in range(batch_size)])
start_token = np.reshape(start_token, [batch_size, 1])
length = np.array([1 for i in range(batch_size)])
smiles = ['' for i in range(batch_size)]
Y=[]
for i in range(seq_length):
m, state = model.generate_molecule(start_token, latent_vector, length, p, decoder_state)
decoder_state = state
start_token = np.argmax(m,2)
Y.append(start_token[:,0])
smiles = [s + str(char_list[start_token[j][0]]) for j,s in enumerate(smiles)]
Y=list(map(list,zip(*Y)))
Y_fake.append(Y)
smiles_fake+=smiles
latent_vector_fake=np.array(latent_vector_fake,dtype="float32").reshape(-1,latent_size)
P_fake=np.array(P_fake,dtype="float32").reshape(-1,property_task)
Y_fake=np.array(Y_fake,dtype="int32").reshape(-1,seq_length)
outfile=out_dir+"/Zfake.npy"
np.save(outfile,latent_vector_fake)
outfile=out_dir+"/Pfake.npy"
np.save(outfile,P_fake)
outfile=out_dir+"/Yfake.npy"
np.save(outfile,Y_fake)
outfile=out_dir+"/smiles_fake.txt"
fp_out=open(outfile,'w')
for line in smiles_fake:
line_out=line+"\n"
fp_out.write(line_out)
fp_out.close()
total_et=time.time()
print ("total_time : ", total_et-total_st)
| true | true |
f7f6f491f228f56ba1af199f3b02bb6c0fecda27 | 685 | py | Python | xsec-tools/nesteddict.py | jsturdy/wsu-ci-analysis-tools | 672661d12ea751190a67e64dd9864ad54c5ad267 | [
"MIT"
] | null | null | null | xsec-tools/nesteddict.py | jsturdy/wsu-ci-analysis-tools | 672661d12ea751190a67e64dd9864ad54c5ad267 | [
"MIT"
] | null | null | null | xsec-tools/nesteddict.py | jsturdy/wsu-ci-analysis-tools | 672661d12ea751190a67e64dd9864ad54c5ad267 | [
"MIT"
] | null | null | null | from collections import defaultdict as cdict
class nesteddict(dict):
    """A dict whose missing keys auto-create nested ``nesteddict`` values.

    Any chain of keys can be indexed without pre-creating the intermediate
    levels -- effectively ``mkdir -p`` for dicts::

        d = nesteddict()
        d["key1"]["subkey2"] = value

    Based on https://stackoverflow.com/questions/635483/what-is-the-best-way-to-implement-nested-dictionaries#652284
    """

    def __missing__(self, key):
        # Create, store, and return a fresh nested mapping so that later
        # lookups of ``key`` hit the stored instance directly (faster than
        # re-triggering __missing__).
        child = type(self)()
        self[key] = child
        return child
| 31.136364 | 115 | 0.683212 | from collections import defaultdict as cdict
class nesteddict(dict):
def __missing__(self, key):
value = self[key] = type(self)()
return value
| true | true |
f7f6f5ec9d1bf323f10952f740fa415fa974483a | 3,134 | py | Python | rec_to_nwb/processing/validation/ntrode_validator.py | jihyunbak/rec_to_nwb | 6e65f8bf0a4faa4d986483ec2442ba19d70c92a9 | [
"Apache-2.0"
] | 8 | 2020-05-29T13:48:35.000Z | 2021-11-19T04:24:48.000Z | rec_to_nwb/processing/validation/ntrode_validator.py | jihyunbak/rec_to_nwb | 6e65f8bf0a4faa4d986483ec2442ba19d70c92a9 | [
"Apache-2.0"
] | 8 | 2020-07-13T00:42:35.000Z | 2020-11-16T16:17:12.000Z | rec_to_nwb/processing/validation/ntrode_validator.py | jihyunbak/rec_to_nwb | 6e65f8bf0a4faa4d986483ec2442ba19d70c92a9 | [
"Apache-2.0"
] | 1 | 2020-08-28T01:34:35.000Z | 2020-08-28T01:34:35.000Z | import logging.config
import os
from rec_to_nwb.processing.exceptions.invalid_header_exception import InvalidHeaderException
from rec_to_nwb.processing.exceptions.invalid_metadata_exception import InvalidMetadataException
from rec_to_nwb.processing.header.module.header import Header
from rec_to_nwb.processing.tools.beartype.beartype import beartype
from rec_to_nwb.processing.tools.count_electrodes_in_ntrode import count_electrodes_in_ntrode
from rec_to_nwb.processing.tools.count_electrodes_in_probe import count_electrodes_in_probe
from rec_to_nwb.processing.tools.filter_probe_by_type import filter_probe_by_type
from rec_to_nwb.processing.validation.ntrode_validation_summary import NTrodeValidationSummary
from rec_to_nwb.processing.validation.validator import Validator
# Configure logging from the repository-level logging.conf so this module's
# logger participates in the package-wide logging setup.
path = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig(fname=str(path) + '/../../logging.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class NTrodeValidator(Validator):
    """Validates ntrode metadata against the rec header and probe schemas."""

    @beartype
    def __init__(self, metadata: dict, header: Header, probes_metadata: list):
        self.metadata = metadata
        self.header = header
        self.probes_metadata = probes_metadata

    def create_summary(self):
        """Cross-check metadata and header, returning an NTrodeValidationSummary.

        Raises InvalidMetadataException when no ntrodes are defined or when
        group/probe electrode counts disagree; InvalidHeaderException when the
        rec header lacks spike_n_trodes data.
        """
        ntrodes = self.metadata['ntrode electrode group channel map']
        if len(ntrodes) == 0:
            raise InvalidMetadataException("There are no ntrodes defined in metadata.yml file.")

        header_has_spikes = (
            self.header is not None
            and self.header.configuration.spike_configuration is not None
            and self.header.configuration.spike_configuration.spike_n_trodes is not None
        )
        if not header_has_spikes:
            raise InvalidHeaderException("Rec header does not contain spike_n_trodes data")

        spike_ntrodes = self.header.configuration.spike_configuration.spike_n_trodes
        self.validate_ntrode_metadata_with_probe_metadata(self.metadata, self.probes_metadata)
        return NTrodeValidationSummary(len(ntrodes), len(spike_ntrodes))

    @staticmethod
    def validate_ntrode_metadata_with_probe_metadata(metadata, probes_metadata):
        """Ensure each electrode group's channel count matches its probe schema."""
        for electrode_group in metadata['electrode groups']:
            probe_metadata = filter_probe_by_type(probes_metadata, electrode_group['device_type'])
            electrodes_in_probe = count_electrodes_in_probe(probe_metadata)
            electrodes_in_group = count_electrodes_in_ntrode(
                metadata['ntrode electrode group channel map'],
                electrode_group['id']
            )
            if electrodes_in_probe == electrodes_in_group:
                continue
            raise InvalidMetadataException(
                'Ntrode definition in metadata is not compatible with probe schema.' +
                'Probe_type: ' + str(electrode_group['device_type']) +
                ' electrodes in this probe_type: ' + str(electrodes_in_probe) +
                '. Ntrode_metadata for electrode_group of id: ' + str(electrode_group['id']) +
                ' electrodes in this electrode_group: ' + str(electrodes_in_group)
            )
| 51.377049 | 98 | 0.736758 | import logging.config
import os
from rec_to_nwb.processing.exceptions.invalid_header_exception import InvalidHeaderException
from rec_to_nwb.processing.exceptions.invalid_metadata_exception import InvalidMetadataException
from rec_to_nwb.processing.header.module.header import Header
from rec_to_nwb.processing.tools.beartype.beartype import beartype
from rec_to_nwb.processing.tools.count_electrodes_in_ntrode import count_electrodes_in_ntrode
from rec_to_nwb.processing.tools.count_electrodes_in_probe import count_electrodes_in_probe
from rec_to_nwb.processing.tools.filter_probe_by_type import filter_probe_by_type
from rec_to_nwb.processing.validation.ntrode_validation_summary import NTrodeValidationSummary
from rec_to_nwb.processing.validation.validator import Validator
path = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig(fname=str(path) + '/../../logging.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class NTrodeValidator(Validator):
@beartype
def __init__(self, metadata: dict, header: Header, probes_metadata: list):
self.metadata = metadata
self.header = header
self.probes_metadata = probes_metadata
def create_summary(self):
ntrodes = self.metadata['ntrode electrode group channel map']
if len(ntrodes) == 0:
raise InvalidMetadataException("There are no ntrodes defined in metadata.yml file.")
if self.header is None or \
self.header.configuration.spike_configuration is None or \
self.header.configuration.spike_configuration.spike_n_trodes is None:
raise InvalidHeaderException("Rec header does not contain spike_n_trodes data")
spike_ntrodes = self.header.configuration.spike_configuration.spike_n_trodes
ntrodes_num = len(ntrodes)
spike_ntrodes_num = len(spike_ntrodes)
self.validate_ntrode_metadata_with_probe_metadata(self.metadata, self.probes_metadata)
return NTrodeValidationSummary(ntrodes_num, spike_ntrodes_num)
@staticmethod
def validate_ntrode_metadata_with_probe_metadata(metadata, probes_metadata):
for electrode_group in metadata['electrode groups']:
probe_metadata = filter_probe_by_type(probes_metadata, electrode_group['device_type'])
electrodes_in_probe = count_electrodes_in_probe(probe_metadata)
electrodes_in_group = count_electrodes_in_ntrode(
metadata['ntrode electrode group channel map'],
electrode_group['id']
)
if electrodes_in_probe != electrodes_in_group:
raise InvalidMetadataException(
'Ntrode definition in metadata is not compatible with probe schema.' +
'Probe_type: ' + str(electrode_group['device_type']) +
' electrodes in this probe_type: ' + str(electrodes_in_probe) +
'. Ntrode_metadata for electrode_group of id: ' + str(electrode_group['id']) +
' electrodes in this electrode_group: ' + str(electrodes_in_group)
)
| true | true |
f7f6f6d9152763afcbf0216e257c13b489de2a2d | 20,049 | py | Python | sunpy/instr/tests/test_lyra.py | mridullpandey/sunpy | 65bf70731a8147899b8c0fca8b3b1a386e47c010 | [
"BSD-2-Clause"
] | null | null | null | sunpy/instr/tests/test_lyra.py | mridullpandey/sunpy | 65bf70731a8147899b8c0fca8b3b1a386e47c010 | [
"BSD-2-Clause"
] | 2 | 2015-06-15T17:16:11.000Z | 2016-11-23T17:12:07.000Z | sunpy/instr/tests/test_lyra.py | mridullpandey/sunpy | 65bf70731a8147899b8c0fca8b3b1a386e47c010 | [
"BSD-2-Clause"
] | 2 | 2016-11-22T01:35:15.000Z | 2017-04-26T19:37:53.000Z | import os.path
import datetime
import tempfile
import numpy as np
import pandas
import pytest
import astropy.units as u
from astropy.time import TimeDelta
from sunpy import timeseries
from sunpy.data.test import rootdir
from sunpy.instr import lyra
from sunpy.time import is_time_equal, parse_time
# Location of the test LYTAF database files shipped with the test suite.
TEST_DATA_PATH = rootdir

# Synthetic inputs for the _remove_lytaf_events tests: 120 one-minute samples
# starting 2013-02-01, with two constant-valued channels.
TIME = parse_time(np.array([datetime.datetime(2013, 2, 1) + datetime.timedelta(minutes=i)
                            for i in range(120)]))
CHANNELS = [np.zeros(len(TIME)) + 0.4, np.zeros(len(TIME)) + 0.1]
# Empty structured array matching the LYTAF record layout.
EMPTY_LYTAF = np.empty((0,), dtype=[("insertion_time", object),
                                    ("begin_time", object),
                                    ("reference_time", object),
                                    ("end_time", object),
                                    ("event_type", object),
                                    ("event_definition", object)])
# LYTAF_TEST holds two fake events: a LAR that overlaps TIME (and so should
# be removed) and a UV occultation that does not.
LYTAF_TEST = np.append(
    EMPTY_LYTAF,
    np.array([(parse_time(datetime.datetime.utcfromtimestamp(1371459961)),
               parse_time(datetime.datetime.utcfromtimestamp(1359677220)),
               parse_time(datetime.datetime.utcfromtimestamp(1359677250)),
               parse_time(datetime.datetime.utcfromtimestamp(1359677400)),
               "LAR", "Large Angle Rotation.")],
             dtype=EMPTY_LYTAF.dtype))
LYTAF_TEST = np.append(
    LYTAF_TEST,
    np.array([(parse_time(datetime.datetime.utcfromtimestamp(1371460063)),
               parse_time(datetime.datetime.utcfromtimestamp(1359681764)),
               parse_time(datetime.datetime.utcfromtimestamp(1359682450)),
               parse_time(datetime.datetime.utcfromtimestamp(1359683136)),
               "UV occ.", "Occultation in the UV spectrum.")],
             dtype=LYTAF_TEST.dtype))
@pytest.mark.remote_data
def test_split_series_using_lytaf():
    """
    Download the LYTAF file and check splitting of a dummy signal around it.
    """
    # Build an hour-long dummy series at one-second cadence.
    n_samples = 3600
    start = parse_time('2010-06-13 02:00')
    times = [start + TimeDelta(sec * u.second) for sec in range(n_samples)]
    values = np.random.random(n_samples)
    lytaf = lyra.get_lytaf_events('2010-06-13 02:00', '2010-06-13 06:00',
                                  combine_files=["ppt"])
    split = lyra.split_series_using_lytaf(times, values, lytaf)
    assert type(split) == list
    assert len(split) == 4
    # First/last timestamps of selected segments.
    edge_expectations = {
        0: ((2010, 6, 13, 2, 0), (2010, 6, 13, 2, 7, 2)),
        3: ((2010, 6, 13, 2, 59, 42), (2010, 6, 13, 2, 59, 58)),
    }
    for idx, (first, last) in edge_expectations.items():
        assert is_time_equal(split[idx]['subtimes'][0], parse_time(first))
        assert is_time_equal(split[idx]['subtimes'][-1], parse_time(last))
    # With no overlapping LYTAF events the series must come back in one piece.
    untouched = lyra.split_series_using_lytaf(times, values, LYTAF_TEST)
    assert type(untouched) == list
    assert type(untouched[0]) == dict
    assert not set(untouched[0].keys()).symmetric_difference({'subtimes', 'subdata'})
    assert untouched[0]["subtimes"] == times
    assert untouched[0]["subdata"].all() == values.all()
@pytest.fixture
def lyra_ts():
    """Sample LYRA TimeSeries with its data replaced by deterministic channels."""
    ts = timeseries.TimeSeries(
        os.path.join(rootdir, 'lyra_20150101-000000_lev3_std_truncated.fits.gz'),
        source='LYRA')
    # CHANNEL1/3 carry the 0.4 series, CHANNEL2/4 the 0.1 series.
    frame = {"CHANNEL{0}".format(i + 1): CHANNELS[i % 2] for i in range(4)}
    ts.data = pandas.DataFrame(index=TIME, data=frame)
    return ts
@pytest.mark.remote_data
def test_remove_lytaf_events_from_timeseries(lyra_ts):
    """
    Test that artifacts are correctly removed from a TimeSeries.
    """
    # Check correct errors are raised due to bad input (non-TimeSeries).
    with pytest.raises(AttributeError):
        ts_test = lyra.remove_lytaf_events_from_timeseries(
            [], force_use_local_lytaf=True)
    # Run remove_lytaf_events_from_timeseries, returning artifact status.
    ts_test, artifact_status_test = \
        lyra.remove_lytaf_events_from_timeseries(
            lyra_ts, artifacts=["LAR", "Offpoint"], return_artifacts=True,
            force_use_local_lytaf=True)
    # Generate expected data by calling the lower-level _remove_lytaf_events
    # with the same channels and constructing the expected frame manually.
    time, channels, artifact_status_expected = lyra._remove_lytaf_events(
        lyra_ts.data.index, channels=[np.asanyarray(lyra_ts.data["CHANNEL1"]),
                                      np.asanyarray(lyra_ts.data["CHANNEL2"]),
                                      np.asanyarray(lyra_ts.data["CHANNEL3"]),
                                      np.asanyarray(lyra_ts.data["CHANNEL4"])],
        artifacts=["LAR", "Offpoint"], return_artifacts=True,
        force_use_local_lytaf=True)
    dataframe_expected = pandas.DataFrame(index=time,
                                          data={"CHANNEL1": channels[0],
                                                "CHANNEL2": channels[1],
                                                "CHANNEL3": channels[2],
                                                "CHANNEL4": channels[3]})
    # Assert the cleaned data and every artifact-status entry match.
    pandas.util.testing.assert_frame_equal(ts_test.data, dataframe_expected)
    assert artifact_status_test.keys() == artifact_status_expected.keys()
    np.testing.assert_array_equal(artifact_status_test["lytaf"],
                                  artifact_status_expected["lytaf"])
    np.testing.assert_array_equal(artifact_status_test["removed"],
                                  artifact_status_expected["removed"])
    np.testing.assert_array_equal(artifact_status_test["not_removed"],
                                  artifact_status_expected["not_removed"])
    assert artifact_status_test["not_found"] == \
        artifact_status_expected["not_found"]
    # Run again without returning artifact status; result must be identical.
    ts_test = \
        lyra.remove_lytaf_events_from_timeseries(
            lyra_ts, artifacts=["LAR", "Offpoint"],
            force_use_local_lytaf=True)
    # Assert expected result is returned
    pandas.util.testing.assert_frame_equal(ts_test.data, dataframe_expected)
@pytest.fixture()
def local_cache(sunpy_cache):
    """Redirect the LYTAF annotation-database downloads to local test files."""
    cache = sunpy_cache('sunpy.instr.lyra.cache')
    for filename in ('annotation_lyra.db', 'annotation_manual.db',
                     'annotation_ppt.db', 'annotation_science.db'):
        cache.add('http://proba2.oma.be/lyra/data/lytaf/' + filename,
                  os.path.join(TEST_DATA_PATH, filename))
def test_remove_lytaf_events_1(local_cache):
    """
    Test _remove_lytaf_events() with some artifacts found and others not.
    """
    # Run _remove_lytaf_events with channels supplied.
    time_test, channels_test, artifacts_status_test = \
        lyra._remove_lytaf_events(
            TIME, channels=CHANNELS, artifacts=["LAR", "Offpoint"],
            return_artifacts=True, force_use_local_lytaf=True)
    # Generate expected result: drop the samples inside the LAR event.
    bad_indices = np.logical_and(TIME >= LYTAF_TEST["begin_time"][0],
                                 TIME <= LYTAF_TEST["end_time"][0])
    bad_indices = np.arange(len(TIME))[bad_indices]
    time_expected = np.delete(TIME, bad_indices)
    channels_expected = [np.delete(CHANNELS[0], bad_indices),
                         np.delete(CHANNELS[1], bad_indices)]
    artifacts_status_expected = {"lytaf": LYTAF_TEST, "removed": LYTAF_TEST[0],
                                 "not_removed": LYTAF_TEST[1],
                                 "not_found": ["Offpoint"]}
    # NOTE(review): ``x.all() == y.all()`` compares two scalars, not the
    # arrays element-wise -- presumably np.testing.assert_array_equal was
    # intended; confirm before tightening.
    assert time_test.all() == time_expected.all()
    assert (channels_test[0]).all() == (channels_expected[0]).all()
    assert (channels_test[1]).all() == (channels_expected[1]).all()
    assert artifacts_status_test.keys() == artifacts_status_expected.keys()
    np.testing.assert_array_equal(artifacts_status_test["lytaf"],
                                  artifacts_status_expected["lytaf"])
    np.testing.assert_array_equal(artifacts_status_test["removed"],
                                  artifacts_status_expected["removed"])
    np.testing.assert_array_equal(artifacts_status_test["not_removed"],
                                  artifacts_status_expected["not_removed"])
    assert artifacts_status_test["not_found"] == \
        artifacts_status_expected["not_found"]
    # Test that correct values are returned when the channels kwarg is not
    # supplied (only the cleaned time array and status come back).
    time_test, artifacts_status_test = \
        lyra._remove_lytaf_events(
            TIME, artifacts=["LAR", "Offpoint"],
            return_artifacts=True, force_use_local_lytaf=True)
    # Assert test values are same as expected
    assert time_test.all() == time_expected.all()
    assert artifacts_status_test.keys() == artifacts_status_expected.keys()
    np.testing.assert_array_equal(artifacts_status_test["lytaf"],
                                  artifacts_status_expected["lytaf"])
    np.testing.assert_array_equal(artifacts_status_test["removed"],
                                  artifacts_status_expected["removed"])
    np.testing.assert_array_equal(artifacts_status_test["not_removed"],
                                  artifacts_status_expected["not_removed"])
    assert artifacts_status_test["not_found"] == \
        artifacts_status_expected["not_found"]
def test_remove_lytaf_events_2(local_cache):
    """
    Test _remove_lytaf_events() with no user artifacts found.
    """
    # Run _remove_lytaf_events; a warning must be issued because "Offpoint"
    # does not occur in the test LYTAF data.
    with pytest.warns(UserWarning, match='None of user supplied artifacts were found.'):
        time_test, channels_test, artifacts_status_test = \
            lyra._remove_lytaf_events(
                TIME, channels=CHANNELS, artifacts="Offpoint",
                return_artifacts=True, force_use_local_lytaf=True)
    # Expected result: nothing removed, inputs returned unchanged.
    time_expected = TIME
    channels_expected = CHANNELS
    artifacts_status_expected = {"lytaf": LYTAF_TEST, "removed": EMPTY_LYTAF,
                                 "not_removed": LYTAF_TEST,
                                 "not_found": ["Offpoint"]}
    # NOTE(review): ``x.all() == y.all()`` compares two scalars, not the
    # arrays element-wise -- confirm before tightening.
    assert np.all(time_test == time_expected)
    assert (channels_test[0]).all() == (channels_expected[0]).all()
    assert (channels_test[1]).all() == (channels_expected[1]).all()
    assert artifacts_status_test.keys() == artifacts_status_expected.keys()
    np.testing.assert_array_equal(artifacts_status_test["lytaf"],
                                  artifacts_status_expected["lytaf"])
    np.testing.assert_array_equal(artifacts_status_test["removed"],
                                  artifacts_status_expected["removed"])
    np.testing.assert_array_equal(artifacts_status_test["not_removed"],
                                  artifacts_status_expected["not_removed"])
    assert artifacts_status_test["not_found"] == \
        artifacts_status_expected["not_found"]
    # Test correct values are returned when return_artifacts kwarg not
    # supplied.
    # Case 1: channels kwarg supplied -- (time, channels) tuple comes back.
    with pytest.warns(UserWarning, match='None of user supplied artifacts were found.'):
        time_test, channels_test = lyra._remove_lytaf_events(
            TIME, channels=CHANNELS, artifacts=["Offpoint"], force_use_local_lytaf=True)
    assert np.all(time_test == time_expected)
    assert (channels_test[0]).all() == (channels_expected[0]).all()
    assert (channels_test[1]).all() == (channels_expected[1]).all()
    # Case 2: no channels kwarg -- only the time array comes back.
    with pytest.warns(UserWarning, match='None of user supplied artifacts were found.'):
        time_test = lyra._remove_lytaf_events(
            TIME, artifacts=["Offpoint"], force_use_local_lytaf=True)
    assert np.all(time_test == time_expected)
def test_remove_lytaf_events_3(local_cache):
    """
    Check that bad inputs to _remove_lytaf_events() raise the right errors.
    """
    bad_calls = [
        # channels must be a list of arrays, not an int
        (TypeError, dict(channels=6, artifacts=["LAR"])),
        # artifacts kwarg is required
        (ValueError, dict()),
        # artifact entries must be strings
        (TypeError, dict(artifacts=[6])),
        # artifact names must be known event types
        (ValueError, dict(artifacts=["LAR", "incorrect artifact type"])),
    ]
    for expected_exception, kwargs in bad_calls:
        with pytest.raises(expected_exception):
            lyra._remove_lytaf_events(TIME, force_use_local_lytaf=True, **kwargs)
def test_get_lytaf_events(local_cache):
    """
    Test if LYTAF events are correctly downloaded and read in.
    """
    # Run get_lytaf_events against the local annotation databases.
    lytaf_test = lyra.get_lytaf_events("2008-01-01", "2014-01-01",
                                       force_use_local_lytaf=True)
    # Form the expected structured array: the eight events stored in the
    # bundled test databases, as Unix epoch timestamps.
    insertion_time = [datetime.datetime.utcfromtimestamp(1371459961),
                      datetime.datetime.utcfromtimestamp(1371460063),
                      datetime.datetime.utcfromtimestamp(1371460411),
                      datetime.datetime.utcfromtimestamp(1371460493),
                      datetime.datetime.utcfromtimestamp(1371460403),
                      datetime.datetime.utcfromtimestamp(1371470988),
                      datetime.datetime.utcfromtimestamp(1371211791),
                      datetime.datetime.utcfromtimestamp(1371212303)]
    begin_time = [datetime.datetime.utcfromtimestamp(1359677220),
                  datetime.datetime.utcfromtimestamp(1359681764),
                  datetime.datetime.utcfromtimestamp(1360748513),
                  datetime.datetime.utcfromtimestamp(1361115900),
                  datetime.datetime.utcfromtimestamp(1361980964),
                  datetime.datetime.utcfromtimestamp(1368581100),
                  datetime.datetime.utcfromtimestamp(1371032084),
                  datetime.datetime.utcfromtimestamp(1371158167)]
    reference_time = [datetime.datetime.utcfromtimestamp(1359677250),
                      datetime.datetime.utcfromtimestamp(1359682450),
                      datetime.datetime.utcfromtimestamp(1360751528),
                      datetime.datetime.utcfromtimestamp(1361116200),
                      datetime.datetime.utcfromtimestamp(1361983979),
                      datetime.datetime.utcfromtimestamp(1368582480),
                      datetime.datetime.utcfromtimestamp(1371045475),
                      datetime.datetime.utcfromtimestamp(1371162600)]
    end_time = [datetime.datetime.utcfromtimestamp(1359677400),
                datetime.datetime.utcfromtimestamp(1359683136),
                datetime.datetime.utcfromtimestamp(1360754543),
                datetime.datetime.utcfromtimestamp(1361116320),
                datetime.datetime.utcfromtimestamp(1361986994),
                datetime.datetime.utcfromtimestamp(1368583080),
                datetime.datetime.utcfromtimestamp(1371050025),
                datetime.datetime.utcfromtimestamp(1371167100)]
    event_type = ["LAR", "UV occ.", "Vis LED on", "M Flare", "UV LED on",
                  "X Flare", "Off-limb event", "Unexplained feature"]
    event_description = ["Large Angle Rotation.",
                         "Occultation in the UV spectrum.",
                         "Visual LED is turned on.",
                         "M class solar flare.",
                         "UV LED is turned on.",
                         "X class solar flare.",
                         "Off-limb event in SWAP.",
                         "Unexplained feature."]
    lytaf_expected = np.empty((8,), dtype=[("insertion_time", object),
                                           ("begin_time", object),
                                           ("reference_time", object),
                                           ("end_time", object),
                                           ("event_type", object),
                                           ("event_definition", object)])
    lytaf_expected["insertion_time"] = insertion_time
    lytaf_expected["begin_time"] = begin_time
    lytaf_expected["reference_time"] = reference_time
    lytaf_expected["end_time"] = end_time
    lytaf_expected["event_type"] = event_type
    lytaf_expected["event_definition"] = event_description
    # Assert that get_lytaf_events gives the right result.
    np.testing.assert_array_equal(lytaf_test, lytaf_expected)

    # Check the correct error is raised when the names of the individual
    # lytaf files are incorrectly given.
    with pytest.raises(ValueError):
        lytaf_test = lyra.get_lytaf_events("2008-01-01", "2014-01-01",
                                           combine_files=["gigo"],
                                           force_use_local_lytaf=True)
def test_get_lytaf_event_types(local_cache):
    """Smoke test: printing the LYTAF event types must not raise."""
    lyra.get_lytaf_event_types()
def test_lytaf_event2string():
    """
    Check the number-to-event-name mapping for list and scalar inputs.
    """
    expected = ['LAR', 'N/A', 'UV occult.', 'Vis. occult.', 'Offpoint',
                'SAA', 'Auroral zone', 'Moon in LYRA', 'Moon in SWAP',
                'Venus in LYRA', 'Venus in SWAP']
    assert lyra._lytaf_event2string(list(range(12))) == expected
    # A bare integer should come back as a one-element list.
    assert lyra._lytaf_event2string(1) == ['LAR']
def test_prep_columns():
    """
    Test whether _prep_columns correctly prepares data.
    """
    # Generate simple input data: two timestamps and two short channels.
    time_input = TIME[0:2]
    time_input.precision = 9
    channels_input = [CHANNELS[0][0:2], CHANNELS[1][0:2]]
    filecolumns_input = ["time", "channel0", "channel1"]
    # Test case when channels and filecolumns are supplied by user.
    string_time_test, filecolumns_test = lyra._prep_columns(
        time_input, channels_input, filecolumns_input)
    # Generate expected output and verify _prep_columns() works
    string_time_expected = np.array(time_input.isot)
    filecolumns_expected = ["time", "channel0", "channel1"]
    np.testing.assert_array_equal(string_time_test, string_time_expected)
    assert filecolumns_test == filecolumns_expected
    # Test case when channels supplied by user but not filecolumns:
    # default column names should be generated.
    string_time_test, filecolumns_test = lyra._prep_columns(time_input,
                                                            channels_input)
    np.testing.assert_array_equal(string_time_test, string_time_expected)
    assert filecolumns_test == filecolumns_expected
    # Test case when neither channels nor filecolumns supplied by user
    string_time_test, filecolumns_test = lyra._prep_columns(time_input)
    np.testing.assert_array_equal(string_time_test, string_time_expected)
    assert filecolumns_test == ["time"]
    # Test correct exceptions are raised: non-string column names and
    # filecolumns without matching channels are rejected.
    with pytest.raises(TypeError):
        string_time_test, filecolumns_test = lyra._prep_columns(
            time_input, channels_input, ["channel0", 1])
    with pytest.raises(ValueError):
        string_time_test = lyra._prep_columns(time_input,
                                              filecolumns=filecolumns_input)
| 49.139706 | 90 | 0.633049 | import os.path
import datetime
import tempfile
import numpy as np
import pandas
import pytest
import astropy.units as u
from astropy.time import TimeDelta
from sunpy import timeseries
from sunpy.data.test import rootdir
from sunpy.instr import lyra
from sunpy.time import is_time_equal, parse_time
TEST_DATA_PATH = rootdir
TIME = parse_time(np.array([datetime.datetime(2013, 2, 1) + datetime.timedelta(minutes=i)
for i in range(120)]))
CHANNELS = [np.zeros(len(TIME)) + 0.4, np.zeros(len(TIME)) + 0.1]
EMPTY_LYTAF = np.empty((0,), dtype=[("insertion_time", object),
("begin_time", object),
("reference_time", object),
("end_time", object),
("event_type", object),
("event_definition", object)])
LYTAF_TEST = np.append(
EMPTY_LYTAF,
np.array([(parse_time(datetime.datetime.utcfromtimestamp(1371459961)),
parse_time(datetime.datetime.utcfromtimestamp(1359677220)),
parse_time(datetime.datetime.utcfromtimestamp(1359677250)),
parse_time(datetime.datetime.utcfromtimestamp(1359677400)),
"LAR", "Large Angle Rotation.")],
dtype=EMPTY_LYTAF.dtype))
LYTAF_TEST = np.append(
LYTAF_TEST,
np.array([(parse_time(datetime.datetime.utcfromtimestamp(1371460063)),
parse_time(datetime.datetime.utcfromtimestamp(1359681764)),
parse_time(datetime.datetime.utcfromtimestamp(1359682450)),
parse_time(datetime.datetime.utcfromtimestamp(1359683136)),
"UV occ.", "Occultation in the UV spectrum.")],
dtype=LYTAF_TEST.dtype))
@pytest.mark.remote_data
def test_split_series_using_lytaf():
basetime = parse_time('2010-06-13 02:00')
seconds = 3600
dummy_time = [basetime + TimeDelta(s*u.second) for s in range(seconds)]
dummy_data = np.random.random(seconds)
lytaf_tmp = lyra.get_lytaf_events('2010-06-13 02:00', '2010-06-13 06:00',
combine_files=["ppt"])
split = lyra.split_series_using_lytaf(dummy_time, dummy_data, lytaf_tmp)
assert type(split) == list
assert len(split) == 4
assert is_time_equal(split[0]['subtimes'][0], parse_time((2010, 6, 13, 2, 0)))
assert is_time_equal(split[0]['subtimes'][-1], parse_time((2010, 6, 13, 2, 7, 2)))
assert is_time_equal(split[3]['subtimes'][0], parse_time((2010, 6, 13, 2, 59, 42)))
assert is_time_equal(split[3]['subtimes'][-1], parse_time((2010, 6, 13, 2, 59, 58)))
split_no_lytaf = lyra.split_series_using_lytaf(dummy_time,
dummy_data, LYTAF_TEST)
assert type(split_no_lytaf) == list
assert type(split_no_lytaf[0]) == dict
assert not set(split_no_lytaf[0].keys()).symmetric_difference({'subtimes', 'subdata'})
assert split_no_lytaf[0]["subtimes"] == dummy_time
assert split_no_lytaf[0]["subdata"].all() == dummy_data.all()
@pytest.fixture
def lyra_ts():
lyrats = timeseries.TimeSeries(
os.path.join(rootdir, 'lyra_20150101-000000_lev3_std_truncated.fits.gz'),
source='LYRA')
lyrats.data = pandas.DataFrame(index=TIME,
data={"CHANNEL1": CHANNELS[0],
"CHANNEL2": CHANNELS[1],
"CHANNEL3": CHANNELS[0],
"CHANNEL4": CHANNELS[1]})
return lyrats
@pytest.mark.remote_data
def test_remove_lytaf_events_from_timeseries(lyra_ts):
with pytest.raises(AttributeError):
ts_test = lyra.remove_lytaf_events_from_timeseries(
[], force_use_local_lytaf=True)
ts_test, artifact_status_test = \
lyra.remove_lytaf_events_from_timeseries(
lyra_ts, artifacts=["LAR", "Offpoint"], return_artifacts=True,
force_use_local_lytaf=True)
time, channels, artifact_status_expected = lyra._remove_lytaf_events(
lyra_ts.data.index, channels=[np.asanyarray(lyra_ts.data["CHANNEL1"]),
np.asanyarray(lyra_ts.data["CHANNEL2"]),
np.asanyarray(lyra_ts.data["CHANNEL3"]),
np.asanyarray(lyra_ts.data["CHANNEL4"])],
artifacts=["LAR", "Offpoint"], return_artifacts=True,
force_use_local_lytaf=True)
dataframe_expected = pandas.DataFrame(index=time,
data={"CHANNEL1": channels[0],
"CHANNEL2": channels[1],
"CHANNEL3": channels[2],
"CHANNEL4": channels[3]})
pandas.util.testing.assert_frame_equal(ts_test.data, dataframe_expected)
assert artifact_status_test.keys() == artifact_status_expected.keys()
np.testing.assert_array_equal(artifact_status_test["lytaf"],
artifact_status_expected["lytaf"])
np.testing.assert_array_equal(artifact_status_test["removed"],
artifact_status_expected["removed"])
np.testing.assert_array_equal(artifact_status_test["not_removed"],
artifact_status_expected["not_removed"])
assert artifact_status_test["not_found"] == \
artifact_status_expected["not_found"]
ts_test = \
lyra.remove_lytaf_events_from_timeseries(
lyra_ts, artifacts=["LAR", "Offpoint"],
force_use_local_lytaf=True)
pandas.util.testing.assert_frame_equal(ts_test.data, dataframe_expected)
@pytest.fixture()
def local_cache(sunpy_cache):
sunpy_cache = sunpy_cache('sunpy.instr.lyra.cache')
sunpy_cache.add('http://proba2.oma.be/lyra/data/lytaf/annotation_lyra.db',
os.path.join(TEST_DATA_PATH, 'annotation_lyra.db'))
sunpy_cache.add('http://proba2.oma.be/lyra/data/lytaf/annotation_manual.db',
os.path.join(TEST_DATA_PATH, 'annotation_manual.db'))
sunpy_cache.add('http://proba2.oma.be/lyra/data/lytaf/annotation_ppt.db',
os.path.join(TEST_DATA_PATH, 'annotation_ppt.db'))
sunpy_cache.add('http://proba2.oma.be/lyra/data/lytaf/annotation_science.db',
os.path.join(TEST_DATA_PATH, 'annotation_science.db'))
def test_remove_lytaf_events_1(local_cache):
time_test, channels_test, artifacts_status_test = \
lyra._remove_lytaf_events(
TIME, channels=CHANNELS, artifacts=["LAR", "Offpoint"],
return_artifacts=True, force_use_local_lytaf=True)
bad_indices = np.logical_and(TIME >= LYTAF_TEST["begin_time"][0],
TIME <= LYTAF_TEST["end_time"][0])
bad_indices = np.arange(len(TIME))[bad_indices]
time_expected = np.delete(TIME, bad_indices)
channels_expected = [np.delete(CHANNELS[0], bad_indices),
np.delete(CHANNELS[1], bad_indices)]
artifacts_status_expected = {"lytaf": LYTAF_TEST, "removed": LYTAF_TEST[0],
"not_removed": LYTAF_TEST[1],
"not_found": ["Offpoint"]}
assert time_test.all() == time_expected.all()
assert (channels_test[0]).all() == (channels_expected[0]).all()
assert (channels_test[1]).all() == (channels_expected[1]).all()
assert artifacts_status_test.keys() == artifacts_status_expected.keys()
np.testing.assert_array_equal(artifacts_status_test["lytaf"],
artifacts_status_expected["lytaf"])
np.testing.assert_array_equal(artifacts_status_test["removed"],
artifacts_status_expected["removed"])
np.testing.assert_array_equal(artifacts_status_test["not_removed"],
artifacts_status_expected["not_removed"])
assert artifacts_status_test["not_found"] == \
artifacts_status_expected["not_found"]
time_test, artifacts_status_test = \
lyra._remove_lytaf_events(
TIME, artifacts=["LAR", "Offpoint"],
return_artifacts=True, force_use_local_lytaf=True)
assert time_test.all() == time_expected.all()
assert artifacts_status_test.keys() == artifacts_status_expected.keys()
np.testing.assert_array_equal(artifacts_status_test["lytaf"],
artifacts_status_expected["lytaf"])
np.testing.assert_array_equal(artifacts_status_test["removed"],
artifacts_status_expected["removed"])
np.testing.assert_array_equal(artifacts_status_test["not_removed"],
artifacts_status_expected["not_removed"])
assert artifacts_status_test["not_found"] == \
artifacts_status_expected["not_found"]
def test_remove_lytaf_events_2(local_cache):
with pytest.warns(UserWarning, match='None of user supplied artifacts were found.'):
time_test, channels_test, artifacts_status_test = \
lyra._remove_lytaf_events(
TIME, channels=CHANNELS, artifacts="Offpoint",
return_artifacts=True, force_use_local_lytaf=True)
time_expected = TIME
channels_expected = CHANNELS
artifacts_status_expected = {"lytaf": LYTAF_TEST, "removed": EMPTY_LYTAF,
"not_removed": LYTAF_TEST,
"not_found": ["Offpoint"]}
assert np.all(time_test == time_expected)
assert (channels_test[0]).all() == (channels_expected[0]).all()
assert (channels_test[1]).all() == (channels_expected[1]).all()
assert artifacts_status_test.keys() == artifacts_status_expected.keys()
np.testing.assert_array_equal(artifacts_status_test["lytaf"],
artifacts_status_expected["lytaf"])
np.testing.assert_array_equal(artifacts_status_test["removed"],
artifacts_status_expected["removed"])
np.testing.assert_array_equal(artifacts_status_test["not_removed"],
artifacts_status_expected["not_removed"])
assert artifacts_status_test["not_found"] == \
artifacts_status_expected["not_found"]
with pytest.warns(UserWarning, match='None of user supplied artifacts were found.'):
time_test, channels_test = lyra._remove_lytaf_events(
TIME, channels=CHANNELS, artifacts=["Offpoint"], force_use_local_lytaf=True)
assert np.all(time_test == time_expected)
assert (channels_test[0]).all() == (channels_expected[0]).all()
assert (channels_test[1]).all() == (channels_expected[1]).all()
with pytest.warns(UserWarning, match='None of user supplied artifacts were found.'):
time_test = lyra._remove_lytaf_events(
TIME, artifacts=["Offpoint"], force_use_local_lytaf=True)
assert np.all(time_test == time_expected)
def test_remove_lytaf_events_3(local_cache):
with pytest.raises(TypeError):
lyra._remove_lytaf_events(TIME, channels=6, artifacts=["LAR"],
force_use_local_lytaf=True)
with pytest.raises(ValueError):
lyra._remove_lytaf_events(TIME,
force_use_local_lytaf=True)
with pytest.raises(TypeError):
lyra._remove_lytaf_events(TIME, artifacts=[6],
force_use_local_lytaf=True)
with pytest.raises(ValueError):
lyra._remove_lytaf_events(TIME,
artifacts=["LAR", "incorrect artifact type"],
force_use_local_lytaf=True)
def test_get_lytaf_events(local_cache):
lytaf_test = lyra.get_lytaf_events("2008-01-01", "2014-01-01",
force_use_local_lytaf=True)
insertion_time = [datetime.datetime.utcfromtimestamp(1371459961),
datetime.datetime.utcfromtimestamp(1371460063),
datetime.datetime.utcfromtimestamp(1371460411),
datetime.datetime.utcfromtimestamp(1371460493),
datetime.datetime.utcfromtimestamp(1371460403),
datetime.datetime.utcfromtimestamp(1371470988),
datetime.datetime.utcfromtimestamp(1371211791),
datetime.datetime.utcfromtimestamp(1371212303)]
begin_time = [datetime.datetime.utcfromtimestamp(1359677220),
datetime.datetime.utcfromtimestamp(1359681764),
datetime.datetime.utcfromtimestamp(1360748513),
datetime.datetime.utcfromtimestamp(1361115900),
datetime.datetime.utcfromtimestamp(1361980964),
datetime.datetime.utcfromtimestamp(1368581100),
datetime.datetime.utcfromtimestamp(1371032084),
datetime.datetime.utcfromtimestamp(1371158167)]
reference_time = [datetime.datetime.utcfromtimestamp(1359677250),
datetime.datetime.utcfromtimestamp(1359682450),
datetime.datetime.utcfromtimestamp(1360751528),
datetime.datetime.utcfromtimestamp(1361116200),
datetime.datetime.utcfromtimestamp(1361983979),
datetime.datetime.utcfromtimestamp(1368582480),
datetime.datetime.utcfromtimestamp(1371045475),
datetime.datetime.utcfromtimestamp(1371162600)]
end_time = [datetime.datetime.utcfromtimestamp(1359677400),
datetime.datetime.utcfromtimestamp(1359683136),
datetime.datetime.utcfromtimestamp(1360754543),
datetime.datetime.utcfromtimestamp(1361116320),
datetime.datetime.utcfromtimestamp(1361986994),
datetime.datetime.utcfromtimestamp(1368583080),
datetime.datetime.utcfromtimestamp(1371050025),
datetime.datetime.utcfromtimestamp(1371167100)]
event_type = ["LAR", "UV occ.", "Vis LED on", "M Flare", "UV LED on",
"X Flare", "Off-limb event", "Unexplained feature"]
event_description = ["Large Angle Rotation.",
"Occultation in the UV spectrum.",
"Visual LED is turned on.",
"M class solar flare.",
"UV LED is turned on.",
"X class solar flare.",
"Off-limb event in SWAP.",
"Unexplained feature."]
lytaf_expected = np.empty((8,), dtype=[("insertion_time", object),
("begin_time", object),
("reference_time", object),
("end_time", object),
("event_type", object),
("event_definition", object)])
lytaf_expected["insertion_time"] = insertion_time
lytaf_expected["begin_time"] = begin_time
lytaf_expected["reference_time"] = reference_time
lytaf_expected["end_time"] = end_time
lytaf_expected["event_type"] = event_type
lytaf_expected["event_definition"] = event_description
np.testing.assert_array_equal(lytaf_test, lytaf_expected)
with pytest.raises(ValueError):
lytaf_test = lyra.get_lytaf_events("2008-01-01", "2014-01-01",
combine_files=["gigo"],
force_use_local_lytaf=True)
def test_get_lytaf_event_types(local_cache):
lyra.get_lytaf_event_types()
def test_lytaf_event2string():
out_test = lyra._lytaf_event2string(list(range(12)))
assert out_test == ['LAR', 'N/A', 'UV occult.', 'Vis. occult.', 'Offpoint',
'SAA', 'Auroral zone', 'Moon in LYRA', 'Moon in SWAP',
'Venus in LYRA', 'Venus in SWAP']
out_test_single = lyra._lytaf_event2string(1)
assert out_test_single == ['LAR']
def test_prep_columns():
time_input = TIME[0:2]
time_input.precision = 9
channels_input = [CHANNELS[0][0:2], CHANNELS[1][0:2]]
filecolumns_input = ["time", "channel0", "channel1"]
string_time_test, filecolumns_test = lyra._prep_columns(
time_input, channels_input, filecolumns_input)
string_time_expected = np.array(time_input.isot)
filecolumns_expected = ["time", "channel0", "channel1"]
np.testing.assert_array_equal(string_time_test, string_time_expected)
assert filecolumns_test == filecolumns_expected
string_time_test, filecolumns_test = lyra._prep_columns(time_input,
channels_input)
np.testing.assert_array_equal(string_time_test, string_time_expected)
assert filecolumns_test == filecolumns_expected
string_time_test, filecolumns_test = lyra._prep_columns(time_input)
np.testing.assert_array_equal(string_time_test, string_time_expected)
assert filecolumns_test == ["time"]
with pytest.raises(TypeError):
string_time_test, filecolumns_test = lyra._prep_columns(
time_input, channels_input, ["channel0", 1])
with pytest.raises(ValueError):
string_time_test = lyra._prep_columns(time_input,
filecolumns=filecolumns_input)
| true | true |
f7f6f7720b316ca02583ff5e36719b1bb5fcee5f | 2,940 | py | Python | src/utils/resize.py | BolachasDaAvo/PyTorch-StudioGAN | 21d0f1d976d0c5c3d240295e9efa83c105e40ac7 | [
"MIT"
] | null | null | null | src/utils/resize.py | BolachasDaAvo/PyTorch-StudioGAN | 21d0f1d976d0c5c3d240295e9efa83c105e40ac7 | [
"MIT"
] | null | null | null | src/utils/resize.py | BolachasDaAvo/PyTorch-StudioGAN | 21d0f1d976d0c5c3d240295e9efa83c105e40ac7 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2021 Gaurav Parmar
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
### On Buggy Resizing Libraries and Surprising Subtleties in FID Calculation
### (https://www.cs.cmu.edu/~clean-fid/)
### Gaurav Parmar, Richard Zhang, Jun-Yan Zhu
### https://github.com/GaParmar/clean-fid/blob/main/cleanfid/resize.py
import os
from PIL import Image
import torch
import torch.nn.functional as F
import numpy as np
# Maps a backend library name to its resampling-filter constants.
# Only the "PIL" backend needs this table; the PyTorch backend passes the
# filter string straight to F.interpolate as its `mode` argument.
dict_name_to_filter = {
    "PIL": {
        "bicubic": Image.BICUBIC,
        "bilinear": Image.BILINEAR,
        "nearest": Image.NEAREST,
        "lanczos": Image.LANCZOS,
        "box": Image.BOX
    }
}
def build_resizer(mode, size):
    """Return a square resize function for the given FID evaluation mode.

    "clean" uses PIL bilinear resizing (the clean-fid recommendation);
    "legacy" reproduces the historical PyTorch-based resizing.  Any other
    mode raises ValueError.
    """
    if mode == "clean":
        return make_resizer("PIL", "bilinear", (size, size))
    if mode == "legacy":
        return make_resizer("PyTorch", "bilinear", (size, size))
    raise ValueError(f"Invalid mode {mode} specified")
def make_resizer(library, filter, output_size):
    """Return a function that resizes an HxWx3 float image to *output_size*.

    Parameters
    ----------
    library : str
        "PIL" or "PyTorch" — backend used for the interpolation.
    filter : str
        For "PIL", a key of ``dict_name_to_filter["PIL"]``; for "PyTorch",
        a mode accepted by ``F.interpolate`` (e.g. "bilinear").
    output_size : tuple
        Target size.  NOTE(review): the PIL branch passes the same tuple to
        ``Image.resize`` (which expects (width, height)) and to ``reshape``
        (numpy order is (height, width)); only square sizes are unambiguous
        here — confirm before using non-square sizes.

    Raises
    ------
    NotImplementedError
        For any other *library* value.
    """
    if library == "PIL":
        s1, s2 = output_size

        def resize_single_channel(x_np):
            # PIL mode 'F' = single-channel 32-bit float image.
            img = Image.fromarray(x_np.astype(np.float32), mode='F')
            img = img.resize(output_size, resample=dict_name_to_filter[library][filter])
            return np.asarray(img).reshape(s1, s2, 1)

        def func(x):
            # Resize each of the 3 channels independently, then restack.
            x = [resize_single_channel(x[:, :, idx]) for idx in range(3)]
            x = np.concatenate(x, axis=2).astype(np.float32)
            return x
    elif library == "PyTorch":
        import warnings
        # ignore the numpy warnings (note: this silences warnings globally)
        warnings.filterwarnings("ignore")

        def func(x):
            # HWC -> 1CHW tensor, interpolate, back to HWC clipped to [0, 255].
            x = torch.Tensor(x.transpose((2, 0, 1)))[None, ...]
            x = F.interpolate(x, size=output_size, mode=filter, align_corners=False)
            x = x[0, ...].cpu().data.numpy().transpose((1, 2, 0)).clip(0, 255)
            return x
    else:
        # Fix: message previously read "is not include".
        raise NotImplementedError('library [%s] is not included' % library)
    return func
| 35.853659 | 88 | 0.679252 |
egacy":
return make_resizer("PyTorch", "bilinear", (size, size))
else:
raise ValueError(f"Invalid mode {mode} specified")
def make_resizer(library, filter, output_size):
if library == "PIL":
s1, s2 = output_size
def resize_single_channel(x_np):
img = Image.fromarray(x_np.astype(np.float32), mode='F')
img = img.resize(output_size, resample=dict_name_to_filter[library][filter])
return np.asarray(img).reshape(s1, s2, 1)
def func(x):
x = [resize_single_channel(x[:, :, idx]) for idx in range(3)]
x = np.concatenate(x, axis=2).astype(np.float32)
return x
elif library == "PyTorch":
import warnings
warnings.filterwarnings("ignore")
def func(x):
x = torch.Tensor(x.transpose((2, 0, 1)))[None, ...]
x = F.interpolate(x, size=output_size, mode=filter, align_corners=False)
x = x[0, ...].cpu().data.numpy().transpose((1, 2, 0)).clip(0, 255)
return x
else:
raise NotImplementedError('library [%s] is not include' % library)
return func
| true | true |
f7f6f7bd485f7f9e2bbbdff540a933edbb81011a | 598 | py | Python | src/Chebyshev_Nodes/examples.py | Roseck16/Interpolation | 20513e02241824e37c9eab6642fc2f3139dd8e00 | [
"MIT"
] | 1 | 2021-07-14T03:33:57.000Z | 2021-07-14T03:33:57.000Z | src/Chebyshev_Nodes/examples.py | Roseck16/Interpolation | 20513e02241824e37c9eab6642fc2f3139dd8e00 | [
"MIT"
] | null | null | null | src/Chebyshev_Nodes/examples.py | Roseck16/Interpolation | 20513e02241824e37c9eab6642fc2f3139dd8e00 | [
"MIT"
] | null | null | null | #%%
from Chebyshev_Nodes import chebyshev_nodes, lagrange, graf, np
# %%
WID = 15
HEI = 10
#%% Example 1
# Evenly spaced nodes
f = lambda a: abs(a) + a/2 - a**2
#%%
x = np.linspace(-1, 1, 9)
y = f(x)
p = np.linspace(np.min(x), np.max(x),100)
#%%
pol = lagrange(x,y, p)
#%%
graf(p, f, pol, WID, HEI, save_path="../../images/Chebyshev nodes/Example1_chebyshev1.png")
#%%
# Chebyshev nodes
x = chebyshev_nodes(-1, 1, 9)
y = f(x)
p = np.linspace(np.min(x), np.max(x),100)
# %%
pol = lagrange(x,y, p)
# %%
graf(p, f, pol, WID, HEI, save_path="../../images/Chebyshev nodes/Example1_chebyshev2.png")
# %%
| 22.148148 | 91 | 0.618729 |
from Chebyshev_Nodes import chebyshev_nodes, lagrange, graf, np
WID = 15
HEI = 10
f = lambda a: abs(a) + a/2 - a**2
x = np.linspace(-1, 1, 9)
y = f(x)
p = np.linspace(np.min(x), np.max(x),100)
pol = lagrange(x,y, p)
graf(p, f, pol, WID, HEI, save_path="../../images/Chebyshev nodes/Example1_chebyshev1.png")
x = chebyshev_nodes(-1, 1, 9)
y = f(x)
p = np.linspace(np.min(x), np.max(x),100)
pol = lagrange(x,y, p)
graf(p, f, pol, WID, HEI, save_path="../../images/Chebyshev nodes/Example1_chebyshev2.png")
| true | true |
f7f6f93d9c5a52aba801c6cc168a2dbacaa2e2ff | 570 | py | Python | api_sync/v1/models/jobs_alternate_titles.py | robinsonkwame/skills-airflow | 085f9eede4c174b37766856ea437cdd39e06b22c | [
"Apache-2.0"
] | null | null | null | api_sync/v1/models/jobs_alternate_titles.py | robinsonkwame/skills-airflow | 085f9eede4c174b37766856ea437cdd39e06b22c | [
"Apache-2.0"
] | null | null | null | api_sync/v1/models/jobs_alternate_titles.py | robinsonkwame/skills-airflow | 085f9eede4c174b37766856ea437cdd39e06b22c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from . import Base
import sqlalchemy as db
class JobAlternateTitle(Base):
    """ORM model for an alternate title attached to a master job record."""

    __tablename__ = 'jobs_alternate_titles'
    # Primary key of the alternate-title row.
    uuid = db.Column(db.String, primary_key=True)
    # Human-readable alternate title text.
    title = db.Column(db.String)
    # NLP-normalized form of the title (presumably produced upstream —
    # TODO confirm the exact normalization used).
    nlp_a = db.Column(db.String)
    # Foreign key to the owning row in jobs_master.
    job_uuid = db.Column(db.String, db.ForeignKey('jobs_master.uuid'))

    def __init__(self, uuid, title, nlp_a, job_uuid):
        self.uuid = uuid
        self.title = title
        self.nlp_a = nlp_a
        self.job_uuid = job_uuid

    def __repr__(self):
        return '<uuid {}>'.format(self.uuid)
| 24.782609 | 70 | 0.645614 |
from . import Base
import sqlalchemy as db
class JobAlternateTitle(Base):
__tablename__ = 'jobs_alternate_titles'
uuid = db.Column(db.String, primary_key=True)
title = db.Column(db.String)
nlp_a = db.Column(db.String)
job_uuid = db.Column(db.String, db.ForeignKey('jobs_master.uuid'))
def __init__(self, uuid, title, nlp_a, job_uuid):
self.uuid = uuid
self.title = title
self.nlp_a = nlp_a
self.job_uuid = job_uuid
def __repr__(self):
return '<uuid {}>'.format(self.uuid)
| true | true |
f7f6f94b13c230923629b7b9ecf8e3b0a7608dc7 | 20,820 | py | Python | driver/ixsystems/common.py | mlipscombe/cinder-driver-truenas | 34b29024da05f154314086ad3e9a4ab13de67c0a | [
"BSD-3-Clause"
] | null | null | null | driver/ixsystems/common.py | mlipscombe/cinder-driver-truenas | 34b29024da05f154314086ad3e9a4ab13de67c0a | [
"BSD-3-Clause"
] | null | null | null | driver/ixsystems/common.py | mlipscombe/cinder-driver-truenas | 34b29024da05f154314086ad3e9a4ab13de67c0a | [
"BSD-3-Clause"
] | null | null | null | #vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016 iXsystems
from oslo_log import log as logging
from cinder import exception
from cinder.volume.drivers.ixsystems.freenasapi import FreeNASApiError
from cinder.volume.drivers.ixsystems.freenasapi import FreeNASServer
from oslo_config import cfg
import os
import urllib.parse
from cinder.volume.drivers.ixsystems import utils as ix_utils
import simplejson as json
LOG = logging.getLogger(__name__)
class TrueNASCommon(object):
    """Shared helper for the TrueNAS/FreeNAS Cinder driver.

    Wraps the appliance's REST API (via FreeNASServer) for zvol, iSCSI
    target/extent and snapshot management.
    """

    VERSION = "2.0.0"
    IGROUP_PREFIX = 'openstack-'
    # Config options that _check_flags() requires to be present.
    required_flags = ['ixsystems_transport_type', 'ixsystems_server_hostname',
                      'ixsystems_server_port', 'ixsystems_server_iscsi_port',
                      'ixsystems_volume_backend_name', 'ixsystems_vendor_name', 'ixsystems_storage_protocol',
                      'ixsystems_datastore_pool', 'ixsystems_dataset_path', 'ixsystems_iqn_prefix', ]

    def __init__(self, configuration=None):
        # oslo.config-style object carrying all ixsystems_* options.
        self.configuration = configuration
        self.backend_name = self.configuration.ixsystems_volume_backend_name
        self.vendor_name = self.configuration.ixsystems_vendor_name
        self.storage_protocol = self.configuration.ixsystems_storage_protocol
        # Backend stats cache; populated elsewhere by the stats path.
        self.stats = {}
def _create_handle(self, **kwargs):
"""Instantiate handle (client) for API communication with
iXsystems FREENAS server
"""
host_system = kwargs['hostname']
LOG.debug('Using iXsystems FREENAS server: %s', host_system)
auth_style = FreeNASServer.STYLE_LOGIN_PASSWORD
if kwargs['api_key']:
auth_style = FreeNASServer.STYLE_API_KEY
self.handle = FreeNASServer(host=host_system,
port=kwargs['port'],
username=kwargs['login'],
password=kwargs['password'],
api_key=kwargs['api_key'],
api_version=kwargs['api_version'],
transport_type=kwargs['transport_type'],
style=auth_style)
if not self.handle:
raise FreeNASApiError("Failed to create handle for FREENAS server")
def _check_flags(self):
"""Check if any required iXsystems FREENAS configuration flag is missing."""
for flag in self.required_flags:
if not getattr(self.configuration, flag, None):
print("missing flag :", flag)
raise exception.CinderException(_('%s is not set') % flag)
if not getattr(self.configuration, 'ixsystems_api_key'):
for flag in ['ixsystems_login', 'ixsystems_password']:
if not getattr(self.configuration, flag, None):
print("missing flag :", flag)
raise exception.CinderException(_('%s is not set and ixsystems_api_key is not set') % flag)
def _do_custom_setup(self):
"""Setup iXsystems FREENAS driver."""
self._create_handle(hostname=self.configuration.ixsystems_server_hostname,
port=self.configuration.ixsystems_server_port,
login=self.configuration.ixsystems_login,
password=self.configuration.ixsystems_password,
api_key=self.configuration.ixsystems_api_key,
api_version=self.configuration.ixsystems_api_version,
transport_type=
self.configuration.ixsystems_transport_type)
if not self.handle:
raise FreeNASApiError("Failed to create handle for FREENAS server")
def _create_volume(self, name, size):
"""Creates a volume of specified size
"""
params = {}
params['name'] = self.configuration.ixsystems_dataset_path + '/' + name
params['type'] = 'VOLUME'
params['volsize'] = ix_utils.get_bytes_from_gb(size)
jparams = json.dumps(params)
jparams = jparams.encode('utf8')
request_urn = ('%s') % (FreeNASServer.REST_API_VOLUME)
LOG.debug('_create_volume params : %s', params)
LOG.debug('_create_volume urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,
request_urn, jparams)
LOG.debug('_create_volume response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while creating volume: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
def _target_to_extent(self, target_id, extent_id):
"""Create relationship between iscsi target to iscsi extent"""
LOG.debug('_target_to_extent target id : %s extend id : %s', target_id, extent_id)
request_urn = ('%s/') % (FreeNASServer.REST_API_TARGET_TO_EXTENT)
params = {}
params['target'] = target_id
params['extent'] = extent_id
# params['iscsi_lunid'] = 0 # no longer needed with API v2.0
jparams = json.dumps(params)
jparams = jparams.encode('utf8')
LOG.debug('_create_target_to_extent params : %s', json.dumps(params))
tgt_ext = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND, request_urn, jparams)
LOG.debug('_target_to_extent response : %s', json.dumps(tgt_ext))
if tgt_ext['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while creating relation between target and extent: %s' % tgt_ext['response'])
raise FreeNASApiError('Unexpected error', msg)
def _create_target(self, name):
targetgroup_params = [{}] # v2.0 API - targetgroup can now be added when target is created
targetgroup_params[0]['portal'] = int(self.configuration.ixsystems_portal_id) #TODO: Decide to create portal or not
targetgroup_params[0]['initiator'] = int(self.configuration.ixsystems_initiator_id) #TODO: Decide to create initiator or not
tgt_params = {}
tgt_params['name'] = name
tgt_params['groups'] = targetgroup_params
jtgt_params = json.dumps(tgt_params)
jtgt_params = jtgt_params.encode('utf8')
LOG.debug('_create_target params : %s', json.dumps(tgt_params))
request_urn = ('%s/') % (FreeNASServer.REST_API_TARGET)
target = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,
request_urn, jtgt_params)
LOG.debug('_create_target response : %s', json.dumps(target))
if target['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while creating iscsi target: %s' % target['response'])
raise FreeNASApiError('Unexpected error', msg)
target_id = json.loads(target['response'])['id']
# self._create_target_group(target_id)
return target_id
def _create_extent(self, name, volume_name,from_snapshot=False):
ext_params = {}
if from_snapshot:
ext_params['Source'] = volume_name
else:
ext_params['type'] = 'DISK'
ext_params['name'] = name
ext_params['disk'] = ('zvol/%s/%s') % (self.configuration.ixsystems_dataset_path, volume_name)
jext_params = json.dumps(ext_params)
LOG.debug('_create_extent params : %s', jext_params)
jext_params = jext_params.encode('utf8')
request_urn = ('%s/') % (FreeNASServer.REST_API_EXTENT)
extent = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,
request_urn, jext_params)
LOG.debug('_create_extent response : %s', json.dumps(extent))
if extent['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while creating iscsi target extent: %s' % extent['response'])
raise FreeNASApiError('Unexpected error', msg)
return json.loads(extent['response'])['id']
def get_iscsitarget_id(self, name):
"""get iscsi target id from target name
"""
request_urn = ('%s') % (FreeNASServer.REST_API_TARGET)
LOG.debug('get_iscsitarget_id request_urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.SELECT_COMMAND, request_urn, None)
LOG.debug('get_iscsitarget_id response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while deleting iscsi target: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
uresp = ret['response']
resp = json.loads(uresp.decode('utf8'))
try:
return (item for item in resp if item['name'] == name).__next__()['id']
except StopIteration:
return 0
def get_tgt_ext_id(self, name):
"""Get target-extent mapping id from target name.
"""
request_urn = ('%s') % (FreeNASServer.REST_API_TARGET_TO_EXTENT)
LOG.debug('get_tgt_ext_id urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.SELECT_COMMAND, request_urn, None)
LOG.debug('get_tgt_ext_id response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while deleting iscsi target: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
uresp = ret['response']
resp = json.loads(uresp.decode('utf8'))
try:
return (item for item in resp if item['target'] == name).__next__()['id']
except StopIteration:
return 0
def get_extent_id(self, name):
"""Get Extent ID from Extent Name
"""
request_urn = ('%s') % (FreeNASServer.REST_API_EXTENT)
LOG.debug('get_extent_id urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.SELECT_COMMAND, request_urn, None)
LOG.debug('get_extent_id response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while getting extent id: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
uresp = ret['response']
resp = json.loads(uresp.decode('utf8'))
try:
return (item for item in resp if item['name'] == name).__next__()['id']
except StopIteration:
return 0
    def _create_iscsitarget(self, name, volume_name):
        """Creates a iSCSI target on specified volume OR snapshot
           TODO : Skipped part for snapshot, review once iscsi target working
           TODO: Add cleanup if any operation fails
        """
        # Sequence: create the target, create the extent backed by the zvol,
        # then create the mapping exposing the extent through the target.
        #Create iscsi target for specified volume
        tgt_id = self._create_target(name)
        #Create extent for iscsi target for specified volume
        ext_id = self._create_extent(name, volume_name)
        #Create target to extent mapping for specified volume
        self._target_to_extent(tgt_id, ext_id)
    def delete_target_to_extent(self, tgt_ext_id):
        # Intentional no-op — presumably the target->extent mapping is
        # removed automatically when the target/extent themselves are
        # deleted under API v2.0.  NOTE(review): confirm against the
        # TrueNAS API docs.
        pass
def delete_target(self, target_id):
if target_id:
request_urn = ('%s/id/%s') % (FreeNASServer.REST_API_TARGET, target_id)
LOG.debug('delete_target urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.DELETE_COMMAND,
request_urn, None)
LOG.debug('delete_target response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while deleting iscsi target: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
def delete_extent(self, extent_id):
if extent_id:
request_urn = ('%s/id/%s') % (FreeNASServer.REST_API_EXTENT, extent_id)
LOG.debug('delete_extent urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.DELETE_COMMAND,
request_urn, None)
LOG.debug('delete_extent response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while deleting iscsi extent: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
    def _delete_iscsitarget(self, name):
        """Deletes specified iSCSI target
        """
        # Look up all three ids first, then tear down mapping, target and
        # extent; the delete helpers each tolerate a falsy (missing) id.
        tgt_ext_id = self.get_tgt_ext_id(name)
        target_id = self.get_iscsitarget_id(name)
        extent_id = self.get_extent_id(name)
        self.delete_target_to_extent(tgt_ext_id)
        self.delete_target(target_id)
        self.delete_extent(extent_id)
def _dependent_clone(self, name):
""" returns the fullname of any snapshot used to create volume 'name' """
request_urn = ('%s/id/%s%s') % (FreeNASServer.REST_API_VOLUME,
urllib.parse.quote_plus(self.configuration.ixsystems_dataset_path + '/'), name)
LOG.debug('_dependent_clones urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.SELECT_COMMAND, request_urn, None)
LOG.debug('_dependent_clones response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while getting volume: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
uresp = ret['response']
resp = json.loads(uresp.decode('utf8'))
return resp['origin']['value']
def _delete_volume(self, name):
"""Deletes specified volume
"""
request_urn = ('%s/id/%s%s') % (FreeNASServer.REST_API_VOLUME,
urllib.parse.quote_plus(self.configuration.ixsystems_dataset_path + '/'), name)
LOG.debug('_delete_volume urn : %s', request_urn)
clone = self._dependent_clone(name) # add check for dependent clone, if exists will delete
ret = self.handle.invoke_command(FreeNASServer.DELETE_COMMAND,
request_urn, None)
LOG.debug('_delete_volume response : %s', json.dumps(ret))
if clone: # delete the cloned-from snapshot. Must check before deleting volume, but delete snapshot after
fullvolume, snapname = clone.split('@')
temp, snapvol = fullvolume.rsplit('/', 1)
self._delete_snapshot(snapname, snapvol)
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while deleting volume: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
def _create_snapshot(self, name, volume_name):
"""Creates a snapshot of specified volume."""
args = {}
args['dataset'] = ('%s/%s') % (self.configuration.ixsystems_dataset_path, volume_name)
args['name'] = name
jargs = json.dumps(args)
jargs = jargs.encode("utf8")
request_urn = ('%s') % (FreeNASServer.REST_API_SNAPSHOT)
LOG.debug('_create_snapshot urn : %s', request_urn)
try:
ret = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,
request_urn, jargs)
LOG.debug('_create_snapshot response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while creating snapshot: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
except Exception as e:
raise FreeNASApiError('Unexpected error', e)
def _delete_snapshot(self, name, volume_name):
"""Delets a snapshot of specified volume."""
LOG.debug('_delete_snapshot, deleting name: %s from volume: %s', name, volume_name)
request_urn = ('%s/id/%s@%s') % (FreeNASServer.REST_API_SNAPSHOT,
urllib.parse.quote_plus(self.configuration.ixsystems_dataset_path + '/' + volume_name), name)
LOG.debug('_delete_snapshot urn : %s', request_urn)
try:
ret = self.handle.invoke_command(FreeNASServer.DELETE_COMMAND,
request_urn, None)
LOG.debug('_delete_snapshot response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while deleting snapshot: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
except Exception as e:
raise FreeNASApiError('Unexpected error', e)
def _create_volume_from_snapshot(self, name, snapshot_name, snap_zvol_name):
"""creates a volume from a snapshot"""
args = {}
args['snapshot'] = ('%s/%s@%s') % (self.configuration.ixsystems_dataset_path, snap_zvol_name, snapshot_name)
args['dataset_dst'] = ('%s/%s') % (self.configuration.ixsystems_dataset_path, name)
jargs = json.dumps(args)
jargs = jargs.encode("utf8")
request_urn = ('%s/%s') % (FreeNASServer.REST_API_SNAPSHOT, FreeNASServer.CLONE)
LOG.debug('_create_volume_from_snapshot urn : %s', request_urn)
try:
ret = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,
request_urn, jargs)
LOG.debug('_create_volume_from_snapshot response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while creating snapshot: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
except Exception as e:
raise FreeNASApiError('Unexpected error', e)
def _update_volume_stats(self):
"""Retrieve stats info from volume group
REST API: $ GET /pools/mypool "size":95,"allocated":85,
"""
request_urn = ('%s/id/%s') % (FreeNASServer.REST_API_VOLUME,
urllib.parse.quote_plus(self.configuration.ixsystems_dataset_path))
LOG.debug('_update_volume_stats request_urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.SELECT_COMMAND,
request_urn, None)
LOG.debug("_update_volume_stats response : %s", json.dumps(ret))
data = {}
data["volume_backend_name"] = self.backend_name
data["vendor_name"] = self.vendor_name
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.storage_protocol
data['total_capacity_gb'] = ix_utils.get_size_in_gb(json.loads(ret['response'])['available']['parsed'] \
+ json.loads(ret['response'])['used']['parsed'])
data['free_capacity_gb'] = ix_utils.get_size_in_gb(json.loads(ret['response'])['available']['parsed'])
data['reserved_percentage'] = \
self.configuration.ixsystems_reserved_percentage
data['reserved_percentage'] = 0
data['QoS_support'] = False
self.stats = data
return self.stats
def _create_cloned_volume_to_snapshot_map(self, volume_name, snapshot):
""" maintain a mapping between cloned volume and tempary snapshot"""
map_file = os.path.join(CONF.volumes_dir, volume_name)
jparams = json.dumps(snapshot)
try:
fd = open(map_file, 'w+')
fd.write(jparams)
fd.close()
except Exception as e:
LOG.error(_('_create_halo_volume_name_map: %s') % e)
def _extend_volume(self, name, new_size):
"""Extend an existing volumes size."""
LOG.debug('_extend__volume name: %s', name)
params = {}
params['volsize'] = ix_utils.get_bytes_from_gb(new_size)
jparams = json.dumps(params)
jparams = jparams.encode('utf8')
request_urn = ('%s/id/%s') % (FreeNASServer.REST_API_VOLUME,
urllib.parse.quote_plus(self.configuration.ixsystems_dataset_path + '/' + name))
ret = self.handle.invoke_command(FreeNASServer.UPDATE_COMMAND,
request_urn, jparams)
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while extending volume: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
def _create_export(self, volume_name):
freenas_volume = ix_utils.generate_freenas_volume_name(volume_name, self.configuration.ixsystems_iqn_prefix)
if freenas_volume is None:
LOG.error(_('Error in exporting FREENAS volume!'))
handle = None
else:
handle = "%s:%s,%s %s" % \
(self.configuration.ixsystems_server_hostname,
self.configuration.ixsystems_server_iscsi_port,
freenas_volume['target'],
freenas_volume['iqn'])
LOG.debug('provider_location: %s', handle)
return handle
| 48.758782 | 132 | 0.617627 |
from oslo_log import log as logging
from cinder import exception
from cinder.volume.drivers.ixsystems.freenasapi import FreeNASApiError
from cinder.volume.drivers.ixsystems.freenasapi import FreeNASServer
from oslo_config import cfg
import os
import urllib.parse
from cinder.volume.drivers.ixsystems import utils as ix_utils
import simplejson as json
LOG = logging.getLogger(__name__)
class TrueNASCommon(object):
VERSION = "2.0.0"
IGROUP_PREFIX = 'openstack-'
required_flags = ['ixsystems_transport_type', 'ixsystems_server_hostname',
'ixsystems_server_port', 'ixsystems_server_iscsi_port',
'ixsystems_volume_backend_name', 'ixsystems_vendor_name', 'ixsystems_storage_protocol',
'ixsystems_datastore_pool', 'ixsystems_dataset_path', 'ixsystems_iqn_prefix', ]
def __init__(self, configuration=None):
self.configuration = configuration
self.backend_name = self.configuration.ixsystems_volume_backend_name
self.vendor_name = self.configuration.ixsystems_vendor_name
self.storage_protocol = self.configuration.ixsystems_storage_protocol
self.stats = {}
def _create_handle(self, **kwargs):
host_system = kwargs['hostname']
LOG.debug('Using iXsystems FREENAS server: %s', host_system)
auth_style = FreeNASServer.STYLE_LOGIN_PASSWORD
if kwargs['api_key']:
auth_style = FreeNASServer.STYLE_API_KEY
self.handle = FreeNASServer(host=host_system,
port=kwargs['port'],
username=kwargs['login'],
password=kwargs['password'],
api_key=kwargs['api_key'],
api_version=kwargs['api_version'],
transport_type=kwargs['transport_type'],
style=auth_style)
if not self.handle:
raise FreeNASApiError("Failed to create handle for FREENAS server")
def _check_flags(self):
for flag in self.required_flags:
if not getattr(self.configuration, flag, None):
print("missing flag :", flag)
raise exception.CinderException(_('%s is not set') % flag)
if not getattr(self.configuration, 'ixsystems_api_key'):
for flag in ['ixsystems_login', 'ixsystems_password']:
if not getattr(self.configuration, flag, None):
print("missing flag :", flag)
raise exception.CinderException(_('%s is not set and ixsystems_api_key is not set') % flag)
def _do_custom_setup(self):
self._create_handle(hostname=self.configuration.ixsystems_server_hostname,
port=self.configuration.ixsystems_server_port,
login=self.configuration.ixsystems_login,
password=self.configuration.ixsystems_password,
api_key=self.configuration.ixsystems_api_key,
api_version=self.configuration.ixsystems_api_version,
transport_type=
self.configuration.ixsystems_transport_type)
if not self.handle:
raise FreeNASApiError("Failed to create handle for FREENAS server")
def _create_volume(self, name, size):
params = {}
params['name'] = self.configuration.ixsystems_dataset_path + '/' + name
params['type'] = 'VOLUME'
params['volsize'] = ix_utils.get_bytes_from_gb(size)
jparams = json.dumps(params)
jparams = jparams.encode('utf8')
request_urn = ('%s') % (FreeNASServer.REST_API_VOLUME)
LOG.debug('_create_volume params : %s', params)
LOG.debug('_create_volume urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,
request_urn, jparams)
LOG.debug('_create_volume response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while creating volume: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
def _target_to_extent(self, target_id, extent_id):
LOG.debug('_target_to_extent target id : %s extend id : %s', target_id, extent_id)
request_urn = ('%s/') % (FreeNASServer.REST_API_TARGET_TO_EXTENT)
params = {}
params['target'] = target_id
params['extent'] = extent_id
rams)
jparams = jparams.encode('utf8')
LOG.debug('_create_target_to_extent params : %s', json.dumps(params))
tgt_ext = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND, request_urn, jparams)
LOG.debug('_target_to_extent response : %s', json.dumps(tgt_ext))
if tgt_ext['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while creating relation between target and extent: %s' % tgt_ext['response'])
raise FreeNASApiError('Unexpected error', msg)
def _create_target(self, name):
targetgroup_params = [{}]
targetgroup_params[0]['portal'] = int(self.configuration.ixsystems_portal_id)
targetgroup_params[0]['initiator'] = int(self.configuration.ixsystems_initiator_id)
tgt_params = {}
tgt_params['name'] = name
tgt_params['groups'] = targetgroup_params
jtgt_params = json.dumps(tgt_params)
jtgt_params = jtgt_params.encode('utf8')
LOG.debug('_create_target params : %s', json.dumps(tgt_params))
request_urn = ('%s/') % (FreeNASServer.REST_API_TARGET)
target = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,
request_urn, jtgt_params)
LOG.debug('_create_target response : %s', json.dumps(target))
if target['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while creating iscsi target: %s' % target['response'])
raise FreeNASApiError('Unexpected error', msg)
target_id = json.loads(target['response'])['id']
return target_id
def _create_extent(self, name, volume_name,from_snapshot=False):
ext_params = {}
if from_snapshot:
ext_params['Source'] = volume_name
else:
ext_params['type'] = 'DISK'
ext_params['name'] = name
ext_params['disk'] = ('zvol/%s/%s') % (self.configuration.ixsystems_dataset_path, volume_name)
jext_params = json.dumps(ext_params)
LOG.debug('_create_extent params : %s', jext_params)
jext_params = jext_params.encode('utf8')
request_urn = ('%s/') % (FreeNASServer.REST_API_EXTENT)
extent = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,
request_urn, jext_params)
LOG.debug('_create_extent response : %s', json.dumps(extent))
if extent['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while creating iscsi target extent: %s' % extent['response'])
raise FreeNASApiError('Unexpected error', msg)
return json.loads(extent['response'])['id']
def get_iscsitarget_id(self, name):
request_urn = ('%s') % (FreeNASServer.REST_API_TARGET)
LOG.debug('get_iscsitarget_id request_urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.SELECT_COMMAND, request_urn, None)
LOG.debug('get_iscsitarget_id response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while deleting iscsi target: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
uresp = ret['response']
resp = json.loads(uresp.decode('utf8'))
try:
return (item for item in resp if item['name'] == name).__next__()['id']
except StopIteration:
return 0
def get_tgt_ext_id(self, name):
request_urn = ('%s') % (FreeNASServer.REST_API_TARGET_TO_EXTENT)
LOG.debug('get_tgt_ext_id urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.SELECT_COMMAND, request_urn, None)
LOG.debug('get_tgt_ext_id response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while deleting iscsi target: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
uresp = ret['response']
resp = json.loads(uresp.decode('utf8'))
try:
return (item for item in resp if item['target'] == name).__next__()['id']
except StopIteration:
return 0
def get_extent_id(self, name):
request_urn = ('%s') % (FreeNASServer.REST_API_EXTENT)
LOG.debug('get_extent_id urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.SELECT_COMMAND, request_urn, None)
LOG.debug('get_extent_id response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while getting extent id: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
uresp = ret['response']
resp = json.loads(uresp.decode('utf8'))
try:
return (item for item in resp if item['name'] == name).__next__()['id']
except StopIteration:
return 0
def _create_iscsitarget(self, name, volume_name):
tgt_id = self._create_target(name)
ext_id = self._create_extent(name, volume_name)
self._target_to_extent(tgt_id, ext_id)
def delete_target_to_extent(self, tgt_ext_id):
pass
def delete_target(self, target_id):
if target_id:
request_urn = ('%s/id/%s') % (FreeNASServer.REST_API_TARGET, target_id)
LOG.debug('delete_target urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.DELETE_COMMAND,
request_urn, None)
LOG.debug('delete_target response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while deleting iscsi target: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
def delete_extent(self, extent_id):
if extent_id:
request_urn = ('%s/id/%s') % (FreeNASServer.REST_API_EXTENT, extent_id)
LOG.debug('delete_extent urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.DELETE_COMMAND,
request_urn, None)
LOG.debug('delete_extent response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while deleting iscsi extent: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
def _delete_iscsitarget(self, name):
tgt_ext_id = self.get_tgt_ext_id(name)
target_id = self.get_iscsitarget_id(name)
extent_id = self.get_extent_id(name)
self.delete_target_to_extent(tgt_ext_id)
self.delete_target(target_id)
self.delete_extent(extent_id)
def _dependent_clone(self, name):
request_urn = ('%s/id/%s%s') % (FreeNASServer.REST_API_VOLUME,
urllib.parse.quote_plus(self.configuration.ixsystems_dataset_path + '/'), name)
LOG.debug('_dependent_clones urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.SELECT_COMMAND, request_urn, None)
LOG.debug('_dependent_clones response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while getting volume: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
uresp = ret['response']
resp = json.loads(uresp.decode('utf8'))
return resp['origin']['value']
def _delete_volume(self, name):
request_urn = ('%s/id/%s%s') % (FreeNASServer.REST_API_VOLUME,
urllib.parse.quote_plus(self.configuration.ixsystems_dataset_path + '/'), name)
LOG.debug('_delete_volume urn : %s', request_urn)
clone = self._dependent_clone(name)
ret = self.handle.invoke_command(FreeNASServer.DELETE_COMMAND,
request_urn, None)
LOG.debug('_delete_volume response : %s', json.dumps(ret))
if clone:
fullvolume, snapname = clone.split('@')
temp, snapvol = fullvolume.rsplit('/', 1)
self._delete_snapshot(snapname, snapvol)
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while deleting volume: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
def _create_snapshot(self, name, volume_name):
args = {}
args['dataset'] = ('%s/%s') % (self.configuration.ixsystems_dataset_path, volume_name)
args['name'] = name
jargs = json.dumps(args)
jargs = jargs.encode("utf8")
request_urn = ('%s') % (FreeNASServer.REST_API_SNAPSHOT)
LOG.debug('_create_snapshot urn : %s', request_urn)
try:
ret = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,
request_urn, jargs)
LOG.debug('_create_snapshot response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while creating snapshot: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
except Exception as e:
raise FreeNASApiError('Unexpected error', e)
def _delete_snapshot(self, name, volume_name):
LOG.debug('_delete_snapshot, deleting name: %s from volume: %s', name, volume_name)
request_urn = ('%s/id/%s@%s') % (FreeNASServer.REST_API_SNAPSHOT,
urllib.parse.quote_plus(self.configuration.ixsystems_dataset_path + '/' + volume_name), name)
LOG.debug('_delete_snapshot urn : %s', request_urn)
try:
ret = self.handle.invoke_command(FreeNASServer.DELETE_COMMAND,
request_urn, None)
LOG.debug('_delete_snapshot response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while deleting snapshot: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
except Exception as e:
raise FreeNASApiError('Unexpected error', e)
def _create_volume_from_snapshot(self, name, snapshot_name, snap_zvol_name):
args = {}
args['snapshot'] = ('%s/%s@%s') % (self.configuration.ixsystems_dataset_path, snap_zvol_name, snapshot_name)
args['dataset_dst'] = ('%s/%s') % (self.configuration.ixsystems_dataset_path, name)
jargs = json.dumps(args)
jargs = jargs.encode("utf8")
request_urn = ('%s/%s') % (FreeNASServer.REST_API_SNAPSHOT, FreeNASServer.CLONE)
LOG.debug('_create_volume_from_snapshot urn : %s', request_urn)
try:
ret = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,
request_urn, jargs)
LOG.debug('_create_volume_from_snapshot response : %s', json.dumps(ret))
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while creating snapshot: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
except Exception as e:
raise FreeNASApiError('Unexpected error', e)
def _update_volume_stats(self):
request_urn = ('%s/id/%s') % (FreeNASServer.REST_API_VOLUME,
urllib.parse.quote_plus(self.configuration.ixsystems_dataset_path))
LOG.debug('_update_volume_stats request_urn : %s', request_urn)
ret = self.handle.invoke_command(FreeNASServer.SELECT_COMMAND,
request_urn, None)
LOG.debug("_update_volume_stats response : %s", json.dumps(ret))
data = {}
data["volume_backend_name"] = self.backend_name
data["vendor_name"] = self.vendor_name
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.storage_protocol
data['total_capacity_gb'] = ix_utils.get_size_in_gb(json.loads(ret['response'])['available']['parsed'] \
+ json.loads(ret['response'])['used']['parsed'])
data['free_capacity_gb'] = ix_utils.get_size_in_gb(json.loads(ret['response'])['available']['parsed'])
data['reserved_percentage'] = \
self.configuration.ixsystems_reserved_percentage
data['reserved_percentage'] = 0
data['QoS_support'] = False
self.stats = data
return self.stats
def _create_cloned_volume_to_snapshot_map(self, volume_name, snapshot):
map_file = os.path.join(CONF.volumes_dir, volume_name)
jparams = json.dumps(snapshot)
try:
fd = open(map_file, 'w+')
fd.write(jparams)
fd.close()
except Exception as e:
LOG.error(_('_create_halo_volume_name_map: %s') % e)
def _extend_volume(self, name, new_size):
LOG.debug('_extend__volume name: %s', name)
params = {}
params['volsize'] = ix_utils.get_bytes_from_gb(new_size)
jparams = json.dumps(params)
jparams = jparams.encode('utf8')
request_urn = ('%s/id/%s') % (FreeNASServer.REST_API_VOLUME,
urllib.parse.quote_plus(self.configuration.ixsystems_dataset_path + '/' + name))
ret = self.handle.invoke_command(FreeNASServer.UPDATE_COMMAND,
request_urn, jparams)
if ret['status'] != FreeNASServer.STATUS_OK:
msg = ('Error while extending volume: %s' % ret['response'])
raise FreeNASApiError('Unexpected error', msg)
def _create_export(self, volume_name):
freenas_volume = ix_utils.generate_freenas_volume_name(volume_name, self.configuration.ixsystems_iqn_prefix)
if freenas_volume is None:
LOG.error(_('Error in exporting FREENAS volume!'))
handle = None
else:
handle = "%s:%s,%s %s" % \
(self.configuration.ixsystems_server_hostname,
self.configuration.ixsystems_server_iscsi_port,
freenas_volume['target'],
freenas_volume['iqn'])
LOG.debug('provider_location: %s', handle)
return handle
| true | true |
f7f6f9de82b5a679b636553f4554bdba769b692e | 610 | py | Python | 2017/day01/captcha.py | kgaughan/aoc | ffd1d8f28adb3b5b61da15402fb6ca489b9025b0 | [
"BSD-3-Clause"
] | null | null | null | 2017/day01/captcha.py | kgaughan/aoc | ffd1d8f28adb3b5b61da15402fb6ca489b9025b0 | [
"BSD-3-Clause"
] | null | null | null | 2017/day01/captcha.py | kgaughan/aoc | ffd1d8f28adb3b5b61da15402fb6ca489b9025b0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
def runner(path, fn):
    """Apply *fn* to each (rstripped) line of *path*, printing '<n>: <result>'."""
    with open(path) as fh:
        for lineno, line in enumerate(fh, start=1):
            print('{}: {}'.format(lineno, fn(line.rstrip())))
def captcha1(data):
    """Sum every digit that equals the digit after it, circularly.

    The sequence wraps: the last digit is compared with the first
    (Advent of Code 2017, day 1, part 1).
    """
    ring = data + data[0]
    return sum(int(cur)
               for prev, cur in zip(ring, ring[1:])
               if prev == cur)
def captcha2(data):
    """Sum digits that match the digit halfway around the circular string.

    Each matching pair contributes twice (once per position), hence only
    the first half is compared and the match is doubled
    (Advent of Code 2017, day 1, part 2).
    """
    mid = len(data) // 2
    total = 0
    for front, back in zip(data[:mid], data[mid:]):
        if front == back:
            total += 2 * int(front)
    return total
def part1():
    # Part 1: sum of digits matching the immediately following digit.
    runner('input.txt', captcha1)
def part2():
    # Part 2: sum of digits matching the digit halfway around.
    runner('input.txt', captcha2)
| 18.484848 | 60 | 0.519672 |
def runner(path, fn):
with open(path) as fh:
for i, line in enumerate(fh):
print('{}: {}'.format(i + 1, fn(line.rstrip())))
def captcha1(data):
result = 0
last = None
for ch in data + data[0]:
if last == ch:
result += int(ch)
last = ch
return result
def captcha2(data):
half = len(data) // 2
return sum(2 * int(ch1)
for ch1, ch2 in zip(data[:half], data[half:])
if ch1 == ch2)
def part1():
runner('input.txt', captcha1)
def part2():
runner('input.txt', captcha2)
| true | true |
f7f6faf6f2b978ff07730a7858ff45f9dea806a8 | 305 | py | Python | 2017/07/female-homicide-rates-20170721/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 14 | 2015-05-08T13:41:51.000Z | 2021-02-24T12:34:55.000Z | 2017/07/female-homicide-rates-20170721/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | null | null | null | 2017/07/female-homicide-rates-20170721/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 7 | 2015-04-04T04:45:54.000Z | 2021-02-18T11:12:48.000Z | #!/usr/bin/env python
import base_filters
# Key of the Google Doc that provides this graphic's copy text.
COPY_GOOGLE_DOC_KEY = '1Oho_mMqOBUmVsj7VAfTlkkvphRaGl2lZKU2b1bZp8ik'
# This graphic bundles no static assets.
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
# Jinja template filters shared across graphics (defined in base_filters).
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| 21.785714 | 77 | 0.819672 |
import base_filters
COPY_GOOGLE_DOC_KEY = '1Oho_mMqOBUmVsj7VAfTlkkvphRaGl2lZKU2b1bZp8ik'
USE_ASSETS = False
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| true | true |
f7f6fc4844645190ce3f9119ad21b13cdee163f2 | 8,855 | py | Python | aitextgen/train.py | SeanNaren/aitextgen | 12a647cd6e8f2a9f8b0dfa5e380ad50d10a527cd | [
"MIT"
] | null | null | null | aitextgen/train.py | SeanNaren/aitextgen | 12a647cd6e8f2a9f8b0dfa5e380ad50d10a527cd | [
"MIT"
] | null | null | null | aitextgen/train.py | SeanNaren/aitextgen | 12a647cd6e8f2a9f8b0dfa5e380ad50d10a527cd | [
"MIT"
] | 1 | 2020-07-24T05:46:52.000Z | 2020-07-24T05:46:52.000Z | import pytorch_lightning as pl
from pytorch_lightning.callbacks.progress import ProgressBarBase
from tqdm.auto import tqdm
import sys
import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import get_linear_schedule_with_warmup
import os
import shutil
import subprocess
class ATGTransformer(pl.LightningModule):
    """
    A training module for aitextgen.

    Wraps a Hugging Face causal language model in a PyTorch Lightning
    module: the tokenized dataset, the hyperparameter dict and the
    tokenizer are stored on the instance and drive the dataloader and
    optimizer setup below.
    """
    def __init__(self, model, dataset, hparams, tokenizer):
        super(ATGTransformer, self).__init__()
        self.model, self.dataset, self.hparams, self.tokenizer = (
            model,
            dataset,
            hparams,
            tokenizer,
        )
    def forward(self, inputs):
        # Delegate to the underlying transformers model; return_dict=False
        # makes it return a tuple whose first element is the loss when
        # labels are supplied.
        return self.model(**inputs, return_dict=False)
    def training_step(self, batch, batch_num):
        # Language-modeling objective: the input ids double as the labels.
        outputs = self({"input_ids": batch, "labels": batch})
        loss = outputs[0]
        return {"loss": loss}
    def train_dataloader(self):
        # All loader knobs come from the hyperparameter dict passed at
        # construction time.
        return DataLoader(
            self.dataset,
            batch_size=self.hparams["batch_size"],
            shuffle=True,
            pin_memory=self.hparams["pin_memory"],
            num_workers=self.hparams["num_workers"],
        )
    def configure_optimizers(self):
        "Prepare optimizer"
        # Standard transformer fine-tuning setup: biases and LayerNorm
        # weights are excluded from weight decay.
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p
                    for n, p in self.model.named_parameters()
                    if not any(nd in n for nd in no_decay)
                ],
                "weight_decay": self.hparams["weight_decay"],
            },
            {
                "params": [
                    p
                    for n, p in self.model.named_parameters()
                    if any(nd in n for nd in no_decay)
                ],
                "weight_decay": 0.0,
            },
        ]
        optimizer = AdamW(
            optimizer_grouped_parameters,
            lr=self.hparams["learning_rate"],
            eps=self.hparams["adam_epsilon"],
        )
        # Linear warmup followed by linear decay over num_steps.
        scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=self.hparams["warmup_steps"],
            num_training_steps=self.hparams["num_steps"],
        )
        return [optimizer], [scheduler]
class ATGProgressBar(ProgressBarBase):
    """A variant progress bar that works off of steps and prints periodically.

    Also drives periodic model saving and sample-text generation, and can
    freeze the non-transformer layers between those events when
    ``train_transformers_only`` is set.
    """
    def __init__(
        self,
        save_every,
        generate_every,
        output_dir,
        n_generate,
        gpu,
        smoothing,
        run_id,
        save_gdrive,
        progress_bar_refresh_rate,
        train_transformers_only,
        num_layers_freeze,
    ):
        super().__init__()
        self.enabled = True
        self.save_every = save_every
        self.generate_every = generate_every
        self.output_dir = output_dir
        self.n_generate = n_generate
        self.gpu = gpu
        self.steps = 0
        self.prev_avg_loss = None
        self.smoothing = smoothing
        self.run_id = run_id
        self.save_gdrive = save_gdrive
        self.progress_bar_refresh_rate = progress_bar_refresh_rate
        self.train_transformers_only = train_transformers_only
        self.num_layers_freeze = num_layers_freeze
    def enabled(self):
        # NOTE(review): this method is shadowed by the ``self.enabled``
        # boolean assigned in __init__, so it is unreachable on instances;
        # it was presumably meant to be named ``enable`` (cf. ``disable``).
        self.enabled = True
    def disable(self):
        # Turn off periodic saving/generation and the progress bar.
        self.enabled = False
    def on_train_start(self, trainer, pl_module):
        # Replace Lightning's default bar with a step-based tqdm bar and
        # apply the initial layer freeze.
        super().on_train_start(trainer, pl_module)
        self.main_progress_bar = tqdm(
            total=trainer.max_steps,
            disable=not self.enabled,
            smoothing=0,
            leave=True,
            dynamic_ncols=True,
            file=sys.stdout,
        )
        self.freeze_layers(pl_module)
    def on_train_end(self, trainer, pl_module):
        # Close the bar and restore all layers to trainable.
        self.main_progress_bar.close()
        self.unfreeze_layers(pl_module)
    def on_batch_end(self, trainer, pl_module):
        """Per-step bookkeeping: smoothed loss, bar refresh, save/generate."""
        super().on_batch_end(trainer, pl_module)
        # clean up the GPU cache used for the benchmark
        # https://discuss.pytorch.org/t/about-torch-cuda-empty-cache/34232/4
        if self.steps == 0 and self.gpu:
            torch.cuda.empty_cache()
        current_loss = float(trainer.progress_bar_dict["loss"])
        self.steps += 1
        avg_loss = 0
        if current_loss == current_loss:  # don't add if current_loss is NaN
            avg_loss = self.average_loss(
                current_loss, self.prev_avg_loss, self.smoothing
            )
            self.prev_avg_loss = avg_loss
        desc = f"Loss: {current_loss:.3f} — Avg: {avg_loss:.3f}"
        if self.steps % self.progress_bar_refresh_rate == 0:
            if self.gpu:
                # via pytorch-lightning's get_gpu_memory_map()
                result = subprocess.run(
                    [
                        shutil.which("nvidia-smi"),
                        "--query-gpu=memory.used",
                        "--format=csv,nounits,noheader",
                    ],
                    encoding="utf-8",
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    check=True,
                )
                gpu_memory = result.stdout.strip().split(os.linesep)[0]
                desc += f" — GPU Mem: {gpu_memory} MB"
            self.main_progress_bar.update(self.progress_bar_refresh_rate)
            self.main_progress_bar.set_description(desc)
        if self.enabled:
            # Save/generate events need the full model trainable; unfreeze
            # for the event, then re-freeze afterwards.
            did_unfreeze = False
            if self.save_every > 0 and self.steps % self.save_every == 0:
                self.unfreeze_layers(pl_module)
                self.save_pytorch_model(trainer, pl_module)
                did_unfreeze = True
            if self.generate_every > 0 and self.steps % self.generate_every == 0:
                self.unfreeze_layers(pl_module)
                self.generate_sample_text(trainer, pl_module)
                did_unfreeze = True
            if did_unfreeze:
                self.freeze_layers(pl_module)
    def generate_sample_text(self, trainer, pl_module):
        """Sample n_generate texts from the model and print them to the bar."""
        self.main_progress_bar.write(
            f"\033[1m{self.steps:,} steps reached: generating sample texts.\033[0m"
        )
        gen_length = min(pl_module.model.config.n_positions, 256)
        outputs = pl_module.model.generate(
            input_ids=None,
            max_length=gen_length,
            do_sample=True,
            num_return_sequences=self.n_generate,
            temperature=0.7,
            pad_token_id=pl_module.tokenizer.pad_token_id,
        )
        # Strip BOS/EOS ids from each sequence before decoding; torch.unique
        # collapses the case where BOS and EOS share one id.
        special_token_id_tensor = torch.unique(
            torch.as_tensor(
                [pl_module.tokenizer.bos_token_id, pl_module.tokenizer.eos_token_id]
            )
        ).to(pl_module.model.device.type)
        outputs = [
            output[
                ~output.unsqueeze(1).eq(special_token_id_tensor.unsqueeze(1)).any(1)
            ].tolist()
            for output in outputs
        ]
        if self.n_generate > 1:
            gen_texts = pl_module.tokenizer.batch_decode(outputs)
        else:
            gen_texts = [pl_module.tokenizer.decode(outputs[0])]
        for text in gen_texts:
            self.main_progress_bar.write("=" * 10)
            self.main_progress_bar.write(text)
            self.main_progress_bar.write("=" * 10)
    def save_pytorch_model(self, trainer, pl_module):
        """Save the model to output_dir (and optionally mirror to Drive)."""
        self.main_progress_bar.write(
            f"\033[1m{self.steps:,} steps reached: saving model to /{self.output_dir}\033[0m"
        )
        pl_module.model.save_pretrained(self.output_dir)
        if self.save_gdrive:
            # Colab convention: copy weights/config into the mounted Drive.
            for pt_file in ["pytorch_model.bin", "config.json"]:
                shutil.copyfile(
                    os.path.join(self.output_dir, pt_file),
                    os.path.join("/content/drive/My Drive/", self.run_id, pt_file),
                )
    def average_loss(self, current_loss, prev_avg_loss, smoothing):
        # Exponential moving average of the loss; seeded with the first value.
        if prev_avg_loss is None:
            return current_loss
        else:
            return (smoothing * current_loss) + (1 - smoothing) * prev_avg_loss
    def modify_layers(self, pl_module, unfreeze):
        """Set requires_grad on the embedding and the first num_layers_freeze
        transformer blocks (GPT-2-style ``.h.<n>.`` parameter names)."""
        if self.train_transformers_only:
            for name, param in pl_module.model.named_parameters():
                if self.num_layers_freeze:
                    # NOTE(review): layer 0 yields to_freeze == 0 (falsy),
                    # so the first block is never frozen by this test.
                    layer_num = int(name.split(".")[2]) if ".h." in name else None
                    to_freeze = layer_num and layer_num < self.num_layers_freeze
                else:
                    to_freeze = False
                if name == "transformer.wte.weight" or to_freeze:
                    param.requires_grad = unfreeze
    def freeze_layers(self, pl_module):
        self.modify_layers(pl_module, False)
    def unfreeze_layers(self, pl_module):
        self.modify_layers(pl_module, True)
| 33.289474 | 93 | 0.578882 | import pytorch_lightning as pl
from pytorch_lightning.callbacks.progress import ProgressBarBase
from tqdm.auto import tqdm
import sys
import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import get_linear_schedule_with_warmup
import os
import shutil
import subprocess
class ATGTransformer(pl.LightningModule):
def __init__(self, model, dataset, hparams, tokenizer):
super(ATGTransformer, self).__init__()
self.model, self.dataset, self.hparams, self.tokenizer = (
model,
dataset,
hparams,
tokenizer,
)
def forward(self, inputs):
return self.model(**inputs, return_dict=False)
def training_step(self, batch, batch_num):
outputs = self({"input_ids": batch, "labels": batch})
loss = outputs[0]
return {"loss": loss}
def train_dataloader(self):
return DataLoader(
self.dataset,
batch_size=self.hparams["batch_size"],
shuffle=True,
pin_memory=self.hparams["pin_memory"],
num_workers=self.hparams["num_workers"],
)
def configure_optimizers(self):
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in self.model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": self.hparams["weight_decay"],
},
{
"params": [
p
for n, p in self.model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=self.hparams["learning_rate"],
eps=self.hparams["adam_epsilon"],
)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=self.hparams["warmup_steps"],
num_training_steps=self.hparams["num_steps"],
)
return [optimizer], [scheduler]
class ATGProgressBar(ProgressBarBase):
    """Progress-bar callback that also performs periodic model saving,
    sample-text generation, and (optionally) freezing of transformer
    layers during training.
    """

    def __init__(
        self,
        save_every,
        generate_every,
        output_dir,
        n_generate,
        gpu,
        smoothing,
        run_id,
        save_gdrive,
        progress_bar_refresh_rate,
        train_transformers_only,
        num_layers_freeze,
    ):
        """
        :param save_every: save the model every N steps (<= 0 disables)
        :param generate_every: generate sample texts every N steps (<= 0 disables)
        :param output_dir: directory model checkpoints are written to
        :param n_generate: number of sample texts generated each time
        :param gpu: True when training on a GPU (enables memory reporting)
        :param smoothing: exponential-smoothing factor for the average loss
        :param run_id: run identifier; doubles as the Google Drive folder name
        :param save_gdrive: also copy saved checkpoints to Google Drive
        :param progress_bar_refresh_rate: refresh the bar every N steps
        :param train_transformers_only: freeze non-transformer weights
        :param num_layers_freeze: count of leading transformer layers to freeze
        """
        super().__init__()
        self.enabled = True
        self.save_every = save_every
        self.generate_every = generate_every
        self.output_dir = output_dir
        self.n_generate = n_generate
        self.gpu = gpu
        self.steps = 0
        self.prev_avg_loss = None
        self.smoothing = smoothing
        self.run_id = run_id
        self.save_gdrive = save_gdrive
        self.progress_bar_refresh_rate = progress_bar_refresh_rate
        self.train_transformers_only = train_transformers_only
        self.num_layers_freeze = num_layers_freeze

    # BUGFIX: this was `def enabled(self)`, which never satisfied the
    # ProgressBarBase enable()/disable() contract and was unreachable
    # anyway because __init__ sets an `enabled` instance attribute that
    # shadows a method of the same name.
    def enable(self):
        self.enabled = True

    def disable(self):
        self.enabled = False

    def on_train_start(self, trainer, pl_module):
        """Create the tqdm bar and apply the initial layer freeze."""
        super().on_train_start(trainer, pl_module)
        self.main_progress_bar = tqdm(
            total=trainer.max_steps,
            disable=not self.enabled,
            smoothing=0,
            leave=True,
            dynamic_ncols=True,
            file=sys.stdout,
        )
        self.freeze_layers(pl_module)

    def on_train_end(self, trainer, pl_module):
        """Close the bar and restore gradients on all frozen layers."""
        self.main_progress_bar.close()
        self.unfreeze_layers(pl_module)

    def on_batch_end(self, trainer, pl_module):
        """Track loss, refresh the bar, and trigger periodic save/generate."""
        super().on_batch_end(trainer, pl_module)

        # Clear cached GPU memory once, before the first optimizer step.
        if self.steps == 0 and self.gpu:
            torch.cuda.empty_cache()

        current_loss = float(trainer.progress_bar_dict["loss"])
        self.steps += 1
        avg_loss = 0
        # NaN guard: NaN != NaN, so this branch is skipped for NaN losses
        # and the previous running average is left untouched.
        if current_loss == current_loss:
            avg_loss = self.average_loss(
                current_loss, self.prev_avg_loss, self.smoothing
            )
            self.prev_avg_loss = avg_loss

        desc = f"Loss: {current_loss:.3f} — Avg: {avg_loss:.3f}"

        if self.steps % self.progress_bar_refresh_rate == 0:
            if self.gpu:
                # via pytorch-lightning's get_gpu_memory_map()
                result = subprocess.run(
                    [
                        shutil.which("nvidia-smi"),
                        "--query-gpu=memory.used",
                        "--format=csv,nounits,noheader",
                    ],
                    encoding="utf-8",
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    check=True,
                )
                gpu_memory = result.stdout.strip().split(os.linesep)[0]
                desc += f" — GPU Mem: {gpu_memory} MB"
            self.main_progress_bar.update(self.progress_bar_refresh_rate)
            self.main_progress_bar.set_description(desc)

        if self.enabled:
            # Saving/generating needs the full (unfrozen) parameter set;
            # re-freeze afterwards so training continues unchanged.
            did_unfreeze = False
            if self.save_every > 0 and self.steps % self.save_every == 0:
                self.unfreeze_layers(pl_module)
                self.save_pytorch_model(trainer, pl_module)
                did_unfreeze = True
            if self.generate_every > 0 and self.steps % self.generate_every == 0:
                self.unfreeze_layers(pl_module)
                self.generate_sample_text(trainer, pl_module)
                did_unfreeze = True
            if did_unfreeze:
                self.freeze_layers(pl_module)

    def generate_sample_text(self, trainer, pl_module):
        """Sample n_generate texts from the model and print them to the bar."""
        self.main_progress_bar.write(
            f"\033[1m{self.steps:,} steps reached: generating sample texts.\033[0m"
        )

        # Cap generation length at the model's context window (max 256).
        gen_length = min(pl_module.model.config.n_positions, 256)
        outputs = pl_module.model.generate(
            input_ids=None,
            max_length=gen_length,
            do_sample=True,
            num_return_sequences=self.n_generate,
            temperature=0.7,
            pad_token_id=pl_module.tokenizer.pad_token_id,
        )

        # Strip bos/eos token ids from each generated sequence before decoding.
        special_token_id_tensor = torch.unique(
            torch.as_tensor(
                [pl_module.tokenizer.bos_token_id, pl_module.tokenizer.eos_token_id]
            )
        ).to(pl_module.model.device.type)
        outputs = [
            output[
                ~output.unsqueeze(1).eq(special_token_id_tensor.unsqueeze(1)).any(1)
            ].tolist()
            for output in outputs
        ]

        if self.n_generate > 1:
            gen_texts = pl_module.tokenizer.batch_decode(outputs)
        else:
            gen_texts = [pl_module.tokenizer.decode(outputs[0])]

        for text in gen_texts:
            self.main_progress_bar.write("=" * 10)
            self.main_progress_bar.write(text)
        self.main_progress_bar.write("=" * 10)

    def save_pytorch_model(self, trainer, pl_module):
        """Save the model to output_dir (and optionally mirror to GDrive)."""
        self.main_progress_bar.write(
            f"\033[1m{self.steps:,} steps reached: saving model to /{self.output_dir}\033[0m"
        )
        pl_module.model.save_pretrained(self.output_dir)

        if self.save_gdrive:
            for pt_file in ["pytorch_model.bin", "config.json"]:
                shutil.copyfile(
                    os.path.join(self.output_dir, pt_file),
                    os.path.join("/content/drive/My Drive/", self.run_id, pt_file),
                )

    def average_loss(self, current_loss, prev_avg_loss, smoothing):
        """Exponentially smoothed loss; seeds with current_loss on first call."""
        if prev_avg_loss is None:
            return current_loss
        else:
            return (smoothing * current_loss) + (1 - smoothing) * prev_avg_loss

    def modify_layers(self, pl_module, unfreeze):
        """Toggle requires_grad on the word embedding and the first
        num_layers_freeze transformer blocks (GPT-2 style `.h.N.` names)."""
        if self.train_transformers_only:
            for name, param in pl_module.model.named_parameters():
                if self.num_layers_freeze:
                    layer_num = int(name.split(".")[2]) if ".h." in name else None
                    to_freeze = layer_num and layer_num < self.num_layers_freeze
                else:
                    to_freeze = False
                if name == "transformer.wte.weight" or to_freeze:
                    param.requires_grad = unfreeze

    def freeze_layers(self, pl_module):
        self.modify_layers(pl_module, False)

    def unfreeze_layers(self, pl_module):
        self.modify_layers(pl_module, True)
| true | true |
f7f6fcd3d0ebddcd93263488c0ab0657c752e36f | 37,900 | py | Python | site-packages/cryptography/x509/extensions.py | NickFlatow/winnf-rel2-testharness | f5174ae643c13b6753fac434569e907df4d50cf1 | [
"Apache-2.0"
] | 132 | 2021-02-24T12:14:35.000Z | 2022-03-28T13:06:22.000Z | site-packages/cryptography/x509/extensions.py | NickFlatow/winnf-rel2-testharness | f5174ae643c13b6753fac434569e907df4d50cf1 | [
"Apache-2.0"
] | 2 | 2022-02-10T16:51:56.000Z | 2022-02-10T18:23:52.000Z | site-packages/cryptography/x509/extensions.py | NickFlatow/winnf-rel2-testharness | f5174ae643c13b6753fac434569e907df4d50cf1 | [
"Apache-2.0"
] | 3 | 2021-12-08T15:20:46.000Z | 2021-12-13T04:55:08.000Z | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import datetime
import hashlib
import ipaddress
import warnings
from enum import Enum
from pyasn1.codec.der import decoder
from pyasn1.type import namedtype, univ
import six
from cryptography import utils
from cryptography.hazmat.primitives import constant_time, serialization
from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey
from cryptography.x509.general_name import GeneralName, IPAddress, OtherName
from cryptography.x509.name import Name, RelativeDistinguishedName
from cryptography.x509.oid import (
CRLEntryExtensionOID, ExtensionOID, ObjectIdentifier
)
class _SubjectPublicKeyInfo(univ.Sequence):
    # Minimal ASN.1 schema for SubjectPublicKeyInfo (RFC 5280 s4.1): only
    # enough structure to extract the raw subjectPublicKey BIT STRING; the
    # algorithm field is left as an opaque SEQUENCE.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('algorithm', univ.Sequence()),
        namedtype.NamedType('subjectPublicKey', univ.BitString())
    )
def _key_identifier_from_public_key(public_key):
    """Compute an RFC 5280-style key identifier for *public_key*.

    Returns the SHA-1 digest of the key material: the PKCS1 DER encoding
    for RSA keys, the encoded EC point for elliptic-curve keys, and the
    raw subjectPublicKey bits (extracted via pyasn1) for everything else.
    """
    if isinstance(public_key, RSAPublicKey):
        data = public_key.public_bytes(
            serialization.Encoding.DER,
            serialization.PublicFormat.PKCS1,
        )
    elif isinstance(public_key, EllipticCurvePublicKey):
        data = public_key.public_numbers().encode_point()
    else:
        # This is a very slow way to do this.
        serialized = public_key.public_bytes(
            serialization.Encoding.DER,
            serialization.PublicFormat.SubjectPublicKeyInfo
        )

        spki, remaining = decoder.decode(
            serialized, asn1Spec=_SubjectPublicKeyInfo()
        )
        # A well-formed DER SubjectPublicKeyInfo must consume all bytes.
        assert not remaining
        # the univ.BitString object is a tuple of bits. We need bytes and
        # pyasn1 really doesn't want to give them to us. To get it we'll
        # build an integer and convert that to bytes.
        bits = 0
        for bit in spki.getComponentByName("subjectPublicKey"):
            bits = bits << 1 | bit

        data = utils.int_to_bytes(bits)

    return hashlib.sha1(data).digest()
class DuplicateExtension(Exception):
    """Raised when the same extension OID appears more than once."""

    def __init__(self, msg, oid):
        Exception.__init__(self, msg)
        self.oid = oid
class UnsupportedExtension(Exception):
    """Raised when an extension OID is not supported by this library."""

    def __init__(self, msg, oid):
        Exception.__init__(self, msg)
        self.oid = oid
class ExtensionNotFound(Exception):
    """Raised when a requested extension is absent from a certificate."""

    def __init__(self, msg, oid):
        Exception.__init__(self, msg)
        self.oid = oid
@six.add_metaclass(abc.ABCMeta)
class ExtensionType(object):
    """Abstract interface implemented by every X.509 extension value type."""

    @abc.abstractproperty
    def oid(self):
        """
        Returns the oid associated with the given extension type.
        """
class Extensions(object):
    """A read-only sequence of Extension objects with OID/class lookup."""

    def __init__(self, extensions):
        self._extensions = extensions

    def get_extension_for_oid(self, oid):
        """Return the first extension whose OID equals *oid*, else raise."""
        for extension in self._extensions:
            if extension.oid == oid:
                return extension

        raise ExtensionNotFound("No {0} extension was found".format(oid), oid)

    def get_extension_for_class(self, extclass):
        """Return the extension whose value is an instance of *extclass*."""
        if extclass is UnrecognizedExtension:
            raise TypeError(
                "UnrecognizedExtension can't be used with "
                "get_extension_for_class because more than one instance of the"
                " class may be present."
            )

        for extension in self._extensions:
            if isinstance(extension.value, extclass):
                return extension

        raise ExtensionNotFound(
            "No {0} extension was found".format(extclass), extclass.oid
        )

    def __iter__(self):
        return iter(self._extensions)

    def __len__(self):
        return len(self._extensions)

    def __getitem__(self, idx):
        return self._extensions[idx]

    def __repr__(self):
        return "<Extensions({0})>".format(self._extensions)
@utils.register_interface(ExtensionType)
class CRLNumber(object):
    """CRL Number extension (RFC 5280 s5.2.3): a monotonically increasing
    sequence number identifying a particular CRL."""
    oid = ExtensionOID.CRL_NUMBER

    def __init__(self, crl_number):
        # six.integer_types accepts both int and (on py2) long.
        if not isinstance(crl_number, six.integer_types):
            raise TypeError("crl_number must be an integer")

        self._crl_number = crl_number

    def __eq__(self, other):
        if not isinstance(other, CRLNumber):
            return NotImplemented

        return self.crl_number == other.crl_number

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.crl_number)

    def __repr__(self):
        return "<CRLNumber({0})>".format(self.crl_number)

    crl_number = utils.read_only_property("_crl_number")
@utils.register_interface(ExtensionType)
class AuthorityKeyIdentifier(object):
    """Authority Key Identifier extension (RFC 5280 s4.2.1.1): identifies
    the public key of the issuing CA, by key id and/or issuer + serial."""
    oid = ExtensionOID.AUTHORITY_KEY_IDENTIFIER

    def __init__(self, key_identifier, authority_cert_issuer,
                 authority_cert_serial_number):
        # Issuer and serial number are a pair: both set or both absent.
        if (authority_cert_issuer is None) != (
            authority_cert_serial_number is None
        ):
            raise ValueError(
                "authority_cert_issuer and authority_cert_serial_number "
                "must both be present or both None"
            )

        if authority_cert_issuer is not None:
            authority_cert_issuer = list(authority_cert_issuer)
            if not all(
                isinstance(x, GeneralName) for x in authority_cert_issuer
            ):
                raise TypeError(
                    "authority_cert_issuer must be a list of GeneralName "
                    "objects"
                )

        if authority_cert_serial_number is not None and not isinstance(
            authority_cert_serial_number, six.integer_types
        ):
            raise TypeError(
                "authority_cert_serial_number must be an integer"
            )

        self._key_identifier = key_identifier
        self._authority_cert_issuer = authority_cert_issuer
        self._authority_cert_serial_number = authority_cert_serial_number

    @classmethod
    def from_issuer_public_key(cls, public_key):
        """Build an AKI whose key id is derived from the issuer's key."""
        digest = _key_identifier_from_public_key(public_key)
        return cls(
            key_identifier=digest,
            authority_cert_issuer=None,
            authority_cert_serial_number=None
        )

    @classmethod
    def from_issuer_subject_key_identifier(cls, ski):
        """Build an AKI that mirrors the issuer's SubjectKeyIdentifier."""
        return cls(
            key_identifier=ski.value.digest,
            authority_cert_issuer=None,
            authority_cert_serial_number=None
        )

    def __repr__(self):
        return (
            "<AuthorityKeyIdentifier(key_identifier={0.key_identifier!r}, "
            "authority_cert_issuer={0.authority_cert_issuer}, "
            "authority_cert_serial_number={0.authority_cert_serial_number}"
            ")>".format(self)
        )

    def __eq__(self, other):
        if not isinstance(other, AuthorityKeyIdentifier):
            return NotImplemented

        return (
            self.key_identifier == other.key_identifier and
            self.authority_cert_issuer == other.authority_cert_issuer and
            self.authority_cert_serial_number ==
            other.authority_cert_serial_number
        )

    def __ne__(self, other):
        return not self == other

    key_identifier = utils.read_only_property("_key_identifier")
    authority_cert_issuer = utils.read_only_property("_authority_cert_issuer")
    authority_cert_serial_number = utils.read_only_property(
        "_authority_cert_serial_number"
    )
@utils.register_interface(ExtensionType)
class SubjectKeyIdentifier(object):
    """Subject Key Identifier extension (RFC 5280 s4.2.1.2): a digest
    identifying the certificate's own public key."""
    oid = ExtensionOID.SUBJECT_KEY_IDENTIFIER

    def __init__(self, digest):
        self._digest = digest

    @classmethod
    def from_public_key(cls, public_key):
        """Derive the identifier from the subject's public key material."""
        return cls(_key_identifier_from_public_key(public_key))

    digest = utils.read_only_property("_digest")

    def __repr__(self):
        return "<SubjectKeyIdentifier(digest={0!r})>".format(self.digest)

    def __eq__(self, other):
        if not isinstance(other, SubjectKeyIdentifier):
            return NotImplemented

        # Constant-time comparison avoids leaking digest bytes via timing.
        return constant_time.bytes_eq(self.digest, other.digest)

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.digest)
@utils.register_interface(ExtensionType)
class AuthorityInformationAccess(object):
    """Authority Information Access extension (RFC 5280 s4.2.2.1): a
    sequence of AccessDescription entries (e.g. OCSP/CA Issuers URLs)."""
    oid = ExtensionOID.AUTHORITY_INFORMATION_ACCESS

    def __init__(self, descriptions):
        descriptions = list(descriptions)
        if not all(isinstance(x, AccessDescription) for x in descriptions):
            raise TypeError(
                "Every item in the descriptions list must be an "
                "AccessDescription"
            )

        self._descriptions = descriptions

    def __iter__(self):
        return iter(self._descriptions)

    def __len__(self):
        return len(self._descriptions)

    def __repr__(self):
        return "<AuthorityInformationAccess({0})>".format(self._descriptions)

    def __eq__(self, other):
        if not isinstance(other, AuthorityInformationAccess):
            return NotImplemented

        return self._descriptions == other._descriptions

    def __ne__(self, other):
        return not self == other

    def __getitem__(self, idx):
        return self._descriptions[idx]
class AccessDescription(object):
    """One AIA entry: an access method OID plus the GeneralName location
    where that service can be reached."""

    def __init__(self, access_method, access_location):
        if not isinstance(access_method, ObjectIdentifier):
            raise TypeError("access_method must be an ObjectIdentifier")

        if not isinstance(access_location, GeneralName):
            raise TypeError("access_location must be a GeneralName")

        self._access_method = access_method
        self._access_location = access_location

    def __repr__(self):
        return (
            "<AccessDescription(access_method={0.access_method}, access_locati"
            "on={0.access_location})>".format(self)
        )

    def __eq__(self, other):
        if not isinstance(other, AccessDescription):
            return NotImplemented

        return (
            self.access_method == other.access_method and
            self.access_location == other.access_location
        )

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((self.access_method, self.access_location))

    access_method = utils.read_only_property("_access_method")
    access_location = utils.read_only_property("_access_location")
@utils.register_interface(ExtensionType)
class BasicConstraints(object):
    """Basic Constraints extension (RFC 5280 s4.2.1.9): whether the
    certificate is a CA and, if so, the maximum chain depth below it."""
    oid = ExtensionOID.BASIC_CONSTRAINTS

    def __init__(self, ca, path_length):
        if not isinstance(ca, bool):
            raise TypeError("ca must be a boolean value")

        # path_length is only meaningful on CA certificates.
        if path_length is not None and not ca:
            raise ValueError("path_length must be None when ca is False")

        if (
            path_length is not None and
            (not isinstance(path_length, six.integer_types) or path_length < 0)
        ):
            raise TypeError(
                "path_length must be a non-negative integer or None"
            )

        self._ca = ca
        self._path_length = path_length

    ca = utils.read_only_property("_ca")
    path_length = utils.read_only_property("_path_length")

    def __repr__(self):
        return ("<BasicConstraints(ca={0.ca}, "
                "path_length={0.path_length})>").format(self)

    def __eq__(self, other):
        if not isinstance(other, BasicConstraints):
            return NotImplemented

        return self.ca == other.ca and self.path_length == other.path_length

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((self.ca, self.path_length))
@utils.register_interface(ExtensionType)
class CRLDistributionPoints(object):
    """CRL Distribution Points extension (RFC 5280 s4.2.1.13): where and
    how CRL information for this certificate can be obtained."""
    oid = ExtensionOID.CRL_DISTRIBUTION_POINTS

    def __init__(self, distribution_points):
        distribution_points = list(distribution_points)
        if not all(
            isinstance(x, DistributionPoint) for x in distribution_points
        ):
            raise TypeError(
                "distribution_points must be a list of DistributionPoint "
                "objects"
            )

        self._distribution_points = distribution_points

    def __iter__(self):
        return iter(self._distribution_points)

    def __len__(self):
        return len(self._distribution_points)

    def __repr__(self):
        return "<CRLDistributionPoints({0})>".format(self._distribution_points)

    def __eq__(self, other):
        if not isinstance(other, CRLDistributionPoints):
            return NotImplemented

        return self._distribution_points == other._distribution_points

    def __ne__(self, other):
        return not self == other

    def __getitem__(self, idx):
        return self._distribution_points[idx]
class DistributionPoint(object):
    """One CRL distribution point: a location (full_name XOR relative_name),
    the revocation reasons it covers, and the CRL issuer."""

    def __init__(self, full_name, relative_name, reasons, crl_issuer):
        # full_name and relative_name are mutually exclusive per RFC 5280.
        if full_name and relative_name:
            raise ValueError(
                "You cannot provide both full_name and relative_name, at "
                "least one must be None."
            )

        if full_name:
            full_name = list(full_name)
            if not all(isinstance(x, GeneralName) for x in full_name):
                raise TypeError(
                    "full_name must be a list of GeneralName objects"
                )

        if relative_name:
            if isinstance(relative_name, Name):
                # Legacy input type: coerce a Name into the replacement
                # RelativeDistinguishedName, with a deprecation warning.
                warnings.warn(
                    "relative_name=<Name> is deprecated and will "
                    "be removed in a future version; use "
                    "<RelativeDistinguishedName> instead.",
                    utils.DeprecatedIn16,
                    stacklevel=2
                )
                relative_name = RelativeDistinguishedName(relative_name)
            elif not isinstance(relative_name, RelativeDistinguishedName):
                raise TypeError(
                    "relative_name must be a RelativeDistinguishedName"
                )

        if crl_issuer:
            crl_issuer = list(crl_issuer)
            if not all(isinstance(x, GeneralName) for x in crl_issuer):
                raise TypeError(
                    "crl_issuer must be None or a list of general names"
                )

        if reasons and (not isinstance(reasons, frozenset) or not all(
            isinstance(x, ReasonFlags) for x in reasons
        )):
            raise TypeError("reasons must be None or frozenset of ReasonFlags")

        # These two reason codes are valid in CRL entries but not in a
        # DistributionPoint (RFC 5280 s4.2.1.13).
        if reasons and (
            ReasonFlags.unspecified in reasons or
            ReasonFlags.remove_from_crl in reasons
        ):
            raise ValueError(
                "unspecified and remove_from_crl are not valid reasons in a "
                "DistributionPoint"
            )

        if reasons and not crl_issuer and not (full_name or relative_name):
            raise ValueError(
                "You must supply crl_issuer, full_name, or relative_name when "
                "reasons is not None"
            )

        self._full_name = full_name
        self._relative_name = relative_name
        self._reasons = reasons
        self._crl_issuer = crl_issuer

    def __repr__(self):
        return (
            "<DistributionPoint(full_name={0.full_name}, relative_name={0.rela"
            "tive_name}, reasons={0.reasons}, crl_issuer={0.crl_is"
            "suer})>".format(self)
        )

    def __eq__(self, other):
        if not isinstance(other, DistributionPoint):
            return NotImplemented

        return (
            self.full_name == other.full_name and
            self.relative_name == other.relative_name and
            self.reasons == other.reasons and
            self.crl_issuer == other.crl_issuer
        )

    def __ne__(self, other):
        return not self == other

    full_name = utils.read_only_property("_full_name")
    relative_name = utils.read_only_property("_relative_name")
    reasons = utils.read_only_property("_reasons")
    crl_issuer = utils.read_only_property("_crl_issuer")
class ReasonFlags(Enum):
    """Revocation reason codes (RFC 5280 s5.3.1). Values are the ASN.1
    identifier strings used on the wire."""
    unspecified = "unspecified"
    key_compromise = "keyCompromise"
    ca_compromise = "cACompromise"
    affiliation_changed = "affiliationChanged"
    superseded = "superseded"
    cessation_of_operation = "cessationOfOperation"
    certificate_hold = "certificateHold"
    privilege_withdrawn = "privilegeWithdrawn"
    aa_compromise = "aACompromise"
    remove_from_crl = "removeFromCRL"
@utils.register_interface(ExtensionType)
class PolicyConstraints(object):
    """Policy Constraints extension (RFC 5280 s4.2.1.11): limits on
    explicit-policy requirements and policy mapping down the chain."""
    oid = ExtensionOID.POLICY_CONSTRAINTS

    def __init__(self, require_explicit_policy, inhibit_policy_mapping):
        # NOTE(review): the error messages promise a non-negative integer
        # but negativity is not actually enforced here — only the type is
        # checked. Matches upstream behavior; confirm before tightening.
        if require_explicit_policy is not None and not isinstance(
            require_explicit_policy, six.integer_types
        ):
            raise TypeError(
                "require_explicit_policy must be a non-negative integer or "
                "None"
            )

        if inhibit_policy_mapping is not None and not isinstance(
            inhibit_policy_mapping, six.integer_types
        ):
            raise TypeError(
                "inhibit_policy_mapping must be a non-negative integer or None"
            )

        if inhibit_policy_mapping is None and require_explicit_policy is None:
            raise ValueError(
                "At least one of require_explicit_policy and "
                "inhibit_policy_mapping must not be None"
            )

        self._require_explicit_policy = require_explicit_policy
        self._inhibit_policy_mapping = inhibit_policy_mapping

    def __repr__(self):
        return (
            u"<PolicyConstraints(require_explicit_policy={0.require_explicit"
            u"_policy}, inhibit_policy_mapping={0.inhibit_policy_"
            u"mapping})>".format(self)
        )

    def __eq__(self, other):
        if not isinstance(other, PolicyConstraints):
            return NotImplemented

        return (
            self.require_explicit_policy == other.require_explicit_policy and
            self.inhibit_policy_mapping == other.inhibit_policy_mapping
        )

    def __ne__(self, other):
        return not self == other

    require_explicit_policy = utils.read_only_property(
        "_require_explicit_policy"
    )
    inhibit_policy_mapping = utils.read_only_property(
        "_inhibit_policy_mapping"
    )
@utils.register_interface(ExtensionType)
class CertificatePolicies(object):
    """Certificate Policies extension (RFC 5280 s4.2.1.4): a sequence of
    PolicyInformation terms the certificate was issued under."""
    oid = ExtensionOID.CERTIFICATE_POLICIES

    def __init__(self, policies):
        policies = list(policies)
        if not all(isinstance(x, PolicyInformation) for x in policies):
            raise TypeError(
                "Every item in the policies list must be a "
                "PolicyInformation"
            )

        self._policies = policies

    def __iter__(self):
        return iter(self._policies)

    def __len__(self):
        return len(self._policies)

    def __repr__(self):
        return "<CertificatePolicies({0})>".format(self._policies)

    def __eq__(self, other):
        if not isinstance(other, CertificatePolicies):
            return NotImplemented

        return self._policies == other._policies

    def __ne__(self, other):
        return not self == other

    def __getitem__(self, idx):
        return self._policies[idx]
class PolicyInformation(object):
    """One certificate policy: its OID plus optional qualifiers (CPS URI
    strings and/or UserNotice objects)."""

    def __init__(self, policy_identifier, policy_qualifiers):
        if not isinstance(policy_identifier, ObjectIdentifier):
            raise TypeError("policy_identifier must be an ObjectIdentifier")

        self._policy_identifier = policy_identifier

        if policy_qualifiers:
            policy_qualifiers = list(policy_qualifiers)
            if not all(
                isinstance(x, (six.text_type, UserNotice))
                for x in policy_qualifiers
            ):
                raise TypeError(
                    "policy_qualifiers must be a list of strings and/or "
                    "UserNotice objects or None"
                )

        self._policy_qualifiers = policy_qualifiers

    def __repr__(self):
        return (
            "<PolicyInformation(policy_identifier={0.policy_identifier}, polic"
            "y_qualifiers={0.policy_qualifiers})>".format(self)
        )

    def __eq__(self, other):
        if not isinstance(other, PolicyInformation):
            return NotImplemented

        return (
            self.policy_identifier == other.policy_identifier and
            self.policy_qualifiers == other.policy_qualifiers
        )

    def __ne__(self, other):
        return not self == other

    policy_identifier = utils.read_only_property("_policy_identifier")
    policy_qualifiers = utils.read_only_property("_policy_qualifiers")
class UserNotice(object):
    """A certificate-policy user notice: an optional NoticeReference plus
    optional display text (RFC 5280 s4.2.1.4)."""

    def __init__(self, notice_reference, explicit_text):
        if notice_reference and not isinstance(
            notice_reference, NoticeReference
        ):
            raise TypeError(
                "notice_reference must be None or a NoticeReference"
            )

        self._notice_reference = notice_reference
        self._explicit_text = explicit_text

    def __repr__(self):
        return (
            "<UserNotice(notice_reference={0.notice_reference}, explicit_text="
            "{0.explicit_text!r})>".format(self)
        )

    def __eq__(self, other):
        if not isinstance(other, UserNotice):
            return NotImplemented

        return (
            self.notice_reference == other.notice_reference and
            self.explicit_text == other.explicit_text
        )

    def __ne__(self, other):
        return not self == other

    notice_reference = utils.read_only_property("_notice_reference")
    explicit_text = utils.read_only_property("_explicit_text")
class NoticeReference(object):
    """Reference to notices published by an organization: the organization
    name plus a list of integer notice numbers."""

    def __init__(self, organization, notice_numbers):
        self._organization = organization
        notice_numbers = list(notice_numbers)
        if not all(isinstance(x, int) for x in notice_numbers):
            raise TypeError(
                "notice_numbers must be a list of integers"
            )

        self._notice_numbers = notice_numbers

    def __repr__(self):
        return (
            "<NoticeReference(organization={0.organization!r}, notice_numbers="
            "{0.notice_numbers})>".format(self)
        )

    def __eq__(self, other):
        if not isinstance(other, NoticeReference):
            return NotImplemented

        return (
            self.organization == other.organization and
            self.notice_numbers == other.notice_numbers
        )

    def __ne__(self, other):
        return not self == other

    organization = utils.read_only_property("_organization")
    notice_numbers = utils.read_only_property("_notice_numbers")
@utils.register_interface(ExtensionType)
class ExtendedKeyUsage(object):
    """Extended Key Usage extension (RFC 5280 s4.2.1.12): OIDs naming the
    purposes the certified key may be used for."""
    oid = ExtensionOID.EXTENDED_KEY_USAGE

    def __init__(self, usages):
        usages = list(usages)
        if not all(isinstance(x, ObjectIdentifier) for x in usages):
            raise TypeError(
                "Every item in the usages list must be an ObjectIdentifier"
            )

        self._usages = usages

    def __iter__(self):
        return iter(self._usages)

    def __len__(self):
        return len(self._usages)

    def __repr__(self):
        return "<ExtendedKeyUsage({0})>".format(self._usages)

    def __eq__(self, other):
        if not isinstance(other, ExtendedKeyUsage):
            return NotImplemented

        return self._usages == other._usages

    def __ne__(self, other):
        return not self == other
@utils.register_interface(ExtensionType)
class OCSPNoCheck(object):
    # OCSP No Check (RFC 6960 s4.2.2.2.1): a presence-only extension with
    # no value, marking an OCSP signer as exempt from revocation checking.
    oid = ExtensionOID.OCSP_NO_CHECK
@utils.register_interface(ExtensionType)
class InhibitAnyPolicy(object):
    """Inhibit anyPolicy extension (RFC 5280 s4.2.1.14): number of certs
    that may still appear below before anyPolicy stops matching."""
    oid = ExtensionOID.INHIBIT_ANY_POLICY

    def __init__(self, skip_certs):
        if not isinstance(skip_certs, six.integer_types):
            raise TypeError("skip_certs must be an integer")

        if skip_certs < 0:
            raise ValueError("skip_certs must be a non-negative integer")

        self._skip_certs = skip_certs

    def __repr__(self):
        return "<InhibitAnyPolicy(skip_certs={0.skip_certs})>".format(self)

    def __eq__(self, other):
        if not isinstance(other, InhibitAnyPolicy):
            return NotImplemented

        return self.skip_certs == other.skip_certs

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.skip_certs)

    skip_certs = utils.read_only_property("_skip_certs")
@utils.register_interface(ExtensionType)
class KeyUsage(object):
    """Key Usage extension (RFC 5280 s4.2.1.3): nine boolean flags naming
    the cryptographic operations the key is permitted to perform."""
    oid = ExtensionOID.KEY_USAGE

    def __init__(self, digital_signature, content_commitment, key_encipherment,
                 data_encipherment, key_agreement, key_cert_sign, crl_sign,
                 encipher_only, decipher_only):
        # encipher_only/decipher_only refine key_agreement and are
        # meaningless without it.
        if not key_agreement and (encipher_only or decipher_only):
            raise ValueError(
                "encipher_only and decipher_only can only be true when "
                "key_agreement is true"
            )

        self._digital_signature = digital_signature
        self._content_commitment = content_commitment
        self._key_encipherment = key_encipherment
        self._data_encipherment = data_encipherment
        self._key_agreement = key_agreement
        self._key_cert_sign = key_cert_sign
        self._crl_sign = crl_sign
        self._encipher_only = encipher_only
        self._decipher_only = decipher_only

    digital_signature = utils.read_only_property("_digital_signature")
    content_commitment = utils.read_only_property("_content_commitment")
    key_encipherment = utils.read_only_property("_key_encipherment")
    data_encipherment = utils.read_only_property("_data_encipherment")
    key_agreement = utils.read_only_property("_key_agreement")
    key_cert_sign = utils.read_only_property("_key_cert_sign")
    crl_sign = utils.read_only_property("_crl_sign")

    @property
    def encipher_only(self):
        # Accessing this flag when key_agreement is false is an error, as
        # the value is undefined in that case.
        if not self.key_agreement:
            raise ValueError(
                "encipher_only is undefined unless key_agreement is true"
            )
        else:
            return self._encipher_only

    @property
    def decipher_only(self):
        if not self.key_agreement:
            raise ValueError(
                "decipher_only is undefined unless key_agreement is true"
            )
        else:
            return self._decipher_only

    def __repr__(self):
        # Fall back to None for the two conditional flags so repr never
        # raises when key_agreement is false.
        try:
            encipher_only = self.encipher_only
            decipher_only = self.decipher_only
        except ValueError:
            encipher_only = None
            decipher_only = None

        return ("<KeyUsage(digital_signature={0.digital_signature}, "
                "content_commitment={0.content_commitment}, "
                "key_encipherment={0.key_encipherment}, "
                "data_encipherment={0.data_encipherment}, "
                "key_agreement={0.key_agreement}, "
                "key_cert_sign={0.key_cert_sign}, crl_sign={0.crl_sign}, "
                "encipher_only={1}, decipher_only={2})>").format(
                    self, encipher_only, decipher_only)

    def __eq__(self, other):
        if not isinstance(other, KeyUsage):
            return NotImplemented

        # Compares the private attributes directly so equality works even
        # when key_agreement is false (properties would raise).
        return (
            self.digital_signature == other.digital_signature and
            self.content_commitment == other.content_commitment and
            self.key_encipherment == other.key_encipherment and
            self.data_encipherment == other.data_encipherment and
            self.key_agreement == other.key_agreement and
            self.key_cert_sign == other.key_cert_sign and
            self.crl_sign == other.crl_sign and
            self._encipher_only == other._encipher_only and
            self._decipher_only == other._decipher_only
        )

    def __ne__(self, other):
        return not self == other
@utils.register_interface(ExtensionType)
class NameConstraints(object):
    """Name Constraints extension (RFC 5280 s4.2.1.10): name subtrees a
    CA's issued certificates are permitted in or excluded from."""
    oid = ExtensionOID.NAME_CONSTRAINTS

    def __init__(self, permitted_subtrees, excluded_subtrees):
        if permitted_subtrees is not None:
            permitted_subtrees = list(permitted_subtrees)
            if not all(
                isinstance(x, GeneralName) for x in permitted_subtrees
            ):
                raise TypeError(
                    "permitted_subtrees must be a list of GeneralName objects "
                    "or None"
                )

            self._validate_ip_name(permitted_subtrees)

        if excluded_subtrees is not None:
            excluded_subtrees = list(excluded_subtrees)
            if not all(
                isinstance(x, GeneralName) for x in excluded_subtrees
            ):
                raise TypeError(
                    "excluded_subtrees must be a list of GeneralName objects "
                    "or None"
                )

            self._validate_ip_name(excluded_subtrees)

        if permitted_subtrees is None and excluded_subtrees is None:
            raise ValueError(
                "At least one of permitted_subtrees and excluded_subtrees "
                "must not be None"
            )

        self._permitted_subtrees = permitted_subtrees
        self._excluded_subtrees = excluded_subtrees

    def __eq__(self, other):
        if not isinstance(other, NameConstraints):
            return NotImplemented

        return (
            self.excluded_subtrees == other.excluded_subtrees and
            self.permitted_subtrees == other.permitted_subtrees
        )

    def __ne__(self, other):
        return not self == other

    def _validate_ip_name(self, tree):
        # IPAddress constraints must describe networks (CIDR ranges), not
        # single host addresses.
        if any(isinstance(name, IPAddress) and not isinstance(
            name.value, (ipaddress.IPv4Network, ipaddress.IPv6Network)
        ) for name in tree):
            raise TypeError(
                "IPAddress name constraints must be an IPv4Network or"
                " IPv6Network object"
            )

    def __repr__(self):
        return (
            u"<NameConstraints(permitted_subtrees={0.permitted_subtrees}, "
            u"excluded_subtrees={0.excluded_subtrees})>".format(self)
        )

    permitted_subtrees = utils.read_only_property("_permitted_subtrees")
    excluded_subtrees = utils.read_only_property("_excluded_subtrees")
class Extension(object):
    """Wrapper pairing an extension value with its OID and criticality."""

    def __init__(self, oid, critical, value):
        if not isinstance(oid, ObjectIdentifier):
            raise TypeError(
                "oid argument must be an ObjectIdentifier instance."
            )

        if not isinstance(critical, bool):
            raise TypeError("critical must be a boolean value")

        self._oid = oid
        self._critical = critical
        self._value = value

    oid = utils.read_only_property("_oid")
    critical = utils.read_only_property("_critical")
    value = utils.read_only_property("_value")

    def __repr__(self):
        return ("<Extension(oid={0.oid}, critical={0.critical}, "
                "value={0.value})>").format(self)

    def __eq__(self, other):
        if not isinstance(other, Extension):
            return NotImplemented

        return (
            self.oid == other.oid and
            self.critical == other.critical and
            self.value == other.value
        )

    def __ne__(self, other):
        return not self == other
class GeneralNames(object):
    """A validated, read-only sequence of GeneralName objects."""

    def __init__(self, general_names):
        general_names = list(general_names)
        if not all(isinstance(x, GeneralName) for x in general_names):
            raise TypeError(
                "Every item in the general_names list must be an "
                "object conforming to the GeneralName interface"
            )

        self._general_names = general_names

    def __iter__(self):
        return iter(self._general_names)

    def __len__(self):
        return len(self._general_names)

    def get_values_for_type(self, type):
        # OtherName instances are returned whole, since both of their
        # properties matter; every other GeneralName is unwrapped to its
        # single value.
        matches = (name for name in self._general_names if isinstance(name, type))
        if type != OtherName:
            matches = (name.value for name in matches)
        return list(matches)

    def __repr__(self):
        return "<GeneralNames({0})>".format(self._general_names)

    def __eq__(self, other):
        if not isinstance(other, GeneralNames):
            return NotImplemented
        return self._general_names == other._general_names

    def __ne__(self, other):
        return not self == other

    def __getitem__(self, idx):
        return self._general_names[idx]
@utils.register_interface(ExtensionType)
class SubjectAlternativeName(object):
    """Subject Alternative Name extension (RFC 5280 s4.2.1.6): additional
    identities (DNS names, IPs, etc.) bound to the subject."""
    oid = ExtensionOID.SUBJECT_ALTERNATIVE_NAME

    def __init__(self, general_names):
        # GeneralNames performs the per-item type validation.
        self._general_names = GeneralNames(general_names)

    def __iter__(self):
        return iter(self._general_names)

    def __len__(self):
        return len(self._general_names)

    def get_values_for_type(self, type):
        return self._general_names.get_values_for_type(type)

    def __repr__(self):
        return "<SubjectAlternativeName({0})>".format(self._general_names)

    def __eq__(self, other):
        if not isinstance(other, SubjectAlternativeName):
            return NotImplemented

        return self._general_names == other._general_names

    def __getitem__(self, idx):
        return self._general_names[idx]

    def __ne__(self, other):
        return not self == other
@utils.register_interface(ExtensionType)
class IssuerAlternativeName(object):
    """Issuer Alternative Name extension (RFC 5280 s4.2.1.7): additional
    identities bound to the certificate issuer."""
    oid = ExtensionOID.ISSUER_ALTERNATIVE_NAME

    def __init__(self, general_names):
        # GeneralNames performs the per-item type validation.
        self._general_names = GeneralNames(general_names)

    def __iter__(self):
        return iter(self._general_names)

    def __len__(self):
        return len(self._general_names)

    def get_values_for_type(self, type):
        return self._general_names.get_values_for_type(type)

    def __repr__(self):
        return "<IssuerAlternativeName({0})>".format(self._general_names)

    def __eq__(self, other):
        if not isinstance(other, IssuerAlternativeName):
            return NotImplemented

        return self._general_names == other._general_names

    def __ne__(self, other):
        return not self == other

    def __getitem__(self, idx):
        return self._general_names[idx]
@utils.register_interface(ExtensionType)
class CertificateIssuer(object):
    """CRL entry extension: the GeneralNames of the certificate's issuer."""

    oid = CRLEntryExtensionOID.CERTIFICATE_ISSUER

    def __init__(self, general_names):
        # GeneralNames enforces that every item is a GeneralName.
        self._general_names = GeneralNames(general_names)

    def get_values_for_type(self, type):
        """Return the values of every contained name of the given type."""
        return self._general_names.get_values_for_type(type)

    def __iter__(self):
        return iter(self._general_names)

    def __len__(self):
        return len(self._general_names)

    def __getitem__(self, idx):
        return self._general_names[idx]

    def __eq__(self, other):
        if isinstance(other, CertificateIssuer):
            return self._general_names == other._general_names
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return "<CertificateIssuer({0})>".format(self._general_names)
@utils.register_interface(ExtensionType)
class CRLReason(object):
    """CRL entry extension wrapping a single ReasonFlags member."""

    oid = CRLEntryExtensionOID.CRL_REASON

    def __init__(self, reason):
        # Only members of the ReasonFlags enum are accepted.
        if not isinstance(reason, ReasonFlags):
            raise TypeError("reason must be an element from ReasonFlags")
        self._reason = reason

    reason = utils.read_only_property("_reason")

    def __eq__(self, other):
        if isinstance(other, CRLReason):
            return self.reason == other.reason
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.reason)

    def __repr__(self):
        return "<CRLReason(reason={0})>".format(self._reason)
@utils.register_interface(ExtensionType)
class InvalidityDate(object):
    """CRL entry extension carrying a datetime for the revoked entry."""

    oid = CRLEntryExtensionOID.INVALIDITY_DATE

    def __init__(self, invalidity_date):
        # Reject anything that is not a real datetime instance.
        if not isinstance(invalidity_date, datetime.datetime):
            raise TypeError("invalidity_date must be a datetime.datetime")
        self._invalidity_date = invalidity_date

    invalidity_date = utils.read_only_property("_invalidity_date")

    def __eq__(self, other):
        if isinstance(other, InvalidityDate):
            return self.invalidity_date == other.invalidity_date
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.invalidity_date)

    def __repr__(self):
        return "<InvalidityDate(invalidity_date={0})>".format(
            self._invalidity_date
        )
@utils.register_interface(ExtensionType)
class UnrecognizedExtension(object):
    """Opaque holder for an extension whose OID the library cannot parse."""

    def __init__(self, oid, value):
        if not isinstance(oid, ObjectIdentifier):
            raise TypeError("oid must be an ObjectIdentifier")
        self._oid = oid
        self._value = value

    oid = utils.read_only_property("_oid")
    value = utils.read_only_property("_value")

    def __eq__(self, other):
        if isinstance(other, UnrecognizedExtension):
            return self.oid == other.oid and self.value == other.value
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((self.oid, self.value))

    def __repr__(self):
        return "<UnrecognizedExtension(oid={0.oid}, value={0.value!r})>".format(
            self
        )
| 31.244847 | 79 | 0.647045 |
from __future__ import absolute_import, division, print_function
import abc
import datetime
import hashlib
import ipaddress
import warnings
from enum import Enum
from pyasn1.codec.der import decoder
from pyasn1.type import namedtype, univ
import six
from cryptography import utils
from cryptography.hazmat.primitives import constant_time, serialization
from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey
from cryptography.x509.general_name import GeneralName, IPAddress, OtherName
from cryptography.x509.name import Name, RelativeDistinguishedName
from cryptography.x509.oid import (
CRLEntryExtensionOID, ExtensionOID, ObjectIdentifier
)
class _SubjectPublicKeyInfo(univ.Sequence):
    # Minimal ASN.1 SubjectPublicKeyInfo schema used only to extract the raw
    # subjectPublicKey BIT STRING when computing key identifiers below.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('algorithm', univ.Sequence()),
        namedtype.NamedType('subjectPublicKey', univ.BitString())
    )
def _key_identifier_from_public_key(public_key):
    """Derive a key identifier as the SHA-1 digest of the key's encoding."""
    if isinstance(public_key, RSAPublicKey):
        # RSA keys hash their PKCS#1 DER encoding.
        data = public_key.public_bytes(
            serialization.Encoding.DER,
            serialization.PublicFormat.PKCS1,
        )
    elif isinstance(public_key, EllipticCurvePublicKey):
        # EC keys hash the encoded curve point directly.
        data = public_key.public_numbers().encode_point()
    else:
        # Fallback for other key types: serialize to SubjectPublicKeyInfo
        # and pull out the raw subjectPublicKey BIT STRING via pyasn1.
        serialized = public_key.public_bytes(
            serialization.Encoding.DER,
            serialization.PublicFormat.SubjectPublicKeyInfo
        )

        spki, remaining = decoder.decode(
            serialized, asn1Spec=_SubjectPublicKeyInfo()
        )
        # The DER encoding must be consumed entirely.
        assert not remaining
        # Repack the individual bits of the BIT STRING into an integer...
        bits = 0
        for bit in spki.getComponentByName("subjectPublicKey"):
            bits = bits << 1 | bit

        # ...and then into bytes, which is what gets hashed.
        data = utils.int_to_bytes(bits)

    return hashlib.sha1(data).digest()
class DuplicateExtension(Exception):
    """Raised when the same extension OID appears more than once."""

    def __init__(self, msg, oid):
        # Remember the duplicated OID, then let Exception own the message.
        self.oid = oid
        super(DuplicateExtension, self).__init__(msg)
class UnsupportedExtension(Exception):
    """Raised for an extension OID the caller cannot handle."""

    def __init__(self, msg, oid):
        # Remember the unsupported OID, then let Exception own the message.
        self.oid = oid
        super(UnsupportedExtension, self).__init__(msg)
class ExtensionNotFound(Exception):
    """Raised when a lookup for an extension OID/class finds nothing."""

    def __init__(self, msg, oid):
        # Remember the OID that was searched for, then defer to Exception.
        self.oid = oid
        super(ExtensionNotFound, self).__init__(msg)
@six.add_metaclass(abc.ABCMeta)
class ExtensionType(object):
    """Abstract interface implemented by every concrete extension type."""

    @abc.abstractproperty
    def oid(self):
        """Returns the ObjectIdentifier identifying this extension."""
class Extensions(object):
    """Read-only container of Extension objects with lookup helpers."""

    def __init__(self, extensions):
        self._extensions = extensions

    def get_extension_for_oid(self, oid):
        """Return the extension with the given OID or raise ExtensionNotFound."""
        for ext in self:
            if ext.oid == oid:
                return ext

        raise ExtensionNotFound("No {0} extension was found".format(oid), oid)

    def get_extension_for_class(self, extclass):
        """Return the extension whose value is an instance of *extclass*.

        Raises TypeError for UnrecognizedExtension (which may occur more
        than once, making a by-class lookup ambiguous) and
        ExtensionNotFound when no match exists.
        """
        if extclass is UnrecognizedExtension:
            raise TypeError(
                "UnrecognizedExtension can't be used with "
                "get_extension_for_class because more than one instance of the"
                " class may be present."
            )

        for ext in self:
            if isinstance(ext.value, extclass):
                return ext

        raise ExtensionNotFound(
            "No {0} extension was found".format(extclass), extclass.oid
        )

    def __iter__(self):
        return iter(self._extensions)

    def __len__(self):
        return len(self._extensions)

    def __getitem__(self, idx):
        return self._extensions[idx]

    def __repr__(self):
        return (
            "<Extensions({0})>".format(self._extensions)
        )
@utils.register_interface(ExtensionType)
class CRLNumber(object):
    """CRL extension carrying the integer CRL sequence number."""

    oid = ExtensionOID.CRL_NUMBER

    def __init__(self, crl_number):
        if not isinstance(crl_number, six.integer_types):
            raise TypeError("crl_number must be an integer")
        self._crl_number = crl_number

    crl_number = utils.read_only_property("_crl_number")

    def __repr__(self):
        return "<CRLNumber({0})>".format(self.crl_number)

    def __eq__(self, other):
        if isinstance(other, CRLNumber):
            return self.crl_number == other.crl_number
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.crl_number)
@utils.register_interface(ExtensionType)
class AuthorityKeyIdentifier(object):
oid = ExtensionOID.AUTHORITY_KEY_IDENTIFIER
def __init__(self, key_identifier, authority_cert_issuer,
authority_cert_serial_number):
if (authority_cert_issuer is None) != (
authority_cert_serial_number is None
):
raise ValueError(
"authority_cert_issuer and authority_cert_serial_number "
"must both be present or both None"
)
if authority_cert_issuer is not None:
authority_cert_issuer = list(authority_cert_issuer)
if not all(
isinstance(x, GeneralName) for x in authority_cert_issuer
):
raise TypeError(
"authority_cert_issuer must be a list of GeneralName "
"objects"
)
if authority_cert_serial_number is not None and not isinstance(
authority_cert_serial_number, six.integer_types
):
raise TypeError(
"authority_cert_serial_number must be an integer"
)
self._key_identifier = key_identifier
self._authority_cert_issuer = authority_cert_issuer
self._authority_cert_serial_number = authority_cert_serial_number
@classmethod
def from_issuer_public_key(cls, public_key):
digest = _key_identifier_from_public_key(public_key)
return cls(
key_identifier=digest,
authority_cert_issuer=None,
authority_cert_serial_number=None
)
@classmethod
def from_issuer_subject_key_identifier(cls, ski):
return cls(
key_identifier=ski.value.digest,
authority_cert_issuer=None,
authority_cert_serial_number=None
)
def __repr__(self):
return (
"<AuthorityKeyIdentifier(key_identifier={0.key_identifier!r}, "
"authority_cert_issuer={0.authority_cert_issuer}, "
"authority_cert_serial_number={0.authority_cert_serial_number}"
")>".format(self)
)
def __eq__(self, other):
if not isinstance(other, AuthorityKeyIdentifier):
return NotImplemented
return (
self.key_identifier == other.key_identifier and
self.authority_cert_issuer == other.authority_cert_issuer and
self.authority_cert_serial_number ==
other.authority_cert_serial_number
)
def __ne__(self, other):
return not self == other
key_identifier = utils.read_only_property("_key_identifier")
authority_cert_issuer = utils.read_only_property("_authority_cert_issuer")
authority_cert_serial_number = utils.read_only_property(
"_authority_cert_serial_number"
)
@utils.register_interface(ExtensionType)
class SubjectKeyIdentifier(object):
    """subjectKeyIdentifier extension wrapping a key-identifier digest."""

    oid = ExtensionOID.SUBJECT_KEY_IDENTIFIER

    def __init__(self, digest):
        self._digest = digest

    @classmethod
    def from_public_key(cls, public_key):
        """Alternate constructor: derive the digest from a public key."""
        return cls(_key_identifier_from_public_key(public_key))

    digest = utils.read_only_property("_digest")

    def __repr__(self):
        return "<SubjectKeyIdentifier(digest={0!r})>".format(self.digest)

    def __eq__(self, other):
        if not isinstance(other, SubjectKeyIdentifier):
            return NotImplemented

        # Timing-safe byte comparison of the two digests.
        return constant_time.bytes_eq(self.digest, other.digest)

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.digest)
@utils.register_interface(ExtensionType)
class AuthorityInformationAccess(object):
oid = ExtensionOID.AUTHORITY_INFORMATION_ACCESS
def __init__(self, descriptions):
descriptions = list(descriptions)
if not all(isinstance(x, AccessDescription) for x in descriptions):
raise TypeError(
"Every item in the descriptions list must be an "
"AccessDescription"
)
self._descriptions = descriptions
def __iter__(self):
return iter(self._descriptions)
def __len__(self):
return len(self._descriptions)
def __repr__(self):
return "<AuthorityInformationAccess({0})>".format(self._descriptions)
def __eq__(self, other):
if not isinstance(other, AuthorityInformationAccess):
return NotImplemented
return self._descriptions == other._descriptions
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._descriptions[idx]
class AccessDescription(object):
def __init__(self, access_method, access_location):
if not isinstance(access_method, ObjectIdentifier):
raise TypeError("access_method must be an ObjectIdentifier")
if not isinstance(access_location, GeneralName):
raise TypeError("access_location must be a GeneralName")
self._access_method = access_method
self._access_location = access_location
def __repr__(self):
return (
"<AccessDescription(access_method={0.access_method}, access_locati"
"on={0.access_location})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, AccessDescription):
return NotImplemented
return (
self.access_method == other.access_method and
self.access_location == other.access_location
)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.access_method, self.access_location))
access_method = utils.read_only_property("_access_method")
access_location = utils.read_only_property("_access_location")
@utils.register_interface(ExtensionType)
class BasicConstraints(object):
    """basicConstraints extension: CA flag plus optional path-length limit."""

    oid = ExtensionOID.BASIC_CONSTRAINTS

    def __init__(self, ca, path_length):
        if not isinstance(ca, bool):
            raise TypeError("ca must be a boolean value")

        # path_length is only meaningful on CA certificates.
        if path_length is not None and not ca:
            raise ValueError("path_length must be None when ca is False")

        if (
            path_length is not None and
            (not isinstance(path_length, six.integer_types) or path_length < 0)
        ):
            raise TypeError(
                "path_length must be a non-negative integer or None"
            )

        self._ca = ca
        self._path_length = path_length

    ca = utils.read_only_property("_ca")
    path_length = utils.read_only_property("_path_length")

    def __repr__(self):
        return ("<BasicConstraints(ca={0.ca}, "
                "path_length={0.path_length})>").format(self)

    def __eq__(self, other):
        if not isinstance(other, BasicConstraints):
            return NotImplemented

        return self.ca == other.ca and self.path_length == other.path_length

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((self.ca, self.path_length))
@utils.register_interface(ExtensionType)
class CRLDistributionPoints(object):
oid = ExtensionOID.CRL_DISTRIBUTION_POINTS
def __init__(self, distribution_points):
distribution_points = list(distribution_points)
if not all(
isinstance(x, DistributionPoint) for x in distribution_points
):
raise TypeError(
"distribution_points must be a list of DistributionPoint "
"objects"
)
self._distribution_points = distribution_points
def __iter__(self):
return iter(self._distribution_points)
def __len__(self):
return len(self._distribution_points)
def __repr__(self):
return "<CRLDistributionPoints({0})>".format(self._distribution_points)
def __eq__(self, other):
if not isinstance(other, CRLDistributionPoints):
return NotImplemented
return self._distribution_points == other._distribution_points
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._distribution_points[idx]
class DistributionPoint(object):
def __init__(self, full_name, relative_name, reasons, crl_issuer):
if full_name and relative_name:
raise ValueError(
"You cannot provide both full_name and relative_name, at "
"least one must be None."
)
if full_name:
full_name = list(full_name)
if not all(isinstance(x, GeneralName) for x in full_name):
raise TypeError(
"full_name must be a list of GeneralName objects"
)
if relative_name:
if isinstance(relative_name, Name):
warnings.warn(
"relative_name=<Name> is deprecated and will "
"be removed in a future version; use "
"<RelativeDistinguishedName> instead.",
utils.DeprecatedIn16,
stacklevel=2
)
relative_name = RelativeDistinguishedName(relative_name)
elif not isinstance(relative_name, RelativeDistinguishedName):
raise TypeError(
"relative_name must be a RelativeDistinguishedName"
)
if crl_issuer:
crl_issuer = list(crl_issuer)
if not all(isinstance(x, GeneralName) for x in crl_issuer):
raise TypeError(
"crl_issuer must be None or a list of general names"
)
if reasons and (not isinstance(reasons, frozenset) or not all(
isinstance(x, ReasonFlags) for x in reasons
)):
raise TypeError("reasons must be None or frozenset of ReasonFlags")
if reasons and (
ReasonFlags.unspecified in reasons or
ReasonFlags.remove_from_crl in reasons
):
raise ValueError(
"unspecified and remove_from_crl are not valid reasons in a "
"DistributionPoint"
)
if reasons and not crl_issuer and not (full_name or relative_name):
raise ValueError(
"You must supply crl_issuer, full_name, or relative_name when "
"reasons is not None"
)
self._full_name = full_name
self._relative_name = relative_name
self._reasons = reasons
self._crl_issuer = crl_issuer
def __repr__(self):
return (
"<DistributionPoint(full_name={0.full_name}, relative_name={0.rela"
"tive_name}, reasons={0.reasons}, crl_issuer={0.crl_is"
"suer})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, DistributionPoint):
return NotImplemented
return (
self.full_name == other.full_name and
self.relative_name == other.relative_name and
self.reasons == other.reasons and
self.crl_issuer == other.crl_issuer
)
def __ne__(self, other):
return not self == other
full_name = utils.read_only_property("_full_name")
relative_name = utils.read_only_property("_relative_name")
reasons = utils.read_only_property("_reasons")
crl_issuer = utils.read_only_property("_crl_issuer")
class ReasonFlags(Enum):
    # Revocation reason codes; each value is the textual ASN.1 identifier.
    # Note: DistributionPoint rejects unspecified and remove_from_crl.
    unspecified = "unspecified"
    key_compromise = "keyCompromise"
    ca_compromise = "cACompromise"
    affiliation_changed = "affiliationChanged"
    superseded = "superseded"
    cessation_of_operation = "cessationOfOperation"
    certificate_hold = "certificateHold"
    privilege_withdrawn = "privilegeWithdrawn"
    aa_compromise = "aACompromise"
    remove_from_crl = "removeFromCRL"
@utils.register_interface(ExtensionType)
class PolicyConstraints(object):
oid = ExtensionOID.POLICY_CONSTRAINTS
def __init__(self, require_explicit_policy, inhibit_policy_mapping):
if require_explicit_policy is not None and not isinstance(
require_explicit_policy, six.integer_types
):
raise TypeError(
"require_explicit_policy must be a non-negative integer or "
"None"
)
if inhibit_policy_mapping is not None and not isinstance(
inhibit_policy_mapping, six.integer_types
):
raise TypeError(
"inhibit_policy_mapping must be a non-negative integer or None"
)
if inhibit_policy_mapping is None and require_explicit_policy is None:
raise ValueError(
"At least one of require_explicit_policy and "
"inhibit_policy_mapping must not be None"
)
self._require_explicit_policy = require_explicit_policy
self._inhibit_policy_mapping = inhibit_policy_mapping
def __repr__(self):
return (
u"<PolicyConstraints(require_explicit_policy={0.require_explicit"
u"_policy}, inhibit_policy_mapping={0.inhibit_policy_"
u"mapping})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, PolicyConstraints):
return NotImplemented
return (
self.require_explicit_policy == other.require_explicit_policy and
self.inhibit_policy_mapping == other.inhibit_policy_mapping
)
def __ne__(self, other):
return not self == other
require_explicit_policy = utils.read_only_property(
"_require_explicit_policy"
)
inhibit_policy_mapping = utils.read_only_property(
"_inhibit_policy_mapping"
)
@utils.register_interface(ExtensionType)
class CertificatePolicies(object):
oid = ExtensionOID.CERTIFICATE_POLICIES
def __init__(self, policies):
policies = list(policies)
if not all(isinstance(x, PolicyInformation) for x in policies):
raise TypeError(
"Every item in the policies list must be a "
"PolicyInformation"
)
self._policies = policies
def __iter__(self):
return iter(self._policies)
def __len__(self):
return len(self._policies)
def __repr__(self):
return "<CertificatePolicies({0})>".format(self._policies)
def __eq__(self, other):
if not isinstance(other, CertificatePolicies):
return NotImplemented
return self._policies == other._policies
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._policies[idx]
class PolicyInformation(object):
def __init__(self, policy_identifier, policy_qualifiers):
if not isinstance(policy_identifier, ObjectIdentifier):
raise TypeError("policy_identifier must be an ObjectIdentifier")
self._policy_identifier = policy_identifier
if policy_qualifiers:
policy_qualifiers = list(policy_qualifiers)
if not all(
isinstance(x, (six.text_type, UserNotice))
for x in policy_qualifiers
):
raise TypeError(
"policy_qualifiers must be a list of strings and/or "
"UserNotice objects or None"
)
self._policy_qualifiers = policy_qualifiers
def __repr__(self):
return (
"<PolicyInformation(policy_identifier={0.policy_identifier}, polic"
"y_qualifiers={0.policy_qualifiers})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, PolicyInformation):
return NotImplemented
return (
self.policy_identifier == other.policy_identifier and
self.policy_qualifiers == other.policy_qualifiers
)
def __ne__(self, other):
return not self == other
policy_identifier = utils.read_only_property("_policy_identifier")
policy_qualifiers = utils.read_only_property("_policy_qualifiers")
class UserNotice(object):
def __init__(self, notice_reference, explicit_text):
if notice_reference and not isinstance(
notice_reference, NoticeReference
):
raise TypeError(
"notice_reference must be None or a NoticeReference"
)
self._notice_reference = notice_reference
self._explicit_text = explicit_text
def __repr__(self):
return (
"<UserNotice(notice_reference={0.notice_reference}, explicit_text="
"{0.explicit_text!r})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, UserNotice):
return NotImplemented
return (
self.notice_reference == other.notice_reference and
self.explicit_text == other.explicit_text
)
def __ne__(self, other):
return not self == other
notice_reference = utils.read_only_property("_notice_reference")
explicit_text = utils.read_only_property("_explicit_text")
class NoticeReference(object):
def __init__(self, organization, notice_numbers):
self._organization = organization
notice_numbers = list(notice_numbers)
if not all(isinstance(x, int) for x in notice_numbers):
raise TypeError(
"notice_numbers must be a list of integers"
)
self._notice_numbers = notice_numbers
def __repr__(self):
return (
"<NoticeReference(organization={0.organization!r}, notice_numbers="
"{0.notice_numbers})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, NoticeReference):
return NotImplemented
return (
self.organization == other.organization and
self.notice_numbers == other.notice_numbers
)
def __ne__(self, other):
return not self == other
organization = utils.read_only_property("_organization")
notice_numbers = utils.read_only_property("_notice_numbers")
@utils.register_interface(ExtensionType)
class ExtendedKeyUsage(object):
oid = ExtensionOID.EXTENDED_KEY_USAGE
def __init__(self, usages):
usages = list(usages)
if not all(isinstance(x, ObjectIdentifier) for x in usages):
raise TypeError(
"Every item in the usages list must be an ObjectIdentifier"
)
self._usages = usages
def __iter__(self):
return iter(self._usages)
def __len__(self):
return len(self._usages)
def __repr__(self):
return "<ExtendedKeyUsage({0})>".format(self._usages)
def __eq__(self, other):
if not isinstance(other, ExtendedKeyUsage):
return NotImplemented
return self._usages == other._usages
def __ne__(self, other):
return not self == other
@utils.register_interface(ExtensionType)
class OCSPNoCheck(object):
    # Marker extension with no payload: its presence alone is the signal.
    oid = ExtensionOID.OCSP_NO_CHECK
@utils.register_interface(ExtensionType)
class InhibitAnyPolicy(object):
oid = ExtensionOID.INHIBIT_ANY_POLICY
def __init__(self, skip_certs):
if not isinstance(skip_certs, six.integer_types):
raise TypeError("skip_certs must be an integer")
if skip_certs < 0:
raise ValueError("skip_certs must be a non-negative integer")
self._skip_certs = skip_certs
def __repr__(self):
return "<InhibitAnyPolicy(skip_certs={0.skip_certs})>".format(self)
def __eq__(self, other):
if not isinstance(other, InhibitAnyPolicy):
return NotImplemented
return self.skip_certs == other.skip_certs
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.skip_certs)
skip_certs = utils.read_only_property("_skip_certs")
@utils.register_interface(ExtensionType)
class KeyUsage(object):
oid = ExtensionOID.KEY_USAGE
def __init__(self, digital_signature, content_commitment, key_encipherment,
data_encipherment, key_agreement, key_cert_sign, crl_sign,
encipher_only, decipher_only):
if not key_agreement and (encipher_only or decipher_only):
raise ValueError(
"encipher_only and decipher_only can only be true when "
"key_agreement is true"
)
self._digital_signature = digital_signature
self._content_commitment = content_commitment
self._key_encipherment = key_encipherment
self._data_encipherment = data_encipherment
self._key_agreement = key_agreement
self._key_cert_sign = key_cert_sign
self._crl_sign = crl_sign
self._encipher_only = encipher_only
self._decipher_only = decipher_only
digital_signature = utils.read_only_property("_digital_signature")
content_commitment = utils.read_only_property("_content_commitment")
key_encipherment = utils.read_only_property("_key_encipherment")
data_encipherment = utils.read_only_property("_data_encipherment")
key_agreement = utils.read_only_property("_key_agreement")
key_cert_sign = utils.read_only_property("_key_cert_sign")
crl_sign = utils.read_only_property("_crl_sign")
@property
def encipher_only(self):
if not self.key_agreement:
raise ValueError(
"encipher_only is undefined unless key_agreement is true"
)
else:
return self._encipher_only
@property
def decipher_only(self):
if not self.key_agreement:
raise ValueError(
"decipher_only is undefined unless key_agreement is true"
)
else:
return self._decipher_only
def __repr__(self):
try:
encipher_only = self.encipher_only
decipher_only = self.decipher_only
except ValueError:
encipher_only = None
decipher_only = None
return ("<KeyUsage(digital_signature={0.digital_signature}, "
"content_commitment={0.content_commitment}, "
"key_encipherment={0.key_encipherment}, "
"data_encipherment={0.data_encipherment}, "
"key_agreement={0.key_agreement}, "
"key_cert_sign={0.key_cert_sign}, crl_sign={0.crl_sign}, "
"encipher_only={1}, decipher_only={2})>").format(
self, encipher_only, decipher_only)
def __eq__(self, other):
if not isinstance(other, KeyUsage):
return NotImplemented
return (
self.digital_signature == other.digital_signature and
self.content_commitment == other.content_commitment and
self.key_encipherment == other.key_encipherment and
self.data_encipherment == other.data_encipherment and
self.key_agreement == other.key_agreement and
self.key_cert_sign == other.key_cert_sign and
self.crl_sign == other.crl_sign and
self._encipher_only == other._encipher_only and
self._decipher_only == other._decipher_only
)
def __ne__(self, other):
return not self == other
@utils.register_interface(ExtensionType)
class NameConstraints(object):
oid = ExtensionOID.NAME_CONSTRAINTS
def __init__(self, permitted_subtrees, excluded_subtrees):
if permitted_subtrees is not None:
permitted_subtrees = list(permitted_subtrees)
if not all(
isinstance(x, GeneralName) for x in permitted_subtrees
):
raise TypeError(
"permitted_subtrees must be a list of GeneralName objects "
"or None"
)
self._validate_ip_name(permitted_subtrees)
if excluded_subtrees is not None:
excluded_subtrees = list(excluded_subtrees)
if not all(
isinstance(x, GeneralName) for x in excluded_subtrees
):
raise TypeError(
"excluded_subtrees must be a list of GeneralName objects "
"or None"
)
self._validate_ip_name(excluded_subtrees)
if permitted_subtrees is None and excluded_subtrees is None:
raise ValueError(
"At least one of permitted_subtrees and excluded_subtrees "
"must not be None"
)
self._permitted_subtrees = permitted_subtrees
self._excluded_subtrees = excluded_subtrees
def __eq__(self, other):
if not isinstance(other, NameConstraints):
return NotImplemented
return (
self.excluded_subtrees == other.excluded_subtrees and
self.permitted_subtrees == other.permitted_subtrees
)
def __ne__(self, other):
return not self == other
def _validate_ip_name(self, tree):
if any(isinstance(name, IPAddress) and not isinstance(
name.value, (ipaddress.IPv4Network, ipaddress.IPv6Network)
) for name in tree):
raise TypeError(
"IPAddress name constraints must be an IPv4Network or"
" IPv6Network object"
)
def __repr__(self):
return (
u"<NameConstraints(permitted_subtrees={0.permitted_subtrees}, "
u"excluded_subtrees={0.excluded_subtrees})>".format(self)
)
permitted_subtrees = utils.read_only_property("_permitted_subtrees")
excluded_subtrees = utils.read_only_property("_excluded_subtrees")
class Extension(object):
    """A single extension: (oid, critical flag, decoded value) triple."""

    def __init__(self, oid, critical, value):
        if not isinstance(oid, ObjectIdentifier):
            raise TypeError(
                "oid argument must be an ObjectIdentifier instance."
            )

        if not isinstance(critical, bool):
            raise TypeError("critical must be a boolean value")

        self._oid = oid
        self._critical = critical
        # value is intentionally unvalidated: it may be any extension type,
        # including UnrecognizedExtension.
        self._value = value

    oid = utils.read_only_property("_oid")
    critical = utils.read_only_property("_critical")
    value = utils.read_only_property("_value")

    def __repr__(self):
        return ("<Extension(oid={0.oid}, critical={0.critical}, "
                "value={0.value})>").format(self)

    def __eq__(self, other):
        if not isinstance(other, Extension):
            return NotImplemented

        return (
            self.oid == other.oid and
            self.critical == other.critical and
            self.value == other.value
        )

    def __ne__(self, other):
        return not self == other
class GeneralNames(object):
    """Validated, ordered collection of GeneralName objects."""

    def __init__(self, general_names):
        general_names = list(general_names)
        if not all(isinstance(x, GeneralName) for x in general_names):
            raise TypeError(
                "Every item in the general_names list must be an "
                "object conforming to the GeneralName interface"
            )

        self._general_names = general_names

    def __iter__(self):
        return iter(self._general_names)

    def __len__(self):
        return len(self._general_names)

    def get_values_for_type(self, type):
        # Return the value of each GeneralName of the requested type.
        # OtherName instances are returned whole because they carry two
        # fields, not a single value.
        objs = (i for i in self if isinstance(i, type))
        if type != OtherName:
            objs = (i.value for i in objs)

        return list(objs)

    def __repr__(self):
        return "<GeneralNames({0})>".format(self._general_names)

    def __eq__(self, other):
        if not isinstance(other, GeneralNames):
            return NotImplemented
        return self._general_names == other._general_names

    def __ne__(self, other):
        return not self == other

    def __getitem__(self, idx):
        return self._general_names[idx]
@utils.register_interface(ExtensionType)
class SubjectAlternativeName(object):
oid = ExtensionOID.SUBJECT_ALTERNATIVE_NAME
def __init__(self, general_names):
self._general_names = GeneralNames(general_names)
def __iter__(self):
return iter(self._general_names)
def __len__(self):
return len(self._general_names)
def get_values_for_type(self, type):
return self._general_names.get_values_for_type(type)
def __repr__(self):
return "<SubjectAlternativeName({0})>".format(self._general_names)
def __eq__(self, other):
if not isinstance(other, SubjectAlternativeName):
return NotImplemented
return self._general_names == other._general_names
def __getitem__(self, idx):
return self._general_names[idx]
def __ne__(self, other):
return not self == other
@utils.register_interface(ExtensionType)
class IssuerAlternativeName(object):
oid = ExtensionOID.ISSUER_ALTERNATIVE_NAME
def __init__(self, general_names):
self._general_names = GeneralNames(general_names)
def __iter__(self):
return iter(self._general_names)
def __len__(self):
return len(self._general_names)
def get_values_for_type(self, type):
return self._general_names.get_values_for_type(type)
def __repr__(self):
return "<IssuerAlternativeName({0})>".format(self._general_names)
def __eq__(self, other):
if not isinstance(other, IssuerAlternativeName):
return NotImplemented
return self._general_names == other._general_names
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._general_names[idx]
@utils.register_interface(ExtensionType)
class CertificateIssuer(object):
oid = CRLEntryExtensionOID.CERTIFICATE_ISSUER
def __init__(self, general_names):
self._general_names = GeneralNames(general_names)
def __iter__(self):
return iter(self._general_names)
def __len__(self):
return len(self._general_names)
def get_values_for_type(self, type):
return self._general_names.get_values_for_type(type)
def __repr__(self):
return "<CertificateIssuer({0})>".format(self._general_names)
def __eq__(self, other):
if not isinstance(other, CertificateIssuer):
return NotImplemented
return self._general_names == other._general_names
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._general_names[idx]
@utils.register_interface(ExtensionType)
class CRLReason(object):
oid = CRLEntryExtensionOID.CRL_REASON
def __init__(self, reason):
if not isinstance(reason, ReasonFlags):
raise TypeError("reason must be an element from ReasonFlags")
self._reason = reason
def __repr__(self):
return "<CRLReason(reason={0})>".format(self._reason)
def __eq__(self, other):
if not isinstance(other, CRLReason):
return NotImplemented
return self.reason == other.reason
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.reason)
reason = utils.read_only_property("_reason")
@utils.register_interface(ExtensionType)
class InvalidityDate(object):
oid = CRLEntryExtensionOID.INVALIDITY_DATE
def __init__(self, invalidity_date):
if not isinstance(invalidity_date, datetime.datetime):
raise TypeError("invalidity_date must be a datetime.datetime")
self._invalidity_date = invalidity_date
def __repr__(self):
return "<InvalidityDate(invalidity_date={0})>".format(
self._invalidity_date
)
def __eq__(self, other):
if not isinstance(other, InvalidityDate):
return NotImplemented
return self.invalidity_date == other.invalidity_date
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.invalidity_date)
invalidity_date = utils.read_only_property("_invalidity_date")
@utils.register_interface(ExtensionType)
class UnrecognizedExtension(object):
    """Opaque holder for an extension the library cannot parse: the raw OID
    plus its (unparsed) value."""

    def __init__(self, oid, value):
        if not isinstance(oid, ObjectIdentifier):
            raise TypeError("oid must be an ObjectIdentifier")
        self._oid = oid
        self._value = value

    oid = utils.read_only_property("_oid")
    value = utils.read_only_property("_value")

    def __repr__(self):
        return "<UnrecognizedExtension(oid={0.oid}, value={0.value!r})>".format(self)

    def __eq__(self, other):
        # Both the OID and the raw value must match.
        if isinstance(other, UnrecognizedExtension):
            return self.oid == other.oid and self.value == other.value
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((self.oid, self.value))
| true | true |
f7f6fd16d9ea150115bb9ff2a2d94e41a940d9e8 | 610 | py | Python | segm/utils/torch.py | urasakikeisuke/segmenter | 69ff016e621b7d1e4b8573a8b150e80dbc70cf84 | [
"MIT"
] | 418 | 2021-05-12T20:27:13.000Z | 2022-03-30T14:34:27.000Z | segm/utils/torch.py | urasakikeisuke/segmenter | 69ff016e621b7d1e4b8573a8b150e80dbc70cf84 | [
"MIT"
] | 38 | 2021-05-13T03:36:17.000Z | 2022-03-22T19:34:52.000Z | segm/utils/torch.py | urasakikeisuke/segmenter | 69ff016e621b7d1e4b8573a8b150e80dbc70cf84 | [
"MIT"
] | 95 | 2021-05-13T03:23:59.000Z | 2022-03-28T09:08:32.000Z | import os
import torch
"""
GPU wrappers
"""
# Module-level device/distribution state; populated by set_gpu_mode().
use_gpu = False      # whether computation should run on GPU
gpu_id = 0           # local GPU index (from SLURM_LOCALID)
device = None        # torch.device chosen by set_gpu_mode()
distributed = False  # True when more than one SLURM task is running
dist_rank = 0        # global rank of this process (from SLURM_PROCID)
world_size = 1       # total number of processes (from SLURM_NTASKS)
def set_gpu_mode(mode):
    """Initialise the module-level device configuration.

    Reads the SLURM environment variables (SLURM_LOCALID, SLURM_PROCID,
    SLURM_NTASKS, defaulting to a single local process when absent) and
    sets the module globals `gpu_id`, `dist_rank`, `world_size`,
    `distributed`, `use_gpu` and `device` accordingly.

    :param mode: bool — True to select the CUDA device, False for CPU.
    """
    global use_gpu, device, gpu_id, distributed, dist_rank, world_size
    env = os.environ.get
    gpu_id = int(env("SLURM_LOCALID", 0))
    dist_rank = int(env("SLURM_PROCID", 0))
    world_size = int(env("SLURM_NTASKS", 1))
    # More than one SLURM task means we are running a distributed job.
    distributed = world_size > 1
    use_gpu = mode
    device = torch.device("cuda:%d" % gpu_id if use_gpu else "cpu")
    # Let cuDNN pick the fastest convolution algorithms for fixed shapes.
    torch.backends.cudnn.benchmark = True
| 18.484848 | 65 | 0.685246 | import os
import torch
use_gpu = False
gpu_id = 0
device = None
distributed = False
dist_rank = 0
world_size = 1
def set_gpu_mode(mode):
global use_gpu
global device
global gpu_id
global distributed
global dist_rank
global world_size
gpu_id = int(os.environ.get("SLURM_LOCALID", 0))
dist_rank = int(os.environ.get("SLURM_PROCID", 0))
world_size = int(os.environ.get("SLURM_NTASKS", 1))
distributed = world_size > 1
use_gpu = mode
device = torch.device(f"cuda:{gpu_id}" if use_gpu else "cpu")
torch.backends.cudnn.benchmark = True
| true | true |
f7f6fd59551340f97f86fc20c09799f81bd6fa9a | 10,424 | py | Python | quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py | anton-buyskikh/QuSpin | 4e46b495e399414d9361d659e186492a1ac5b511 | [
"BSD-3-Clause"
] | null | null | null | quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py | anton-buyskikh/QuSpin | 4e46b495e399414d9361d659e186492a1ac5b511 | [
"BSD-3-Clause"
] | null | null | null | quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py | anton-buyskikh/QuSpin | 4e46b495e399414d9361d659e186492a1ac5b511 | [
"BSD-3-Clause"
] | null | null | null | from scipy.sparse.linalg import LinearOperator,onenormest,aslinearoperator
from .expm_multiply_parallel_wrapper import _wrapper_expm_multiply,_wrapper_csr_trace
import scipy.sparse as _sp
import numpy as _np
class expm_multiply_parallel(object):
    """Implements `scipy.sparse.linalg.expm_multiply()` for *openmp*.
    Notes
    -----
    This is a wrapper over custom c++ code.
    Examples
    --------
    This example shows how to construct the `expm_multiply_parallel` object.
    Further code snippets can be found in the examples for the function methods of the class.
    The code snippet below initiates the class, and is required to run the example codes for the function methods.
    .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py
        :linenos:
        :language: python
        :lines: 7-30
    """
    def __init__(self,A,a=1.0):
        """Initializes `expm_multiply_parallel`.
        Parameters
        -----------
        A : {array_like, scipy.sparse matrix}
            The operator (matrix) whose exponential is to be calculated.
        a : scalar, optional
            scalar value multiplying generator matrix :math:`A` in matrix exponential: :math:`\\mathrm{e}^{aA}`.
        """
        # `a` must be a scalar (0-dim when converted to an ndarray).
        if _np.array(a).ndim == 0:
            self._a = a
        else:
            raise ValueError("a must be scalar value.")
        self._A = _sp.csr_matrix(A,copy=False)
        if A.shape[0] != A.shape[1]:
            raise ValueError("A must be a square matrix.")
        # Half machine epsilon of A's dtype is the tolerance used in the
        # truncated-Taylor error control (matches scipy's expm_multiply).
        tol = _np.finfo(A.dtype).eps/2
        self._tol = _np.array(tol,dtype=tol.dtype)
        # mu = trace(A)/n; shifting A -> A - mu*I reduces the norm of the
        # exponentiated operator (Al-Mohy & Higham preprocessing step).
        self._mu = _np.array(_wrapper_csr_trace(self._A.indptr,self._A.indices,self._A.data)/self._A.shape[0],dtype=A.dtype)
        self._A -= self._mu * _sp.identity(self._A.shape[0],dtype=self._A.dtype,format="csr")
        # NOTE(review): the 1-norm below is computed from the *unshifted*
        # input A, while scipy computes it after the mu-shift — confirm this
        # is intentional.
        self._A_1_norm = _np.max(_np.abs(A).sum(axis=0))
        self._calculate_partition()
    @property
    def a(self):
        """scalar: value multiplying generator matrix :math:`A` in matrix exponential: :math:`\\mathrm{e}^{aA}`"""
        return self._a
    @property
    def A(self):
        """scipy.sparse.csr_matrix: csr_matrix to be exponentiated."""
        return self._A
    def set_a(self,a):
        """Sets the value of the property `a`.
        Examples
        --------
        .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py
            :linenos:
            :language: python
            :lines: 32-35
        Parameters
        -----------
        a : scalar
            new value of `a`.
        """
        if _np.array(a).ndim == 0:
            self._a = a
            # Changing `a` changes the scaling, so the Taylor order m_star
            # and scaling steps s must be recomputed.
            self._calculate_partition()
        else:
            raise ValueError("expecting 'a' to be scalar.")
    def dot(self,v,work_array=None,overwrite_v=False):
        """Calculates the action of :math:`\\mathrm{e}^{aA}` on a vector :math:`v`.
        Examples
        --------
        .. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py
            :linenos:
            :language: python
            :lines: 37-
        Parameters
        -----------
        v : contiguous numpy.ndarray
            array to apply :math:`\\mathrm{e}^{aA}` on.
        work_array : contiguous numpy.ndarray, optional
            array of `shape = (2*len(v),)` which is used as work_array space for the underlying c-code. This saves extra memory allocation for function operations.
        overwrite_v : bool
            if set to `True`, the data in `v` is overwritten by the function. This saves extra memory allocation for the results.
        Returns
        --------
        numpy.ndarray
            result of :math:`\\mathrm{e}^{aA}v`.
            If `overwrite_v = True` the dunction returns `v` with the data overwritten, otherwise the result is stored in a new array.
        """
        v = _np.asarray(v)
        if v.ndim != 1:
            raise ValueError("array must have ndim of 1.")
        if v.shape[0] != self._A.shape[1]:
            raise ValueError("dimension mismatch {}, {}".format(self._A.shape,v.shape))
        # The output dtype must accommodate A, a, and v simultaneously.
        a_dtype = _np.array(self._a).dtype
        v_dtype = _np.result_type(self._A.dtype,a_dtype,v.dtype)
        if overwrite_v:
            # In-place mode: v must already be the right dtype and a
            # writable C-contiguous buffer for the c++ kernel.
            if v_dtype != v.dtype:
                raise ValueError("if overwrite_v is True, the input array must match correct output dtype for matrix multiplication.")
            if not v.flags["CARRAY"]:
                raise TypeError("input array must a contiguous and writable.")
            if v.ndim != 1:
                raise ValueError("array must have ndim of 1.")
        else:
            # Otherwise work on a C-contiguous copy in the result dtype.
            v = v.astype(v_dtype,order="C",copy=True)
        if work_array is None:
            # The c++ kernel needs scratch space of twice the vector length.
            work_array = _np.zeros((2*self._A.shape[0],),dtype=v.dtype)
        else:
            work_array = _np.ascontiguousarray(work_array)
            if work_array.shape != (2*self._A.shape[0],):
                raise ValueError("work_array array must be an array of shape (2*v.shape[0],) with same dtype as v.")
            if work_array.dtype != v_dtype:
                raise ValueError("work_array must be array of dtype which matches the result of the matrix-vector multiplication.")
        a = _np.array(self._a,dtype=v_dtype)
        # The wrapper applies exp(a*(A - mu*I)) * exp(a*mu) in place on v,
        # using m_star Taylor terms and s scaling steps.
        _wrapper_expm_multiply(self._A.indptr,self._A.indices,self._A.data,
                    self._m_star,self._s,a,self._tol,self._mu,v,work_array)
        return v
    def _calculate_partition(self):
        # a*A == 0 means exp(aA) is the identity: no Taylor terms (m_star=0)
        # and a single trivial scaling step.
        if _np.abs(self._a)*self._A_1_norm == 0:
            self._m_star, self._s = 0, 1
        else:
            ell = 2
            self._norm_info = LazyOperatorNormInfo(self._A, self._A_1_norm, self._a, ell=ell)
            # Fragment (3.1) of Al-Mohy & Higham (2011) picks the Taylor
            # order m_star and scaling count s minimizing work.
            self._m_star, self._s = _fragment_3_1(self._norm_info, 1, self._tol, ell=ell)
##### code below is copied from scipy.sparse.linalg._expm_multiply_core and modified slightly.
# This table helps to compute bounds.
# They seem to have been difficult to calculate, involving symbolic
# manipulation of equations, followed by numerical root finding.
# _theta[m] is the largest 1-norm for which an order-m truncated Taylor
# series of exp() meets the target accuracy (double precision).
_theta = {
    # The first 30 values are from table A.3 of Computing Matrix Functions.
    1: 2.29e-16,
    2: 2.58e-8,
    3: 1.39e-5,
    4: 3.40e-4,
    5: 2.40e-3,
    6: 9.07e-3,
    7: 2.38e-2,
    8: 5.00e-2,
    9: 8.96e-2,
    10: 1.44e-1,
    # 11
    11: 2.14e-1,
    12: 3.00e-1,
    13: 4.00e-1,
    14: 5.14e-1,
    15: 6.41e-1,
    16: 7.81e-1,
    17: 9.31e-1,
    18: 1.09,
    19: 1.26,
    20: 1.44,
    # 21
    21: 1.62,
    22: 1.82,
    23: 2.01,
    24: 2.22,
    25: 2.43,
    26: 2.64,
    27: 2.86,
    28: 3.08,
    29: 3.31,
    30: 3.54,
    # The rest are from table 3.1 of
    # Computing the Action of the Matrix Exponential.
    35: 4.7,
    40: 6.0,
    45: 7.2,
    50: 8.5,
    55: 9.9,
}
class LazyOperatorNormInfo:
    """Lazily-computed norm information about the scaled operator ``a*A``.

    Holds the exact 1-norm of ``a*A`` and memoises estimates of
    ``d_p = ||(aA)^p||_1^(1/p)`` for the powers requested so far.
    Notation follows Al-Mohy & Higham, *Computing the Action of the
    Matrix Exponential* (2011). Specialized helper for this module.
    """

    def __init__(self, A, A_1_norm, a, ell=2):
        """Store the operator and its precomputed exact 1-norm.

        Parameters
        -----------
        A : linear operator
            The operator of interest.
        A_1_norm : float
            The exact 1-norm of A.
        a : scalar
            Prefactor multiplying A.
        ell : int, optional
            Technical parameter controlling norm-estimation quality.
        """
        self._A = A
        self._a = a
        self._A_1_norm = A_1_norm
        self._ell = ell
        self._d = {}  # cache: power p -> estimated d_p

    def onenorm(self):
        """Exact 1-norm of the scaled operator ``a*A``."""
        return _np.abs(self._a) * self._A_1_norm

    def d(self, p):
        """Estimate ``d_p = ||(aA)^p||_1^(1/p)``, caching per power."""
        if p not in self._d:
            scaled = (self._a * aslinearoperator(self._A)) ** p
            self._d[p] = onenormest(scaled) ** (1.0 / p)
        return self._d[p]

    def alpha(self, p):
        """``max(d(p), d(p+1))`` — the alpha_p quantity of the paper."""
        return max(self.d(p), self.d(p + 1))
def _compute_cost_div_m(m, p, norm_info):
    """Cost bound helper — equation (3.10) of Al-Mohy & Higham (2011).

    Measures cost as the number of required matrix products divided by m.

    Parameters
    -----------
    m : int
        A valid key of _theta.
    p : int
        A matrix power.
    norm_info : LazyOperatorNormInfo
        Information about 1-norms of related operators.

    Returns
    --------
    int
        Required number of matrix products divided by m.
    """
    ratio = norm_info.alpha(p) / _theta[m]
    return int(_np.ceil(ratio))
def _compute_p_max(m_max):
"""
Compute the largest positive integer p such that p*(p-1) <= m_max + 1.
Do this in a slightly dumb way, but safe and not too slow.
Parameters
-----------
m_max : int
A count related to bounds.
"""
sqrt_m_max = _np.sqrt(m_max)
p_low = int(_np.floor(sqrt_m_max))
p_high = int(_np.ceil(sqrt_m_max + 1))
return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)
def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):
    """
    A helper function for the _expm_multiply_* functions.
    Parameters
    -----------
    norm_info : LazyOperatorNormInfo
        Information about norms of certain linear operators of interest.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    tol : float
        Expected to be
        :math:`2^{-24}` for single precision or
        :math:`2^{-53}` for double precision.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.
    Returns
    --------
    best_m : int
        Related to bounds for error control.
    best_s : int
        Amount of scaling.
    Notes
    -----
    This is code fragment (3.1) in Al-Mohy and Higham (2011).
    The discussion of default values for m_max and ell
    is given between the definitions of equation (3.11)
    and the definition of equation (3.12).
    """
    if ell < 1:
        raise ValueError('expected ell to be a positive integer')
    best_m = None
    best_s = None
    if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):
        # Cheap regime: the exact 1-norm alone determines s for each m.
        # NOTE: ties in m*s keep the earlier candidate because the
        # comparison is strict and _theta iterates in insertion order.
        for m, theta in _theta.items():
            s = int(_np.ceil(norm_info.onenorm() / theta))
            if best_m is None or m * s < best_m * best_s:
                best_m = m
                best_s = s
    else:
        # Equation (3.11).
        # General regime: use the (expensive) alpha_p norm estimates.
        for p in range(2, _compute_p_max(m_max) + 1):
            for m in range(p*(p-1)-1, m_max+1):
                if m in _theta:
                    s = _compute_cost_div_m(m, p, norm_info)
                    if best_m is None or m * s < best_m * best_s:
                        best_m = m
                        best_s = s
    # At least one scaling step is always performed.
    best_s = max(best_s, 1)
    return best_m, best_s
def _condition_3_13(A_1_norm, n0, m_max, ell):
    """Decide whether the cheap 1-norm-only bound may be used.

    This is condition (3.13) in Al-Mohy and Higham (2011); the right-hand
    side of equation (3.12) is ``2*ell*p_max*(p_max+3)`` and the condition
    compares ``A_1_norm`` against that value scaled by
    ``theta[m_max] / (n0 * m_max)``.

    Parameters
    -----------
    A_1_norm : float
        The precomputed 1-norm of A.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.

    Returns
    --------
    bool
        Whether the condition has been met.
    """
    p_max = _compute_p_max(m_max)
    scale = 2 * ell * p_max * (p_max + 3)
    per_column = _theta[m_max] / float(n0 * m_max)
    return A_1_norm <= scale * per_column
| 25.738272 | 154 | 0.671815 | from scipy.sparse.linalg import LinearOperator,onenormest,aslinearoperator
from .expm_multiply_parallel_wrapper import _wrapper_expm_multiply,_wrapper_csr_trace
import scipy.sparse as _sp
import numpy as _np
class expm_multiply_parallel(object):
def __init__(self,A,a=1.0):
if _np.array(a).ndim == 0:
self._a = a
else:
raise ValueError("a must be scalar value.")
self._A = _sp.csr_matrix(A,copy=False)
if A.shape[0] != A.shape[1]:
raise ValueError("A must be a square matrix.")
tol = _np.finfo(A.dtype).eps/2
self._tol = _np.array(tol,dtype=tol.dtype)
self._mu = _np.array(_wrapper_csr_trace(self._A.indptr,self._A.indices,self._A.data)/self._A.shape[0],dtype=A.dtype)
self._A -= self._mu * _sp.identity(self._A.shape[0],dtype=self._A.dtype,format="csr")
self._A_1_norm = _np.max(_np.abs(A).sum(axis=0))
self._calculate_partition()
@property
def a(self):
return self._a
@property
def A(self):
return self._A
def set_a(self,a):
if _np.array(a).ndim == 0:
self._a = a
self._calculate_partition()
else:
raise ValueError("expecting 'a' to be scalar.")
def dot(self,v,work_array=None,overwrite_v=False):
v = _np.asarray(v)
if v.ndim != 1:
raise ValueError("array must have ndim of 1.")
if v.shape[0] != self._A.shape[1]:
raise ValueError("dimension mismatch {}, {}".format(self._A.shape,v.shape))
a_dtype = _np.array(self._a).dtype
v_dtype = _np.result_type(self._A.dtype,a_dtype,v.dtype)
if overwrite_v:
if v_dtype != v.dtype:
raise ValueError("if overwrite_v is True, the input array must match correct output dtype for matrix multiplication.")
if not v.flags["CARRAY"]:
raise TypeError("input array must a contiguous and writable.")
if v.ndim != 1:
raise ValueError("array must have ndim of 1.")
else:
v = v.astype(v_dtype,order="C",copy=True)
if work_array is None:
work_array = _np.zeros((2*self._A.shape[0],),dtype=v.dtype)
else:
work_array = _np.ascontiguousarray(work_array)
if work_array.shape != (2*self._A.shape[0],):
raise ValueError("work_array array must be an array of shape (2*v.shape[0],) with same dtype as v.")
if work_array.dtype != v_dtype:
raise ValueError("work_array must be array of dtype which matches the result of the matrix-vector multiplication.")
a = _np.array(self._a,dtype=v_dtype)
_wrapper_expm_multiply(self._A.indptr,self._A.indices,self._A.data,
self._m_star,self._s,a,self._tol,self._mu,v,work_array)
return v
def _calculate_partition(self):
if _np.abs(self._a)*self._A_1_norm == 0:
self._m_star, self._s = 0, 1
else:
ell = 2
self._norm_info = LazyOperatorNormInfo(self._A, self._A_1_norm, self._a, ell=ell)
self._m_star, self._s = _fragment_3_1(self._norm_info, 1, self._tol, ell=ell)
3,
26: 2.64,
27: 2.86,
28: 3.08,
29: 3.31,
30: 3.54,
35: 4.7,
40: 6.0,
45: 7.2,
50: 8.5,
55: 9.9,
}
class LazyOperatorNormInfo:
def __init__(self, A, A_1_norm, a, ell=2):
self._A = A
self._a = a
self._A_1_norm = A_1_norm
self._ell = ell
self._d = {}
def onenorm(self):
return _np.abs(self._a) * self._A_1_norm
def d(self, p):
if p not in self._d:
est = onenormest((self._a*aslinearoperator(self._A))**p)
self._d[p] = est ** (1.0 / p)
return self._d[p]
def alpha(self, p):
return max(self.d(p), self.d(p+1))
def _compute_cost_div_m(m, p, norm_info):
return int(_np.ceil(norm_info.alpha(p) / _theta[m]))
def _compute_p_max(m_max):
sqrt_m_max = _np.sqrt(m_max)
p_low = int(_np.floor(sqrt_m_max))
p_high = int(_np.ceil(sqrt_m_max + 1))
return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)
def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):
if ell < 1:
raise ValueError('expected ell to be a positive integer')
best_m = None
best_s = None
if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):
for m, theta in _theta.items():
s = int(_np.ceil(norm_info.onenorm() / theta))
if best_m is None or m * s < best_m * best_s:
best_m = m
best_s = s
else:
for p in range(2, _compute_p_max(m_max) + 1):
for m in range(p*(p-1)-1, m_max+1):
if m in _theta:
s = _compute_cost_div_m(m, p, norm_info)
if best_m is None or m * s < best_m * best_s:
best_m = m
best_s = s
best_s = max(best_s, 1)
return best_m, best_s
def _condition_3_13(A_1_norm, n0, m_max, ell):
p_max = _compute_p_max(m_max)
a = 2 * ell * p_max * (p_max + 3)
b = _theta[m_max] / float(n0 * m_max)
return A_1_norm <= a * b
| true | true |
f7f6fe58b1ade27a1089441821ab646de50cdb18 | 2,726 | py | Python | src/data/make_dataset.py | acbart/csedm20-paper-cs1-analysis | 0bd54894e8768caeadcc2b1bc2043d4282d4c6ed | [
"MIT"
] | null | null | null | src/data/make_dataset.py | acbart/csedm20-paper-cs1-analysis | 0bd54894e8768caeadcc2b1bc2043d4282d4c6ed | [
"MIT"
] | null | null | null | src/data/make_dataset.py | acbart/csedm20-paper-cs1-analysis | 0bd54894e8768caeadcc2b1bc2043d4282d4c6ed | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import os
import sqlite3
import pandas as pd
import json
from src.data.process_quizzes import process_quizzes
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
    """ Runs data processing scripts to turn raw data from (../raw) into
        cleaned data ready to be analyzed (saved in ../processed).
    """
    logger = logging.getLogger(__name__)
    logger.info('making final data set from raw data')
    logger.info('Loading consenting')
    # `squeeze=True` was removed in pandas 2.0; `.squeeze("columns")` is the
    # version-stable way to collapse a one-column CSV into a Series.
    consenting = pd.read_csv(os.path.join(input_filepath, 'consenting.txt')).squeeze("columns")
    logger.info('Collecting demographics')
    demographics = pd.read_csv(os.path.join(input_filepath, 'demographics.csv'))
    logger.info('Getting pokemon')
    pokemon = pd.read_csv(os.path.join(input_filepath, 'pokemon.txt'))
    # Keep only the subjects who consented to the study.
    only_consenting = demographics[demographics.Email.isin(consenting)]
    # Map emails to anonymous BlockPy SubjectIDs; close the connection
    # explicitly rather than leaking it.
    conn = sqlite3.connect(os.path.join(output_filepath, "progsnap2_7.db"))
    try:
        blockpy = pd.read_sql("""SELECT SubjectID, `X-Email` as Email
            FROM LinkSubject""", conn)
    finally:
        conn.close()
    only_consenting = only_consenting.merge(blockpy, on='Email')
    # Insert the pokemon pseudonym column right after the first column.
    only_consenting.insert(1, 'pokemon', pokemon)
    logger.info('Remove identifying data')
    # Drop all personally-identifying columns before writing the public file.
    del only_consenting['Email']
    del only_consenting['ID']
    del only_consenting['CanvasID']
    only_consenting.to_csv(os.path.join(output_filepath, "subjects.csv"), index=False)
if __name__ == '__main__':
    # Configure root logging for standalone execution.
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # not used in this stub but often useful for finding various files
    project_dir = Path(__file__).resolve().parents[2]
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    main()
| 37.861111 | 97 | 0.707263 |
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import os
import sqlite3
import pandas as pd
import json
from src.data.process_quizzes import process_quizzes
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
logger.info('Loading consenting')
consenting = pd.read_csv(os.path.join(input_filepath, 'consenting.txt'), squeeze=True)
logger.info('Collecting demographics')
demographics = pd.read_csv(os.path.join(input_filepath, 'demographics.csv'))
logger.info('Getting pokemon')
pokemon = pd.read_csv(os.path.join(input_filepath, 'pokemon.txt'))
only_consenting = demographics[demographics.Email.isin(consenting)]
blockpy = pd.read_sql("""SELECT SubjectID, `X-Email` as Email
FROM LinkSubject""",
sqlite3.connect(os.path.join(output_filepath, "progsnap2_7.db")))
only_consenting = only_consenting.merge(blockpy, on='Email')
only_consenting.insert(1, 'pokemon', pokemon)
logger.info('Remove identifying data')
del only_consenting['Email']
del only_consenting['ID']
del only_consenting['CanvasID']
only_consenting.to_csv(os.path.join(output_filepath, "subjects.csv"), index=False)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
project_dir = Path(__file__).resolve().parents[2]
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
| true | true |
f7f6ff0700b23db94f28ae36bc0083c4b68b1b5f | 3,059 | py | Python | primitiveMining/primitiveTwitter.py | juleskt/SentimentOracle | f24ab9551aedbf08900de5f8cd5ac91805912937 | [
"MIT"
] | 1 | 2016-01-06T06:58:58.000Z | 2016-01-06T06:58:58.000Z | primitiveMining/primitiveTwitter.py | juleskt/SentimentOracle | f24ab9551aedbf08900de5f8cd5ac91805912937 | [
"MIT"
] | null | null | null | primitiveMining/primitiveTwitter.py | juleskt/SentimentOracle | f24ab9551aedbf08900de5f8cd5ac91805912937 | [
"MIT"
] | null | null | null | #Minqing Hu and Bing Liu. "Mining and Summarizing Customer Reviews."
#Proceedings of the ACM SIGKDD International Conference on Knowledge
#Discovery and Data Mining (KDD-2004), Aug 22-25, 2004, Seattle,
#Washington, USA,
#Bing Liu, Minqing Hu and Junsheng Cheng. "Opinion Observer: Analyzing
#and Comparing Opinions on the Web." Proceedings of the 14th
#International World Wide Web conference (WWW-2005), May 10-14,
#2005, Chiba, Japan.
from os import path
import twitter
from wordcloud import WordCloud
#Authentication for Twitter API
# Placeholder credentials — replace with real keys before running.
# (Never commit live secrets to source control.)
OAUTH_TOKEN = 'xxxx'
OAUTH_SECRET = 'xxxx'
CONSUMER_KEY = 'xxxx'
CONSUMER_SECRET = 'xxxx'
def binarySearch(alist, item):
    """Return True if `item` is present in the sorted list `alist`.

    Classic iterative binary search. Bug fix: the original computed
    `midpoint = (first + last)` without halving, which probed the wrong
    index and could raise IndexError for items in the upper half.
    """
    first = 0
    last = len(alist) - 1
    while first <= last:
        midpoint = (first + last) // 2  # halve the search range (bug fix)
        if alist[midpoint] == item:
            return True
        if item < alist[midpoint]:
            last = midpoint - 1
        else:
            first = midpoint + 1
    return False
def TwitterSearch(searchWord):
    """Fetch recent + popular English tweets for `searchWord` and return
    the combined tweet text split into individual tokens.

    Returns an empty list on API failure (the previous implicit None
    return crashed the downstream rateWordList call).
    """
    try:
        # Create API object for calls
        twitterApi = twitter.Api(
            CONSUMER_KEY, CONSUMER_SECRET,
            OAUTH_TOKEN, OAUTH_SECRET)
        # Make two search calls to the API: recent and popular results.
        search = twitterApi.GetSearch(term=searchWord, lang='en', result_type='recent', count=100, max_id='')
        search += twitterApi.GetSearch(term=searchWord, lang='en', result_type='popular', count=100, max_id='')
        # Join the raw tweet bodies (str.join avoids quadratic +=).
        tweets = "".join(t.text.encode('utf-8') for t in search)
        # Split the combined text into individual tokens.
        return tweets.split()
    except Exception as e:
        # print() works on both Python 2 and 3 (the old `print e` did not).
        print(e)
        return []
def rateWordList(wordList):
    """Score a list of words against the Hu & Liu opinion lexicons.

    Each word found in positive-words.txt adds 1, each found in
    negative-words.txt subtracts 1. Processed words are also logged to
    results.txt (one per line, lowercased, before punctuation stripping).

    Bug fixes vs the original:
    * `if 'http' or 'RT' or 'rt' not in index:` was always True, so links
      and retweet markers were never skipped — now they are.
    * The dead else-branch mutated wordList while iterating and called
      .remove() with a lowercased token that may not exist — removed.
    * Set membership replaces the hand-rolled binary search, which also
      drops the (undocumented) requirement that the lexicons be sorted.
    """
    rating = 0
    posWords = set(open('positive-words.txt').read().splitlines())
    negWords = set(open('negative-words.txt').read().splitlines())
    with open('results.txt', 'w') as f:
        for word in wordList:
            # Lowercase so lexicon lookups are case-insensitive.
            word = word.lower()
            # Skip links and bare retweet markers.
            if 'http' in word or word == 'rt':
                continue
            f.write(word + '\n')
            # Strip non-alphabetic characters before the lexicon lookup.
            token = ''.join(ch for ch in word if ch.isalpha())
            if token in posWords:
                rating += 1
            if token in negWords:
                rating -= 1
    return rating
def generateCloud():
    """Display two word clouds built from results.txt.

    The first uses WordCloud defaults; the second caps the font size and
    weights word sizes by relative frequency. plt.show() blocks until the
    figure windows are closed.
    """
    import matplotlib.pyplot as plt
    here = path.dirname(__file__)
    corpus = open(path.join(here, 'results.txt')).read()
    # First cloud: default settings.
    cloud = WordCloud().generate(corpus)
    plt.imshow(cloud)
    plt.axis("off")
    # Second cloud: take relative word frequencies into account and
    # lower the maximum font size.
    cloud = WordCloud(max_font_size=40, relative_scaling=.5).generate(corpus)
    plt.figure()
    plt.imshow(cloud)
    plt.axis("off")
    plt.show()
# Simple interactive loop: read a search term, print its crude sentiment
# score. Runs until the process is interrupted (Ctrl-C).
while(True):
    searchTerm = raw_input("Search term: ")  # Python 2 input; input() on Py3
    print rateWordList(TwitterSearch(searchTerm))
    # generateCloud()
| 26.833333 | 105 | 0.712651 |
#and Comparing Opinions on the Web." Proceedings of the 14th
from os import path
import twitter
from wordcloud import WordCloud
OAUTH_TOKEN = 'xxxx'
OAUTH_SECRET = 'xxxx'
CONSUMER_KEY = 'xxxx'
CONSUMER_SECRET = 'xxxx'
def binarySearch(alist,item):
first = 0
last = len(alist)-1
found = False
while first <= last and not found:
midpoint = (first + last)
if alist[midpoint] == item:
found = True
return found
else:
if item < alist[midpoint]:
last = midpoint-1
else:
first = midpoint+1
return found
def TwitterSearch(searchWord):
try:
twitterApi = twitter.Api(
CONSUMER_KEY, CONSUMER_SECRET,
OAUTH_TOKEN,OAUTH_SECRET)
search = twitterApi.GetSearch(term=searchWord, lang='en', result_type='recent', count=100, max_id='')
search += twitterApi.GetSearch(term=searchWord, lang='en', result_type='popular', count=100, max_id='')
tweets = ""
for t in search:
tweets += t.text.encode('utf-8')
tweetByWord = tweets.split()
return tweetByWord
except Exception as e:
print e
def rateWordList(wordList):
rating = 0
posWords = open('positive-words.txt').read().splitlines()
negWords = open('negative-words.txt').read().splitlines()
f = open('results.txt','w')
for index in wordList:
index = index.lower()
if 'http' or 'RT' or 'rt' not in index:
f.write(index + '\n')
index = ''.join(ch for ch in index if ch.isalpha())
if binarySearch(posWords,index):
rating+=1
if binarySearch(negWords,index):
rating-=1
else:
wordList.remove(index)
print "Removed"
f.close()
return rating
def generateCloud():
d = path.dirname(__file__)
text = open(path.join(d,'results.txt')).read()
wordcloud = WordCloud().generate(text)
import matplotlib.pyplot as plt
plt.imshow(wordcloud)
plt.axis("off")
wordcloud = WordCloud(max_font_size=40, relative_scaling=.5).generate(text)
plt.figure()
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
while(True):
searchTerm = raw_input("Search term: ")
print rateWordList(TwitterSearch(searchTerm))
| false | true |
f7f700d638e18614ca377341088ad873b1037c34 | 2,301 | py | Python | GPflow/testing/test_triang.py | mlilab/Mixed-Effect-Composite-RNN-Gaussian-Process | dd7da89ce3c41d459a26ad1ce5ed2f40ab4ca85d | [
"Apache-2.0"
] | 24 | 2018-11-29T07:00:59.000Z | 2021-04-22T19:12:31.000Z | GPflow/testing/test_triang.py | mlilab/Mixed-Effect-Composite-RNN-Gaussian-Process | dd7da89ce3c41d459a26ad1ce5ed2f40ab4ca85d | [
"Apache-2.0"
] | 1 | 2018-12-04T11:51:21.000Z | 2018-12-04T11:51:21.000Z | GPflow/testing/test_triang.py | OpenXAIProject/Mixed-Effect-Composite-RNN-Gaussian-Process | dd7da89ce3c41d459a26ad1ce5ed2f40ab4ca85d | [
"Apache-2.0"
] | 12 | 2018-11-30T00:40:13.000Z | 2019-10-30T16:09:52.000Z | import unittest
from gpflow.tf_wraps import vec_to_tri
import tensorflow as tf
import numpy as np
from testing.gpflow_testcase import GPflowTestCase
from gpflow.tf_wraps import vec_to_tri
class TestVecToTri(GPflowTestCase):
    """Checks the `vec_to_tri` op against a NumPy reference and verifies
    that it is differentiable."""

    def referenceInverse(self, matrices):
        """NumPy reference for the inverse of `vec_to_tri`: pack the lower
        triangle of each (N, N) matrix into a length N*(N+1)/2 vector."""
        batch, size, _ = matrices.shape
        packed_len = (size * (size + 1)) // 2
        lower = np.tril_indices(size)
        packed = np.zeros((batch, packed_len))
        for b in range(batch):
            packed[b, :] = matrices[b][lower]
        return packed

    def getExampleMatrices(self, D, N):
        """Deterministic batch of D random NxN lower-triangular matrices."""
        rng = np.random.RandomState(1)
        # np.tril zeroes the strictly-upper triangle of the last two axes.
        return np.tril(rng.randn(D, N, N))

    def testBasicFunctionality(self):
        """vec_to_tri must reconstruct the packed lower-triangular matrices."""
        with self.test_session() as session:
            size = 3
            batch = 3
            expected = self.getExampleMatrices(batch, size)
            packed = tf.constant(self.referenceInverse(expected))
            actual = session.run(vec_to_tri(packed, size))
            np.testing.assert_array_almost_equal(expected, actual)

    def testDifferentiable(self):
        """The gradient of sum(vec_to_tri(v)) w.r.t. v must be all ones."""
        with self.test_session() as session:
            size = 3
            batch = 3
            expected = self.getExampleMatrices(batch, size)
            packed_np = self.referenceInverse(expected)
            packed = tf.constant(packed_np)
            total = tf.reduce_sum(vec_to_tri(packed, size))
            grad = tf.gradients(total, packed)[0]
            np.testing.assert_array_almost_equal(
                np.ones_like(packed_np), session.run(grad))
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| 37.721311 | 88 | 0.65189 | import unittest
from gpflow.tf_wraps import vec_to_tri
import tensorflow as tf
import numpy as np
from testing.gpflow_testcase import GPflowTestCase
from gpflow.tf_wraps import vec_to_tri
class TestVecToTri(GPflowTestCase):
def referenceInverse(self, matrices):
D, N, _ = matrices.shape
M = (N * (N + 1)) // 2
tril_indices = np.tril_indices(N)
output = np.zeros((D, M))
for vector_index in range(D):
matrix = matrices[vector_index, :]
output[vector_index, :] = matrix[tril_indices]
return output
def getExampleMatrices(self, D, N ):
rng = np.random.RandomState(1)
random_matrices = rng.randn(D, N, N)
for matrix_index in range(D):
for row_index in range(N):
for col_index in range(N):
if col_index > row_index:
random_matrices[matrix_index, row_index, col_index] = 0.
return random_matrices
def testBasicFunctionality(self):
with self.test_session() as sess:
N = 3
D = 3
reference_matrices = self.getExampleMatrices(D, N)
input_vector_tensor = tf.constant(self.referenceInverse(reference_matrices))
test_matrices_tensor = vec_to_tri(input_vector_tensor, N)
test_matrices = sess.run(test_matrices_tensor)
np.testing.assert_array_almost_equal(reference_matrices, test_matrices)
def testDifferentiable(self):
with self.test_session() as sess:
N = 3
D = 3
reference_matrices = self.getExampleMatrices(D, N)
input_vector_array = self.referenceInverse(reference_matrices)
input_vector_tensor = tf.constant(input_vector_array)
test_matrices_tensor = vec_to_tri(input_vector_tensor, N)
reduced_sum = tf.reduce_sum(test_matrices_tensor)
gradient = tf.gradients(reduced_sum, input_vector_tensor)[0]
reference_gradient = np.ones_like(input_vector_array)
test_gradient = sess.run(gradient)
np.testing.assert_array_almost_equal(reference_gradient, test_gradient)
if __name__ == "__main__":
unittest.main()
| true | true |
f7f70226d29b1929ac0dfeeb4fa929cf051f1d06 | 6,668 | py | Python | spacy/training/loggers.py | rynoV/spaCy | d8805a1073c01468e8b284ef7ca91eb80f37d237 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | spacy/training/loggers.py | rynoV/spaCy | d8805a1073c01468e8b284ef7ca91eb80f37d237 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | spacy/training/loggers.py | rynoV/spaCy | d8805a1073c01468e8b284ef7ca91eb80f37d237 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | from typing import TYPE_CHECKING, Dict, Any, Tuple, Callable, List, Optional, IO
from wasabi import Printer
import tqdm
import sys
from ..util import registry
from .. import util
from ..errors import Errors
if TYPE_CHECKING:
from ..language import Language # noqa: F401
def setup_table(
    *, cols: List[str], widths: List[int], max_width: int = 13
) -> Tuple[List[str], List[int], List[str]]:
    """Build (headers, widths, alignments) for the training log table.

    Column names longer than max_width are truncated with an ellipsis;
    each final width is at least as wide as its (possibly truncated)
    header. All columns are right-aligned.
    """
    headers: List[str] = []
    header_widths: List[int] = []
    for name, minimum in zip(cols, widths):
        if len(name) > max_width:
            name = name[: max_width - 3] + "..."  # shorten over-long headers
        headers.append(name.upper())
        header_widths.append(max(len(name), minimum))
    aligns = ["r" for _ in header_widths]
    return headers, header_widths, aligns
@registry.loggers("spacy.ConsoleLogger.v1")
def console_logger(progress_bar: bool = False):
    """Registered factory for the default console training logger.

    progress_bar: show a tqdm progress bar between evaluation checkpoints.
    Returns a setup function that, given the pipeline and output streams,
    yields the (log_step, finalize) callbacks used by the training loop.
    """
    def setup_printer(
        nlp: "Language", stdout: IO = sys.stdout, stderr: IO = sys.stderr
    ) -> Tuple[Callable[[Optional[Dict[str, Any]]], None], Callable[[], None]]:
        write = lambda text: stdout.write(f"{text}\n")
        msg = Printer(no_print=True)
        # ensure that only trainable components are logged
        logged_pipes = [
            name
            for name, proc in nlp.pipeline
            if hasattr(proc, "is_trainable") and proc.is_trainable
        ]
        eval_frequency = nlp.config["training"]["eval_frequency"]
        score_weights = nlp.config["training"]["score_weights"]
        # Only score columns with a non-None weight are shown.
        score_cols = [col for col, value in score_weights.items() if value is not None]
        loss_cols = [f"Loss {pipe}" for pipe in logged_pipes]
        spacing = 2
        table_header, table_widths, table_aligns = setup_table(
            cols=["E", "#"] + loss_cols + score_cols + ["Score"],
            widths=[3, 6] + [8 for _ in loss_cols] + [6 for _ in score_cols] + [6],
        )
        # Print the header and a separator row once, up front.
        write(msg.row(table_header, widths=table_widths, spacing=spacing))
        write(msg.row(["-" * width for width in table_widths], spacing=spacing))
        # Shared progress bar state, mutated by log_step via `nonlocal`.
        progress = None
        def log_step(info: Optional[Dict[str, Any]]) -> None:
            """Log one training step; `info` is None between checkpoints."""
            nonlocal progress
            if info is None:
                # If we don't have a new checkpoint, just advance the bar.
                if progress is not None:
                    progress.update(1)
                return
            losses = [
                "{0:.2f}".format(float(info["losses"][pipe_name]))
                for pipe_name in logged_pipes
            ]
            scores = []
            for col in score_cols:
                score = info["other_scores"].get(col, 0.0)
                try:
                    score = float(score)
                except TypeError:
                    err = Errors.E916.format(name=col, score_type=type(score))
                    raise ValueError(err) from None
                # Scores are shown as percentages, except the speed column.
                if col != "speed":
                    score *= 100
                scores.append("{0:.2f}".format(score))
            data = (
                [info["epoch"], info["step"]]
                + losses
                + scores
                + ["{0:.2f}".format(float(info["score"]))]
            )
            # Close the old progress bar before printing the checkpoint row.
            if progress is not None:
                progress.close()
            write(
                msg.row(data, widths=table_widths, aligns=table_aligns, spacing=spacing)
            )
            if progress_bar:
                # Set disable=None, so that it disables on non-TTY
                progress = tqdm.tqdm(
                    total=eval_frequency, disable=None, leave=False, file=stderr
                )
                progress.set_description(f"Epoch {info['epoch']+1}")
        def finalize() -> None:
            # Nothing to clean up for the console logger.
            pass
        return log_step, finalize
    return setup_printer
@registry.loggers("spacy.WandbLogger.v2")
def wandb_logger(
    project_name: str,
    remove_config_values: List[str] = [],
    model_log_interval: Optional[int] = None,
    log_dataset_dir: Optional[str] = None,
):
    """Registered factory for a Weights & Biases training logger.

    project_name: the W&B project to log into.
    remove_config_values: dotted config keys to drop before uploading the
        config (never mutated, so the shared default list is safe here).
    model_log_interval: if set, upload a pipeline checkpoint artifact
        every N steps.
    log_dataset_dir: if set, upload this directory once as a dataset
        artifact when logging starts.

    Raises ImportError (Errors.E880) if wandb is not installed. Console
    output is mirrored via the default console logger.
    """
    try:
        import wandb

        # test that these are available
        from wandb import init, log, join  # noqa: F401
    except ImportError:
        raise ImportError(Errors.E880)
    console = console_logger(progress_bar=False)

    def setup_logger(
        nlp: "Language", stdout: IO = sys.stdout, stderr: IO = sys.stderr
    ) -> Tuple[Callable[[Dict[str, Any]], None], Callable[[], None]]:
        config = nlp.config.interpolate()
        config_dot = util.dict_to_dot(config)
        for field in remove_config_values:
            del config_dot[field]
        config = util.dot_to_dict(config_dot)
        run = wandb.init(project=project_name, config=config, reinit=True)
        console_log_step, console_finalize = console(nlp, stdout, stderr)

        def log_dir_artifact(
            path: str,
            name: str,
            type: str,
            metadata: Optional[Dict[str, Any]] = None,
            aliases: Optional[List[str]] = None,
        ):
            """Upload a directory as a W&B artifact."""
            # Use None sentinels instead of mutable default arguments
            # ({} / []) and substitute fresh values per call.
            dataset_artifact = wandb.Artifact(
                name, type=type, metadata={} if metadata is None else metadata
            )
            dataset_artifact.add_dir(path, name=name)
            wandb.log_artifact(
                dataset_artifact, aliases=[] if aliases is None else aliases
            )

        if log_dataset_dir:
            log_dir_artifact(path=log_dataset_dir, name="dataset", type="dataset")

        def log_step(info: Optional[Dict[str, Any]]):
            # Always forward to the console logger first.
            console_log_step(info)
            if info is not None:
                score = info["score"]
                other_scores = info["other_scores"]
                losses = info["losses"]
                wandb.log({"score": score})
                if losses:
                    wandb.log({f"loss_{k}": v for k, v in losses.items()})
                if isinstance(other_scores, dict):
                    wandb.log(other_scores)
                # Periodically upload the serialized pipeline as an artifact.
                if model_log_interval and info.get("output_path"):
                    if info["step"] % model_log_interval == 0 and info["step"] != 0:
                        log_dir_artifact(
                            path=info["output_path"],
                            name="pipeline_" + run.id,
                            type="checkpoint",
                            metadata=info,
                            aliases=[
                                f"epoch {info['epoch']} step {info['step']}",
                                "latest",
                                "best"
                                if info["score"] == max(info["checkpoints"])[0]
                                else "",
                            ],
                        )

        def finalize() -> None:
            console_finalize()
            wandb.join()

        return log_step, finalize

    return setup_logger
| 37.044444 | 88 | 0.535243 | from typing import TYPE_CHECKING, Dict, Any, Tuple, Callable, List, Optional, IO
from wasabi import Printer
import tqdm
import sys
from ..util import registry
from .. import util
from ..errors import Errors
if TYPE_CHECKING:
from ..language import Language
def setup_table(
    *, cols: List[str], widths: List[int], max_width: int = 13
) -> Tuple[List[str], List[int], List[str]]:
    """Build (headers, widths, alignments) for the training log table.

    Column names longer than max_width are truncated with an ellipsis;
    each final width is at least as wide as its (possibly truncated)
    header. All columns are right-aligned.
    """
    headers: List[str] = []
    header_widths: List[int] = []
    for name, minimum in zip(cols, widths):
        if len(name) > max_width:
            name = name[: max_width - 3] + "..."  # shorten over-long headers
        headers.append(name.upper())
        header_widths.append(max(len(name), minimum))
    aligns = ["r" for _ in header_widths]
    return headers, header_widths, aligns
@registry.loggers("spacy.ConsoleLogger.v1")
def console_logger(progress_bar: bool = False):
    """Registered factory for the default console training logger.

    progress_bar: show a tqdm progress bar between evaluation checkpoints.
    Returns a setup function that, given the pipeline and output streams,
    yields the (log_step, finalize) callbacks used by the training loop.
    """
    def setup_printer(
        nlp: "Language", stdout: IO = sys.stdout, stderr: IO = sys.stderr
    ) -> Tuple[Callable[[Optional[Dict[str, Any]]], None], Callable[[], None]]:
        write = lambda text: stdout.write(f"{text}\n")
        msg = Printer(no_print=True)
        # Only trainable components are logged.
        logged_pipes = [
            name
            for name, proc in nlp.pipeline
            if hasattr(proc, "is_trainable") and proc.is_trainable
        ]
        eval_frequency = nlp.config["training"]["eval_frequency"]
        score_weights = nlp.config["training"]["score_weights"]
        # Only score columns with a non-None weight are shown.
        score_cols = [col for col, value in score_weights.items() if value is not None]
        loss_cols = [f"Loss {pipe}" for pipe in logged_pipes]
        spacing = 2
        table_header, table_widths, table_aligns = setup_table(
            cols=["E", "#"] + loss_cols + score_cols + ["Score"],
            widths=[3, 6] + [8 for _ in loss_cols] + [6 for _ in score_cols] + [6],
        )
        # Print the header and a separator row once, up front.
        write(msg.row(table_header, widths=table_widths, spacing=spacing))
        write(msg.row(["-" * width for width in table_widths], spacing=spacing))
        # Shared progress bar state, mutated by log_step via `nonlocal`.
        progress = None
        def log_step(info: Optional[Dict[str, Any]]) -> None:
            """Log one training step; `info` is None between checkpoints."""
            nonlocal progress
            if info is None:
                # No new checkpoint: just advance the progress bar.
                if progress is not None:
                    progress.update(1)
                return
            losses = [
                "{0:.2f}".format(float(info["losses"][pipe_name]))
                for pipe_name in logged_pipes
            ]
            scores = []
            for col in score_cols:
                score = info["other_scores"].get(col, 0.0)
                try:
                    score = float(score)
                except TypeError:
                    err = Errors.E916.format(name=col, score_type=type(score))
                    raise ValueError(err) from None
                # Scores are shown as percentages, except the speed column.
                if col != "speed":
                    score *= 100
                scores.append("{0:.2f}".format(score))
            data = (
                [info["epoch"], info["step"]]
                + losses
                + scores
                + ["{0:.2f}".format(float(info["score"]))]
            )
            # Close the old progress bar before printing the checkpoint row.
            if progress is not None:
                progress.close()
            write(
                msg.row(data, widths=table_widths, aligns=table_aligns, spacing=spacing)
            )
            if progress_bar:
                # Set disable=None, so that it disables on non-TTY
                progress = tqdm.tqdm(
                    total=eval_frequency, disable=None, leave=False, file=stderr
                )
                progress.set_description(f"Epoch {info['epoch']+1}")
        def finalize() -> None:
            # Nothing to clean up for the console logger.
            pass
        return log_step, finalize
    return setup_printer
@registry.loggers("spacy.WandbLogger.v2")
def wandb_logger(
    project_name: str,
    remove_config_values: List[str] = [],
    model_log_interval: Optional[int] = None,
    log_dataset_dir: Optional[str] = None,
):
    """Registered factory for a Weights & Biases training logger.

    project_name: the W&B project to log into.
    remove_config_values: dotted config keys to drop before uploading the
        config (never mutated, so the shared default list is safe here).
    model_log_interval: if set, upload a pipeline checkpoint artifact
        every N steps.
    log_dataset_dir: if set, upload this directory once as a dataset
        artifact when logging starts.

    Raises ImportError (Errors.E880) if wandb is not installed. Console
    output is mirrored via the default console logger.
    """
    try:
        import wandb

        # test that these are available
        from wandb import init, log, join  # noqa: F401
    except ImportError:
        raise ImportError(Errors.E880)
    console = console_logger(progress_bar=False)

    def setup_logger(
        nlp: "Language", stdout: IO = sys.stdout, stderr: IO = sys.stderr
    ) -> Tuple[Callable[[Dict[str, Any]], None], Callable[[], None]]:
        config = nlp.config.interpolate()
        config_dot = util.dict_to_dot(config)
        for field in remove_config_values:
            del config_dot[field]
        config = util.dot_to_dict(config_dot)
        run = wandb.init(project=project_name, config=config, reinit=True)
        console_log_step, console_finalize = console(nlp, stdout, stderr)

        def log_dir_artifact(
            path: str,
            name: str,
            type: str,
            metadata: Optional[Dict[str, Any]] = None,
            aliases: Optional[List[str]] = None,
        ):
            """Upload a directory as a W&B artifact."""
            # Use None sentinels instead of mutable default arguments
            # ({} / []) and substitute fresh values per call.
            dataset_artifact = wandb.Artifact(
                name, type=type, metadata={} if metadata is None else metadata
            )
            dataset_artifact.add_dir(path, name=name)
            wandb.log_artifact(
                dataset_artifact, aliases=[] if aliases is None else aliases
            )

        if log_dataset_dir:
            log_dir_artifact(path=log_dataset_dir, name="dataset", type="dataset")

        def log_step(info: Optional[Dict[str, Any]]):
            # Always forward to the console logger first.
            console_log_step(info)
            if info is not None:
                score = info["score"]
                other_scores = info["other_scores"]
                losses = info["losses"]
                wandb.log({"score": score})
                if losses:
                    wandb.log({f"loss_{k}": v for k, v in losses.items()})
                if isinstance(other_scores, dict):
                    wandb.log(other_scores)
                # Periodically upload the serialized pipeline as an artifact.
                if model_log_interval and info.get("output_path"):
                    if info["step"] % model_log_interval == 0 and info["step"] != 0:
                        log_dir_artifact(
                            path=info["output_path"],
                            name="pipeline_" + run.id,
                            type="checkpoint",
                            metadata=info,
                            aliases=[
                                f"epoch {info['epoch']} step {info['step']}",
                                "latest",
                                "best"
                                if info["score"] == max(info["checkpoints"])[0]
                                else "",
                            ],
                        )

        def finalize() -> None:
            console_finalize()
            wandb.join()

        return log_step, finalize

    return setup_logger
| true | true |
f7f702355a4331722a6306d9e6cd4f2f9e0ddb0c | 1,496 | py | Python | pollination_streamlit/selectors.py | mostaphaRoudsari/pollination-streamlit | 5930ad3f8484301f145d0d646acafe6d1314ec8a | [
"Apache-2.0"
] | null | null | null | pollination_streamlit/selectors.py | mostaphaRoudsari/pollination-streamlit | 5930ad3f8484301f145d0d646acafe6d1314ec8a | [
"Apache-2.0"
] | 1 | 2022-03-16T12:28:08.000Z | 2022-03-16T12:28:08.000Z | pollination_streamlit/selectors.py | mostaphaRoudsari/pollination-streamlit | 5930ad3f8484301f145d0d646acafe6d1314ec8a | [
"Apache-2.0"
] | 3 | 2021-11-21T13:29:41.000Z | 2022-03-17T13:17:09.000Z | import streamlit as st
from .api.client import ApiClient
from .authentication import get_jwt_from_browser
from .interactors import Job, Run
def get_api_client(st_element: st = st) -> ApiClient:
    """Create an ApiClient, preferring a browser JWT over a typed API key.

    When no JWT is available from the browser, render a password input on
    the given Streamlit element and store its value as the API token.
    """
    api_client = ApiClient()
    api_client.jwt_token = get_jwt_from_browser()
    if api_client.jwt_token is not None:
        return api_client
    # No JWT available: fall back to asking the user for an API key.
    api_client.api_token = st_element.text_input(
        'Enter Pollination APIKEY', type='password',
        help=':bulb: You only need an API Key to access private projects. '
        'If you do not have a key already go to the settings tab under your profile to '
        'generate one.'
    )
    return api_client
def job_selector(
    client: ApiClient, label: str = 'Job URL',
    default: str = None, help: str = None
) -> Job:
    """Render a text input and build a Job from the pasted job URL.

    Returns None when the input is empty (or the literal string 'None',
    which Streamlit renders for a None default).
    """
    url = st.text_input(label=label, value=default, help=help)
    if not url or url == 'None':
        return None
    # Owner, project and job id sit at fixed positions from the end of
    # the URL path: .../{owner}/.../{project}/.../{job_id}
    parts = url.split('/')
    return Job(parts[-4], parts[-3], parts[-1], client)
def run_selector(
    client: ApiClient, label: str = 'Run URL',
    default: str = None, help: str = None
) -> Run:
    """Render a text input and build a Run from the pasted run URL.

    Returns None when the input is empty (or the literal string 'None',
    which Streamlit renders for a None default).
    """
    url = st.text_input(label=label, value=default, help=help)
    if not url or url == 'None':
        return None
    # Owner, project, job id and run id sit at fixed positions from the
    # end of the URL path.
    parts = url.split('/')
    return Run(parts[-6], parts[-5], parts[-3], parts[-1], client)
| 28.769231 | 92 | 0.649733 | import streamlit as st
from .api.client import ApiClient
from .authentication import get_jwt_from_browser
from .interactors import Job, Run
def get_api_client(st_element: st = st) -> ApiClient:
    """Create an ApiClient, preferring a browser JWT over a typed API key.

    When no JWT is available from the browser, render a password input on
    the given Streamlit element and store its value as the API token.
    """
    api_client = ApiClient()
    api_client.jwt_token = get_jwt_from_browser()
    if api_client.jwt_token is not None:
        return api_client
    # No JWT available: fall back to asking the user for an API key.
    api_client.api_token = st_element.text_input(
        'Enter Pollination APIKEY', type='password',
        help=':bulb: You only need an API Key to access private projects. '
        'If you do not have a key already go to the settings tab under your profile to '
        'generate one.'
    )
    return api_client
def job_selector(
    client: ApiClient, label: str = 'Job URL',
    default: str = None, help: str = None
) -> Job:
    """Render a text input and build a Job from the pasted job URL.

    Returns None when the input is empty (or the literal string 'None',
    which Streamlit renders for a None default).
    """
    url = st.text_input(label=label, value=default, help=help)
    if not url or url == 'None':
        return None
    # Owner, project and job id sit at fixed positions from the end of
    # the URL path: .../{owner}/.../{project}/.../{job_id}
    parts = url.split('/')
    return Job(parts[-4], parts[-3], parts[-1], client)
def run_selector(
    client: ApiClient, label: str = 'Run URL',
    default: str = None, help: str = None
) -> Run:
    """Render a text input and build a Run from the pasted run URL.

    Returns None when the input is empty (or the literal string 'None',
    which Streamlit renders for a None default).
    """
    url = st.text_input(label=label, value=default, help=help)
    if not url or url == 'None':
        return None
    # Owner, project, job id and run id sit at fixed positions from the
    # end of the URL path.
    parts = url.split('/')
    return Run(parts[-6], parts[-5], parts[-3], parts[-1], client)
| true | true |
f7f702a284d173171315a38138931e0c50c9b4f0 | 12,869 | py | Python | tpqoa/tpqoa.py | jordan-owen/tpqoa | 7f75b3399ec5b05ad8c543e95c815a76372db98d | [
"MIT"
] | null | null | null | tpqoa/tpqoa.py | jordan-owen/tpqoa | 7f75b3399ec5b05ad8c543e95c815a76372db98d | [
"MIT"
] | null | null | null | tpqoa/tpqoa.py | jordan-owen/tpqoa | 7f75b3399ec5b05ad8c543e95c815a76372db98d | [
"MIT"
] | 1 | 2021-11-20T00:48:42.000Z | 2021-11-20T00:48:42.000Z | #
# tpqoa is a wrapper class for the
# Oanda v20 API (RESTful & streaming)
# making use of the v20 Python package
#
# (c) Dr. Yves J. Hilpisch
# The Python Quants GmbH
#
#
# Trading forex/CFDs on margin carries a high level of risk and may
# not be suitable for all investors as you could sustain losses
# in excess of deposits. Leverage can work against you. Due to the certain
# restrictions imposed by the local law and regulation, German resident
# retail client(s) could sustain a total loss of deposited funds but are
# not subject to subsequent payment obligations beyond the deposited funds.
# Be aware and fully understand all risks associated with
# the market and trading. Prior to trading any products,
# carefully consider your financial situation and
# experience level. Any opinions, news, research, analyses, prices,
# or other information is provided as general market commentary, and does not
# constitute investment advice. The Python Quants GmbH will not accept
# liability for any loss or damage, including without limitation to,
# any loss of profit, which may arise directly or indirectly from use
# of or reliance on such information.
#
# The tpqoa package is intended as a technological illustration only.
# It comes with no warranties or representations,
# to the extent permitted by applicable law.
#
import v20
import json
import configparser
import pandas as pd
from v20.transaction import StopLossDetails, ClientExtensions
from v20.transaction import TrailingStopLossDetails, TakeProfitDetails
class tpqoa(object):
    ''' tpqoa is a Python wrapper class for the Oanda v20 API. '''

    def __init__(self, conf_file):
        ''' Init function is expecting a configuration file with
        the following content:

        [oanda]
        account_id = XYZ-ABC-...
        access_token = ZYXCAB...
        account_type = practice (default) or live

        Parameters
        ==========
        conf_file: string
            path to and filename of the configuration file,
            e.g. '/home/me/oanda.cfg'
        '''
        self.config = configparser.ConfigParser()
        self.config.read(conf_file)
        self.access_token = self.config['oanda']['access_token']
        self.account_id = self.config['oanda']['account_id']
        self.account_type = self.config['oanda']['account_type']
        # Live and practice accounts talk to different API hosts.
        if self.account_type == 'live':
            self.hostname = 'api-fxtrade.oanda.com'
            self.stream_hostname = 'stream-fxtrade.oanda.com'
        else:
            self.hostname = 'api-fxpractice.oanda.com'
            self.stream_hostname = 'stream-fxpractice.oanda.com'
        # Separate contexts for RESTful requests and for streaming.
        self.ctx = v20.Context(
            hostname=self.hostname,
            port=443,
            token=self.access_token,
            poll_timeout=10
        )
        self.ctx_stream = v20.Context(
            hostname=self.stream_hostname,
            port=443,
            token=self.access_token,
        )
        # Nanosecond suffix appended to ISO timestamps sent to the API.
        self.suffix = '.000000000Z'
        self.stop_stream = False

    def get_instruments(self):
        ''' Retrieves and returns all instruments for the given account. '''
        resp = self.ctx.account.instruments(self.account_id)
        instruments = resp.get('instruments')
        instruments = [ins.dict() for ins in instruments]
        instruments = [(ins['displayName'], ins['name'])
                       for ins in instruments]
        return sorted(instruments)

    def get_prices(self, instrument):
        ''' Returns the current BID/ASK prices for instrument. '''
        r = self.ctx.pricing.get(self.account_id, instruments=instrument)
        r = json.loads(r.raw_body)
        bid = float(r['prices'][0]['closeoutBid'])
        ask = float(r['prices'][0]['closeoutAsk'])
        return r['time'], bid, ask

    def transform_datetime(self, dati):
        ''' Transforms a Python datetime object (or a parseable string)
        to the ISO timestamp string format expected by the API. '''
        if isinstance(dati, str):
            dati = pd.Timestamp(dati).to_pydatetime()
        return dati.isoformat('T') + self.suffix

    def retrieve_data(self, instrument, start, end, granularity, price):
        ''' Retrieves a single batch of candle data from the API and
        returns it as a pandas DataFrame (empty DataFrame if no data). '''
        raw = self.ctx.instrument.candles(
            instrument=instrument,
            fromTime=start, toTime=end,
            granularity=granularity, price=price)
        raw = raw.get('candles')
        raw = [cs.dict() for cs in raw]
        # Flatten the nested price dict ('ask'/'bid'/'mid') into the
        # top level of every candle record.
        if price == 'A':
            for cs in raw:
                cs.update(cs['ask'])
                del cs['ask']
        elif price == 'B':
            for cs in raw:
                cs.update(cs['bid'])
                del cs['bid']
        elif price == 'M':
            for cs in raw:
                cs.update(cs['mid'])
                del cs['mid']
        else:
            raise ValueError("price must be either 'B', 'A' or 'M'.")
        if len(raw) == 0:
            return pd.DataFrame()  # return empty DataFrame if no data
        data = pd.DataFrame(raw)
        data['time'] = pd.to_datetime(data['time'])
        data = data.set_index('time')
        data.index = pd.DatetimeIndex(data.index)
        for col in list('ohlc'):
            data[col] = data[col].astype(float)
        return data

    def get_history(self, instrument, start, end,
                    granularity, price, localize=True):
        ''' Retrieves historical data for instrument.

        Parameters
        ==========
        instrument: string
            valid instrument name
        start, end: datetime, str
            Python datetime or string objects for start and end
        granularity: string
            a string like 'S5', 'M1' or 'D'
        price: string
            one of 'A' (ask), 'B' (bid) or 'M' (middle)
        localize: boolean
            whether to drop the timezone from the resulting index

        Returns
        =======
        data: pd.DataFrame
            pandas DataFrame object with data
        '''
        if granularity.startswith('S') or granularity.startswith('M'):
            # Second/minute granularities are fetched in smaller batches
            # to respect the API's per-request candle limit.
            if granularity.startswith('S'):
                freq = '1h'
            else:
                freq = 'D'
            batches = []
            dr = pd.date_range(start, end, freq=freq)
            for t in range(len(dr)):
                batch_start = self.transform_datetime(dr[t])
                if t != len(dr) - 1:
                    batch_end = self.transform_datetime(dr[t + 1])
                else:
                    batch_end = self.transform_datetime(end)
                batches.append(self.retrieve_data(
                    instrument, batch_start, batch_end, granularity, price))
            # DataFrame.append() was removed in pandas 2.0; collect the
            # batches and concatenate them once instead.
            data = pd.concat(batches) if batches else pd.DataFrame()
        else:
            start = self.transform_datetime(start)
            end = self.transform_datetime(end)
            data = self.retrieve_data(instrument, start, end,
                                      granularity, price)
        if localize:
            data.index = data.index.tz_localize(None)
        return data[['o', 'h', 'l', 'c', 'volume', 'complete']]

    def create_order(self, instrument, units, price=None, sl_distance=None,
                     tsl_distance=None, tp_price=None, comment=None,
                     touch=False, suppress=False, ret=False):
        ''' Places order with Oanda.

        Parameters
        ==========
        instrument: string
            valid instrument name
        units: int
            number of units of instrument to be bought
            (positive int, eg 'units=50')
            or to be sold (negative int, eg 'units=-100')
        price: float
            limit order price, touch order price
        sl_distance: float
            stop loss distance price, mandatory eg in Germany
        tsl_distance: float
            trailing stop loss distance
        tp_price: float
            take profit price to be used for the trade
        comment: str
            string
        touch: boolean
            market_if_touched order (requires price to be set)
        suppress: boolean
            whether to suppress print out
        ret: boolean
            whether to return the order object
        '''
        client_ext = ClientExtensions(
            comment=comment) if comment is not None else None
        sl_details = (StopLossDetails(distance=sl_distance,
                                      clientExtensions=client_ext)
                      if sl_distance is not None else None)
        tsl_details = (TrailingStopLossDetails(distance=tsl_distance,
                                               clientExtensions=client_ext)
                       if tsl_distance is not None else None)
        tp_details = (TakeProfitDetails(
            price=tp_price, clientExtensions=client_ext)
            if tp_price is not None else None)
        # Order type: market (no price), market-if-touched (price + touch)
        # or limit (price only).
        if price is None:
            request = self.ctx.order.market(
                self.account_id,
                instrument=instrument,
                units=units,
                stopLossOnFill=sl_details,
                trailingStopLossOnFill=tsl_details,
                takeProfitOnFill=tp_details,
            )
        elif touch:
            request = self.ctx.order.market_if_touched(
                self.account_id,
                instrument=instrument,
                price=price,
                units=units,
                stopLossOnFill=sl_details,
                trailingStopLossOnFill=tsl_details,
                takeProfitOnFill=tp_details
            )
        else:
            request = self.ctx.order.limit(
                self.account_id,
                instrument=instrument,
                price=price,
                units=units,
                stopLossOnFill=sl_details,
                trailingStopLossOnFill=tsl_details,
                takeProfitOnFill=tp_details
            )
        # Market orders are filled immediately; other order types are
        # only created and fill later.
        try:
            order = request.get('orderFillTransaction')
        except Exception:
            order = request.get('orderCreateTransaction')
        if not suppress:
            print('\n\n', order.dict(), '\n')
        if ret is True:
            return order.dict()

    def stream_data(self, instrument, stop=None, ret=False):
        ''' Starts a real-time data stream.

        Parameters
        ==========
        instrument: string
            valid instrument name
        stop: int
            number of ticks after which to stop streaming
        ret: boolean
            whether to return the collected raw messages
        '''
        self.stream_instrument = instrument
        self.ticks = 0
        response = self.ctx_stream.pricing.stream(
            self.account_id, snapshot=True,
            instruments=instrument)
        msgs = []
        for msg_type, msg in response.parts():
            msgs.append(msg)
            if msg_type == 'pricing.ClientPrice':
                self.ticks += 1
                self.time = msg.time
                self.on_success(msg.time,
                                float(msg.bids[0].dict()['price']),
                                float(msg.asks[0].dict()['price']))
                if stop is not None:
                    if self.ticks >= stop:
                        if ret:
                            return msgs
                        break
            # Setting self.stop_stream from another thread ends the loop.
            if self.stop_stream:
                if ret:
                    return msgs
                break

    def on_success(self, time, bid, ask):
        ''' Method called when new data is retrieved. '''
        print(time, bid, ask)

    def get_account_summary(self, detailed=False):
        ''' Returns summary data for Oanda account. '''
        if detailed is True:
            response = self.ctx.account.get(self.account_id)
        else:
            response = self.ctx.account.summary(self.account_id)
        raw = response.get('account')
        return raw.dict()

    def get_transaction(self, tid=0):
        ''' Retrieves and returns transaction data. '''
        response = self.ctx.transaction.get(self.account_id, tid)
        transaction = response.get('transaction')
        return transaction.dict()

    def get_transactions(self, tid=0):
        ''' Retrieves and returns transactions data. '''
        response = self.ctx.transaction.since(self.account_id, id=tid)
        transactions = response.get('transactions')
        transactions = [t.dict() for t in transactions]
        return transactions

    def print_transactions(self, tid=0):
        ''' Prints basic transactions data. '''
        transactions = self.get_transactions(tid)
        for trans in transactions:
            # Best effort: skip transaction types lacking these fields.
            try:
                templ = '%4s | %s | %7s | %12s | %8s'
                print(templ % (trans['id'],
                               trans['time'][:-8],
                               trans['instrument'],
                               trans['units'],
                               trans['pl']))
            except Exception:
                pass

    def get_positions(self):
        ''' Retrieves and returns positions data. '''
        response = self.ctx.position.list_open(self.account_id).body
        positions = [p.dict() for p in response.get('positions')]
        return positions
| 37.51895 | 78 | 0.564535 |
import v20
import json
import configparser
import pandas as pd
from v20.transaction import StopLossDetails, ClientExtensions
from v20.transaction import TrailingStopLossDetails, TakeProfitDetails
class tpqoa(object):
    ''' tpqoa is a Python wrapper class for the Oanda v20 API. '''

    def __init__(self, conf_file):
        ''' Init function is expecting a configuration file with
        the following content:

        [oanda]
        account_id = XYZ-ABC-...
        access_token = ZYXCAB...
        account_type = practice (default) or live

        Parameters
        ==========
        conf_file: string
            path to and filename of the configuration file,
            e.g. '/home/me/oanda.cfg'
        '''
        self.config = configparser.ConfigParser()
        self.config.read(conf_file)
        self.access_token = self.config['oanda']['access_token']
        self.account_id = self.config['oanda']['account_id']
        self.account_type = self.config['oanda']['account_type']
        # Live and practice accounts talk to different API hosts.
        if self.account_type == 'live':
            self.hostname = 'api-fxtrade.oanda.com'
            self.stream_hostname = 'stream-fxtrade.oanda.com'
        else:
            self.hostname = 'api-fxpractice.oanda.com'
            self.stream_hostname = 'stream-fxpractice.oanda.com'
        # Separate contexts for RESTful requests and for streaming.
        self.ctx = v20.Context(
            hostname=self.hostname,
            port=443,
            token=self.access_token,
            poll_timeout=10
        )
        self.ctx_stream = v20.Context(
            hostname=self.stream_hostname,
            port=443,
            token=self.access_token,
        )
        # Nanosecond suffix appended to ISO timestamps sent to the API.
        self.suffix = '.000000000Z'
        self.stop_stream = False

    def get_instruments(self):
        ''' Retrieves and returns all instruments for the given account. '''
        resp = self.ctx.account.instruments(self.account_id)
        instruments = resp.get('instruments')
        instruments = [ins.dict() for ins in instruments]
        instruments = [(ins['displayName'], ins['name'])
                       for ins in instruments]
        return sorted(instruments)

    def get_prices(self, instrument):
        ''' Returns the current BID/ASK prices for instrument. '''
        r = self.ctx.pricing.get(self.account_id, instruments=instrument)
        r = json.loads(r.raw_body)
        bid = float(r['prices'][0]['closeoutBid'])
        ask = float(r['prices'][0]['closeoutAsk'])
        return r['time'], bid, ask

    def transform_datetime(self, dati):
        ''' Transforms a Python datetime object (or a parseable string)
        to the ISO timestamp string format expected by the API. '''
        if isinstance(dati, str):
            dati = pd.Timestamp(dati).to_pydatetime()
        return dati.isoformat('T') + self.suffix

    def retrieve_data(self, instrument, start, end, granularity, price):
        ''' Retrieves a single batch of candle data from the API and
        returns it as a pandas DataFrame (empty DataFrame if no data). '''
        raw = self.ctx.instrument.candles(
            instrument=instrument,
            fromTime=start, toTime=end,
            granularity=granularity, price=price)
        raw = raw.get('candles')
        raw = [cs.dict() for cs in raw]
        # Flatten the nested price dict ('ask'/'bid'/'mid') into the
        # top level of every candle record.
        if price == 'A':
            for cs in raw:
                cs.update(cs['ask'])
                del cs['ask']
        elif price == 'B':
            for cs in raw:
                cs.update(cs['bid'])
                del cs['bid']
        elif price == 'M':
            for cs in raw:
                cs.update(cs['mid'])
                del cs['mid']
        else:
            raise ValueError("price must be either 'B', 'A' or 'M'.")
        if len(raw) == 0:
            return pd.DataFrame()  # return empty DataFrame if no data
        data = pd.DataFrame(raw)
        data['time'] = pd.to_datetime(data['time'])
        data = data.set_index('time')
        data.index = pd.DatetimeIndex(data.index)
        for col in list('ohlc'):
            data[col] = data[col].astype(float)
        return data

    def get_history(self, instrument, start, end,
                    granularity, price, localize=True):
        ''' Retrieves historical data for instrument.

        Parameters
        ==========
        instrument: string
            valid instrument name
        start, end: datetime, str
            Python datetime or string objects for start and end
        granularity: string
            a string like 'S5', 'M1' or 'D'
        price: string
            one of 'A' (ask), 'B' (bid) or 'M' (middle)
        localize: boolean
            whether to drop the timezone from the resulting index

        Returns
        =======
        data: pd.DataFrame
            pandas DataFrame object with data
        '''
        if granularity.startswith('S') or granularity.startswith('M'):
            # Second/minute granularities are fetched in smaller batches
            # to respect the API's per-request candle limit.
            if granularity.startswith('S'):
                freq = '1h'
            else:
                freq = 'D'
            batches = []
            dr = pd.date_range(start, end, freq=freq)
            for t in range(len(dr)):
                batch_start = self.transform_datetime(dr[t])
                if t != len(dr) - 1:
                    batch_end = self.transform_datetime(dr[t + 1])
                else:
                    batch_end = self.transform_datetime(end)
                batches.append(self.retrieve_data(
                    instrument, batch_start, batch_end, granularity, price))
            # DataFrame.append() was removed in pandas 2.0; collect the
            # batches and concatenate them once instead.
            data = pd.concat(batches) if batches else pd.DataFrame()
        else:
            start = self.transform_datetime(start)
            end = self.transform_datetime(end)
            data = self.retrieve_data(instrument, start, end,
                                      granularity, price)
        if localize:
            data.index = data.index.tz_localize(None)
        return data[['o', 'h', 'l', 'c', 'volume', 'complete']]

    def create_order(self, instrument, units, price=None, sl_distance=None,
                     tsl_distance=None, tp_price=None, comment=None,
                     touch=False, suppress=False, ret=False):
        ''' Places order with Oanda.

        Parameters
        ==========
        instrument: string
            valid instrument name
        units: int
            number of units of instrument to be bought
            (positive int, eg 'units=50')
            or to be sold (negative int, eg 'units=-100')
        price: float
            limit order price, touch order price
        sl_distance: float
            stop loss distance price, mandatory eg in Germany
        tsl_distance: float
            trailing stop loss distance
        tp_price: float
            take profit price to be used for the trade
        comment: str
            string
        touch: boolean
            market_if_touched order (requires price to be set)
        suppress: boolean
            whether to suppress print out
        ret: boolean
            whether to return the order object
        '''
        client_ext = ClientExtensions(
            comment=comment) if comment is not None else None
        sl_details = (StopLossDetails(distance=sl_distance,
                                      clientExtensions=client_ext)
                      if sl_distance is not None else None)
        tsl_details = (TrailingStopLossDetails(distance=tsl_distance,
                                               clientExtensions=client_ext)
                       if tsl_distance is not None else None)
        tp_details = (TakeProfitDetails(
            price=tp_price, clientExtensions=client_ext)
            if tp_price is not None else None)
        # Order type: market (no price), market-if-touched (price + touch)
        # or limit (price only).
        if price is None:
            request = self.ctx.order.market(
                self.account_id,
                instrument=instrument,
                units=units,
                stopLossOnFill=sl_details,
                trailingStopLossOnFill=tsl_details,
                takeProfitOnFill=tp_details,
            )
        elif touch:
            request = self.ctx.order.market_if_touched(
                self.account_id,
                instrument=instrument,
                price=price,
                units=units,
                stopLossOnFill=sl_details,
                trailingStopLossOnFill=tsl_details,
                takeProfitOnFill=tp_details
            )
        else:
            request = self.ctx.order.limit(
                self.account_id,
                instrument=instrument,
                price=price,
                units=units,
                stopLossOnFill=sl_details,
                trailingStopLossOnFill=tsl_details,
                takeProfitOnFill=tp_details
            )
        # Market orders are filled immediately; other order types are
        # only created and fill later.
        try:
            order = request.get('orderFillTransaction')
        except Exception:
            order = request.get('orderCreateTransaction')
        if not suppress:
            print('\n\n', order.dict(), '\n')
        if ret is True:
            return order.dict()

    def stream_data(self, instrument, stop=None, ret=False):
        ''' Starts a real-time data stream.

        Parameters
        ==========
        instrument: string
            valid instrument name
        stop: int
            number of ticks after which to stop streaming
        ret: boolean
            whether to return the collected raw messages
        '''
        self.stream_instrument = instrument
        self.ticks = 0
        response = self.ctx_stream.pricing.stream(
            self.account_id, snapshot=True,
            instruments=instrument)
        msgs = []
        for msg_type, msg in response.parts():
            msgs.append(msg)
            if msg_type == 'pricing.ClientPrice':
                self.ticks += 1
                self.time = msg.time
                self.on_success(msg.time,
                                float(msg.bids[0].dict()['price']),
                                float(msg.asks[0].dict()['price']))
                if stop is not None:
                    if self.ticks >= stop:
                        if ret:
                            return msgs
                        break
            # Setting self.stop_stream from another thread ends the loop.
            if self.stop_stream:
                if ret:
                    return msgs
                break

    def on_success(self, time, bid, ask):
        ''' Method called when new data is retrieved. '''
        print(time, bid, ask)

    def get_account_summary(self, detailed=False):
        ''' Returns summary data for Oanda account. '''
        if detailed is True:
            response = self.ctx.account.get(self.account_id)
        else:
            response = self.ctx.account.summary(self.account_id)
        raw = response.get('account')
        return raw.dict()

    def get_transaction(self, tid=0):
        ''' Retrieves and returns transaction data. '''
        response = self.ctx.transaction.get(self.account_id, tid)
        transaction = response.get('transaction')
        return transaction.dict()

    def get_transactions(self, tid=0):
        ''' Retrieves and returns transactions data. '''
        response = self.ctx.transaction.since(self.account_id, id=tid)
        transactions = response.get('transactions')
        transactions = [t.dict() for t in transactions]
        return transactions

    def print_transactions(self, tid=0):
        ''' Prints basic transactions data. '''
        transactions = self.get_transactions(tid)
        for trans in transactions:
            # Best effort: skip transaction types lacking these fields.
            try:
                templ = '%4s | %s | %7s | %12s | %8s'
                print(templ % (trans['id'],
                               trans['time'][:-8],
                               trans['instrument'],
                               trans['units'],
                               trans['pl']))
            except Exception:
                pass

    def get_positions(self):
        ''' Retrieves and returns positions data. '''
        response = self.ctx.position.list_open(self.account_id).body
        positions = [p.dict() for p in response.get('positions')]
        return positions
| true | true |
f7f703284c94234e0f7dc5f03b9978d6ef9405d0 | 4,116 | py | Python | samples/basic/executor/models/cisco-ios-xr/Cisco-IOS-XR-ping-act/nc-execute-xr-ping-act-28-ydk.py | deom119/ydk-py-samples | 1ad6cc2b798f358ff835df93d12924df308b85fc | [
"Apache-2.0"
] | 104 | 2016-03-15T17:04:01.000Z | 2021-12-31T06:09:35.000Z | samples/basic/executor/models/cisco-ios-xr/Cisco-IOS-XR-ping-act/nc-execute-xr-ping-act-28-ydk.py | https-maxus-github-com/ydk-py-samples | 1ad6cc2b798f358ff835df93d12924df308b85fc | [
"Apache-2.0"
] | 15 | 2016-03-15T23:09:47.000Z | 2020-08-13T12:13:18.000Z | samples/basic/executor/models/cisco-ios-xr/Cisco-IOS-XR-ping-act/nc-execute-xr-ping-act-28-ydk.py | https-maxus-github-com/ydk-py-samples | 1ad6cc2b798f358ff835df93d12924df308b85fc | [
"Apache-2.0"
] | 87 | 2016-04-15T16:59:23.000Z | 2021-09-18T18:05:47.000Z | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Execute RPC for model Cisco-IOS-XR-ping-act.
usage: nc-execute-xr-ping-act-28-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import ExecutorService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ping_act \
as xr_ping_act
import logging
def prepare_ping(ping):
    """Populate the ping RPC input with the target address and VRF."""
    destination = ping.input.destination
    destination.destination = '10.0.0.1'
    destination.vrf_name = 'RED'
def process_ping(ping):
    """Render the RPC output object as a human-readable ping report.

    Returns a string consisting of a header line naming the target,
    one result character per echo reply, and a summary line with the
    success rate and round-trip statistics.
    """
    # All values of interest live on the first (only) IPv4 response entry.
    resp = ping.output.ping_response.ipv4[0]
    # Opening line naming the target of the echoes.
    report = ('Sending 5, 100-byte ICMP Echos to {0}, '
              'timeout is 2 seconds:\n'.format(resp.destination))
    # One result character ('!', '.', ...) per echo reply.
    report += ''.join(reply.result for reply in resp.replies.reply)
    # Closing summary line with success rate and RTT min/avg/max.
    report += ('\nSuccess rate is {0} percent ({1}/{2}), '
               'round-trip min/avg/max = {3}/{4}/{5} ms'.format(
                   resp.success_rate, resp.hits, resp.total,
                   resp.rtt_min, resp.rtt_avg, resp.rtt_max))
    return report
if __name__ == "__main__":
    """Execute main program: ping a destination via a NETCONF device."""
    # Parse the device URL and optional verbosity flag from the CLI.
    parser = ArgumentParser()
    parser.add_argument("-v", "--verbose", help="print debugging messages",
                        action="store_true")
    parser.add_argument("device",
                        help="NETCONF device (ssh://user:password@host:port)")
    args = parser.parse_args()
    # NOTE(review): urlparse leaves .port as None when the URL omits it —
    # confirm NetconfServiceProvider tolerates a None port.
    device = urlparse(args.device)
    # Log debug messages from the YDK library if --verbose was given.
    if args.verbose:
        logger = logging.getLogger("ydk")
        logger.setLevel(logging.INFO)
        handler = logging.StreamHandler()
        formatter = logging.Formatter(("%(asctime)s - %(name)s - "
                                      "%(levelname)s - %(message)s"))
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    # Create the NETCONF session provider from the parsed URL pieces.
    provider = NetconfServiceProvider(address=device.hostname,
                                      port=device.port,
                                      username=device.username,
                                      password=device.password,
                                      protocol=device.scheme)
    # Executor service dispatches the RPC over the provider session.
    executor = ExecutorService()
    ping = xr_ping_act.Ping()  # create the RPC object
    prepare_ping(ping)  # populate RPC input
    # Execute the RPC on the NETCONF device and capture its output.
    ping.output = executor.execute_rpc(provider, ping)
    print(process_ping(ping))
    exit()
# End of script
| 35.791304 | 92 | 0.619776 |
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import ExecutorService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ping_act \
as xr_ping_act
import logging
def prepare_ping(ping):
ping.input.destination.destination = '10.0.0.1'
ping.input.destination.vrf_name = 'RED'
def process_ping(ping):
ping_reply_header = ('Sending 5, 100-byte ICMP Echos to {destination}, '
'timeout is 2 seconds:\n')
ping_reply_trailer = ('\nSuccess rate is {success_rate} percent '
'({hits}/{total}), '
'round-trip min/avg/max = {rtt_min}/{rtt_avg}/{rtt_max} ms')
ping_response = ping.output.ping_response
ping_reply = ping_reply_header.format(destination=ping_response.ipv4[0].destination)
for reply in ping_response.ipv4[0].replies.reply:
ping_reply += reply.result
ping_reply += ping_reply_trailer.format(success_rate=ping_response.ipv4[0].success_rate,
hits=ping_response.ipv4[0].hits,
total=ping_response.ipv4[0].total,
rtt_min=ping_response.ipv4[0].rtt_min,
rtt_avg=ping_response.ipv4[0].rtt_avg,
rtt_max=ping_response.ipv4[0].rtt_max,
)
return ping_reply
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="NETCONF device (ssh://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
provider = NetconfServiceProvider(address=device.hostname,
port=device.port,
username=device.username,
password=device.password,
protocol=device.scheme)
executor = ExecutorService()
ping = xr_ping_act.Ping()
prepare_ping(ping)
ping.output = executor.execute_rpc(provider, ping)
print(process_ping(ping))
exit()
| true | true |
f7f703600a5cd0bd2ae5cdcd72a2f6b613ba0d2a | 29,689 | py | Python | test_tablib.py | rhunwicks/tablib | bbdf5f11ab0c77e0b8907c593cdd73e287c2948d | [
"MIT"
] | null | null | null | test_tablib.py | rhunwicks/tablib | bbdf5f11ab0c77e0b8907c593cdd73e287c2948d | [
"MIT"
] | null | null | null | test_tablib.py | rhunwicks/tablib | bbdf5f11ab0c77e0b8907c593cdd73e287c2948d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for Tablib."""
import json
import unittest
import sys
import os
import tablib
from tablib.compat import markup, unicode, is_py3
from tablib.core import Row
class TablibTestCase(unittest.TestCase):
"""Tablib test cases."""
    def setUp(self):
        """Create simple data set with headers."""
        # ``data`` and ``book`` are module-level globals on purpose: the
        # tests below refer to the bare names ``data``/``book`` rather
        # than attributes on ``self``, so each test gets fresh objects.
        global data, book
        data = tablib.Dataset()
        book = tablib.Databook()
        # Shared fixture rows: (first_name, last_name, gpa) triples.
        self.headers = ('first_name', 'last_name', 'gpa')
        self.john = ('John', 'Adams', 90)
        self.george = ('George', 'Washington', 67)
        self.tom = ('Thomas', 'Jefferson', 50)
        # A pre-populated, titled dataset used by most export tests.
        self.founders = tablib.Dataset(headers=self.headers, title='Founders')
        self.founders.append(self.john)
        self.founders.append(self.george)
        self.founders.append(self.tom)
def tearDown(self):
"""Teardown."""
pass
def test_empty_append(self):
"""Verify append() correctly adds tuple with no headers."""
new_row = (1, 2, 3)
data.append(new_row)
# Verify width/data
self.assertTrue(data.width == len(new_row))
self.assertTrue(data[0] == new_row)
def test_empty_append_with_headers(self):
"""Verify append() correctly detects mismatch of number of
headers and data.
"""
data.headers = ['first', 'second']
new_row = (1, 2, 3, 4)
self.assertRaises(tablib.InvalidDimensions, data.append, new_row)
def test_set_headers_with_incorrect_dimension(self):
"""Verify headers correctly detects mismatch of number of
headers and data.
"""
data.append(self.john)
def set_header_callable():
data.headers = ['first_name']
self.assertRaises(tablib.InvalidDimensions, set_header_callable)
def test_add_column(self):
"""Verify adding column works with/without headers."""
data.append(['kenneth'])
data.append(['bessie'])
new_col = ['reitz', 'monke']
data.append_col(new_col)
self.assertEqual(data[0], ('kenneth', 'reitz'))
self.assertEqual(data.width, 2)
# With Headers
data.headers = ('fname', 'lname')
new_col = [21, 22]
data.append_col(new_col, header='age')
self.assertEqual(data['age'], new_col)
def test_add_column_no_data_no_headers(self):
"""Verify adding new column with no headers."""
new_col = ('reitz', 'monke')
data.append_col(new_col)
self.assertEqual(data[0], tuple([new_col[0]]))
self.assertEqual(data.width, 1)
self.assertEqual(data.height, len(new_col))
def test_add_column_with_header_ignored(self):
"""Verify append_col() ignores the header if data.headers has
not previously been set
"""
new_col = ('reitz', 'monke')
data.append_col(new_col, header='first_name')
self.assertEqual(data[0], tuple([new_col[0]]))
self.assertEqual(data.width, 1)
self.assertEqual(data.height, len(new_col))
self.assertEqual(data.headers, None)
def test_add_column_with_header_and_headers_only_exist(self):
"""Verify append_col() with header correctly detects mismatch when
headers exist but there is no existing row data
"""
data.headers = ['first_name']
#no data
new_col = ('allen')
def append_col_callable():
data.append_col(new_col, header='middle_name')
self.assertRaises(tablib.InvalidDimensions, append_col_callable)
def test_add_column_with_header_and_data_exists(self):
"""Verify append_col() works when headers and rows exists"""
data.headers = self.headers
data.append(self.john)
new_col = [10];
data.append_col(new_col, header='age')
self.assertEqual(data.height, 1)
self.assertEqual(data.width, len(self.john) + 1)
self.assertEqual(data['age'], new_col)
self.assertEqual(len(data.headers), len(self.headers) + 1)
def test_add_callable_column(self):
"""Verify adding column with values specified as callable."""
new_col = lambda x: x[0]
self.founders.append_col(new_col, header='first_again')
def test_header_slicing(self):
"""Verify slicing by headers."""
self.assertEqual(self.founders['first_name'],
[self.john[0], self.george[0], self.tom[0]])
self.assertEqual(self.founders['last_name'],
[self.john[1], self.george[1], self.tom[1]])
self.assertEqual(self.founders['gpa'],
[self.john[2], self.george[2], self.tom[2]])
def test_get_col(self):
"""Verify getting columns by index"""
self.assertEqual(
self.founders.get_col(list(self.headers).index('first_name')),
[self.john[0], self.george[0], self.tom[0]])
self.assertEqual(
self.founders.get_col(list(self.headers).index('last_name')),
[self.john[1], self.george[1], self.tom[1]])
self.assertEqual(
self.founders.get_col(list(self.headers).index('gpa')),
[self.john[2], self.george[2], self.tom[2]])
def test_data_slicing(self):
"""Verify slicing by data."""
# Slice individual rows
self.assertEqual(self.founders[0], self.john)
self.assertEqual(self.founders[:1], [self.john])
self.assertEqual(self.founders[1:2], [self.george])
self.assertEqual(self.founders[-1], self.tom)
self.assertEqual(self.founders[3:], [])
# Slice multiple rows
self.assertEqual(self.founders[:], [self.john, self.george, self.tom])
self.assertEqual(self.founders[0:2], [self.john, self.george])
self.assertEqual(self.founders[1:3], [self.george, self.tom])
self.assertEqual(self.founders[2:], [self.tom])
def test_row_slicing(self):
"""Verify Row's __getslice__ method. Issue #184."""
john = Row(self.john)
self.assertEqual(john[:], list(self.john[:]))
self.assertEqual(john[0:], list(self.john[0:]))
self.assertEqual(john[:2], list(self.john[:2]))
self.assertEqual(john[0:2], list(self.john[0:2]))
self.assertEqual(john[0:-1], list(self.john[0:-1]))
def test_delete(self):
"""Verify deleting from dataset works."""
# Delete from front of object
del self.founders[0]
self.assertEqual(self.founders[:], [self.george, self.tom])
# Verify dimensions, width should NOT change
self.assertEqual(self.founders.height, 2)
self.assertEqual(self.founders.width, 3)
# Delete from back of object
del self.founders[1]
self.assertEqual(self.founders[:], [self.george])
# Verify dimensions, width should NOT change
self.assertEqual(self.founders.height, 1)
self.assertEqual(self.founders.width, 3)
# Delete from invalid index
self.assertRaises(IndexError, self.founders.__delitem__, 3)
def test_csv_export(self):
"""Verify exporting dataset object as CSV."""
# Build up the csv string with headers first, followed by each row
csv = ''
for col in self.headers:
csv += col + ','
csv = csv.strip(',') + '\r\n'
for founder in self.founders:
for col in founder:
csv += str(col) + ','
csv = csv.strip(',') + '\r\n'
self.assertEqual(csv, self.founders.csv)
def test_tsv_export(self):
"""Verify exporting dataset object as TSV."""
# Build up the tsv string with headers first, followed by each row
tsv = ''
for col in self.headers:
tsv += col + '\t'
tsv = tsv.strip('\t') + '\r\n'
for founder in self.founders:
for col in founder:
tsv += str(col) + '\t'
tsv = tsv.strip('\t') + '\r\n'
self.assertEqual(tsv, self.founders.tsv)
def test_html_export(self):
"""HTML export"""
html = markup.page()
html.table.open()
html.thead.open()
html.tr(markup.oneliner.th(self.founders.headers))
html.thead.close()
for founder in self.founders:
html.tr(markup.oneliner.td(founder))
html.table.close()
html = str(html)
self.assertEqual(html, self.founders.html)
def test_html_export_none_value(self):
"""HTML export"""
html = markup.page()
html.table.open()
html.thead.open()
html.tr(markup.oneliner.th(['foo','', 'bar']))
html.thead.close()
html.tr(markup.oneliner.td(['foo','', 'bar']))
html.table.close()
html = str(html)
headers = ['foo', None, 'bar'];
d = tablib.Dataset(['foo', None, 'bar'], headers=headers)
self.assertEqual(html, d.html)
def test_latex_export(self):
"""LaTeX export"""
expected = """\
% Note: add \\usepackage{booktabs} to your preamble
%
\\begin{table}[!htbp]
\\centering
\\caption{Founders}
\\begin{tabular}{lrr}
\\toprule
first\\_name & last\\_name & gpa \\\\
\\cmidrule(r){1-1} \\cmidrule(lr){2-2} \\cmidrule(l){3-3}
John & Adams & 90 \\\\
George & Washington & 67 \\\\
Thomas & Jefferson & 50 \\\\
\\bottomrule
\\end{tabular}
\\end{table}
"""
output = self.founders.latex
self.assertEqual(output, expected)
def test_latex_export_empty_dataset(self):
self.assertTrue(tablib.Dataset().latex is not None)
def test_latex_export_no_headers(self):
d = tablib.Dataset()
d.append(('one', 'two', 'three'))
self.assertTrue('one' in d.latex)
def test_latex_export_caption(self):
d = tablib.Dataset()
d.append(('one', 'two', 'three'))
self.assertFalse('caption' in d.latex)
d.title = 'Title'
self.assertTrue('\\caption{Title}' in d.latex)
def test_latex_export_none_values(self):
headers = ['foo', None, 'bar']
d = tablib.Dataset(['foo', None, 'bar'], headers=headers)
output = d.latex
self.assertTrue('foo' in output)
self.assertFalse('None' in output)
def test_latex_escaping(self):
d = tablib.Dataset(['~', '^'])
output = d.latex
self.assertFalse('~' in output)
self.assertTrue('textasciitilde' in output)
self.assertFalse('^' in output)
self.assertTrue('textasciicircum' in output)
def test_unicode_append(self):
"""Passes in a single unicode character and exports."""
if is_py3:
new_row = ('å', 'é')
else:
exec("new_row = (u'å', u'é')")
data.append(new_row)
data.json
data.yaml
data.csv
data.tsv
data.xls
data.xlsx
data.ods
data.html
data.latex
def test_book_export_no_exceptions(self):
"""Test that various exports don't error out."""
book = tablib.Databook()
book.add_sheet(data)
book.json
book.yaml
book.xls
book.xlsx
book.ods
def test_json_import_set(self):
"""Generate and import JSON set serialization."""
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_json = data.json
data.json = _json
self.assertEqual(json.loads(_json), json.loads(data.json))
def test_json_import_book(self):
"""Generate and import JSON book serialization."""
data.append(self.john)
data.append(self.george)
data.headers = self.headers
book.add_sheet(data)
_json = book.json
book.json = _json
self.assertEqual(json.loads(_json), json.loads(book.json))
def test_yaml_import_set(self):
"""Generate and import YAML set serialization."""
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_yaml = data.yaml
data.yaml = _yaml
self.assertEqual(_yaml, data.yaml)
def test_yaml_import_book(self):
"""Generate and import YAML book serialization."""
data.append(self.john)
data.append(self.george)
data.headers = self.headers
book.add_sheet(data)
_yaml = book.yaml
book.yaml = _yaml
self.assertEqual(_yaml, book.yaml)
def test_csv_import_set(self):
"""Generate and import CSV set serialization."""
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_csv = data.csv
data.csv = _csv
self.assertEqual(_csv, data.csv)
def test_csv_import_set_semicolons(self):
"""Test for proper output with semicolon separated CSV."""
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_csv = data.get_csv(delimiter=';')
data.set_csv(_csv, delimiter=';')
self.assertEqual(_csv, data.get_csv(delimiter=';'))
def test_csv_import_set_with_spaces(self):
"""Generate and import CSV set serialization when row values have
spaces."""
data.append(('Bill Gates', 'Microsoft'))
data.append(('Steve Jobs', 'Apple'))
data.headers = ('Name', 'Company')
_csv = data.csv
data.csv = _csv
self.assertEqual(_csv, data.csv)
def test_csv_import_set_semicolon_with_spaces(self):
"""Generate and import semicolon separated CSV set serialization when row values have
spaces."""
data.append(('Bill Gates', 'Microsoft'))
data.append(('Steve Jobs', 'Apple'))
data.headers = ('Name', 'Company')
_csv = data.get_csv(delimiter=';')
data.set_csv(_csv, delimiter=';')
self.assertEqual(_csv, data.get_csv(delimiter=';'))
def test_csv_import_set_with_newlines(self):
"""Generate and import CSV set serialization when row values have
newlines."""
data.append(('Markdown\n=======',
'A cool language\n\nwith paragraphs'))
data.append(('reStructedText\n==============',
'Another cool language\n\nwith paragraphs'))
data.headers = ('title', 'body')
_csv = data.csv
data.csv = _csv
self.assertEqual(_csv, data.csv)
def test_tsv_import_set(self):
"""Generate and import TSV set serialization."""
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_tsv = data.tsv
data.tsv = _tsv
self.assertEqual(_tsv, data.tsv)
    def test_dbf_import_set(self):
        """Round-trip a dataset through DBF export and import.

        Bytes 1-3 of a DBF header hold the last-update date (YY, MM, DD),
        which may legitimately differ between two exports, so those
        offsets are excluded from the byte-by-byte comparison below.
        """
        data.append(self.john)
        data.append(self.george)
        data.headers = self.headers
        _dbf = data.dbf
        data.dbf = _dbf
        try:
            self.assertEqual(_dbf, data.dbf)
        except AssertionError:
            # Fall back to a byte-by-byte diff that skips the header date
            # bytes (indices 1-3) and reports the first real mismatch
            # together with everything matched so far.
            index = 0
            so_far = ''
            for reg_char, data_char in zip(_dbf, data.dbf):
                # NOTE(review): chr(data_char) assumes Python 3, where
                # iterating bytes yields ints — confirm for the py2 path.
                so_far += chr(data_char)
                if reg_char != data_char and index not in [1, 2, 3]:
                    raise AssertionError('Failing at char %s: %s vs %s %s' % (
                        index, reg_char, data_char, so_far))
                index += 1
def test_dbf_export_set(self):
"""Test DBF import."""
data.append(self.john)
data.append(self.george)
data.append(self.tom)
data.headers = self.headers
_regression_dbf = (b'\x03r\x06\x06\x03\x00\x00\x00\x81\x00\xab\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00FIRST_NAME\x00C\x00\x00\x00\x00P\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00LAST_NAME\x00\x00C\x00'
b'\x00\x00\x00P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00GPA\x00\x00\x00\x00\x00\x00\x00\x00N\x00\x00\x00\x00\n'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r'
)
_regression_dbf += b' John' + (b' ' * 75)
_regression_dbf += b' Adams' + (b' ' * 74)
_regression_dbf += b' 90.0000000'
_regression_dbf += b' George' + (b' ' * 73)
_regression_dbf += b' Washington' + (b' ' * 69)
_regression_dbf += b' 67.0000000'
_regression_dbf += b' Thomas' + (b' ' * 73)
_regression_dbf += b' Jefferson' + (b' ' * 70)
_regression_dbf += b' 50.0000000'
_regression_dbf += b'\x1a'
if is_py3:
# If in python3, decode regression string to binary.
#_regression_dbf = bytes(_regression_dbf, 'utf-8')
#_regression_dbf = _regression_dbf.replace(b'\n', b'\r')
pass
try:
self.assertEqual(_regression_dbf, data.dbf)
except AssertionError:
index = 0
found_so_far = ''
for reg_char, data_char in zip(_regression_dbf, data.dbf):
#found_so_far += chr(data_char)
if reg_char != data_char and index not in [1, 2, 3]:
raise AssertionError(
'Failing at char %s: %s vs %s (found %s)' % (
index, reg_char, data_char, found_so_far))
index += 1
def test_dbf_format_detect(self):
"""Test the DBF format detection."""
_dbf = (b'\x03r\x06\x03\x03\x00\x00\x00\x81\x00\xab\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00FIRST_NAME\x00C\x00\x00\x00\x00P\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00LAST_NAME\x00\x00C\x00'
b'\x00\x00\x00P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00GPA\x00\x00\x00\x00\x00\x00\x00\x00N\x00\x00\x00\x00\n'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r'
)
_dbf += b' John' + (b' ' * 75)
_dbf += b' Adams' + (b' ' * 74)
_dbf += b' 90.0000000'
_dbf += b' George' + (b' ' * 73)
_dbf += b' Washington' + (b' ' * 69)
_dbf += b' 67.0000000'
_dbf += b' Thomas' + (b' ' * 73)
_dbf += b' Jefferson' + (b' ' * 70)
_dbf += b' 50.0000000'
_dbf += b'\x1a'
_yaml = '- {age: 90, first_name: John, last_name: Adams}'
_tsv = 'foo\tbar'
_csv = '1,2,3\n4,5,6\n7,8,9\n'
_json = '[{"last_name": "Adams","age": 90,"first_name": "John"}]'
_bunk = (
'¡¡¡¡¡¡¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
)
self.assertTrue(tablib.formats.dbf.detect(_dbf))
self.assertFalse(tablib.formats.dbf.detect(_yaml))
self.assertFalse(tablib.formats.dbf.detect(_tsv))
self.assertFalse(tablib.formats.dbf.detect(_csv))
self.assertFalse(tablib.formats.dbf.detect(_json))
self.assertFalse(tablib.formats.dbf.detect(_bunk))
def test_csv_format_detect(self):
"""Test CSV format detection."""
_csv = (
'1,2,3\n'
'4,5,6\n'
'7,8,9\n'
)
_bunk = (
'¡¡¡¡¡¡¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
)
self.assertTrue(tablib.formats.csv.detect(_csv))
self.assertFalse(tablib.formats.csv.detect(_bunk))
def test_tsv_format_detect(self):
"""Test TSV format detection."""
_tsv = (
'1\t2\t3\n'
'4\t5\t6\n'
'7\t8\t9\n'
)
_bunk = (
'¡¡¡¡¡¡¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
)
self.assertTrue(tablib.formats.tsv.detect(_tsv))
self.assertFalse(tablib.formats.tsv.detect(_bunk))
def test_json_format_detect(self):
"""Test JSON format detection."""
_json = '[{"last_name": "Adams","age": 90,"first_name": "John"}]'
_bunk = (
'¡¡¡¡¡¡¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
)
self.assertTrue(tablib.formats.json.detect(_json))
self.assertFalse(tablib.formats.json.detect(_bunk))
def test_yaml_format_detect(self):
"""Test YAML format detection."""
_yaml = '- {age: 90, first_name: John, last_name: Adams}'
_tsv = 'foo\tbar'
_bunk = (
'¡¡¡¡¡¡---///\n\n\n¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
)
self.assertTrue(tablib.formats.yaml.detect(_yaml))
self.assertFalse(tablib.formats.yaml.detect(_bunk))
self.assertFalse(tablib.formats.yaml.detect(_tsv))
def test_auto_format_detect(self):
"""Test auto format detection."""
_yaml = '- {age: 90, first_name: John, last_name: Adams}'
_json = '[{"last_name": "Adams","age": 90,"first_name": "John"}]'
_csv = '1,2,3\n4,5,6\n7,8,9\n'
_tsv = '1\t2\t3\n4\t5\t6\n7\t8\t9\n'
_bunk = '¡¡¡¡¡¡---///\n\n\n¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
self.assertEqual(tablib.detect_format(_yaml), 'yaml')
self.assertEqual(tablib.detect_format(_csv), 'csv')
self.assertEqual(tablib.detect_format(_tsv), 'tsv')
self.assertEqual(tablib.detect_format(_json), 'json')
self.assertEqual(tablib.detect_format(_bunk), None)
def test_transpose(self):
"""Transpose a dataset."""
transposed_founders = self.founders.transpose()
first_row = transposed_founders[0]
second_row = transposed_founders[1]
self.assertEqual(transposed_founders.headers,
["first_name","John", "George", "Thomas"])
self.assertEqual(first_row,
("last_name","Adams", "Washington", "Jefferson"))
self.assertEqual(second_row,
("gpa",90, 67, 50))
def test_transpose_multiple_headers(self):
data = tablib.Dataset()
data.headers = ("first_name", "last_name", "age")
data.append(('John', 'Adams', 90))
data.append(('George', 'Washington', 67))
data.append(('John', 'Tyler', 71))
self.assertEqual(data.transpose().transpose().dict, data.dict)
def test_row_stacking(self):
"""Row stacking."""
to_join = tablib.Dataset(headers=self.founders.headers)
for row in self.founders:
to_join.append(row=row)
row_stacked = self.founders.stack(to_join)
for column in row_stacked.headers:
original_data = self.founders[column]
expected_data = original_data + original_data
self.assertEqual(row_stacked[column], expected_data)
def test_column_stacking(self):
"""Column stacking"""
to_join = tablib.Dataset(headers=self.founders.headers)
for row in self.founders:
to_join.append(row=row)
column_stacked = self.founders.stack_cols(to_join)
for index, row in enumerate(column_stacked):
original_data = self.founders[index]
expected_data = original_data + original_data
self.assertEqual(row, expected_data)
self.assertEqual(column_stacked[0],
("John", "Adams", 90, "John", "Adams", 90))
def test_sorting(self):
"""Sort columns."""
sorted_data = self.founders.sort(col="first_name")
self.assertEqual(sorted_data.title, 'Founders')
first_row = sorted_data[0]
second_row = sorted_data[2]
third_row = sorted_data[1]
expected_first = self.founders[1]
expected_second = self.founders[2]
expected_third = self.founders[0]
self.assertEqual(first_row, expected_first)
self.assertEqual(second_row, expected_second)
self.assertEqual(third_row, expected_third)
def test_remove_duplicates(self):
"""Unique Rows."""
self.founders.append(self.john)
self.founders.append(self.george)
self.founders.append(self.tom)
self.assertEqual(self.founders[0], self.founders[3])
self.assertEqual(self.founders[1], self.founders[4])
self.assertEqual(self.founders[2], self.founders[5])
self.assertEqual(self.founders.height, 6)
self.founders.remove_duplicates()
self.assertEqual(self.founders[0], self.john)
self.assertEqual(self.founders[1], self.george)
self.assertEqual(self.founders[2], self.tom)
self.assertEqual(self.founders.height, 3)
def test_wipe(self):
"""Purge a dataset."""
new_row = (1, 2, 3)
data.append(new_row)
# Verify width/data
self.assertTrue(data.width == len(new_row))
self.assertTrue(data[0] == new_row)
data.wipe()
new_row = (1, 2, 3, 4)
data.append(new_row)
self.assertTrue(data.width == len(new_row))
self.assertTrue(data[0] == new_row)
def test_subset(self):
"""Create a subset of a dataset"""
rows = (0, 2)
columns = ('first_name','gpa')
data.headers = self.headers
data.append(self.john)
data.append(self.george)
data.append(self.tom)
#Verify data is truncated
subset = data.subset(rows=rows, cols=columns)
self.assertEqual(type(subset), tablib.Dataset)
self.assertEqual(subset.headers, list(columns))
self.assertEqual(subset._data[0].list, ['John', 90])
self.assertEqual(subset._data[1].list, ['Thomas', 50])
def test_formatters(self):
"""Confirm formatters are being triggered."""
def _formatter(cell_value):
return str(cell_value).upper()
self.founders.add_formatter('last_name', _formatter)
for name in [r['last_name'] for r in self.founders.dict]:
self.assertTrue(name.isupper())
def test_unicode_csv(self):
"""Check if unicode in csv export doesn't raise."""
data = tablib.Dataset()
if sys.version_info[0] > 2:
data.append(['\xfc', '\xfd'])
else:
exec("data.append([u'\xfc', u'\xfd'])")
data.csv
def test_csv_column_select(self):
"""Build up a CSV and test selecting a column"""
data = tablib.Dataset()
data.csv = self.founders.csv
headers = data.headers
self.assertTrue(isinstance(headers[0], unicode))
orig_first_name = self.founders[self.headers[0]]
csv_first_name = data[headers[0]]
self.assertEqual(orig_first_name, csv_first_name)
def test_csv_column_delete(self):
"""Build up a CSV and test deleting a column"""
data = tablib.Dataset()
data.csv = self.founders.csv
target_header = data.headers[0]
self.assertTrue(isinstance(target_header, unicode))
del data[target_header]
self.assertTrue(target_header not in data.headers)
def test_csv_column_sort(self):
"""Build up a CSV and test sorting a column by name"""
data = tablib.Dataset()
data.csv = self.founders.csv
orig_target_header = self.founders.headers[1]
target_header = data.headers[1]
self.founders.sort(orig_target_header)
data.sort(target_header)
self.assertEqual(self.founders[orig_target_header], data[target_header])
def test_unicode_renders_markdown_table(self):
# add another entry to test right field width for
# integer
self.founders.append(('Old', 'Man', 100500))
self.assertEqual('first_name|last_name |gpa ', unicode(self.founders).split('\n')[0])
def test_databook_add_sheet_accepts_only_dataset_instances(self):
class NotDataset(object):
def append(self, item):
pass
dataset = NotDataset()
dataset.append(self.john)
self.assertRaises(tablib.InvalidDatasetType, book.add_sheet, dataset)
def test_databook_add_sheet_accepts_dataset_subclasses(self):
class DatasetSubclass(tablib.Dataset):
pass
# just checking if subclass of tablib.Dataset can be added to Databook
dataset = DatasetSubclass()
dataset.append(self.john)
dataset.append(self.tom)
try:
book.add_sheet(dataset)
except tablib.InvalidDatasetType:
self.fail("Subclass of tablib.Dataset should be accepted by Databook.add_sheet")
def test_csv_formatter_support_kwargs(self):
"""Test CSV import and export with formatter configuration."""
data.append(self.john)
data.append(self.george)
data.headers = self.headers
expected = 'first_name;last_name;gpa\nJohn;Adams;90\nGeorge;Washington;67\n'
kwargs = dict(delimiter=';', lineterminator='\n')
_csv = data.export('csv', **kwargs)
self.assertEqual(expected, _csv)
# the import works but consider default delimiter=','
d1 = tablib.import_set(_csv, format="csv")
self.assertEqual(1, len(d1.headers))
d2 = tablib.import_set(_csv, format="csv", **kwargs)
self.assertEqual(3, len(d2.headers))
def test_databook_formatter_support_kwargs(self):
"""Test XLSX export with formatter configuration."""
self.founders.export('xlsx', freeze_panes=False)
if __name__ == '__main__':
unittest.main()
| 30.141117 | 108 | 0.587996 |
import json
import unittest
import sys
import os
import tablib
from tablib.compat import markup, unicode, is_py3
from tablib.core import Row
class TablibTestCase(unittest.TestCase):
def setUp(self):
global data, book
data = tablib.Dataset()
book = tablib.Databook()
self.headers = ('first_name', 'last_name', 'gpa')
self.john = ('John', 'Adams', 90)
self.george = ('George', 'Washington', 67)
self.tom = ('Thomas', 'Jefferson', 50)
self.founders = tablib.Dataset(headers=self.headers, title='Founders')
self.founders.append(self.john)
self.founders.append(self.george)
self.founders.append(self.tom)
def tearDown(self):
pass
def test_empty_append(self):
new_row = (1, 2, 3)
data.append(new_row)
self.assertTrue(data.width == len(new_row))
self.assertTrue(data[0] == new_row)
def test_empty_append_with_headers(self):
data.headers = ['first', 'second']
new_row = (1, 2, 3, 4)
self.assertRaises(tablib.InvalidDimensions, data.append, new_row)
def test_set_headers_with_incorrect_dimension(self):
data.append(self.john)
def set_header_callable():
data.headers = ['first_name']
self.assertRaises(tablib.InvalidDimensions, set_header_callable)
def test_add_column(self):
data.append(['kenneth'])
data.append(['bessie'])
new_col = ['reitz', 'monke']
data.append_col(new_col)
self.assertEqual(data[0], ('kenneth', 'reitz'))
self.assertEqual(data.width, 2)
data.headers = ('fname', 'lname')
new_col = [21, 22]
data.append_col(new_col, header='age')
self.assertEqual(data['age'], new_col)
def test_add_column_no_data_no_headers(self):
new_col = ('reitz', 'monke')
data.append_col(new_col)
self.assertEqual(data[0], tuple([new_col[0]]))
self.assertEqual(data.width, 1)
self.assertEqual(data.height, len(new_col))
def test_add_column_with_header_ignored(self):
new_col = ('reitz', 'monke')
data.append_col(new_col, header='first_name')
self.assertEqual(data[0], tuple([new_col[0]]))
self.assertEqual(data.width, 1)
self.assertEqual(data.height, len(new_col))
self.assertEqual(data.headers, None)
def test_add_column_with_header_and_headers_only_exist(self):
data.headers = ['first_name']
new_col = ('allen')
def append_col_callable():
data.append_col(new_col, header='middle_name')
self.assertRaises(tablib.InvalidDimensions, append_col_callable)
def test_add_column_with_header_and_data_exists(self):
data.headers = self.headers
data.append(self.john)
new_col = [10];
data.append_col(new_col, header='age')
self.assertEqual(data.height, 1)
self.assertEqual(data.width, len(self.john) + 1)
self.assertEqual(data['age'], new_col)
self.assertEqual(len(data.headers), len(self.headers) + 1)
def test_add_callable_column(self):
new_col = lambda x: x[0]
self.founders.append_col(new_col, header='first_again')
def test_header_slicing(self):
self.assertEqual(self.founders['first_name'],
[self.john[0], self.george[0], self.tom[0]])
self.assertEqual(self.founders['last_name'],
[self.john[1], self.george[1], self.tom[1]])
self.assertEqual(self.founders['gpa'],
[self.john[2], self.george[2], self.tom[2]])
def test_get_col(self):
self.assertEqual(
self.founders.get_col(list(self.headers).index('first_name')),
[self.john[0], self.george[0], self.tom[0]])
self.assertEqual(
self.founders.get_col(list(self.headers).index('last_name')),
[self.john[1], self.george[1], self.tom[1]])
self.assertEqual(
self.founders.get_col(list(self.headers).index('gpa')),
[self.john[2], self.george[2], self.tom[2]])
def test_data_slicing(self):
self.assertEqual(self.founders[0], self.john)
self.assertEqual(self.founders[:1], [self.john])
self.assertEqual(self.founders[1:2], [self.george])
self.assertEqual(self.founders[-1], self.tom)
self.assertEqual(self.founders[3:], [])
self.assertEqual(self.founders[:], [self.john, self.george, self.tom])
self.assertEqual(self.founders[0:2], [self.john, self.george])
self.assertEqual(self.founders[1:3], [self.george, self.tom])
self.assertEqual(self.founders[2:], [self.tom])
def test_row_slicing(self):
john = Row(self.john)
self.assertEqual(john[:], list(self.john[:]))
self.assertEqual(john[0:], list(self.john[0:]))
self.assertEqual(john[:2], list(self.john[:2]))
self.assertEqual(john[0:2], list(self.john[0:2]))
self.assertEqual(john[0:-1], list(self.john[0:-1]))
def test_delete(self):
del self.founders[0]
self.assertEqual(self.founders[:], [self.george, self.tom])
self.assertEqual(self.founders.height, 2)
self.assertEqual(self.founders.width, 3)
del self.founders[1]
self.assertEqual(self.founders[:], [self.george])
self.assertEqual(self.founders.height, 1)
self.assertEqual(self.founders.width, 3)
self.assertRaises(IndexError, self.founders.__delitem__, 3)
def test_csv_export(self):
    """Dataset.csv emits a header row plus one CRLF-terminated row per entry."""
    rows = [','.join(self.headers)]
    for founder in self.founders:
        rows.append(','.join(str(cell) for cell in founder))
    expected = '\r\n'.join(rows) + '\r\n'
    self.assertEqual(expected, self.founders.csv)
def test_tsv_export(self):
tsv = ''
for col in self.headers:
tsv += col + '\t'
tsv = tsv.strip('\t') + '\r\n'
for founder in self.founders:
for col in founder:
tsv += str(col) + '\t'
tsv = tsv.strip('\t') + '\r\n'
self.assertEqual(tsv, self.founders.tsv)
def test_html_export(self):
html = markup.page()
html.table.open()
html.thead.open()
html.tr(markup.oneliner.th(self.founders.headers))
html.thead.close()
for founder in self.founders:
html.tr(markup.oneliner.td(founder))
html.table.close()
html = str(html)
self.assertEqual(html, self.founders.html)
def test_html_export_none_value(self):
html = markup.page()
html.table.open()
html.thead.open()
html.tr(markup.oneliner.th(['foo','', 'bar']))
html.thead.close()
html.tr(markup.oneliner.td(['foo','', 'bar']))
html.table.close()
html = str(html)
headers = ['foo', None, 'bar'];
d = tablib.Dataset(['foo', None, 'bar'], headers=headers)
self.assertEqual(html, d.html)
def test_latex_export(self):
expected = """\
% Note: add \\usepackage{booktabs} to your preamble
%
\\begin{table}[!htbp]
\\centering
\\caption{Founders}
\\begin{tabular}{lrr}
\\toprule
first\\_name & last\\_name & gpa \\\\
\\cmidrule(r){1-1} \\cmidrule(lr){2-2} \\cmidrule(l){3-3}
John & Adams & 90 \\\\
George & Washington & 67 \\\\
Thomas & Jefferson & 50 \\\\
\\bottomrule
\\end{tabular}
\\end{table}
"""
output = self.founders.latex
self.assertEqual(output, expected)
def test_latex_export_empty_dataset(self):
self.assertTrue(tablib.Dataset().latex is not None)
def test_latex_export_no_headers(self):
d = tablib.Dataset()
d.append(('one', 'two', 'three'))
self.assertTrue('one' in d.latex)
def test_latex_export_caption(self):
d = tablib.Dataset()
d.append(('one', 'two', 'three'))
self.assertFalse('caption' in d.latex)
d.title = 'Title'
self.assertTrue('\\caption{Title}' in d.latex)
def test_latex_export_none_values(self):
headers = ['foo', None, 'bar']
d = tablib.Dataset(['foo', None, 'bar'], headers=headers)
output = d.latex
self.assertTrue('foo' in output)
self.assertFalse('None' in output)
def test_latex_escaping(self):
d = tablib.Dataset(['~', '^'])
output = d.latex
self.assertFalse('~' in output)
self.assertTrue('textasciitilde' in output)
self.assertFalse('^' in output)
self.assertTrue('textasciicircum' in output)
def test_unicode_append(self):
if is_py3:
new_row = ('å', 'é')
else:
exec("new_row = (u'å', u'é')")
data.append(new_row)
data.json
data.yaml
data.csv
data.tsv
data.xls
data.xlsx
data.ods
data.html
data.latex
def test_book_export_no_exceptions(self):
book = tablib.Databook()
book.add_sheet(data)
book.json
book.yaml
book.xls
book.xlsx
book.ods
def test_json_import_set(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_json = data.json
data.json = _json
self.assertEqual(json.loads(_json), json.loads(data.json))
def test_json_import_book(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
book.add_sheet(data)
_json = book.json
book.json = _json
self.assertEqual(json.loads(_json), json.loads(book.json))
def test_yaml_import_set(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_yaml = data.yaml
data.yaml = _yaml
self.assertEqual(_yaml, data.yaml)
def test_yaml_import_book(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
book.add_sheet(data)
_yaml = book.yaml
book.yaml = _yaml
self.assertEqual(_yaml, book.yaml)
def test_csv_import_set(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_csv = data.csv
data.csv = _csv
self.assertEqual(_csv, data.csv)
def test_csv_import_set_semicolons(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_csv = data.get_csv(delimiter=';')
data.set_csv(_csv, delimiter=';')
self.assertEqual(_csv, data.get_csv(delimiter=';'))
def test_csv_import_set_with_spaces(self):
data.append(('Bill Gates', 'Microsoft'))
data.append(('Steve Jobs', 'Apple'))
data.headers = ('Name', 'Company')
_csv = data.csv
data.csv = _csv
self.assertEqual(_csv, data.csv)
def test_csv_import_set_semicolon_with_spaces(self):
data.append(('Bill Gates', 'Microsoft'))
data.append(('Steve Jobs', 'Apple'))
data.headers = ('Name', 'Company')
_csv = data.get_csv(delimiter=';')
data.set_csv(_csv, delimiter=';')
self.assertEqual(_csv, data.get_csv(delimiter=';'))
def test_csv_import_set_with_newlines(self):
data.append(('Markdown\n=======',
'A cool language\n\nwith paragraphs'))
data.append(('reStructedText\n==============',
'Another cool language\n\nwith paragraphs'))
data.headers = ('title', 'body')
_csv = data.csv
data.csv = _csv
self.assertEqual(_csv, data.csv)
def test_tsv_import_set(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_tsv = data.tsv
data.tsv = _tsv
self.assertEqual(_tsv, data.tsv)
def test_dbf_import_set(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
_dbf = data.dbf
data.dbf = _dbf
try:
self.assertEqual(_dbf, data.dbf)
except AssertionError:
index = 0
so_far = ''
for reg_char, data_char in zip(_dbf, data.dbf):
so_far += chr(data_char)
if reg_char != data_char and index not in [1, 2, 3]:
raise AssertionError('Failing at char %s: %s vs %s %s' % (
index, reg_char, data_char, so_far))
index += 1
def test_dbf_export_set(self):
data.append(self.john)
data.append(self.george)
data.append(self.tom)
data.headers = self.headers
_regression_dbf = (b'\x03r\x06\x06\x03\x00\x00\x00\x81\x00\xab\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00FIRST_NAME\x00C\x00\x00\x00\x00P\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00LAST_NAME\x00\x00C\x00'
b'\x00\x00\x00P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00GPA\x00\x00\x00\x00\x00\x00\x00\x00N\x00\x00\x00\x00\n'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r'
)
_regression_dbf += b' John' + (b' ' * 75)
_regression_dbf += b' Adams' + (b' ' * 74)
_regression_dbf += b' 90.0000000'
_regression_dbf += b' George' + (b' ' * 73)
_regression_dbf += b' Washington' + (b' ' * 69)
_regression_dbf += b' 67.0000000'
_regression_dbf += b' Thomas' + (b' ' * 73)
_regression_dbf += b' Jefferson' + (b' ' * 70)
_regression_dbf += b' 50.0000000'
_regression_dbf += b'\x1a'
if is_py3:
pass
try:
self.assertEqual(_regression_dbf, data.dbf)
except AssertionError:
index = 0
found_so_far = ''
for reg_char, data_char in zip(_regression_dbf, data.dbf):
if reg_char != data_char and index not in [1, 2, 3]:
raise AssertionError(
'Failing at char %s: %s vs %s (found %s)' % (
index, reg_char, data_char, found_so_far))
index += 1
def test_dbf_format_detect(self):
_dbf = (b'\x03r\x06\x03\x03\x00\x00\x00\x81\x00\xab\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00FIRST_NAME\x00C\x00\x00\x00\x00P\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00LAST_NAME\x00\x00C\x00'
b'\x00\x00\x00P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00GPA\x00\x00\x00\x00\x00\x00\x00\x00N\x00\x00\x00\x00\n'
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r'
)
_dbf += b' John' + (b' ' * 75)
_dbf += b' Adams' + (b' ' * 74)
_dbf += b' 90.0000000'
_dbf += b' George' + (b' ' * 73)
_dbf += b' Washington' + (b' ' * 69)
_dbf += b' 67.0000000'
_dbf += b' Thomas' + (b' ' * 73)
_dbf += b' Jefferson' + (b' ' * 70)
_dbf += b' 50.0000000'
_dbf += b'\x1a'
_yaml = '- {age: 90, first_name: John, last_name: Adams}'
_tsv = 'foo\tbar'
_csv = '1,2,3\n4,5,6\n7,8,9\n'
_json = '[{"last_name": "Adams","age": 90,"first_name": "John"}]'
_bunk = (
'¡¡¡¡¡¡¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
)
self.assertTrue(tablib.formats.dbf.detect(_dbf))
self.assertFalse(tablib.formats.dbf.detect(_yaml))
self.assertFalse(tablib.formats.dbf.detect(_tsv))
self.assertFalse(tablib.formats.dbf.detect(_csv))
self.assertFalse(tablib.formats.dbf.detect(_json))
self.assertFalse(tablib.formats.dbf.detect(_bunk))
def test_csv_format_detect(self):
_csv = (
'1,2,3\n'
'4,5,6\n'
'7,8,9\n'
)
_bunk = (
'¡¡¡¡¡¡¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
)
self.assertTrue(tablib.formats.csv.detect(_csv))
self.assertFalse(tablib.formats.csv.detect(_bunk))
def test_tsv_format_detect(self):
_tsv = (
'1\t2\t3\n'
'4\t5\t6\n'
'7\t8\t9\n'
)
_bunk = (
'¡¡¡¡¡¡¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
)
self.assertTrue(tablib.formats.tsv.detect(_tsv))
self.assertFalse(tablib.formats.tsv.detect(_bunk))
def test_json_format_detect(self):
_json = '[{"last_name": "Adams","age": 90,"first_name": "John"}]'
_bunk = (
'¡¡¡¡¡¡¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
)
self.assertTrue(tablib.formats.json.detect(_json))
self.assertFalse(tablib.formats.json.detect(_bunk))
def test_yaml_format_detect(self):
_yaml = '- {age: 90, first_name: John, last_name: Adams}'
_tsv = 'foo\tbar'
_bunk = (
'¡¡¡¡¡¡---///\n\n\n¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
)
self.assertTrue(tablib.formats.yaml.detect(_yaml))
self.assertFalse(tablib.formats.yaml.detect(_bunk))
self.assertFalse(tablib.formats.yaml.detect(_tsv))
def test_auto_format_detect(self):
_yaml = '- {age: 90, first_name: John, last_name: Adams}'
_json = '[{"last_name": "Adams","age": 90,"first_name": "John"}]'
_csv = '1,2,3\n4,5,6\n7,8,9\n'
_tsv = '1\t2\t3\n4\t5\t6\n7\t8\t9\n'
_bunk = '¡¡¡¡¡¡---///\n\n\n¡¡£™∞¢£§∞§¶•¶ª∞¶•ªº••ª–º§•†•§º¶•†¥ª–º•§ƒø¥¨©πƒø†ˆ¥ç©¨√øˆ¥≈†ƒ¥ç©ø¨çˆ¥ƒçø¶'
self.assertEqual(tablib.detect_format(_yaml), 'yaml')
self.assertEqual(tablib.detect_format(_csv), 'csv')
self.assertEqual(tablib.detect_format(_tsv), 'tsv')
self.assertEqual(tablib.detect_format(_json), 'json')
self.assertEqual(tablib.detect_format(_bunk), None)
def test_transpose(self):
transposed_founders = self.founders.transpose()
first_row = transposed_founders[0]
second_row = transposed_founders[1]
self.assertEqual(transposed_founders.headers,
["first_name","John", "George", "Thomas"])
self.assertEqual(first_row,
("last_name","Adams", "Washington", "Jefferson"))
self.assertEqual(second_row,
("gpa",90, 67, 50))
def test_transpose_multiple_headers(self):
data = tablib.Dataset()
data.headers = ("first_name", "last_name", "age")
data.append(('John', 'Adams', 90))
data.append(('George', 'Washington', 67))
data.append(('John', 'Tyler', 71))
self.assertEqual(data.transpose().transpose().dict, data.dict)
def test_row_stacking(self):
to_join = tablib.Dataset(headers=self.founders.headers)
for row in self.founders:
to_join.append(row=row)
row_stacked = self.founders.stack(to_join)
for column in row_stacked.headers:
original_data = self.founders[column]
expected_data = original_data + original_data
self.assertEqual(row_stacked[column], expected_data)
def test_column_stacking(self):
to_join = tablib.Dataset(headers=self.founders.headers)
for row in self.founders:
to_join.append(row=row)
column_stacked = self.founders.stack_cols(to_join)
for index, row in enumerate(column_stacked):
original_data = self.founders[index]
expected_data = original_data + original_data
self.assertEqual(row, expected_data)
self.assertEqual(column_stacked[0],
("John", "Adams", 90, "John", "Adams", 90))
def test_sorting(self):
sorted_data = self.founders.sort(col="first_name")
self.assertEqual(sorted_data.title, 'Founders')
first_row = sorted_data[0]
second_row = sorted_data[2]
third_row = sorted_data[1]
expected_first = self.founders[1]
expected_second = self.founders[2]
expected_third = self.founders[0]
self.assertEqual(first_row, expected_first)
self.assertEqual(second_row, expected_second)
self.assertEqual(third_row, expected_third)
def test_remove_duplicates(self):
self.founders.append(self.john)
self.founders.append(self.george)
self.founders.append(self.tom)
self.assertEqual(self.founders[0], self.founders[3])
self.assertEqual(self.founders[1], self.founders[4])
self.assertEqual(self.founders[2], self.founders[5])
self.assertEqual(self.founders.height, 6)
self.founders.remove_duplicates()
self.assertEqual(self.founders[0], self.john)
self.assertEqual(self.founders[1], self.george)
self.assertEqual(self.founders[2], self.tom)
self.assertEqual(self.founders.height, 3)
def test_wipe(self):
    """wipe() must clear the dataset so rows of a new width can be appended.

    Uses assertEqual instead of assertTrue(a == b): on failure it reports
    the two values instead of just "False is not true".
    """
    new_row = (1, 2, 3)
    data.append(new_row)
    self.assertEqual(data.width, len(new_row))
    self.assertEqual(data[0], new_row)

    data.wipe()
    new_row = (1, 2, 3, 4)  # different width is only legal after wipe()
    data.append(new_row)
    self.assertEqual(data.width, len(new_row))
    self.assertEqual(data[0], new_row)
def test_subset(self):
rows = (0, 2)
columns = ('first_name','gpa')
data.headers = self.headers
data.append(self.john)
data.append(self.george)
data.append(self.tom)
subset = data.subset(rows=rows, cols=columns)
self.assertEqual(type(subset), tablib.Dataset)
self.assertEqual(subset.headers, list(columns))
self.assertEqual(subset._data[0].list, ['John', 90])
self.assertEqual(subset._data[1].list, ['Thomas', 50])
def test_formatters(self):
    """add_formatter must transform every cell of the targeted column."""

    def to_upper(cell_value):
        return str(cell_value).upper()

    self.founders.add_formatter('last_name', to_upper)
    last_names = [row['last_name'] for row in self.founders.dict]
    for name in last_names:
        self.assertTrue(name.isupper())
def test_unicode_csv(self):
data = tablib.Dataset()
if sys.version_info[0] > 2:
data.append(['\xfc', '\xfd'])
else:
exec("data.append([u'\xfc', u'\xfd'])")
data.csv
def test_csv_column_select(self):
data = tablib.Dataset()
data.csv = self.founders.csv
headers = data.headers
self.assertTrue(isinstance(headers[0], unicode))
orig_first_name = self.founders[self.headers[0]]
csv_first_name = data[headers[0]]
self.assertEqual(orig_first_name, csv_first_name)
def test_csv_column_delete(self):
data = tablib.Dataset()
data.csv = self.founders.csv
target_header = data.headers[0]
self.assertTrue(isinstance(target_header, unicode))
del data[target_header]
self.assertTrue(target_header not in data.headers)
def test_csv_column_sort(self):
data = tablib.Dataset()
data.csv = self.founders.csv
orig_target_header = self.founders.headers[1]
target_header = data.headers[1]
self.founders.sort(orig_target_header)
data.sort(target_header)
self.assertEqual(self.founders[orig_target_header], data[target_header])
def test_unicode_renders_markdown_table(self):
self.founders.append(('Old', 'Man', 100500))
self.assertEqual('first_name|last_name |gpa ', unicode(self.founders).split('\n')[0])
def test_databook_add_sheet_accepts_only_dataset_instances(self):
class NotDataset(object):
def append(self, item):
pass
dataset = NotDataset()
dataset.append(self.john)
self.assertRaises(tablib.InvalidDatasetType, book.add_sheet, dataset)
def test_databook_add_sheet_accepts_dataset_subclasses(self):
class DatasetSubclass(tablib.Dataset):
pass
dataset = DatasetSubclass()
dataset.append(self.john)
dataset.append(self.tom)
try:
book.add_sheet(dataset)
except tablib.InvalidDatasetType:
self.fail("Subclass of tablib.Dataset should be accepted by Databook.add_sheet")
def test_csv_formatter_support_kwargs(self):
data.append(self.john)
data.append(self.george)
data.headers = self.headers
expected = 'first_name;last_name;gpa\nJohn;Adams;90\nGeorge;Washington;67\n'
kwargs = dict(delimiter=';', lineterminator='\n')
_csv = data.export('csv', **kwargs)
self.assertEqual(expected, _csv)
d1 = tablib.import_set(_csv, format="csv")
self.assertEqual(1, len(d1.headers))
d2 = tablib.import_set(_csv, format="csv", **kwargs)
self.assertEqual(3, len(d2.headers))
def test_databook_formatter_support_kwargs(self):
self.founders.export('xlsx', freeze_panes=False)
if __name__ == '__main__':
unittest.main()
| true | true |
f7f703f86b7893d23ba25c2035987377669cf8a2 | 9,783 | py | Python | models/unet_original_dice&BCE.py | FarCaptain/MyUnet | a02516a7ffa9e6fd5730e05fb5f95386bbb4b3d6 | [
"MIT"
] | 1 | 2021-12-28T11:14:04.000Z | 2021-12-28T11:14:04.000Z | models/unet_original_dice&BCE.py | FarCaptain/MyUnet | a02516a7ffa9e6fd5730e05fb5f95386bbb4b3d6 | [
"MIT"
] | null | null | null | models/unet_original_dice&BCE.py | FarCaptain/MyUnet | a02516a7ffa9e6fd5730e05fb5f95386bbb4b3d6 | [
"MIT"
] | null | null | null |
# coding: utf-8
# In[1]:
import matplotlib as mpl
import matplotlib.pyplot as plt
#get_ipython().magic('matplotlib inline')
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
import pandas as pd
import os
import sys
import time
#import keras
#from keras.layers.convolutional import Conv2D
import tensorflow as tf
tf.enable_eager_execution()
from tensorflow import keras
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
print(module.__name__, module.__version__)
# In[2]:
def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
    """Apply two identical Conv2D (+ optional BatchNorm) + ReLU stages.

    Args:
        input_tensor: Keras tensor the stack is applied to.
        n_filters: number of filters in each convolution.
        kernel_size: side length of the square convolution kernel.
        batchnorm: insert BatchNormalization before each ReLU when True.

    Returns:
        The Keras tensor produced by the second ReLU.
    """
    x = input_tensor
    for _ in range(2):  # both stages are configured identically
        x = keras.layers.Conv2D(
            filters=n_filters,
            kernel_size=(kernel_size, kernel_size),
            kernel_initializer='he_normal',
            padding='same',
        )(x)
        if batchnorm:
            x = keras.layers.BatchNormalization()(x)
        x = keras.layers.Activation('relu')(x)
    return x
def get_unet(input_img, n_filters = 16, dropout = 0.1, batchnorm = False):
# Build a 4-level U-Net over a fixed 224x800x1 input, ending in a
# 1-channel sigmoid map (binary segmentation), and return the Keras Model.
# NOTE(review): the `input_img` argument is immediately overwritten by a
# fresh keras.Input below, so whatever the caller passes (e.g. the scaled
# training tensor later in this script) is silently ignored — confirm
# whether the parameter should be removed or honored.
# Contracting Path
input_img = keras.Input(shape = [224,800,1])#224*800
# Encoder: each level doubles the filter count and halves H and W.
c1 = conv2d_block(input_img, n_filters * 1, kernel_size = 3, batchnorm = batchnorm)
p1 = keras.layers.MaxPooling2D((2, 2))(c1)
p1 = keras.layers.Dropout(dropout)(p1)
c2 = conv2d_block(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm)
p2 = keras.layers.MaxPooling2D((2, 2))(c2)
p2 = keras.layers.Dropout(dropout)(p2)
c3 = conv2d_block(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm)
p3 = keras.layers.MaxPooling2D((2, 2))(c3)
p3 = keras.layers.Dropout(dropout)(p3)
c4 = conv2d_block(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm)
p4 = keras.layers.MaxPooling2D((2, 2))(c4)
p4 = keras.layers.Dropout(dropout)(p4)
# Bottleneck at 1/16 resolution with the widest feature maps.
c5 = conv2d_block(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm)
# Expansive Path
# Decoder: transposed conv upsamples, then the matching encoder output is
# concatenated in as a skip connection before the conv block.
u6 = keras.layers.Conv2DTranspose(n_filters * 8, (3, 3), strides = (2, 2), padding = 'same')(c5)
u6 = keras.layers.concatenate([u6, c4])
u6 = keras.layers.Dropout(dropout)(u6)
c6 = conv2d_block(u6, n_filters * 8, kernel_size = 3, batchnorm = batchnorm)
u7 = keras.layers.Conv2DTranspose(n_filters * 4, (3, 3), strides = (2, 2), padding = 'same')(c6)
u7 = keras.layers.concatenate([u7, c3])
u7 = keras.layers.Dropout(dropout)(u7)
c7 = conv2d_block(u7, n_filters * 4, kernel_size = 3, batchnorm = batchnorm)
u8 = keras.layers.Conv2DTranspose(n_filters * 2, (3, 3), strides = (2, 2), padding = 'same')(c7)
u8 = keras.layers.concatenate([u8, c2])
u8 = keras.layers.Dropout(dropout)(u8)
c8 = conv2d_block(u8, n_filters * 2, kernel_size = 3, batchnorm = batchnorm)
u9 = keras.layers.Conv2DTranspose(n_filters * 1, (3, 3), strides = (2, 2), padding = 'same')(c8)
u9 = keras.layers.concatenate([u9, c1])
u9 = keras.layers.Dropout(dropout)(u9)
c9 = conv2d_block(u9, n_filters * 1, kernel_size = 3, batchnorm = batchnorm)
# 1x1 conv + sigmoid collapses features to a per-pixel probability.
outputs = keras.layers.Conv2D(1, (1, 1), activation='sigmoid')(c9)
model = keras.Model(inputs=[input_img], outputs=[outputs])
return model
# In[3]:
def get_number_list(ss):
    """Parse a comma-separated string of integers into a list.

    Args:
        ss: text such as "1,2,3" (whitespace around numbers is tolerated,
            since int() strips it).

    Returns:
        list[int] with one entry per comma-separated field.

    Raises:
        ValueError: if any field is not a valid integer (including the
            empty string produced by an empty input).
    """
    # Comprehension replaces the original reassign-twice pattern.
    return [int(x) for x in ss.split(',')]
#224*1016 -> 224*800
def read_file(index):
    """Load one 224x800 intensity matrix from the SaveData directory.

    Reads ``D:\\0Sphinx\\SaveData\\data_<index>.txt`` where each line is a
    comma-separated row of integers; only the first 800 columns are kept
    (the raw files are wider, 224x1016 per the script's comments).

    Args:
        index: 1-based sample index used to build the file name.

    Returns:
        numpy.ndarray of shape (224, 800), dtype int.
    """
    path = "D:\\0Sphinx\\SaveData\\data_" + str(index) + ".txt"
    mat = np.zeros((224, 800), dtype=int)
    # 'with' guarantees the handle is closed even if parsing raises;
    # the original leaked the file object on any exception.
    with open(path, "r") as f:
        for i, line in enumerate(f):
            row = get_number_list(line)[:800]
            mat[i, :len(row)] = row
    return mat
def read_label(index):
    """Load one 224x800 label matrix from the SaveData directory.

    Reads ``D:\\0Sphinx\\SaveData\\data_<index>_label.txt`` where each line
    is a comma-separated row of integers; only the first 800 columns are
    kept. Mirrors read_file() except for the file-name suffix.

    Args:
        index: 1-based sample index used to build the file name.

    Returns:
        numpy.ndarray of shape (224, 800), dtype int.
    """
    path = "D:\\0Sphinx\\SaveData\\data_" + str(index) + "_label.txt"
    mat = np.zeros((224, 800), dtype=int)
    # 'with' guarantees the handle is closed even if parsing raises;
    # the original leaked the file object on any exception.
    with open(path, "r") as f:
        for i, line in enumerate(f):
            row = get_number_list(line)[:800]
            mat[i, :len(row)] = row
    return mat
# Load all 720 samples into parallel image/label lists, then stack them
# into 4-D arrays of shape (N, 224, 800, 1) for the Keras model.
images = []
labels = []
for i in range(1,721):
images.append(read_file(i))
string = "D:\\0Sphinx\\SaveData\\data_"+str(i)+"_label.txt"
if os.path.exists(string):
labels.append(read_label(i))
else:
# Samples with no label file fall back to sample 78's label.
# NOTE(review): presumably data_78_label.txt is an all-zero
# (all-background) matrix, per the original comment — confirm.
labels.append(read_label(78)) # zero matrix
X = np.array(images) # dataset images
X = np.reshape(X,(-1,224,800,1))
print(X.shape)
Y = np.array(labels)
Y = np.reshape(Y,(-1,224,800,1))
print(Y.shape)
#split into train,valid,test
#x_train_all, x_test, y_train_all, y_test = train_test_split(X,Y,train_size=0.79,random_state=30)
# In[4]:
x_train_all, x_test, y_train_all, y_test = train_test_split(X,Y,train_size=0.896,random_state=30)
print(x_train_all.shape)
print(x_test.shape)
# In[5]:
def make_dataset(images, labels, epochs, batch_size, shuffle=True):
    """Build a tf.data pipeline over paired (image, label) tensors.

    Optionally shuffles with a 10000-element buffer, then repeats the data
    for `epochs` passes and groups it into `batch_size` batches.
    """
    ds = tf.data.Dataset.from_tensor_slices((images, labels))
    if shuffle:
        ds = ds.shuffle(10000)
    return ds.repeat(epochs).batch(batch_size)
# In[6]:
x_train, x_valid = x_train_all[:570], x_train_all[570:]
y_train, y_valid = y_train_all[:570], y_train_all[570:]
print(x_train.shape," ", x_valid.shape," ", x_test.shape) #6:2:2
#scale here
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
#x_train: [None,224,800,1] -> [None,179200,1]
x_train_scaled = scaler.fit_transform(
x_train.astype(np.float32).reshape(-1,1)).reshape(-1,224,800,1)
x_valid_scaled = scaler.transform(
x_valid.astype(np.float32).reshape(-1,1)).reshape(-1,224,800,1)
x_test_scaled = scaler.transform(
x_test.astype(np.float32).reshape(-1,1)).reshape(-1,224,800,1)
y_train.astype(np.float32)
y_valid.astype(np.float32)
y_test.astype(np.float32)
# In[7]:
print(np.max(x_train_scaled), np.min(x_train_scaled))
train_set = make_dataset(x_train_scaled,y_train,epochs=20 ,batch_size=10)
valid_set = make_dataset(x_valid_scaled,y_valid,epochs=10 ,batch_size=15)#75/5 = 15
test_set = make_dataset(x_test_scaled,y_test,epochs=10 ,batch_size=15)
# In[8]:
x_train_scaled= tf.convert_to_tensor(x_train_scaled)
x_valid_scaled= tf.convert_to_tensor(x_valid_scaled)
x_test_scaled= tf.convert_to_tensor(x_test_scaled)
y_train= tf.convert_to_tensor(y_train)
y_valid= tf.convert_to_tensor(y_valid)
y_test= tf.convert_to_tensor(y_test)
#x_train = tf.reshape(x_train,[-1,1,224,1016])
#x_valid = tf.reshape(x_valid,[-1,1,224,1016])
#x_test = tf.reshape(x_test,[-1,224,1016])
print(x_train.shape," ", x_valid.shape," ", x_test.shape) #6:2:2
# In[9]:
from tensorflow.contrib.opt import AdamWOptimizer
def dice_coe(output, target, loss_type='sorensen', axis=(1, 2, 3), smooth=1.):
    """Soft dice (Sørensen or Jaccard) coefficient between two batches.

    Commonly used to score binary image segmentation, where labels are
    binary. The coefficient lies in [0, 1]; 1 means a perfect match.

    Args:
        output: Tensor of predictions, shape [batch_size, ...].
        target: Tensor of targets with the same shape as `output`.
        loss_type: ``'jaccard'`` or ``'sorensen'`` denominator variant.
        axis: dimensions reduced per-sample before averaging.
        smooth: value added to numerator and denominator. If both output
            and target are empty the dice is 1; if only one is empty the
            dice is ~``smooth / (small_value + smooth)``, so a larger
            smooth yields a larger dice in that case.

    Returns:
        Scalar tensor: the dice coefficient averaged over the batch.

    Raises:
        ValueError: if ``loss_type`` is not one of the supported variants.

    References:
        https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient
    """
    inse = tf.reduce_sum(output * target, axis=axis)
    if loss_type == 'jaccard':
        l = tf.reduce_sum(output * output, axis=axis)
        r = tf.reduce_sum(target * target, axis=axis)
    elif loss_type == 'sorensen':
        l = tf.reduce_sum(output, axis=axis)
        r = tf.reduce_sum(target, axis=axis)
    else:
        # ValueError (a subclass of Exception, so existing handlers still
        # catch it) with a corrected, informative message — the original
        # raised a bare Exception("Unknow loss_type").
        raise ValueError("Unknown loss_type: %r" % (loss_type,))
    dice = (2. * inse + smooth) / (l + r + smooth)
    return tf.reduce_mean(dice)
def dice_coef_loss(y_true, y_pred):
# Dice loss = 1 - dice coefficient, so minimizing it maximizes overlap.
return 1. - dice_coe(y_true, y_pred)
from keras.losses import binary_crossentropy
def dice_p_bce(in_gt, in_pred):
# Combined loss: mean binary cross-entropy plus dice loss (weight 1.0 each).
return 1.0*tf.reduce_mean(binary_crossentropy(in_gt, in_pred)) + dice_coef_loss(in_gt, in_pred)
# adam = tf.train.AdamOptimizer()
# adam with weight decay
adamw = AdamWOptimizer(weight_decay=1e-4)
model = get_unet(x_train_scaled, n_filters = 16, dropout = 0.4, batchnorm = False)
model.compile(optimizer = adamw,
loss=dice_p_bce,
metrics=[dice_coe])
# In[10]:
model.summary()
# In[ ]:
batch_size = 10
history = model.fit(train_set,
validation_data=valid_set,
shuffle = True,
steps_per_epoch = 570 // batch_size, #batch_size = data_size/steps_per_epoch
validation_steps = 5, #75
epochs = 20
) # starts training
# In[ ]:
model.evaluate(test_set, steps = 5)#75
# In[ ]:
# In[ ]:
| 30.101538 | 273 | 0.660227 |
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
import pandas as pd
import os
import sys
import time
import tensorflow as tf
tf.enable_eager_execution()
from tensorflow import keras
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
print(module.__name__, module.__version__)
def conv2d_block(input_tensor, n_filters, kernel_size = 3, batchnorm = True):
x = keras.layers.Conv2D(filters = n_filters, kernel_size = (kernel_size, kernel_size), kernel_initializer = 'he_normal', padding = 'same')(input_tensor)
if batchnorm:
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation('relu')(x)
x = keras.layers.Conv2D(filters = n_filters, kernel_size = (kernel_size, kernel_size),
kernel_initializer = 'he_normal', padding = 'same')(x)
if batchnorm:
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation('relu')(x)
return x
def get_unet(input_img, n_filters = 16, dropout = 0.1, batchnorm = False):
input_img = keras.Input(shape = [224,800,1])
c1 = conv2d_block(input_img, n_filters * 1, kernel_size = 3, batchnorm = batchnorm)
p1 = keras.layers.MaxPooling2D((2, 2))(c1)
p1 = keras.layers.Dropout(dropout)(p1)
c2 = conv2d_block(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm)
p2 = keras.layers.MaxPooling2D((2, 2))(c2)
p2 = keras.layers.Dropout(dropout)(p2)
c3 = conv2d_block(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm)
p3 = keras.layers.MaxPooling2D((2, 2))(c3)
p3 = keras.layers.Dropout(dropout)(p3)
c4 = conv2d_block(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm)
p4 = keras.layers.MaxPooling2D((2, 2))(c4)
p4 = keras.layers.Dropout(dropout)(p4)
c5 = conv2d_block(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm)
u6 = keras.layers.Conv2DTranspose(n_filters * 8, (3, 3), strides = (2, 2), padding = 'same')(c5)
u6 = keras.layers.concatenate([u6, c4])
u6 = keras.layers.Dropout(dropout)(u6)
c6 = conv2d_block(u6, n_filters * 8, kernel_size = 3, batchnorm = batchnorm)
u7 = keras.layers.Conv2DTranspose(n_filters * 4, (3, 3), strides = (2, 2), padding = 'same')(c6)
u7 = keras.layers.concatenate([u7, c3])
u7 = keras.layers.Dropout(dropout)(u7)
c7 = conv2d_block(u7, n_filters * 4, kernel_size = 3, batchnorm = batchnorm)
u8 = keras.layers.Conv2DTranspose(n_filters * 2, (3, 3), strides = (2, 2), padding = 'same')(c7)
u8 = keras.layers.concatenate([u8, c2])
u8 = keras.layers.Dropout(dropout)(u8)
c8 = conv2d_block(u8, n_filters * 2, kernel_size = 3, batchnorm = batchnorm)
u9 = keras.layers.Conv2DTranspose(n_filters * 1, (3, 3), strides = (2, 2), padding = 'same')(c8)
u9 = keras.layers.concatenate([u9, c1])
u9 = keras.layers.Dropout(dropout)(u9)
c9 = conv2d_block(u9, n_filters * 1, kernel_size = 3, batchnorm = batchnorm)
outputs = keras.layers.Conv2D(1, (1, 1), activation='sigmoid')(c9)
model = keras.Model(inputs=[input_img], outputs=[outputs])
return model
def get_number_list(ss):
ss = ss.split(',')
ss = [int(x) for x in ss]
return ss
def read_file(index):
mat = np.array([[0]*800 for i in range(224) ])
string = "D:\\0Sphinx\\SaveData\\data_"+str(index)+".txt"
f = open(string,"r")
line = f.readline()
i = 0
while line:
j = 0
line = get_number_list(line)
line = line[:800]
for x in line:
mat[i][j] = x
j+=1
i+=1
line = f.readline()
f.close()
return mat
def read_label(index):
mat =np.array([[0]*800 for i in range(224) ])
string = "D:\\0Sphinx\\SaveData\\data_"+str(index)+"_label.txt"
f = open(string,"r")
line = f.readline()
i = 0
while line:
j = 0
line = get_number_list(line)
line = line[:800]
for x in line:
mat[i][j] = x
j+=1
i+=1
line = f.readline()
f.close()
return mat
images = []
labels = []
for i in range(1,721):
images.append(read_file(i))
string = "D:\\0Sphinx\\SaveData\\data_"+str(i)+"_label.txt"
if os.path.exists(string):
labels.append(read_label(i))
else:
labels.append(read_label(78))
X = np.array(images)
X = np.reshape(X,(-1,224,800,1))
print(X.shape)
Y = np.array(labels)
Y = np.reshape(Y,(-1,224,800,1))
print(Y.shape)
x_train_all, x_test, y_train_all, y_test = train_test_split(X,Y,train_size=0.896,random_state=30)
print(x_train_all.shape)
print(x_test.shape)
def make_dataset(images, labels, epochs, batch_size, shuffle = True):
dataset = tf.data.Dataset.from_tensor_slices((images, labels))
if shuffle:
dataset = dataset.shuffle(10000)
dataset = dataset.repeat(epochs).batch(batch_size)
return dataset
x_train, x_valid = x_train_all[:570], x_train_all[570:]
y_train, y_valid = y_train_all[:570], y_train_all[570:]
print(x_train.shape," ", x_valid.shape," ", x_test.shape)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(
x_train.astype(np.float32).reshape(-1,1)).reshape(-1,224,800,1)
x_valid_scaled = scaler.transform(
x_valid.astype(np.float32).reshape(-1,1)).reshape(-1,224,800,1)
x_test_scaled = scaler.transform(
x_test.astype(np.float32).reshape(-1,1)).reshape(-1,224,800,1)
y_train.astype(np.float32)
y_valid.astype(np.float32)
y_test.astype(np.float32)
print(np.max(x_train_scaled), np.min(x_train_scaled))
train_set = make_dataset(x_train_scaled,y_train,epochs=20 ,batch_size=10)
valid_set = make_dataset(x_valid_scaled,y_valid,epochs=10 ,batch_size=15)
test_set = make_dataset(x_test_scaled,y_test,epochs=10 ,batch_size=15)
x_train_scaled= tf.convert_to_tensor(x_train_scaled)
x_valid_scaled= tf.convert_to_tensor(x_valid_scaled)
x_test_scaled= tf.convert_to_tensor(x_test_scaled)
y_train= tf.convert_to_tensor(y_train)
y_valid= tf.convert_to_tensor(y_valid)
y_test= tf.convert_to_tensor(y_test)
print(x_train.shape," ", x_valid.shape," ", x_test.shape)
from tensorflow.contrib.opt import AdamWOptimizer
def dice_coe(output, target, loss_type='sorensen', axis=(1, 2, 3), smooth=1.):
inse = tf.reduce_sum(output * target, axis=axis)
if loss_type == 'jaccard':
l = tf.reduce_sum(output * output, axis=axis)
r = tf.reduce_sum(target * target, axis=axis)
elif loss_type == 'sorensen':
l = tf.reduce_sum(output, axis=axis)
r = tf.reduce_sum(target, axis=axis)
else:
raise Exception("Unknow loss_type")
dice = (2. * inse + smooth) / (l + r + smooth)
dice = tf.reduce_mean(dice)
return dice
def dice_coef_loss(y_true, y_pred):
    # Dice loss: 1 - Dice coefficient, so better overlap -> lower loss.
    return 1. - dice_coe(y_true, y_pred)
from keras.losses import binary_crossentropy
def dice_p_bce(in_gt, in_pred):
    # Combined loss: mean binary cross-entropy plus Dice loss (the 1.0*
    # factor is an explicit, tunable weight on the BCE term).
    return 1.0*tf.reduce_mean(binary_crossentropy(in_gt, in_pred)) + dice_coef_loss(in_gt, in_pred)
# --- Build, compile and train the U-Net ------------------------------------
# AdamW: Adam with decoupled weight decay (tf.contrib, TF 1.x only).
adamw = AdamWOptimizer(weight_decay=1e-4)
# NOTE(review): get_unet is defined elsewhere in this script (not visible).
model = get_unet(x_train_scaled, n_filters = 16, dropout = 0.4, batchnorm = False)
model.compile(optimizer = adamw,
              loss=dice_p_bce,
              metrics=[dice_coe])
model.summary()
batch_size = 10
# train_set was built with epochs=20 / batch_size=10 above, matching the
# epochs and steps_per_epoch (570 training samples) used here.
history = model.fit(train_set,
                    validation_data=valid_set,
                    shuffle = True,
                    steps_per_epoch = 570 // batch_size,
                    validation_steps = 5,
                    epochs = 20
                   )
model.evaluate(test_set, steps = 5)
| true | true |
f7f704dab6740d9e28698ecf657c76bf7754f1a3 | 9,160 | py | Python | longformer_scripts/sequence_encoder.py | ChristineBasta/fairseq | c349ec3ec7fa0da7306c91769fbdc6146569a0ee | [
"MIT"
] | null | null | null | longformer_scripts/sequence_encoder.py | ChristineBasta/fairseq | c349ec3ec7fa0da7306c91769fbdc6146569a0ee | [
"MIT"
] | null | null | null | longformer_scripts/sequence_encoder.py | ChristineBasta/fairseq | c349ec3ec7fa0da7306c91769fbdc6146569a0ee | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
from torch import Tensor
from fairseq.ngram_repeat_block import NGramRepeatBlock
class SequenceEncoder(nn.Module):
    """Runs only the encoder half of a fairseq model ensemble.

    Unlike SequenceGenerator this never decodes: _generate() stops after
    model.forward_encoder() and returns the raw encoder outputs.
    """
    def __init__(
        self,
        models
    ):
        """Generates encodings of a given source sentence.
        Args:
            models (List[~fairseq.models.FairseqModel]): ensemble of models,
                currently support fairseq.models.TransformerModel for scripting
        """
        super().__init__()
        # Accept a prebuilt EnsembleModel or wrap a plain list of models.
        if isinstance(models, EnsembleModel):
            self.model = models
        else:
            self.model = EnsembleModel(models)
        # Inference-only module: eval mode disables dropout etc.
        self.model.eval()
    def cuda(self):
        # Move the wrapped ensemble to GPU; return self so calls can chain.
        self.model.cuda()
        return self
    @torch.no_grad()
    def forward(
        self,
        sample: Dict[str, Dict[str, Tensor]],
        prefix_tokens: Optional[Tensor] = None,
        bos_token: Optional[int] = None,
    ):
        """Generate a batch of translations.
        Args:
            sample (dict): batch
            prefix_tokens (torch.LongTensor, optional): force decoder to begin
                with these tokens
            bos_token (int, optional): beginning of sentence token
                (default: self.eos)
        """
        return self._generate(sample, prefix_tokens, bos_token=bos_token)
    # TODO(myleott): unused, deprecate after pytorch-translate migration
    def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):
        """Iterate over a batched dataset and yield individual translations.
        Args:
            cuda (bool, optional): use GPU for generation
            timer (StopwatchMeter, optional): time generations
        """
        # NOTE(review): self.pad is never assigned in this class, and
        # self.generate() is called with one positional arg although
        # generate() expects (models, sample) — confirm before relying on
        # this method (it is flagged unused above).
        for sample in data_itr:
            s = utils.move_to_cuda(sample) if cuda else sample
            if "net_input" not in s:
                continue
            input = s["net_input"]
            # model.forward normally channels prev_output_tokens into the decoder
            # separately, but SequenceGenerator directly calls model.encoder
            encoder_input = {
                k: v for k, v in input.items() if k != "prev_output_tokens"
            }
            if timer is not None:
                timer.start()
            with torch.no_grad():
                hypos = self.generate(encoder_input)
            if timer is not None:
                timer.stop(sum(len(h[0]["tokens"]) for h in hypos))
            for i, id in enumerate(s["id"].data):
                # remove padding
                src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad)
                ref = (
                    utils.strip_pad(s["target"].data[i, :], self.pad)
                    if s["target"] is not None
                    else None
                )
                yield id, src, ref, hypos[i]
    @torch.no_grad()
    def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs) -> List[List[Dict[str, Tensor]]]:
        """Generate translations. Match the api of other fairseq generators.
        Args:
            models (List[~fairseq.models.FairseqModel]): ensemble of models
            sample (dict): batch
            prefix_tokens (torch.LongTensor, optional): force decoder to begin
                with these tokens
            constraints (torch.LongTensor, optional): force decoder to include
                the list of constraints
            bos_token (int, optional): beginning of sentence token
                (default: self.eos)
        """
        # 'models' is accepted only for API compatibility; it is unused here.
        return self._generate(sample, **kwargs)
    def _generate(
        self,
        sample: Dict[str, Dict[str, Tensor]],
        prefix_tokens: Optional[Tensor] = None,
        constraints: Optional[Tensor] = None,
        bos_token: Optional[int] = None,
    ):
        # prefix_tokens/constraints/bos_token are unused: this class stops
        # after the encoder pass. incremental_states is built (TorchScript
        # annotation) but never consumed.
        incremental_states = torch.jit.annotate(
            List[Dict[str, Dict[str, Optional[Tensor]]]],
            [
                torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
                for i in range(self.model.models_size)
            ],
        )
        net_input = sample["net_input"]
        encoder_outs = self.model.forward_encoder(net_input)
        return encoder_outs
class EnsembleModel(nn.Module):
    """A wrapper around an ensemble of models."""
    def __init__(self, models):
        super().__init__()
        self.models_size = len(models)
        # method '__len__' is not supported in ModuleList for torch script
        self.single_model = models[0]
        self.models = nn.ModuleList(models)
        # True only when every model has an incremental decoder.
        self.has_incremental: bool = False
        if all(
            hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
            for m in models
        ):
            self.has_incremental = True
    def forward(self):
        # Intentionally a no-op: callers use forward_encoder/forward_decoder.
        pass
    def has_encoder(self):
        # Probe one model; the ensemble is assumed homogeneous.
        return hasattr(self.single_model, "encoder")
    def has_incremental_states(self):
        return self.has_incremental
    def max_decoder_positions(self):
        # The ensemble is limited by its most restrictive member.
        return min([m.max_decoder_positions() for m in self.models])
    @torch.jit.export
    def forward_encoder(self, net_input: Dict[str, Tensor]):
        # Returns None for decoder-only ensembles, else one encoder-out
        # per model, in self.models order.
        if not self.has_encoder():
            return None
        return [model.encoder.forward_torchscript(net_input) for model in self.models]
    @torch.jit.export
    def forward_decoder(
        self,
        tokens,
        encoder_outs: List[Dict[str, List[Tensor]]],
        incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
        temperature: float = 1.0,
    ):
        # Runs one decoding step per model and averages log-probs (and
        # attention, when present) across the ensemble.
        log_probs = []
        avg_attn: Optional[Tensor] = None
        encoder_out: Optional[Dict[str, List[Tensor]]] = None
        for i, model in enumerate(self.models):
            if self.has_encoder():
                encoder_out = encoder_outs[i]
            # decode each model
            if self.has_incremental_states():
                decoder_out = model.decoder.forward(
                    tokens,
                    encoder_out=encoder_out,
                    incremental_state=incremental_states[i],
                )
            else:
                decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)
            # The decoder may return attention as a Tensor, as a dict with
            # an "attn" entry, or not at all; normalize to the last step.
            attn: Optional[Tensor] = None
            decoder_len = len(decoder_out)
            if decoder_len > 1 and decoder_out[1] is not None:
                if isinstance(decoder_out[1], Tensor):
                    attn = decoder_out[1]
                else:
                    attn_holder = decoder_out[1]["attn"]
                    if isinstance(attn_holder, Tensor):
                        attn = attn_holder
                    elif attn_holder is not None:
                        attn = attn_holder[0]
                if attn is not None:
                    attn = attn[:, -1, :]
            # Keep only the newest time step and apply temperature
            # (in-place div_ on the decoder output).
            decoder_out_tuple = (
                decoder_out[0][:, -1:, :].div_(temperature),
                None if decoder_len <= 1 else decoder_out[1],
            )
            probs = model.get_normalized_probs(
                decoder_out_tuple, log_probs=True, sample=None
            )
            probs = probs[:, -1, :]
            # Single-model fast path: no averaging needed.
            if self.models_size == 1:
                return probs, attn
            log_probs.append(probs)
            if attn is not None:
                if avg_attn is None:
                    avg_attn = attn
                else:
                    avg_attn.add_(attn)
        # Average in probability space: logsumexp of log-probs minus log(N).
        avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
            self.models_size
        )
        if avg_attn is not None:
            avg_attn.div_(self.models_size)
        return avg_probs, avg_attn
    @torch.jit.export
    def reorder_encoder_out(
        self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order
    ):
        """
        Reorder encoder output according to *new_order*.
        Args:
            encoder_out: output from the ``forward()`` method
            new_order (LongTensor): desired order
        Returns:
            *encoder_out* rearranged according to *new_order*
        """
        new_outs: List[Dict[str, List[Tensor]]] = []
        if not self.has_encoder():
            return new_outs
        for i, model in enumerate(self.models):
            assert encoder_outs is not None
            new_outs.append(
                model.encoder.reorder_encoder_out(encoder_outs[i], new_order)
            )
        return new_outs
    @torch.jit.export
    def reorder_incremental_state(
        self,
        incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
        new_order,
    ):
        # Delegates beam-reordering of cached decoder state to each model.
        if not self.has_incremental_states():
            return
        for i, model in enumerate(self.models):
            model.decoder.reorder_incremental_state_scripting(
                incremental_states[i], new_order
            )
| 33.800738 | 112 | 0.572707 |
import math
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
from torch import Tensor
from fairseq.ngram_repeat_block import NGramRepeatBlock
# NOTE: comment-stripped duplicate of the SequenceEncoder defined earlier in
# this dump (dataset "content_no_comment" column); see that copy for docs.
class SequenceEncoder(nn.Module):
    def __init__(
        self,
        models
    ):
        super().__init__()
        if isinstance(models, EnsembleModel):
            self.model = models
        else:
            self.model = EnsembleModel(models)
        self.model.eval()
    def cuda(self):
        self.model.cuda()
        return self
    @torch.no_grad()
    def forward(
        self,
        sample: Dict[str, Dict[str, Tensor]],
        prefix_tokens: Optional[Tensor] = None,
        bos_token: Optional[int] = None,
    ):
        return self._generate(sample, prefix_tokens, bos_token=bos_token)
    def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):
        for sample in data_itr:
            s = utils.move_to_cuda(sample) if cuda else sample
            if "net_input" not in s:
                continue
            input = s["net_input"]
            encoder_input = {
                k: v for k, v in input.items() if k != "prev_output_tokens"
            }
            if timer is not None:
                timer.start()
            with torch.no_grad():
                hypos = self.generate(encoder_input)
            if timer is not None:
                timer.stop(sum(len(h[0]["tokens"]) for h in hypos))
            for i, id in enumerate(s["id"].data):
                src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad)
                ref = (
                    utils.strip_pad(s["target"].data[i, :], self.pad)
                    if s["target"] is not None
                    else None
                )
                yield id, src, ref, hypos[i]
    @torch.no_grad()
    def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs) -> List[List[Dict[str, Tensor]]]:
        return self._generate(sample, **kwargs)
    def _generate(
        self,
        sample: Dict[str, Dict[str, Tensor]],
        prefix_tokens: Optional[Tensor] = None,
        constraints: Optional[Tensor] = None,
        bos_token: Optional[int] = None,
    ):
        incremental_states = torch.jit.annotate(
            List[Dict[str, Dict[str, Optional[Tensor]]]],
            [
                torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
                for i in range(self.model.models_size)
            ],
        )
        net_input = sample["net_input"]
        encoder_outs = self.model.forward_encoder(net_input)
        return encoder_outs
# NOTE: comment-stripped duplicate of the EnsembleModel defined earlier in
# this dump (dataset "content_no_comment" column); see that copy for docs.
class EnsembleModel(nn.Module):
    def __init__(self, models):
        super().__init__()
        self.models_size = len(models)
        self.single_model = models[0]
        self.models = nn.ModuleList(models)
        self.has_incremental: bool = False
        if all(
            hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
            for m in models
        ):
            self.has_incremental = True
    def forward(self):
        pass
    def has_encoder(self):
        return hasattr(self.single_model, "encoder")
    def has_incremental_states(self):
        return self.has_incremental
    def max_decoder_positions(self):
        return min([m.max_decoder_positions() for m in self.models])
    @torch.jit.export
    def forward_encoder(self, net_input: Dict[str, Tensor]):
        if not self.has_encoder():
            return None
        return [model.encoder.forward_torchscript(net_input) for model in self.models]
    @torch.jit.export
    def forward_decoder(
        self,
        tokens,
        encoder_outs: List[Dict[str, List[Tensor]]],
        incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
        temperature: float = 1.0,
    ):
        log_probs = []
        avg_attn: Optional[Tensor] = None
        encoder_out: Optional[Dict[str, List[Tensor]]] = None
        for i, model in enumerate(self.models):
            if self.has_encoder():
                encoder_out = encoder_outs[i]
            if self.has_incremental_states():
                decoder_out = model.decoder.forward(
                    tokens,
                    encoder_out=encoder_out,
                    incremental_state=incremental_states[i],
                )
            else:
                decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)
            attn: Optional[Tensor] = None
            decoder_len = len(decoder_out)
            if decoder_len > 1 and decoder_out[1] is not None:
                if isinstance(decoder_out[1], Tensor):
                    attn = decoder_out[1]
                else:
                    attn_holder = decoder_out[1]["attn"]
                    if isinstance(attn_holder, Tensor):
                        attn = attn_holder
                    elif attn_holder is not None:
                        attn = attn_holder[0]
                if attn is not None:
                    attn = attn[:, -1, :]
            decoder_out_tuple = (
                decoder_out[0][:, -1:, :].div_(temperature),
                None if decoder_len <= 1 else decoder_out[1],
            )
            probs = model.get_normalized_probs(
                decoder_out_tuple, log_probs=True, sample=None
            )
            probs = probs[:, -1, :]
            if self.models_size == 1:
                return probs, attn
            log_probs.append(probs)
            if attn is not None:
                if avg_attn is None:
                    avg_attn = attn
                else:
                    avg_attn.add_(attn)
        avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
            self.models_size
        )
        if avg_attn is not None:
            avg_attn.div_(self.models_size)
        return avg_probs, avg_attn
    @torch.jit.export
    def reorder_encoder_out(
        self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order
    ):
        new_outs: List[Dict[str, List[Tensor]]] = []
        if not self.has_encoder():
            return new_outs
        for i, model in enumerate(self.models):
            assert encoder_outs is not None
            new_outs.append(
                model.encoder.reorder_encoder_out(encoder_outs[i], new_order)
            )
        return new_outs
    @torch.jit.export
    def reorder_incremental_state(
        self,
        incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
        new_order,
    ):
        if not self.has_incremental_states():
            return
        for i, model in enumerate(self.models):
            model.decoder.reorder_incremental_state_scripting(
                incremental_states[i], new_order
            )
| true | true |
f7f706a9be7a12dadff0bc73a6a7babb0ab53dab | 1,594 | py | Python | test/test_events.py | odanoburu/rdflib | af625d0bc48b656b614629b9ad56df63b88a0d17 | [
"BSD-3-Clause"
] | 1 | 2021-08-09T16:32:00.000Z | 2021-08-09T16:32:00.000Z | test/test_events.py | odanoburu/rdflib | af625d0bc48b656b614629b9ad56df63b88a0d17 | [
"BSD-3-Clause"
] | null | null | null | test/test_events.py | odanoburu/rdflib | af625d0bc48b656b614629b9ad56df63b88a0d17 | [
"BSD-3-Clause"
] | null | null | null |
import unittest
from rdflib import events
# Marker event types carrying (key, value) / (key) payloads via the
# rdflib.events.Event keyword constructor.
class AddedEvent(events.Event):
    pass
class RemovedEvent(events.Event):
    pass
def subscribe_to(source, target):
    """Route add/remove events dispatched on *target* into *source*'s
    handlers, so mutations on *target* are mirrored into *source*."""
    wiring = (
        (AddedEvent, source._add_handler),
        (RemovedEvent, source._remove_handler),
    )
    for event_type, handler in wiring:
        target.subscribe(event_type, handler)
def subscribe_all(caches):
    """Cross-wire every cache in *caches* to every other one, so a
    mutation on any member is mirrored into all the rest."""
    for listener in caches:
        for broadcaster in caches:
            if broadcaster == listener:
                continue
            subscribe_to(listener, broadcaster)
class Cache(events.Dispatcher):
    """A dict-backed store whose mutations travel through events.

    __setitem__/__delitem__ never touch the dict directly: they dispatch
    AddedEvent/RemovedEvent, and the handlers (local ones registered in
    __init__, plus any peers wired up via subscribe_to/subscribe_all)
    apply the change to their own backing dicts.
    """
    def __init__(self, data=None):
        self._data = {} if data is None else data
        # Listen to our own dispatches so local writes take effect too.
        for event_type, handler in (
            (AddedEvent, self._add_handler),
            (RemovedEvent, self._remove_handler),
        ):
            self.subscribe(event_type, handler)
    def _add_handler(self, evt):
        # Apply a broadcast insertion/update to the local store.
        self._data[evt.key] = evt.value
    def _remove_handler(self, evt):
        # Apply a broadcast deletion to the local store.
        del self._data[evt.key]
    def __getitem__(self, k):
        return self._data[k]
    def __setitem__(self, k, v):
        # Route the write through the event machinery.
        self.dispatch(AddedEvent(key=k, value=v))
    def __delitem__(self, k):
        self.dispatch(RemovedEvent(key=k))
    def __contains__(self, k):
        return k in self._data
    # Legacy dict-style alias.
    has_key = __contains__
class EventTestCase(unittest.TestCase):
    def testEvents(self):
        # Three caches fully cross-wired: a write to any one must be
        # mirrored into the other two, and a delete anywhere removes
        # the key everywhere.
        c1 = Cache()
        c2 = Cache()
        c3 = Cache()
        subscribe_all([c1, c2, c3])
        c1['bob'] = 'uncle'
        assert c2['bob'] == 'uncle'
        assert c3['bob'] == 'uncle'
        del c3['bob']
        assert ('bob' in c1) == False
        assert ('bob' in c2) == False
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 21.835616 | 58 | 0.617942 |
import unittest
from rdflib import events
# NOTE: comment-stripped duplicate of the event classes/helpers defined
# earlier in this dump (dataset "content_no_comment" column).
class AddedEvent(events.Event):
    pass
class RemovedEvent(events.Event):
    pass
def subscribe_to(source, target):
    target.subscribe(AddedEvent, source._add_handler)
    target.subscribe(RemovedEvent, source._remove_handler)
def subscribe_all(caches):
    for cache in caches:
        for other in caches:
            if other != cache:
                subscribe_to(cache, other)
# NOTE: comment-stripped duplicate of the Cache class defined earlier in
# this dump (dataset "content_no_comment" column); see that copy for docs.
class Cache(events.Dispatcher):
    def __init__(self, data=None):
        if data is None:
            data = {}
        self._data = data
        self.subscribe(AddedEvent, self._add_handler)
        self.subscribe(RemovedEvent, self._remove_handler)
    def _add_handler(self, event):
        self._data[event.key] = event.value
    def _remove_handler(self, event):
        del self._data[event.key]
    def __getitem__(self, key):
        return self._data[key]
    def __setitem__(self, key, value):
        self.dispatch(AddedEvent(key=key, value=value))
    def __delitem__(self, key):
        self.dispatch(RemovedEvent(key=key))
    def __contains__(self, key):
        return key in self._data
    has_key = __contains__
# NOTE: comment-stripped duplicate of the EventTestCase defined earlier in
# this dump (dataset "content_no_comment" column).
class EventTestCase(unittest.TestCase):
    def testEvents(self):
        c1 = Cache()
        c2 = Cache()
        c3 = Cache()
        subscribe_all([c1, c2, c3])
        c1['bob'] = 'uncle'
        assert c2['bob'] == 'uncle'
        assert c3['bob'] == 'uncle'
        del c3['bob']
        assert ('bob' in c1) == False
        assert ('bob' in c2) == False
if __name__ == "__main__":
    unittest.main()
| true | true |
f7f7077f2960b540d3a1561391600c2bbca2cb12 | 15,800 | py | Python | zipline/_version.py | liudengfeng/zipline | 01fdd51d83efeb3453e92b7d02c255a06eba49ac | [
"Apache-2.0"
] | 6 | 2017-12-11T06:12:00.000Z | 2019-05-23T17:39:10.000Z | zipline/_version.py | liudengfeng/zipline | 01fdd51d83efeb3453e92b7d02c255a06eba49ac | [
"Apache-2.0"
] | null | null | null | zipline/_version.py | liudengfeng/zipline | 01fdd51d83efeb3453e92b7d02c255a06eba49ac | [
"Apache-2.0"
] | 1 | 2018-01-26T14:19:38.000Z | 2018-01-26T14:19:38.000Z |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Return the git-substituted refnames/full-sha keyword strings."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = " (HEAD -> master)"
    git_full = "47b5f506f2fccb472417daef014fdce9734c5262"
    keywords = {"refnames": git_refnames, "full": git_full}
    return keywords
class VersioneerConfig:
    """Attribute bag for versioneer settings; populated by get_config()."""
def get_config():
    """Build the configuration describing how this project is versioned.

    Values are baked in when 'setup.py versioneer' generates _version.py.
    """
    config = VersioneerConfig()
    config.VCS = "git"
    config.style = "pep440"
    config.tag_prefix = ""
    config.parentdir_prefix = "zipline-"
    config.versionfile_source = "zipline/_version.py"
    config.verbose = False
    return config
class NotThisMethod(Exception):
    """Raised when a particular version-extraction strategy does not apply."""
# Registry of VCS handler callables, keyed by VCS name then method name.
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method):
    """Decorator factory: register the wrapped function under
    HANDLERS[vcs][method] and return it unchanged."""
    def decorate(func):
        HANDLERS.setdefault(vcs, {})[method] = func
        return func
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Try each candidate executable in *commands* with *args* until one
    launches; return its stripped stdout, or None on failure.

    None is returned when no candidate exists, when launching fails for a
    reason other than file-not-found, or when the process exits non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # shell=False on purpose, so use git.cmd (not just git) on Windows.
            process = subprocess.Popen(
                [candidate] + args,
                cwd=cwd,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
            )
            break
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # This candidate doesn't exist; try the next one.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(err)
            return None
    else:
        # Loop exhausted without a successful launch.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
        return None
    return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Derive the version from the name of the directory containing the
    source tree (source tarballs unpack into e.g. 'zipline-1.2.3/').

    Raises NotThisMethod when the directory name lacks the prefix.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        return {
            "version": dirname[len(parentdir_prefix):],
            "full-revisionid": None,
            "dirty": False,
            "error": None,
        }
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with "
              "prefix '%s'" % (root, dirname, parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract the git keyword strings from a _version.py file.

    The file is read textually (not imported) so setup.py/versioneer.py can
    grep the values; returns whichever of {"refnames", "full"} it finds.
    Missing/unreadable files yield an empty dict.
    """
    keywords = {}
    try:
        # 'with' guarantees the handle is closed even if reading raises
        # (the original open/readlines/close leaked it on mid-read errors).
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Derive a version dict from git-archive-expanded keyword strings.

    Raises NotThisMethod when keywords are absent or still unexpanded
    ("$Format..."), i.e. we are not inside a git-archive tarball.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    # refnames looks like " (HEAD -> master, tag: v1.0)"; split into refs.
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs-tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None
                    }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Interrogate a git checkout and return the raw version "pieces" dict
    (long/short sha, closest-tag, distance, dirty flag, error).

    Raises NotThisMethod when *root* is not a git checkout or git fails.
    """
    # this runs 'git' from the root of the source tree. This only gets called
    # if the git-archive 'subst' keywords were *not* expanded, and
    # _version.py hasn't already been rewritten with a short version string,
    # meaning we're inside a checked out source tree.
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
    # if there are no tags, this yields HEX[-dirty] (no NUM)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long"],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    return pieces
def plus_or_dot(pieces):
    """Return the separator ("+" or ".") for appending a local-version part.

    PEP 440 allows only one "+" in a version string; if the closest tag
    already contains one, later components must be joined with ".".
    """
    # `or ""` also covers an explicit None value: the original
    # pieces.get("closest-tag", "") still yielded None (and a TypeError
    # from `in`) when the key was present but set to None.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Render TAG[+DISTANCE.gHEX[.dirty]].

    A tagged build that is then dirtied renders as TAG+0.gHEX.dirty; with
    no tags at all it renders as 0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    if not tag:
        # No tags anywhere in history.
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        # PEP 440 allows only one "+": switch to "." if the tag has one.
        separator = "." if "+" in tag else "+"
        version += "%s%d.g%s" % (separator, pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """Render TAG[.post.devDISTANCE]; never carries a -dirty marker.

    With no tags the result is 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if not tag:
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return "%s.post.dev%d" % (tag, pieces["distance"])
    return tag
def render_pep440_post(pieces):
    """Render TAG[.postDISTANCE[.dev0]+gHEX].

    The ".dev0" marks a dirty tree. Note .dev0 sorts *before* the clean
    release, so a dirty tree looks "older" — don't release with -dirty.
    With no tags: 0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if not tag:
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        # PEP 440 allows only one "+": switch to "." if the tag has one.
        separator = "." if "+" in tag else "+"
        version += "%sg%s" % (separator, pieces["short"])
    return version
def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]]; ".dev0" marks a dirty tree.

    With no tags the base is "0" and ".postDISTANCE" always appears.
    """
    tag = pieces["closest-tag"]
    # Clean, exactly-tagged build: just the tag.
    if tag and not (pieces["distance"] or pieces["dirty"]):
        return tag
    version = "%s.post%d" % (tag if tag else "0", pieces["distance"])
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """Render TAG[-DISTANCE-gHEX][-dirty], mimicking
    'git describe --tags --dirty --always'.

    With no tags the result is the bare short hex (no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        version = pieces["short"]
    return version + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """Render TAG-DISTANCE-gHEX[-dirty], mimicking
    'git describe --tags --dirty --always --long'.

    Unlike render_git_describe, distance and hash appear even at distance
    zero. With no tags the result is the bare short hex (no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        version = pieces["short"]
    return version + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Render *pieces* into the final version dict using *style*.

    An error recorded in pieces short-circuits to an "unknown" version;
    an unrecognized style raises ValueError.
    """
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}
def get_versions():
    """Return the version dict, trying each extraction strategy in turn:
    expanded git-archive keywords, then a live git checkout, then the
    parent-directory name; falls back to "0+unknown"."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # __file__ is unavailable (frozen interpreter, etc.).
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree"}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    # Every strategy failed.
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version"}
| 34.273319 | 79 | 0.588924 |
import errno
import os
import re
import subprocess
import sys
# NOTE: comment-stripped duplicates of the versioneer helpers defined
# earlier in this dump (dataset "content_no_comment" column).
def get_keywords():
    git_refnames = " (HEAD -> master)"
    git_full = "47b5f506f2fccb472417daef014fdce9734c5262"
    keywords = {"refnames": git_refnames, "full": git_full}
    return keywords
class VersioneerConfig:
    pass
def get_config():
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "pep440"
    cfg.tag_prefix = ""
    cfg.parentdir_prefix = "zipline-"
    cfg.versionfile_source = "zipline/_version.py"
    cfg.verbose = False
    return cfg
class NotThisMethod(Exception):
    pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method):
    def decorate(f):
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = f
        return f
    return decorate
# NOTE: comment-stripped duplicate of run_command defined earlier in this
# dump (dataset "content_no_comment" column); see that copy for docs.
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
        return None
    return stdout
# NOTE: comment-stripped duplicate of versions_from_parentdir defined
# earlier in this dump (dataset "content_no_comment" column).
def versions_from_parentdir(parentdir_prefix, root, verbose):
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with "
                  "prefix '%s'" % (root, dirname, parentdir_prefix))
        raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
    return {"version": dirname[len(parentdir_prefix):],
            "full-revisionid": None,
            "dirty": False, "error": None}
# NOTE: comment-stripped duplicates of the git keyword handlers defined
# earlier in this dump (dataset "content_no_comment" column).
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["full"] = mo.group(1)
        f.close()
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs-tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None
                    }
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7]
pieces["error"] = None
git_describe = describe_out
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
if "-" in git_describe:
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
pieces["distance"] = int(mo.group(2))
pieces["short"] = mo.group(3)
else:
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out)
return pieces
def plus_or_dot(pieces):
    """Return "+" unless the closest tag already has a local segment, then ".".

    PEP 440 allows only one "+" in a version; subsequent local-segment parts
    must be joined with ".".

    Fix: `pieces.get("closest-tag", "")` returns None (not "") when the key
    exists with value None — which git_pieces_from_vcs produces for untagged
    trees — making `"+" in None` raise TypeError. Coalesce None to "".
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Render TAG[+DISTANCE.gHEX[.dirty]] (PEP 440 local-version style).

    Exception #1: with no tags at all, produce
    0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: untagged tree
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        # Join with "." if the tag already carries a "+" local segment.
        rendered += "." if "+" in tag else "+"
        rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
    if pieces["dirty"]:
        rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """Render TAG[.post.devDISTANCE] — no -dirty marker.

    Exception #1: with no tags, produce 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        return tag
    return tag + ".post.dev%d" % pieces["distance"]
def render_pep440_post(pieces):
    """Render TAG[.postDISTANCE[.dev0]+gHEX] — ".dev0" marks a dirty tree.

    Note that .dev0 sorts backwards (a dirty tree will appear "older" than
    the corresponding clean one). With no tags, produce
    0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if not tag:
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered + "+g%s" % pieces["short"]
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        # Join with "." if the tag already carries a "+" local segment.
        rendered += "." if "+" in tag else "+"
        rendered += "g%s" % pieces["short"]
    return rendered
def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]] — legacy scheme, no local segment."""
    tag = pieces["closest-tag"]
    if tag and not (pieces["distance"] or pieces["dirty"]):
        # Exactly on a tag with a clean tree: the tag alone is the version.
        return tag
    base = tag if tag else "0"
    rendered = base + ".post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """Mimic 'git describe --tags --dirty --always' output.

    The hash suffix appears only when there is distance from the tag.
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # No tag reachable: just the short hash (the --always behavior).
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """Mimic 'git describe --tags --dirty --always --long' output.

    The distance/hash suffix is unconditional when a tag exists.
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render(pieces, style):
    """Render the given version `pieces` into the requested `style`.

    An error recorded in `pieces` short-circuits to an "unknown" version;
    an empty/"default" style falls back to "pep440".
    """
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}
def get_versions():
    """Compute version information, trying each strategy in order.

    Order: expanded git-archive keywords, then 'git describe' against the
    inferred source root, then the parent-directory name. Falls back to an
    "0+unknown" dict with an error message when all strategies fail.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # __file__ is undefined (frozen interpreter): no way to locate the root.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree"}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    # All strategies exhausted.
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version"}
| true | true |
f7f70854847abbc571223b1e682156066637ec94 | 7,501 | py | Python | cloudkeeperV1/plugins/cleanup_aws_vpcs/cloudkeeper_plugin_cleanup_aws_vpcs/__init__.py | mesosphere/cloudkeeper | 11be262df5874c1033cfec9964bba1596cab6a36 | [
"Apache-2.0"
] | 99 | 2020-04-15T22:56:34.000Z | 2021-06-13T15:04:55.000Z | cloudkeeperV1/plugins/cleanup_aws_vpcs/cloudkeeper_plugin_cleanup_aws_vpcs/__init__.py | mesosphere/cloudkeeper | 11be262df5874c1033cfec9964bba1596cab6a36 | [
"Apache-2.0"
] | null | null | null | cloudkeeperV1/plugins/cleanup_aws_vpcs/cloudkeeper_plugin_cleanup_aws_vpcs/__init__.py | mesosphere/cloudkeeper | 11be262df5874c1033cfec9964bba1596cab6a36 | [
"Apache-2.0"
] | 14 | 2020-04-14T22:13:59.000Z | 2021-04-05T16:42:31.000Z | import cklib.logging
import threading
import yaml
from cklib.baseplugin import BasePlugin
from cloudkeeper_plugin_aws.resources import (
AWSVPC,
AWSVPCPeeringConnection,
AWSEC2NetworkAcl,
AWSEC2NetworkInterface,
AWSELB,
AWSALB,
AWSALBTargetGroup,
AWSEC2Subnet,
AWSEC2SecurityGroup,
AWSEC2InternetGateway,
AWSEC2NATGateway,
AWSEC2RouteTable,
AWSVPCEndpoint,
AWSEC2Instance,
AWSEC2ElasticIP,
)
from cklib.args import ArgumentParser
from cklib.event import (
Event,
EventType,
add_event_listener,
remove_event_listener,
)
log = cklib.logging.getLogger("cloudkeeper." + __name__)
class CleanupAWSVPCsPlugin(BasePlugin):
    """Cloudkeeper plugin that cascades VPC cleanup to dependent resources.

    When --cleanup-aws-vpcs is given, the plugin listens for CLEANUP_BEGIN
    and, for every AWS VPC already marked for cleanup, marks its known
    dependent resource types for cleanup as well. An optional YAML config
    (--cleanup-aws-vpcs-config) restricts this to listed cloud/account IDs.
    """

    def __init__(self):
        super().__init__()
        self.name = "cleanup_aws_vpcs"
        # Blocks go() until shutdown is requested.
        self.exit = threading.Event()
        if ArgumentParser.args.cleanup_aws_vpcs:
            add_event_listener(EventType.SHUTDOWN, self.shutdown)
            add_event_listener(
                EventType.CLEANUP_BEGIN,
                self.vpc_cleanup,
                blocking=True,
                timeout=3600,
            )
        else:
            # Plugin disabled via CLI: let go() return immediately.
            self.exit.set()

        self.config = {}
        if ArgumentParser.args.cleanup_aws_vpcs_config:
            self.config = CleanupAWSVPCsConfig(
                config_file=ArgumentParser.args.cleanup_aws_vpcs_config
            )
            self.config.read()  # initial read to ensure config format is valid

    def __del__(self):
        # Detach listeners so a collected plugin no longer receives events.
        remove_event_listener(EventType.CLEANUP_BEGIN, self.vpc_cleanup)
        remove_event_listener(EventType.SHUTDOWN, self.shutdown)

    def go(self):
        """Block until shutdown; all work happens in event callbacks."""
        self.exit.wait()

    def vpc_cleanup(self, event: Event):
        """Mark cleanable descendants of VPCs flagged for cleanup.

        Skips protected VPCs; refuses to cascade (and unmarks the VPC) when
        active EC2 instances remain inside it. If a config was loaded, only
        cloud/account combinations listed there are processed.
        """
        graph = event.data
        log.info("AWS VPC cleanup called")
        for node in graph.nodes:
            if node.protected or not node.clean or not isinstance(node, AWSVPC):
                continue

            cloud = node.cloud(graph)
            account = node.account(graph)
            region = node.region(graph)
            log_prefix = (
                f"Found AWS VPC {node.dname} in cloud {cloud.name} account {account.dname} "
                f"region {region.name} marked for cleanup."
            )
            if len(self.config) > 0:
                # Config acts as an allow-list of cloud IDs -> account IDs.
                if (
                    cloud.id not in self.config
                    or account.id not in self.config[cloud.id]
                ):
                    log.debug(
                        (
                            f"{log_prefix} Account not found in config - ignoring dependent resources."
                        )
                    )
                    continue

            # Any instance not already terminating/terminated and not marked
            # for cleanup blocks the VPC cleanup entirely.
            vpc_instances = [
                i
                for i in node.descendants(graph)
                if isinstance(i, AWSEC2Instance)
                and i.instance_status not in ("shutting-down", "terminated")
                and not i.clean
            ]
            if len(vpc_instances) > 0:
                log_msg = "VPC contains active EC2 instances - not cleaning VPC."
                log.debug(f"{log_prefix} {log_msg}")
                node.log(log_msg)
                node.clean = False
                continue

            log.debug(f"{log_prefix} Marking dependent resources for cleanup as well.")

            for descendant in node.descendants(graph):
                log.debug(f"Found descendant {descendant.rtdname} of VPC {node.dname}")
                if isinstance(
                    descendant,
                    (
                        AWSVPCPeeringConnection,
                        AWSEC2NetworkAcl,
                        AWSEC2NetworkInterface,
                        AWSELB,
                        AWSALB,
                        AWSALBTargetGroup,
                        AWSEC2Subnet,
                        AWSEC2SecurityGroup,
                        AWSEC2InternetGateway,
                        AWSEC2NATGateway,
                        AWSEC2RouteTable,
                        AWSVPCEndpoint,
                        AWSEC2ElasticIP,
                    ),
                ):
                    descendant.log(
                        (
                            f"Marking for cleanup because resource is a descendant of VPC {node.dname} "
                            f"which is set to be cleaned"
                        )
                    )
                    node.log(
                        f"Marking {descendant.rtdname} for cleanup because resource is a descendant"
                    )
                    descendant.clean = True
                else:
                    # Descendant type we don't know how to clean: warn, since
                    # leftover resources will likely make the VPC delete fail.
                    if descendant.clean:
                        log.debug(
                            (
                                f"Descendant {descendant.rtdname} of VPC {node.dname} is not targeted but "
                                f"already marked for cleaning"
                            )
                        )
                    else:
                        log.error(
                            (
                                f"Descendant {descendant.rtdname} of VPC {node.dname} is not targeted and "
                                f"not marked for cleaning - VPC cleanup will likely fail"
                            )
                        )
                        node.log(
                            (
                                f"Descendant {descendant.rtdname} is not targeted and not marked for cleaning "
                                f"- cleanup will likely fail"
                            )
                        )

    @staticmethod
    def add_args(arg_parser: ArgumentParser) -> None:
        """Register this plugin's CLI arguments."""
        arg_parser.add_argument(
            "--cleanup-aws-vpcs",
            help="Cleanup AWS VPCs (default: False)",
            dest="cleanup_aws_vpcs",
            action="store_true",
            default=False,
        )
        arg_parser.add_argument(
            "--cleanup-aws-vpcs-config",
            help="Path to Cleanup AWS VPCs Plugin Config",
            default=None,
            dest="cleanup_aws_vpcs_config",
        )

    def shutdown(self, event: Event):
        """Unblock go() when a SHUTDOWN event arrives."""
        log.debug(
            f"Received event {event.event_type} - shutting down AWS VPC Cleanup plugin"
        )
        self.exit.set()
class CleanupAWSVPCsConfig(dict):
    """Dict-backed plugin config: maps cloud IDs to lists of account IDs.

    Loaded from a YAML file via read(); the file's contents are validated
    before being merged into the dict.
    """

    def __init__(self, *args, config_file: str = None, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.config_file = config_file

    def read(self) -> bool:
        """Load, validate, and merge the YAML config file; return True on success."""
        if not self.config_file:
            raise ValueError(
                "Attribute config_file is not set on CleanupAWSVPCsConfig() instance"
            )
        with open(self.config_file) as config_file:
            loaded = yaml.load(config_file, Loader=yaml.FullLoader)
        if not self.validate(loaded):
            return False
        self.update(loaded)
        return True

    @staticmethod
    def validate(config) -> bool:
        """Raise ValueError unless config is Dict[str, List[str]]; return True."""
        if not isinstance(config, dict):
            raise ValueError("Config is no dict")
        for cloud, accounts in config.items():
            if not isinstance(cloud, str):
                raise ValueError(f"Cloud ID {cloud} is no string")
            if not isinstance(accounts, list):
                raise ValueError(f"Account IDs {accounts} is no list")
            for account in accounts:
                if not isinstance(account, str):
                    raise ValueError(f"Account ID {account} is no string")
        return True
| 35.051402 | 111 | 0.510599 | import cklib.logging
import threading
import yaml
from cklib.baseplugin import BasePlugin
from cloudkeeper_plugin_aws.resources import (
AWSVPC,
AWSVPCPeeringConnection,
AWSEC2NetworkAcl,
AWSEC2NetworkInterface,
AWSELB,
AWSALB,
AWSALBTargetGroup,
AWSEC2Subnet,
AWSEC2SecurityGroup,
AWSEC2InternetGateway,
AWSEC2NATGateway,
AWSEC2RouteTable,
AWSVPCEndpoint,
AWSEC2Instance,
AWSEC2ElasticIP,
)
from cklib.args import ArgumentParser
from cklib.event import (
Event,
EventType,
add_event_listener,
remove_event_listener,
)
log = cklib.logging.getLogger("cloudkeeper." + __name__)
class CleanupAWSVPCsPlugin(BasePlugin):
def __init__(self):
super().__init__()
self.name = "cleanup_aws_vpcs"
self.exit = threading.Event()
if ArgumentParser.args.cleanup_aws_vpcs:
add_event_listener(EventType.SHUTDOWN, self.shutdown)
add_event_listener(
EventType.CLEANUP_BEGIN,
self.vpc_cleanup,
blocking=True,
timeout=3600,
)
else:
self.exit.set()
self.config = {}
if ArgumentParser.args.cleanup_aws_vpcs_config:
self.config = CleanupAWSVPCsConfig(
config_file=ArgumentParser.args.cleanup_aws_vpcs_config
)
self.config.read()
def __del__(self):
remove_event_listener(EventType.CLEANUP_BEGIN, self.vpc_cleanup)
remove_event_listener(EventType.SHUTDOWN, self.shutdown)
def go(self):
self.exit.wait()
def vpc_cleanup(self, event: Event):
graph = event.data
log.info("AWS VPC cleanup called")
for node in graph.nodes:
if node.protected or not node.clean or not isinstance(node, AWSVPC):
continue
cloud = node.cloud(graph)
account = node.account(graph)
region = node.region(graph)
log_prefix = (
f"Found AWS VPC {node.dname} in cloud {cloud.name} account {account.dname} "
f"region {region.name} marked for cleanup."
)
if len(self.config) > 0:
if (
cloud.id not in self.config
or account.id not in self.config[cloud.id]
):
log.debug(
(
f"{log_prefix} Account not found in config - ignoring dependent resources."
)
)
continue
vpc_instances = [
i
for i in node.descendants(graph)
if isinstance(i, AWSEC2Instance)
and i.instance_status not in ("shutting-down", "terminated")
and not i.clean
]
if len(vpc_instances) > 0:
log_msg = "VPC contains active EC2 instances - not cleaning VPC."
log.debug(f"{log_prefix} {log_msg}")
node.log(log_msg)
node.clean = False
continue
log.debug(f"{log_prefix} Marking dependent resources for cleanup as well.")
for descendant in node.descendants(graph):
log.debug(f"Found descendant {descendant.rtdname} of VPC {node.dname}")
if isinstance(
descendant,
(
AWSVPCPeeringConnection,
AWSEC2NetworkAcl,
AWSEC2NetworkInterface,
AWSELB,
AWSALB,
AWSALBTargetGroup,
AWSEC2Subnet,
AWSEC2SecurityGroup,
AWSEC2InternetGateway,
AWSEC2NATGateway,
AWSEC2RouteTable,
AWSVPCEndpoint,
AWSEC2ElasticIP,
),
):
descendant.log(
(
f"Marking for cleanup because resource is a descendant of VPC {node.dname} "
f"which is set to be cleaned"
)
)
node.log(
f"Marking {descendant.rtdname} for cleanup because resource is a descendant"
)
descendant.clean = True
else:
if descendant.clean:
log.debug(
(
f"Descendant {descendant.rtdname} of VPC {node.dname} is not targeted but "
f"already marked for cleaning"
)
)
else:
log.error(
(
f"Descendant {descendant.rtdname} of VPC {node.dname} is not targeted and "
f"not marked for cleaning - VPC cleanup will likely fail"
)
)
node.log(
(
f"Descendant {descendant.rtdname} is not targeted and not marked for cleaning "
f"- cleanup will likely fail"
)
)
@staticmethod
def add_args(arg_parser: ArgumentParser) -> None:
arg_parser.add_argument(
"--cleanup-aws-vpcs",
help="Cleanup AWS VPCs (default: False)",
dest="cleanup_aws_vpcs",
action="store_true",
default=False,
)
arg_parser.add_argument(
"--cleanup-aws-vpcs-config",
help="Path to Cleanup AWS VPCs Plugin Config",
default=None,
dest="cleanup_aws_vpcs_config",
)
def shutdown(self, event: Event):
log.debug(
f"Received event {event.event_type} - shutting down AWS VPC Cleanup plugin"
)
self.exit.set()
class CleanupAWSVPCsConfig(dict):
def __init__(self, *args, config_file: str = None, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.config_file = config_file
def read(self) -> bool:
if not self.config_file:
raise ValueError(
"Attribute config_file is not set on CleanupAWSVPCsConfig() instance"
)
with open(self.config_file) as config_file:
config = yaml.load(config_file, Loader=yaml.FullLoader)
if self.validate(config):
self.update(config)
return True
return False
@staticmethod
def validate(config) -> bool:
if not isinstance(config, dict):
raise ValueError("Config is no dict")
for cloud_id, account_ids in config.items():
if not isinstance(cloud_id, str):
raise ValueError(f"Cloud ID {cloud_id} is no string")
if not isinstance(account_ids, list):
raise ValueError(f"Account IDs {account_ids} is no list")
for account_id in account_ids:
if not isinstance(account_id, str):
raise ValueError(f"Account ID {account_id} is no string")
return True
| true | true |
f7f708fdd1fd65fa0937410bf18ec42e90c36cff | 26,844 | py | Python | pytorch_lightning/trainer/connectors/logger_connector/result.py | FeryET/pytorch-lightning | b1f8b111b5085373599758a4e155a482259cdbf0 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/trainer/connectors/logger_connector/result.py | FeryET/pytorch-lightning | b1f8b111b5085373599758a4e155a482259cdbf0 | [
"Apache-2.0"
] | 1 | 2022-02-09T17:24:56.000Z | 2022-02-09T17:24:56.000Z | pytorch_lightning/trainer/connectors/logger_connector/result.py | FeryET/pytorch-lightning | b1f8b111b5085373599758a4e155a482259cdbf0 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Generator
from dataclasses import asdict, dataclass, replace
from functools import partial, wraps
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union
import torch
from torchmetrics import Metric
from typing_extensions import TypedDict
from pytorch_lightning.core.mixins import DeviceDtypeModuleMixin
from pytorch_lightning.utilities.apply_func import apply_to_collection, apply_to_collections, move_data_to_device
from pytorch_lightning.utilities.data import extract_batch_size
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from pytorch_lightning.utilities.metrics import metrics_to_scalars
from pytorch_lightning.utilities.rank_zero import rank_zero_warn
from pytorch_lightning.utilities.warnings import WarningCache
# Type of values users may pass to `self.log`.
_IN_METRIC = Union[Metric, torch.Tensor]  # Do not include scalars as they were converted to tensors
# Metric shapes as exposed to loggers / callbacks vs. the progress bar.
_OUT_METRIC = Union[torch.Tensor, Dict[str, torch.Tensor]]
_PBAR_METRIC = Union[float, Dict[str, float]]
_OUT_DICT = Dict[str, _OUT_METRIC]
_PBAR_DICT = Dict[str, _PBAR_METRIC]


class _METRICS(TypedDict):
    """Bundle of the three metric dictionaries produced for consumers."""

    callback: _OUT_DICT  # metrics exposed to callbacks (e.g. checkpointing)
    log: _OUT_DICT  # metrics sent to loggers
    pbar: _PBAR_DICT  # scalar metrics shown in the progress bar


# Module-level cache so repeated warnings are emitted only once.
warning_cache = WarningCache()
@dataclass
class _Sync:
    """Distributed-sync configuration for one logged value.

    The effective reduction callable is cached in ``self._fn``; it is rebuilt
    whenever ``should``, ``op``, or ``group`` change, because the cached
    partial bakes those values in.
    """

    fn: Optional[Callable] = None  # the strategy-provided reduction function
    _should: bool = False  # whether syncing is enabled for this value
    rank_zero_only: bool = False  # if True, never sync (value lives on rank 0 only)
    _op: Optional[str] = None  # reduction op name passed as `reduce_op`
    _group: Optional[Any] = None  # process group passed to the reduction

    def __post_init__(self) -> None:
        # Build `_fn` eagerly so `__call__` is a plain attribute access.
        self._generate_sync_fn()

    @property
    def should(self) -> bool:
        return self._should

    @should.setter
    def should(self, should: bool) -> None:
        self._should = should
        # `self._fn` needs to be re-generated.
        self._generate_sync_fn()

    @property
    def op(self) -> Optional[str]:
        return self._op

    @op.setter
    def op(self, op: Optional[str]) -> None:
        self._op = op
        # `self._fn` needs to be re-generated.
        self._generate_sync_fn()

    @property
    def group(self) -> Optional[Any]:
        return self._group

    @group.setter
    def group(self, group: Optional[Any]) -> None:
        self._group = group
        # `self._fn` needs to be re-generated.
        self._generate_sync_fn()

    def _generate_sync_fn(self) -> None:
        """Used to compute the syncing function and cache it."""
        fn = self.no_op if self.fn is None or not self.should or self.rank_zero_only else self.fn
        # save the function as `_fn` as the meta are being re-created and the object references need to match.
        # ignore typing, bad support for `partial`: mypy/issues/1484
        self._fn: Callable = partial(fn, reduce_op=self.op, group=self.group)  # type: ignore [arg-type]

    @property
    def __call__(self) -> Any:
        # Property (not a method) so the cached partial itself is what gets invoked.
        return self._fn

    @staticmethod
    def no_op(value: Any, *_: Any, **__: Any) -> Any:
        # Fallback used when syncing is disabled: return the value untouched.
        return value
@dataclass
class _Metadata:
    """Everything `LightningModule.log` was called with for one logged key."""

    fx: str  # name of the hook that logged, e.g. "training_step"
    name: str  # user-facing metric name
    prog_bar: bool = False
    logger: bool = True
    on_step: bool = False
    on_epoch: bool = True
    reduce_fx: Callable = torch.mean
    enable_graph: bool = False
    add_dataloader_idx: bool = True
    dataloader_idx: Optional[int] = None
    metric_attribute: Optional[str] = None
    _sync: Optional[_Sync] = None

    def __post_init__(self) -> None:
        # Logging a value that is recorded neither per-step nor per-epoch is a no-op.
        if not self.on_step and not self.on_epoch:
            raise MisconfigurationException("`self.log(on_step=False, on_epoch=False)` is not useful.")
        self._parse_reduce_fx()

    def _parse_reduce_fx(self) -> None:
        """Normalize string reductions to torch callables; reject custom ones."""
        error = (
            "Only `self.log(..., reduce_fx={min,max,mean,sum})` are currently supported."
            " Please, open an issue in `https://github.com/PyTorchLightning/pytorch-lightning/issues`."
            f" Found: {self.reduce_fx}"
        )
        if isinstance(self.reduce_fx, str):
            reduce_fx = self.reduce_fx.lower()
            if reduce_fx == "avg":
                reduce_fx = "mean"  # "avg" accepted as an alias
            if reduce_fx not in ("min", "max", "mean", "sum"):
                raise MisconfigurationException(error)
            self.reduce_fx = getattr(torch, reduce_fx)
        elif self.is_custom_reduction:
            raise MisconfigurationException(error)

    @property
    def sync(self) -> _Sync:
        assert self._sync is not None
        return self._sync

    @sync.setter
    def sync(self, sync: _Sync) -> None:
        # Default the sync op to the reduction's name (e.g. "mean").
        if sync.op is None:
            sync.op = self.reduce_fx.__name__
        self._sync = sync

    @property
    def forked(self) -> bool:
        # Logged both per-step and per-epoch: the name forks into two entries.
        return self.on_step and self.on_epoch

    def forked_name(self, on_step: bool) -> str:
        """Return the name suffixed with "_step"/"_epoch" when forked."""
        if self.forked:
            return f'{self.name}_{"step" if on_step else "epoch"}'
        return self.name

    @property
    def is_mean_reduction(self) -> bool:
        return self.reduce_fx is torch.mean

    @property
    def is_sum_reduction(self) -> bool:
        return self.reduce_fx in (torch.sum, sum)

    @property
    def is_max_reduction(self) -> bool:
        return self.reduce_fx in (torch.max, max)

    @property
    def is_min_reduction(self) -> bool:
        return self.reduce_fx in (torch.min, min)

    @property
    def is_custom_reduction(self) -> bool:
        return not (self.is_mean_reduction or self.is_max_reduction or self.is_min_reduction or self.is_sum_reduction)

    def __getstate__(self) -> dict:
        # drop the `sync.fn` to avoid potential pickle errors
        # need to drop `fn` first otherwise `asdict` produces a `RecursionError`
        copy = replace(self, _sync=replace(self.sync, fn=None))
        d = asdict(copy)
        # delete the `None` value so it does not override
        del d["_sync"]["fn"]
        return d

    def __setstate__(self, state: dict, sync_fn: Optional[Callable] = None) -> None:
        """Restore state, re-attaching the (unpicklable) sync function."""
        d = {**state, "_sync": _Sync(**state["_sync"], fn=sync_fn)}
        self.__dict__.update(d)

    @classmethod
    def _reconstruct(cls, state: dict, sync_fn: Optional[Callable] = None) -> "_Metadata":
        """Build a _Metadata from pickled state (alternate constructor)."""
        meta = cls(state["fx"], state["name"])
        meta.__setstate__(state, sync_fn=sync_fn)
        return meta
class _ResultMetric(Metric, DeviceDtypeModuleMixin):
    """Wraps the value provided to `:meth:`~pytorch_lightning.core.lightning.LightningModule.log`

    Plain tensors are accumulated here according to `meta.reduce_fx`
    (mean/max/min/sum); `torchmetrics.Metric` values keep their own state and
    are delegated to.
    """

    def __init__(self, metadata: _Metadata, is_tensor: bool) -> None:
        super().__init__()
        self.is_tensor = is_tensor
        self.meta = metadata
        self.has_reset = False
        if is_tensor:
            # Neutral element for the chosen reduction.
            if metadata.is_max_reduction:
                default = float("-inf")
            elif metadata.is_min_reduction:
                default = float("inf")
            else:
                default = 0.0
            # do not set a dtype in case the default dtype was changed
            self.add_state("value", torch.tensor(default), dist_reduce_fx=torch.sum)
            if self.meta.is_mean_reduction:
                # Mean needs the running sum of batch sizes as the divisor.
                self.cumulated_batch_size: torch.Tensor
                self.add_state("cumulated_batch_size", torch.tensor(0), dist_reduce_fx=torch.sum)
        # this is defined here only because upstream is missing the type annotation
        self._forward_cache: Optional[Any] = None

    def update(self, value: _IN_METRIC, batch_size: int) -> None:  # type: ignore[override]
        """Fold a new value into the accumulated state."""
        if self.is_tensor:
            value = cast(torch.Tensor, value)
            if not torch.is_floating_point(value):
                dtype = torch.get_default_dtype()
                warning_cache.warn(
                    # do not include the value to avoid cache misses
                    f"You called `self.log({self.meta.name!r}, ...)` in your `{self.meta.fx}` but the value needs to"
                    f" be floating point. Converting it to {dtype}."
                )
                value = value.to(dtype)
            if self.meta.on_step:
                self._forward_cache = self.meta.sync(value.clone())  # `clone` because `sync` is in-place
                # performance: no need to accumulate on values only logged on_step
                if not self.meta.on_epoch:
                    self.value = self._forward_cache
                    return
            # perform accumulation with reduction
            if self.meta.is_mean_reduction:
                # do not use `+=` as it doesn't do type promotion
                self.value = self.value + value.mean() * batch_size
                self.cumulated_batch_size = self.cumulated_batch_size + batch_size
            elif self.meta.is_max_reduction or self.meta.is_min_reduction:
                self.value = self.meta.reduce_fx(self.value, value.mean())
            elif self.meta.is_sum_reduction:
                self.value = self.value + value.mean()
        else:
            # A torchmetrics Metric manages its own accumulation.
            value = cast(Metric, value)
            self.value = value
            self._forward_cache = value._forward_cache

    def compute(self) -> torch.Tensor:
        """Return the (synced) reduced value over everything accumulated."""
        if self.is_tensor:
            value = self.meta.sync(self.value)
            if self.meta.is_mean_reduction:
                cumulated_batch_size = self.meta.sync(self.cumulated_batch_size)
                return value / cumulated_batch_size
            return value
        return self.value.compute()

    def reset(self) -> None:
        """Clear accumulated state (delegating for wrapped Metrics)."""
        if self.is_tensor:
            super().reset()
        else:
            self.value.reset()
        self.has_reset = True

    def forward(self, value: _IN_METRIC, batch_size: int) -> None:
        if self.meta.enable_graph:
            with torch.no_grad():
                self.update(value, batch_size)
        else:
            # performance: skip the `torch.no_grad` context manager by calling `update` directly
            self.update(value, batch_size)

    def _wrap_compute(self, compute: Any) -> Any:
        # Override to avoid syncing - we handle it ourselves.
        @wraps(compute)
        def wrapped_func(*args: Any, **kwargs: Any) -> Optional[Any]:
            if not self._update_called:
                rank_zero_warn(
                    f"The ``compute`` method of metric {self.__class__.__name__}"
                    " was called before the ``update`` method which may lead to errors,"
                    " as metric states have not yet been updated.",
                )

            # return cached value
            if self._computed is not None:
                return self._computed
            self._computed = compute(*args, **kwargs)
            return self._computed

        return wrapped_func

    def __setattr__(self, key: str, value: Any) -> None:
        # performance: skip the `torch.nn.Module.__setattr__` checks
        object.__setattr__(self, key, value)

    def __repr__(self) -> str:
        state = f"{repr(self.meta.name)}, value={self.value}"
        if self.is_tensor and self.meta.is_mean_reduction:
            state += f", cumulated_batch_size={self.cumulated_batch_size}"
        return f"{self.__class__.__name__}({state})"

    def __getstate__(self, drop_value: bool = False) -> dict:
        skip = ["update", "compute", "_update_signature", "_cache"]
        if not self.is_tensor and drop_value:
            # Avoid serializing ResultMetrics which are passed Metrics
            skip.append("value")
        d = {k: v for k, v in self.__dict__.items() if k not in skip}
        d["meta"] = d["meta"].__getstate__()
        d["_class"] = self.__class__.__name__
        d["_is_synced"] = False  # don't consider the state as synced on reload
        return d

    def __setstate__(self, state: dict, sync_fn: Optional[Callable] = None) -> None:
        d = {**state, "meta": _Metadata._reconstruct(state["meta"], sync_fn=sync_fn)}
        super().__setstate__(d)

    @classmethod
    def _reconstruct(cls, state: dict, sync_fn: Optional[Callable] = None) -> "_ResultMetric":
        # need to reconstruct twice because `meta` is used in `__init__`
        meta = _Metadata._reconstruct(state["meta"])
        result_metric = cls(meta, state["is_tensor"])
        result_metric.__setstate__(state, sync_fn=sync_fn)
        return result_metric

    def to(self, *args: Any, **kwargs: Any) -> "_ResultMetric":
        """Move all held tensors/metrics to the requested device/dtype."""
        self.__dict__.update(
            apply_to_collection(self.__dict__, (torch.Tensor, Metric), move_data_to_device, *args, **kwargs)
        )
        return self
class _ResultMetricCollection(dict):
    """Dict wrapper for easy access to metadata.

    All of the leaf items should be instances of
    :class:`~pytorch_lightning.trainer.connectors.logger_connector.result._ResultMetric`
    with the same metadata.
    """

    @property
    def meta(self) -> _Metadata:
        # All leaves share metadata, so any item's meta is representative.
        return next(iter(self.values())).meta

    @property
    def has_tensor(self) -> bool:
        # True if any leaf wraps a plain tensor (as opposed to a Metric).
        return any(v.is_tensor for v in self.values())

    def __getstate__(self, drop_value: bool = False) -> dict:
        def getstate(item: _ResultMetric) -> dict:
            return item.__getstate__(drop_value=drop_value)

        items = apply_to_collection(dict(self), _ResultMetric, getstate)
        return {"items": items, "meta": self.meta.__getstate__(), "_class": self.__class__.__name__}

    def __setstate__(self, state: dict, sync_fn: Optional[Callable] = None) -> None:
        # can't use `apply_to_collection` as it does not recurse items of the same type
        items = {k: _ResultMetric._reconstruct(v, sync_fn=sync_fn) for k, v in state["items"].items()}
        self.update(items)

    @classmethod
    def _reconstruct(cls, state: dict, sync_fn: Optional[Callable] = None) -> "_ResultMetricCollection":
        """Build a collection from pickled state (alternate constructor)."""
        rmc = cls()
        rmc.__setstate__(state, sync_fn=sync_fn)
        return rmc
class _ResultCollection(dict):
    """
    Collection (dictionary) of :class:`~pytorch_lightning.trainer.connectors.logger_connector.result._ResultMetric` or
    :class:`~pytorch_lightning.trainer.connectors.logger_connector.result._ResultMetricCollection`

    Keys have the form ``"{fx}.{name}"``, optionally suffixed with the dataloader index.

    Example:

        # `device` needs to be provided before logging
        result = _ResultCollection(training=True, device=torch.device("cpu"))
        # you can log to a specific collection.
        # arguments: fx, key, value, metadata
        result.log('training_step', 'acc', torch.tensor(...), on_step=True, on_epoch=True)
        result.log('validation_step', 'recall', torch.tensor(...), on_step=True, on_epoch=True)
    """
    DATALOADER_SUFFIX = "/dataloader_idx_{}"
    def __init__(self, training: bool, device: Optional[Union[str, torch.device]] = None) -> None:
        super().__init__()
        self.training = training
        self.device: Optional[Union[str, torch.device]] = device
        # current batch and its lazily-extracted size; used to weight mean reductions
        self.batch: Optional[Any] = None
        self.batch_size: Optional[int] = None
        self.dataloader_idx: Optional[int] = None
    @property
    def result_metrics(self) -> List[_ResultMetric]:
        """Flat list of every stored ``_ResultMetric``, including those nested in collections."""
        o = []
        def append_fn(v: _ResultMetric) -> None:
            nonlocal o
            o.append(v)
        apply_to_collection(list(self.values()), _ResultMetric, append_fn)
        return o
    def _extract_batch_size(
        self, value: Union[_ResultMetric, _ResultMetricCollection], batch_size: Optional[int], meta: _Metadata
    ) -> int:
        """Resolve the batch size: explicit argument, cached value, or extracted from the current batch."""
        # check if we have extracted the batch size already
        if batch_size is None:
            batch_size = self.batch_size
        if batch_size is not None:
            return batch_size
        batch_size = 1
        is_tensor = value.is_tensor if isinstance(value, _ResultMetric) else value.has_tensor
        # only tensor metrics with an epoch-level mean reduction need the true batch size
        if self.batch is not None and is_tensor and meta.on_epoch and meta.is_mean_reduction:
            batch_size = extract_batch_size(self.batch)
            self.batch_size = batch_size
        return batch_size
    def log(
        self,
        fx: str,
        name: str,
        value: _METRIC_COLLECTION,
        prog_bar: bool = False,
        logger: bool = True,
        on_step: bool = False,
        on_epoch: bool = True,
        reduce_fx: Callable = torch.mean,
        enable_graph: bool = False,
        sync_dist: bool = False,
        sync_dist_fn: Callable = _Sync.no_op,
        sync_dist_group: Optional[Any] = None,
        add_dataloader_idx: bool = True,
        batch_size: Optional[int] = None,
        metric_attribute: Optional[str] = None,
        rank_zero_only: bool = False,
    ) -> None:
        """See :meth:`~pytorch_lightning.core.lightning.LightningModule.log`"""
        # no metrics should be logged with graphs
        if not enable_graph:
            value = recursive_detach(value)
        # move metrics to cpu on TPU.
        if isinstance(value, torch.Tensor) and value.device.type == "xla":
            value = value.cpu()
        # storage key
        key = f"{fx}.{name}"
        # add dataloader_suffix to both key and fx
        if add_dataloader_idx and self.dataloader_idx is not None:
            key += f".{self.dataloader_idx}"
            fx += f".{self.dataloader_idx}"
        meta = _Metadata(
            fx=fx,
            name=name,
            prog_bar=prog_bar,
            logger=logger,
            on_step=on_step,
            on_epoch=on_epoch,
            reduce_fx=reduce_fx,
            enable_graph=enable_graph,
            add_dataloader_idx=add_dataloader_idx,
            dataloader_idx=self.dataloader_idx,
            metric_attribute=metric_attribute,
        )
        meta.sync = _Sync(_should=sync_dist, fn=sync_dist_fn, _group=sync_dist_group, rank_zero_only=rank_zero_only)
        # register logged value if it doesn't exist
        if key not in self:
            self.register_key(key, meta, value)
        # check the stored metadata and the current one match
        elif meta != self[key].meta:
            raise MisconfigurationException(
                f"You called `self.log({name}, ...)` twice in `{fx}` with different arguments. This is not allowed"
            )
        batch_size = self._extract_batch_size(self[key], batch_size, meta)
        self.update_metrics(key, value, batch_size)
    def register_key(self, key: str, meta: _Metadata, value: _METRIC_COLLECTION) -> None:
        """Create one _ResultMetric object per value.

        Value can be provided as a nested collection
        """
        def fn(v: _IN_METRIC) -> _ResultMetric:
            metric = _ResultMetric(meta, isinstance(v, torch.Tensor))
            return metric.to(self.device)
        value = apply_to_collection(value, (torch.Tensor, Metric), fn)
        if isinstance(value, dict):
            value = _ResultMetricCollection(value)
        self[key] = value
    def update_metrics(self, key: str, value: _METRIC_COLLECTION, batch_size: int) -> None:
        """Feed ``value`` into the already-registered ``_ResultMetric``(s) under ``key``."""
        def fn(result_metric: _ResultMetric, v: torch.Tensor) -> None:
            # performance: avoid calling `__call__` to avoid the checks in `torch.nn.Module._call_impl`
            result_metric.forward(v.to(self.device), batch_size)
            result_metric.has_reset = False
        apply_to_collections(self[key], value, _ResultMetric, fn)
    @staticmethod
    def _get_cache(result_metric: _ResultMetric, on_step: bool) -> Optional[torch.Tensor]:
        """Return the step-level forward cache or the (lazily reduced) epoch-level value."""
        cache = None
        if on_step and result_metric.meta.on_step:
            cache = result_metric._forward_cache
        elif not on_step and result_metric.meta.on_epoch:
            if result_metric._computed is None:
                # always reduce on epoch end
                should = result_metric.meta.sync.should
                result_metric.meta.sync.should = True
                result_metric.compute()
                result_metric.meta.sync.should = should
            cache = result_metric._computed
        if cache is not None and not result_metric.meta.enable_graph:
            return cache.detach()
        return cache
    def valid_items(self) -> Generator:
        """This function is used to iterate over current valid metrics."""
        return (
            (k, v)
            for k, v in self.items()
            if not (isinstance(v, _ResultMetric) and v.has_reset) and self.dataloader_idx == v.meta.dataloader_idx
        )
    def _forked_name(self, result_metric: _ResultMetric, on_step: bool) -> Tuple[str, str]:
        """Return ``(name, forked_name)``, appending the dataloader suffix when configured."""
        name = result_metric.meta.name
        forked_name = result_metric.meta.forked_name(on_step)
        add_dataloader_idx = result_metric.meta.add_dataloader_idx
        dl_idx = result_metric.meta.dataloader_idx
        if add_dataloader_idx and dl_idx is not None:
            dataloader_suffix = self.DATALOADER_SUFFIX.format(dl_idx)
            name += dataloader_suffix
            forked_name += dataloader_suffix
        return name, forked_name
    def metrics(self, on_step: bool) -> _METRICS:
        """Gather the current values into the ``callback``/``log``/``pbar`` dictionaries."""
        metrics = _METRICS(callback={}, log={}, pbar={})
        for _, result_metric in self.valid_items():
            # extract forward_cache or computed from the _ResultMetric. ignore when the output is None
            value = apply_to_collection(result_metric, _ResultMetric, self._get_cache, on_step, include_none=False)
            # convert metric collection to dict container.
            if isinstance(value, _ResultMetricCollection):
                value = dict(value.items())
            # check if the collection is empty
            has_tensor = False
            def any_tensor(_: Any) -> None:
                nonlocal has_tensor
                has_tensor = True
            apply_to_collection(value, torch.Tensor, any_tensor)
            if not has_tensor:
                continue
            name, forked_name = self._forked_name(result_metric, on_step)
            # populate logging metrics
            if result_metric.meta.logger:
                metrics["log"][forked_name] = value
            # populate callback metrics. callback metrics don't take `_step` forked metrics
            # (precedence: training OR (on_epoch AND not on_step))
            if self.training or result_metric.meta.on_epoch and not on_step:
                metrics["callback"][name] = value
                metrics["callback"][forked_name] = value
            # populate progress_bar metrics. convert tensors to numbers
            if result_metric.meta.prog_bar:
                metrics["pbar"][forked_name] = metrics_to_scalars(value)
        return metrics
    def reset(self, metrics: Optional[bool] = None, fx: Optional[str] = None) -> None:
        """Reset the result collection.

        Args:
            metrics: If True, only ``torchmetrics.Metric`` results are reset,
                if False, only ``torch.Tensors`` are reset,
                if ``None``, both are.
            fx: Function to reset
        """
        def fn(item: _ResultMetric) -> None:
            requested_type = metrics is None or metrics ^ item.is_tensor
            same_fx = fx is None or fx == item.meta.fx
            if requested_type and same_fx:
                item.reset()
        apply_to_collection(self, _ResultMetric, fn)
    def to(self, *args: Any, **kwargs: Any) -> "_ResultCollection":
        """Move all data to the given device."""
        self.update(apply_to_collection(dict(self), (torch.Tensor, Metric), move_data_to_device, *args, **kwargs))
        if "device" in kwargs:
            self.device = kwargs["device"]
        return self
    def cpu(self) -> "_ResultCollection":
        """Move all data to CPU."""
        return self.to(device="cpu")
    def sync(self) -> None:
        """Synchronize every tensor metric across processes (skips already-synced ones)."""
        for result_metric in self.result_metrics:
            if result_metric.is_tensor and not result_metric._is_synced:
                result_metric.sync(should_sync=not result_metric.meta.sync.rank_zero_only)
    def unsync(self) -> None:
        """Undo :meth:`sync`, restoring the local (pre-reduction) metric states."""
        for result_metric in self.result_metrics:
            if result_metric.is_tensor and result_metric._is_synced:
                result_metric.unsync()
    def __str__(self) -> str:
        # remove empty values
        self_str = str({k: v for k, v in self.items() if v})
        return f"{self.__class__.__name__}({self_str})"
    def __repr__(self) -> str:
        return f"{{{self.training}, {repr(self.device)}, {super().__repr__()}}}"
    def __getstate__(self, drop_value: bool = True) -> dict:
        d = self.__dict__.copy()
        # all the items should be either `_ResultMetric`s or `_ResultMetricCollection`s
        items = {k: v.__getstate__(drop_value=drop_value) for k, v in self.items()}
        return {**d, "items": items}
    def __setstate__(
        self, state: dict, map_location: Optional[Union[str, torch.device]] = None, sync_fn: Optional[Callable] = None
    ) -> None:
        """Restore in place; ``sync_fn`` re-attaches the (unpicklable) sync function."""
        self.__dict__.update({k: v for k, v in state.items() if k != "items"})
        def setstate(k: str, item: dict) -> Union[_ResultMetric, _ResultMetricCollection]:
            if not isinstance(item, dict):
                raise ValueError(f"Unexpected value: {item}")
            cls = item["_class"]
            if cls == _ResultMetric.__name__:
                cls = _ResultMetric
            elif cls == _ResultMetricCollection.__name__:
                cls = _ResultMetricCollection
            else:
                raise ValueError(f"Unexpected class name: {cls}")
            _sync_fn = sync_fn or (self[k].meta.sync.fn if k in self else None)
            return cls._reconstruct(item, sync_fn=_sync_fn)
        items = {k: setstate(k, v) for k, v in state["items"].items()}
        self.update(items)
        device = map_location or self.device
        self.to(device)
    def state_dict(self, drop_value: bool = True) -> dict:
        return self.__getstate__(drop_value)
    def load_state_dict(
        self,
        state_dict: dict,
        map_location: Optional[Union[str, torch.device]] = None,
        sync_fn: Optional[Callable] = None,
        metrics: Optional[Dict[str, Metric]] = None,
    ) -> None:
        """Restore from ``state_dict`` and optionally re-attach live ``Metric`` objects."""
        self.__setstate__(state_dict, map_location=map_location, sync_fn=sync_fn)
        if not metrics:
            return
        # iterate through result metrics and re-attached Metric references on reload.
        result_metrics = self.result_metrics
        for metric_attribute, metric in metrics.items():
            for result_metric in result_metrics:
                if result_metric.meta.metric_attribute == metric_attribute:
                    result_metric.value = metric
| 38.680115 | 118 | 0.631985 |
from collections.abc import Generator
from dataclasses import asdict, dataclass, replace
from functools import partial, wraps
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union
import torch
from torchmetrics import Metric
from typing_extensions import TypedDict
from pytorch_lightning.core.mixins import DeviceDtypeModuleMixin
from pytorch_lightning.utilities.apply_func import apply_to_collection, apply_to_collections, move_data_to_device
from pytorch_lightning.utilities.data import extract_batch_size
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from pytorch_lightning.utilities.metrics import metrics_to_scalars
from pytorch_lightning.utilities.rank_zero import rank_zero_warn
from pytorch_lightning.utilities.warnings import WarningCache
# a user-logged value: either a ``torchmetrics.Metric`` instance or a raw tensor
_IN_METRIC = Union[Metric, torch.Tensor]
# shapes of the values handed out to loggers / callbacks / the progress bar
_OUT_METRIC = Union[torch.Tensor, Dict[str, torch.Tensor]]
_PBAR_METRIC = Union[float, Dict[str, float]]
_OUT_DICT = Dict[str, _OUT_METRIC]
_PBAR_DICT = Dict[str, _PBAR_METRIC]
class _METRICS(TypedDict):
    """The three destinations a logged value can be routed to."""
    callback: _OUT_DICT
    log: _OUT_DICT
    pbar: _PBAR_DICT
warning_cache = WarningCache()
@dataclass
class _Sync:
    """Holds the distributed sync (reduction) function together with its settings.

    The pre-bound partial ``_fn`` is regenerated whenever ``should``, ``op`` or
    ``group`` change, keeping each ``__call__`` cheap.
    """
    fn: Optional[Callable] = None
    _should: bool = False
    rank_zero_only: bool = False
    _op: Optional[str] = None
    _group: Optional[Any] = None
    def __post_init__(self) -> None:
        self._generate_sync_fn()
    @property
    def should(self) -> bool:
        return self._should
    @should.setter
    def should(self, should: bool) -> None:
        self._should = should
        # regenerate the cached partial so it reflects the new flag
        self._generate_sync_fn()
    @property
    def op(self) -> Optional[str]:
        return self._op
    @op.setter
    def op(self, op: Optional[str]) -> None:
        self._op = op
        self._generate_sync_fn()
    @property
    def group(self) -> Optional[Any]:
        return self._group
    @group.setter
    def group(self, group: Optional[Any]) -> None:
        self._group = group
        self._generate_sync_fn()
    def _generate_sync_fn(self) -> None:
        """Pre-bind the reduction (or a no-op) so ``__call__`` avoids per-call branching."""
        fn = self.no_op if self.fn is None or not self.should or self.rank_zero_only else self.fn
        self._fn: Callable = partial(fn, reduce_op=self.op, group=self.group)
    @property
    def __call__(self) -> Any:
        # a property so that `sync(value)` resolves straight to the cached partial
        return self._fn
    @staticmethod
    def no_op(value: Any, *_: Any, **__: Any) -> Any:
        return value
@dataclass
class _Metadata:
    """Metadata recorded for a single ``self.log(...)`` call: flags, reduction and sync settings."""
    fx: str
    name: str
    prog_bar: bool = False
    logger: bool = True
    on_step: bool = False
    on_epoch: bool = True
    reduce_fx: Callable = torch.mean
    enable_graph: bool = False
    add_dataloader_idx: bool = True
    dataloader_idx: Optional[int] = None
    metric_attribute: Optional[str] = None
    _sync: Optional[_Sync] = None
    def __post_init__(self) -> None:
        if not self.on_step and not self.on_epoch:
            raise MisconfigurationException("`self.log(on_step=False, on_epoch=False)` is not useful.")
        self._parse_reduce_fx()
    def _parse_reduce_fx(self) -> None:
        """Map string reductions ("avg", "mean", ...) to torch functions; reject custom callables."""
        error = (
            "Only `self.log(..., reduce_fx={min,max,mean,sum})` are currently supported."
            " Please, open an issue in `https://github.com/PyTorchLightning/pytorch-lightning/issues`."
            f" Found: {self.reduce_fx}"
        )
        if isinstance(self.reduce_fx, str):
            reduce_fx = self.reduce_fx.lower()
            if reduce_fx == "avg":
                reduce_fx = "mean"
            if reduce_fx not in ("min", "max", "mean", "sum"):
                raise MisconfigurationException(error)
            self.reduce_fx = getattr(torch, reduce_fx)
        elif self.is_custom_reduction:
            raise MisconfigurationException(error)
    @property
    def sync(self) -> _Sync:
        assert self._sync is not None
        return self._sync
    @sync.setter
    def sync(self, sync: _Sync) -> None:
        # default the distributed reduce op to the configured reduction's name
        if sync.op is None:
            sync.op = self.reduce_fx.__name__
        self._sync = sync
    @property
    def forked(self) -> bool:
        # logging with both flags produces `<name>_step` and `<name>_epoch` entries
        return self.on_step and self.on_epoch
    def forked_name(self, on_step: bool) -> str:
        if self.forked:
            return f'{self.name}_{"step" if on_step else "epoch"}'
        return self.name
    @property
    def is_mean_reduction(self) -> bool:
        return self.reduce_fx is torch.mean
    @property
    def is_sum_reduction(self) -> bool:
        return self.reduce_fx in (torch.sum, sum)
    @property
    def is_max_reduction(self) -> bool:
        return self.reduce_fx in (torch.max, max)
    @property
    def is_min_reduction(self) -> bool:
        return self.reduce_fx in (torch.min, min)
    @property
    def is_custom_reduction(self) -> bool:
        return not (self.is_mean_reduction or self.is_max_reduction or self.is_min_reduction or self.is_sum_reduction)
    def __getstate__(self) -> dict:
        # the sync function may not be picklable; drop it and re-attach it on `__setstate__`
        copy = replace(self, _sync=replace(self.sync, fn=None))
        d = asdict(copy)
        del d["_sync"]["fn"]
        return d
    def __setstate__(self, state: dict, sync_fn: Optional[Callable] = None) -> None:
        d = {**state, "_sync": _Sync(**state["_sync"], fn=sync_fn)}
        self.__dict__.update(d)
    @classmethod
    def _reconstruct(cls, state: dict, sync_fn: Optional[Callable] = None) -> "_Metadata":
        """Recreate a ``_Metadata`` instance from its pickled state."""
        meta = cls(state["fx"], state["name"])
        meta.__setstate__(state, sync_fn=sync_fn)
        return meta
class _ResultMetric(Metric, DeviceDtypeModuleMixin):
    """Wraps the value logged by the user and accumulates it with the reduction from its metadata."""
    def __init__(self, metadata: _Metadata, is_tensor: bool) -> None:
        super().__init__()
        self.is_tensor = is_tensor
        self.meta = metadata
        self.has_reset = False
        if is_tensor:
            # the initial state is the neutral element of the configured reduction
            if metadata.is_max_reduction:
                default = float("-inf")
            elif metadata.is_min_reduction:
                default = float("inf")
            else:
                default = 0.0
            self.add_state("value", torch.tensor(default), dist_reduce_fx=torch.sum)
            if self.meta.is_mean_reduction:
                self.cumulated_batch_size: torch.Tensor
                self.add_state("cumulated_batch_size", torch.tensor(0), dist_reduce_fx=torch.sum)
        self._forward_cache: Optional[Any] = None
    def update(self, value: _IN_METRIC, batch_size: int) -> None:
        """Accumulate ``value`` into the running state (tensor path) or store the Metric reference."""
        if self.is_tensor:
            value = cast(torch.Tensor, value)
            if not torch.is_floating_point(value):
                dtype = torch.get_default_dtype()
                warning_cache.warn(
                    f"You called `self.log({self.meta.name!r}, ...)` in your `{self.meta.fx}` but the value needs to"
                    f" be floating point. Converting it to {dtype}."
                )
                value = value.to(dtype)
            if self.meta.on_step:
                self._forward_cache = self.meta.sync(value.clone())
                if not self.meta.on_epoch:
                    # step-only metric: no epoch accumulation is needed
                    self.value = self._forward_cache
                    return
            if self.meta.is_mean_reduction:
                # weighted running sum; divided by `cumulated_batch_size` in `compute()`
                self.value = self.value + value.mean() * batch_size
                self.cumulated_batch_size = self.cumulated_batch_size + batch_size
            elif self.meta.is_max_reduction or self.meta.is_min_reduction:
                self.value = self.meta.reduce_fx(self.value, value.mean())
            elif self.meta.is_sum_reduction:
                self.value = self.value + value.mean()
        else:
            value = cast(Metric, value)
            self.value = value
            self._forward_cache = value._forward_cache
    def compute(self) -> torch.Tensor:
        """Return the reduced (and cross-process synced) value."""
        if self.is_tensor:
            value = self.meta.sync(self.value)
            if self.meta.is_mean_reduction:
                cumulated_batch_size = self.meta.sync(self.cumulated_batch_size)
                return value / cumulated_batch_size
            return value
        return self.value.compute()
    def reset(self) -> None:
        if self.is_tensor:
            super().reset()
        else:
            self.value.reset()
        self.has_reset = True
    def forward(self, value: _IN_METRIC, batch_size: int) -> None:
        if self.meta.enable_graph:
            with torch.no_grad():
                self.update(value, batch_size)
        else:
            # performance: skip the `torch.no_grad` context manager by calling `update` directly
            self.update(value, batch_size)
    def _wrap_compute(self, compute: Any) -> Any:
        # Override to avoid syncing - we handle it ourselves.
        @wraps(compute)
        def wrapped_func(*args: Any, **kwargs: Any) -> Optional[Any]:
            if not self._update_called:
                rank_zero_warn(
                    f"The ``compute`` method of metric {self.__class__.__name__}"
                    " was called before the ``update`` method which may lead to errors,"
                    " as metric states have not yet been updated.",
                )
            # return cached value
            if self._computed is not None:
                return self._computed
            self._computed = compute(*args, **kwargs)
            return self._computed
        return wrapped_func
    def __setattr__(self, key: str, value: Any) -> None:
        # performance: skip the `torch.nn.Module.__setattr__` checks
        object.__setattr__(self, key, value)
    def __repr__(self) -> str:
        state = f"{repr(self.meta.name)}, value={self.value}"
        if self.is_tensor and self.meta.is_mean_reduction:
            state += f", cumulated_batch_size={self.cumulated_batch_size}"
        return f"{self.__class__.__name__}({state})"
    def __getstate__(self, drop_value: bool = False) -> dict:
        skip = ["update", "compute", "_update_signature", "_cache"]
        if not self.is_tensor and drop_value:
            # Avoid serializing ResultMetrics which are passed Metrics
            skip.append("value")
        d = {k: v for k, v in self.__dict__.items() if k not in skip}
        d["meta"] = d["meta"].__getstate__()
        d["_class"] = self.__class__.__name__
        d["_is_synced"] = False  # don't consider the state as synced on reload
        return d
    def __setstate__(self, state: dict, sync_fn: Optional[Callable] = None) -> None:
        d = {**state, "meta": _Metadata._reconstruct(state["meta"], sync_fn=sync_fn)}
        super().__setstate__(d)
    @classmethod
    def _reconstruct(cls, state: dict, sync_fn: Optional[Callable] = None) -> "_ResultMetric":
        """Recreate a ``_ResultMetric`` from its pickled state."""
        meta = _Metadata._reconstruct(state["meta"])
        result_metric = cls(meta, state["is_tensor"])
        result_metric.__setstate__(state, sync_fn=sync_fn)
        return result_metric
    def to(self, *args: Any, **kwargs: Any) -> "_ResultMetric":
        """Move all tensor/Metric attributes to the given device/dtype."""
        self.__dict__.update(
            apply_to_collection(self.__dict__, (torch.Tensor, Metric), move_data_to_device, *args, **kwargs)
        )
        return self
class _ResultMetricCollection(dict):
    """Dict of ``_ResultMetric``s produced by one logged (nested) collection; all share one metadata."""
    @property
    def meta(self) -> _Metadata:
        first_metric = next(iter(self.values()))
        return first_metric.meta
    @property
    def has_tensor(self) -> bool:
        for metric in self.values():
            if metric.is_tensor:
                return True
        return False
    def __getstate__(self, drop_value: bool = False) -> dict:
        # serialize each contained metric, then attach the shared metadata and class tag
        serialized = apply_to_collection(
            dict(self), _ResultMetric, lambda item: item.__getstate__(drop_value=drop_value)
        )
        return {"items": serialized, "meta": self.meta.__getstate__(), "_class": self.__class__.__name__}
    def __setstate__(self, state: dict, sync_fn: Optional[Callable] = None) -> None:
        restored = {
            key: _ResultMetric._reconstruct(saved, sync_fn=sync_fn) for key, saved in state["items"].items()
        }
        self.update(restored)
    @classmethod
    def _reconstruct(cls, state: dict, sync_fn: Optional[Callable] = None) -> "_ResultMetricCollection":
        """Build a fresh collection and restore it in place from a pickled ``state`` dict."""
        collection = cls()
        collection.__setstate__(state, sync_fn=sync_fn)
        return collection
_METRIC_COLLECTION = Union[_IN_METRIC, _ResultMetricCollection]
class _ResultCollection(dict):
    """
    Collection (dictionary) of ``_ResultMetric`` or ``_ResultMetricCollection`` entries,
    keyed by ``"{fx}.{name}"`` (optionally suffixed with the dataloader index).

    Example:

        # `device` needs to be provided before logging
        result = _ResultCollection(training=True, device=torch.device("cpu"))
        # you can log to a specific collection.
        # arguments: fx, key, value, metadata
        result.log('training_step', 'acc', torch.tensor(...), on_step=True, on_epoch=True)
        result.log('validation_step', 'recall', torch.tensor(...), on_step=True, on_epoch=True)
    """
    DATALOADER_SUFFIX = "/dataloader_idx_{}"
    def __init__(self, training: bool, device: Optional[Union[str, torch.device]] = None) -> None:
        super().__init__()
        self.training = training
        self.device: Optional[Union[str, torch.device]] = device
        # current batch and its lazily-extracted size; used to weight mean reductions
        self.batch: Optional[Any] = None
        self.batch_size: Optional[int] = None
        self.dataloader_idx: Optional[int] = None
    @property
    def result_metrics(self) -> List[_ResultMetric]:
        """Flat list of every stored ``_ResultMetric``, including those nested in collections."""
        o = []
        def append_fn(v: _ResultMetric) -> None:
            nonlocal o
            o.append(v)
        apply_to_collection(list(self.values()), _ResultMetric, append_fn)
        return o
    def _extract_batch_size(
        self, value: Union[_ResultMetric, _ResultMetricCollection], batch_size: Optional[int], meta: _Metadata
    ) -> int:
        """Resolve the batch size: explicit argument, cached value, or extracted from the current batch."""
        # check if we have extracted the batch size already
        if batch_size is None:
            batch_size = self.batch_size
        if batch_size is not None:
            return batch_size
        batch_size = 1
        is_tensor = value.is_tensor if isinstance(value, _ResultMetric) else value.has_tensor
        # only tensor metrics with an epoch-level mean reduction need the true batch size
        if self.batch is not None and is_tensor and meta.on_epoch and meta.is_mean_reduction:
            batch_size = extract_batch_size(self.batch)
            self.batch_size = batch_size
        return batch_size
    def log(
        self,
        fx: str,
        name: str,
        value: _METRIC_COLLECTION,
        prog_bar: bool = False,
        logger: bool = True,
        on_step: bool = False,
        on_epoch: bool = True,
        reduce_fx: Callable = torch.mean,
        enable_graph: bool = False,
        sync_dist: bool = False,
        sync_dist_fn: Callable = _Sync.no_op,
        sync_dist_group: Optional[Any] = None,
        add_dataloader_idx: bool = True,
        batch_size: Optional[int] = None,
        metric_attribute: Optional[str] = None,
        rank_zero_only: bool = False,
    ) -> None:
        """Register (on first call) and update the metric stored under ``"{fx}.{name}"``."""
        # no metrics should be logged with graphs
        if not enable_graph:
            value = recursive_detach(value)
        # move metrics to cpu on TPU.
        if isinstance(value, torch.Tensor) and value.device.type == "xla":
            value = value.cpu()
        # storage key
        key = f"{fx}.{name}"
        # add dataloader_suffix to both key and fx
        if add_dataloader_idx and self.dataloader_idx is not None:
            key += f".{self.dataloader_idx}"
            fx += f".{self.dataloader_idx}"
        meta = _Metadata(
            fx=fx,
            name=name,
            prog_bar=prog_bar,
            logger=logger,
            on_step=on_step,
            on_epoch=on_epoch,
            reduce_fx=reduce_fx,
            enable_graph=enable_graph,
            add_dataloader_idx=add_dataloader_idx,
            dataloader_idx=self.dataloader_idx,
            metric_attribute=metric_attribute,
        )
        meta.sync = _Sync(_should=sync_dist, fn=sync_dist_fn, _group=sync_dist_group, rank_zero_only=rank_zero_only)
        # register logged value if it doesn't exist
        if key not in self:
            self.register_key(key, meta, value)
        # the stored metadata and the current call's metadata must match
        elif meta != self[key].meta:
            raise MisconfigurationException(
                f"You called `self.log({name}, ...)` twice in `{fx}` with different arguments. This is not allowed"
            )
        batch_size = self._extract_batch_size(self[key], batch_size, meta)
        self.update_metrics(key, value, batch_size)
    def register_key(self, key: str, meta: _Metadata, value: _METRIC_COLLECTION) -> None:
        """Create one ``_ResultMetric`` per value; a nested dict becomes a ``_ResultMetricCollection``."""
        def fn(v: _IN_METRIC) -> _ResultMetric:
            metric = _ResultMetric(meta, isinstance(v, torch.Tensor))
            return metric.to(self.device)
        value = apply_to_collection(value, (torch.Tensor, Metric), fn)
        if isinstance(value, dict):
            value = _ResultMetricCollection(value)
        self[key] = value
    def update_metrics(self, key: str, value: _METRIC_COLLECTION, batch_size: int) -> None:
        """Feed ``value`` into the already-registered ``_ResultMetric``(s) under ``key``."""
        def fn(result_metric: _ResultMetric, v: torch.Tensor) -> None:
            # performance: call `forward` directly to skip the checks in `torch.nn.Module._call_impl`
            result_metric.forward(v.to(self.device), batch_size)
            result_metric.has_reset = False
        apply_to_collections(self[key], value, _ResultMetric, fn)
    @staticmethod
    def _get_cache(result_metric: _ResultMetric, on_step: bool) -> Optional[torch.Tensor]:
        """Return the step-level forward cache or the (lazily reduced) epoch-level value."""
        cache = None
        if on_step and result_metric.meta.on_step:
            cache = result_metric._forward_cache
        elif not on_step and result_metric.meta.on_epoch:
            if result_metric._computed is None:
                # always reduce on epoch end
                should = result_metric.meta.sync.should
                result_metric.meta.sync.should = True
                result_metric.compute()
                result_metric.meta.sync.should = should
            cache = result_metric._computed
        if cache is not None and not result_metric.meta.enable_graph:
            return cache.detach()
        return cache
    def valid_items(self) -> Generator:
        """Iterate over the metrics that are not reset and belong to the current dataloader."""
        return (
            (k, v)
            for k, v in self.items()
            if not (isinstance(v, _ResultMetric) and v.has_reset) and self.dataloader_idx == v.meta.dataloader_idx
        )
    def _forked_name(self, result_metric: _ResultMetric, on_step: bool) -> Tuple[str, str]:
        """Return ``(name, forked_name)``, appending the dataloader suffix when configured."""
        name = result_metric.meta.name
        forked_name = result_metric.meta.forked_name(on_step)
        add_dataloader_idx = result_metric.meta.add_dataloader_idx
        dl_idx = result_metric.meta.dataloader_idx
        if add_dataloader_idx and dl_idx is not None:
            dataloader_suffix = self.DATALOADER_SUFFIX.format(dl_idx)
            name += dataloader_suffix
            forked_name += dataloader_suffix
        return name, forked_name
    def metrics(self, on_step: bool) -> _METRICS:
        """Gather the current values into the ``callback``/``log``/``pbar`` dictionaries."""
        metrics = _METRICS(callback={}, log={}, pbar={})
        for _, result_metric in self.valid_items():
            # extract forward_cache or computed value; `include_none=False` drops missing outputs
            value = apply_to_collection(result_metric, _ResultMetric, self._get_cache, on_step, include_none=False)
            # convert a metric collection to a plain dict container
            if isinstance(value, _ResultMetricCollection):
                value = dict(value.items())
            # skip entries that contain no tensor at all
            has_tensor = False
            def any_tensor(_: Any) -> None:
                nonlocal has_tensor
                has_tensor = True
            apply_to_collection(value, torch.Tensor, any_tensor)
            if not has_tensor:
                continue
            name, forked_name = self._forked_name(result_metric, on_step)
            # populate logging metrics
            if result_metric.meta.logger:
                metrics["log"][forked_name] = value
            # populate callback metrics; callback metrics don't take `_step` forked metrics
            # (precedence: training OR (on_epoch AND not on_step))
            if self.training or result_metric.meta.on_epoch and not on_step:
                metrics["callback"][name] = value
                metrics["callback"][forked_name] = value
            # populate progress-bar metrics, converting tensors to plain numbers
            if result_metric.meta.prog_bar:
                metrics["pbar"][forked_name] = metrics_to_scalars(value)
        return metrics
    def reset(self, metrics: Optional[bool] = None, fx: Optional[str] = None) -> None:
        """Reset the result collection.

        Args:
            metrics: If True, only ``torchmetrics.Metric`` results are reset,
                if False, only ``torch.Tensors`` are reset,
                if ``None``, both are.
            fx: Function to reset
        """
        def fn(item: _ResultMetric) -> None:
            requested_type = metrics is None or metrics ^ item.is_tensor
            same_fx = fx is None or fx == item.meta.fx
            if requested_type and same_fx:
                item.reset()
        apply_to_collection(self, _ResultMetric, fn)
    def to(self, *args: Any, **kwargs: Any) -> "_ResultCollection":
        """Move all data to the given device."""
        self.update(apply_to_collection(dict(self), (torch.Tensor, Metric), move_data_to_device, *args, **kwargs))
        if "device" in kwargs:
            self.device = kwargs["device"]
        return self
    def cpu(self) -> "_ResultCollection":
        """Move all data to CPU."""
        return self.to(device="cpu")
    def sync(self) -> None:
        """Synchronize every tensor metric across processes (skips already-synced ones)."""
        for result_metric in self.result_metrics:
            if result_metric.is_tensor and not result_metric._is_synced:
                result_metric.sync(should_sync=not result_metric.meta.sync.rank_zero_only)
    def unsync(self) -> None:
        """Undo :meth:`sync`, restoring the local (pre-reduction) metric states."""
        for result_metric in self.result_metrics:
            if result_metric.is_tensor and result_metric._is_synced:
                result_metric.unsync()
    def __str__(self) -> str:
        # remove empty values
        self_str = str({k: v for k, v in self.items() if v})
        return f"{self.__class__.__name__}({self_str})"
    def __repr__(self) -> str:
        return f"{{{self.training}, {repr(self.device)}, {super().__repr__()}}}"
    def __getstate__(self, drop_value: bool = True) -> dict:
        d = self.__dict__.copy()
        # all the items should be either `_ResultMetric`s or `_ResultMetricCollection`s
        items = {k: v.__getstate__(drop_value=drop_value) for k, v in self.items()}
        return {**d, "items": items}
    def __setstate__(
        self, state: dict, map_location: Optional[Union[str, torch.device]] = None, sync_fn: Optional[Callable] = None
    ) -> None:
        """Restore in place; ``sync_fn`` re-attaches the (unpicklable) sync function."""
        self.__dict__.update({k: v for k, v in state.items() if k != "items"})
        def setstate(k: str, item: dict) -> Union[_ResultMetric, _ResultMetricCollection]:
            if not isinstance(item, dict):
                raise ValueError(f"Unexpected value: {item}")
            cls = item["_class"]
            if cls == _ResultMetric.__name__:
                cls = _ResultMetric
            elif cls == _ResultMetricCollection.__name__:
                cls = _ResultMetricCollection
            else:
                raise ValueError(f"Unexpected class name: {cls}")
            _sync_fn = sync_fn or (self[k].meta.sync.fn if k in self else None)
            return cls._reconstruct(item, sync_fn=_sync_fn)
        items = {k: setstate(k, v) for k, v in state["items"].items()}
        self.update(items)
        device = map_location or self.device
        self.to(device)
    def state_dict(self, drop_value: bool = True) -> dict:
        return self.__getstate__(drop_value)
    def load_state_dict(
        self,
        state_dict: dict,
        map_location: Optional[Union[str, torch.device]] = None,
        sync_fn: Optional[Callable] = None,
        metrics: Optional[Dict[str, Metric]] = None,
    ) -> None:
        """Restore from ``state_dict`` and optionally re-attach live ``Metric`` objects."""
        self.__setstate__(state_dict, map_location=map_location, sync_fn=sync_fn)
        if not metrics:
            return
        # iterate through result metrics and re-attached Metric references on reload.
        result_metrics = self.result_metrics
        for metric_attribute, metric in metrics.items():
            for result_metric in result_metrics:
                if result_metric.meta.metric_attribute == metric_attribute:
                    result_metric.value = metric
| true | true |
f7f70935c9b029703531a2e75bfe3d5fdc21b428 | 13,176 | py | Python | mealpy/swarm_based/SSpiderO.py | rishavpramanik/mealpy | d4a4d5810f15837764e4ee61517350fef3dc92b3 | [
"MIT"
] | null | null | null | mealpy/swarm_based/SSpiderO.py | rishavpramanik/mealpy | d4a4d5810f15837764e4ee61517350fef3dc92b3 | [
"MIT"
] | null | null | null | mealpy/swarm_based/SSpiderO.py | rishavpramanik/mealpy | d4a4d5810f15837764e4ee61517350fef3dc92b3 | [
"MIT"
] | null | null | null | # !/usr/bin/env python
# Created by "Thieu" at 12:00, 17/03/2020 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
import numpy as np
from copy import deepcopy
from mealpy.optimizer import Optimizer
class BaseSSpiderO(Optimizer):
"""
The original version of: Social Spider Optimization (SSpiderO)
Links:
1. https://www.hindawi.com/journals/mpe/2018/6843923/
Hyper-parameters should fine tuned in approximate range to get faster convergence toward the global optimum:
+ fp (list, tuple): (fp_min, fp_max): Female Percent, default = (0.65, 0.9)
Examples
~~~~~~~~
>>> import numpy as np
>>> from mealpy.swarm_based.SSpiderO import BaseSSpiderO
>>>
>>> def fitness_function(solution):
>>> return np.sum(solution**2)
>>>
>>> problem_dict1 = {
>>> "fit_func": fitness_function,
>>> "lb": [-10, -15, -4, -2, -8],
>>> "ub": [10, 15, 12, 8, 20],
>>> "minmax": "min",
>>> }
>>>
>>> epoch = 1000
>>> pop_size = 50
    >>> fp = [0.65, 0.9]
    >>> model = BaseSSpiderO(problem_dict1, epoch, pop_size, fp)
>>> best_position, best_fitness = model.solve()
>>> print(f"Solution: {best_position}, Fitness: {best_fitness}")
References
~~~~~~~~~~
[1] Luque-Chang, A., Cuevas, E., Fausto, F., Zaldivar, D. and Pérez, M., 2018. Social spider
optimization algorithm: modifications, applications, and perspectives. Mathematical
Problems in Engineering, 2018.
"""
ID_POS = 0
ID_TAR = 1
ID_WEI = 2
    def __init__(self, problem, epoch=10000, pop_size=100, fp=(0.65, 0.9), **kwargs):
        """
        Args:
            problem (dict): The problem dictionary
            epoch (int): maximum number of iterations, default = 10000
            pop_size (int): number of population size, default = 100
            fp (list, tuple): (fp_min, fp_max): Female Percent, default = (0.65, 0.9)
        """
        super().__init__(problem, kwargs)
        self.epoch = self.validator.check_int("epoch", epoch, [1, 100000])
        self.pop_size = self.validator.check_int("pop_size", pop_size, [10, 10000])
        fp = self.validator.check_tuple_float("fp (min, max)", fp, ((0, 1.0), (0, 1.0)))
        # normalize to (min, max) regardless of the order supplied by the caller
        self.fp = (min(fp), max(fp))
def create_solution(self, lb=None, ub=None):
"""
To get the position, fitness wrapper, target and obj list
+ A[self.ID_POS] --> Return: position
+ A[self.ID_TAR] --> Return: [target, [obj1, obj2, ...]]
+ A[self.ID_TAR][self.ID_FIT] --> Return: target
+ A[self.ID_TAR][self.ID_OBJ] --> Return: [obj1, obj2, ...]
Returns:
list: wrapper of solution with format [position, target, weight]
"""
position = np.random.uniform(lb, ub)
position = self.amend_position(position, lb, ub)
target = self.get_target_wrapper(position)
weight = 0.0
return [position, target, weight]
def amend_position(self, position=None, lb=None, ub=None):
"""
Depend on what kind of problem are we trying to solve, there will be an different amend_position
function to rebound the position of agent into the valid range.
Args:
position: vector position (location) of the solution.
lb: list of lower bound values
ub: list of upper bound values
Returns:
Amended position (make the position is in bound)
"""
return np.where(np.logical_and(lb <= position, position <= ub), position, np.random.uniform(lb, ub))
    def initialization(self):
        """Split the swarm into females/males by a random female percent and build both populations."""
        fp_temp = self.fp[0] + (self.fp[1] - self.fp[0]) * np.random.uniform()  # Female Aleatory Percent
        self.n_f = int(self.pop_size * fp_temp)  # number of female
        self.n_m = self.pop_size - self.n_f  # number of male
        # Probabilities of attraction or repulsion Proper tuning for better results
        # (decreases linearly from ~1 toward 0 over the epochs)
        self.p_m = (self.epoch + 1 - np.array(range(1, self.epoch + 1))) / (self.epoch + 1)
        self.pop_males = self.create_population(self.n_m)
        self.pop_females = self.create_population(self.n_f)
        pop = deepcopy(self.pop_females) + deepcopy(self.pop_males)
        self.pop = self._recalculate_weights(pop)
        _, self.g_best = self.get_global_best_solution(self.pop)
    def _move_females(self, epoch=None):
        """Move every female spider by attraction to / repulsion from stronger vibrations.

        Each female reacts to the nearest heavier spider (vibration ``vibs``) and to the
        global best (``vibb``); attraction vs. repulsion is chosen against ``p_m[epoch]``.
        Updates ``self.pop_females`` in place and re-evaluates their targets.
        """
        scale_distance = np.sum(self.problem.ub - self.problem.lb)
        pop = self.pop_females + self.pop_males
        # Start looking for any stronger vibration
        for i in range(0, self.n_f):  # Move the females
            ## Find the position s
            id_min = None
            dist_min = 2 ** 16  # sentinel larger than any normalized distance
            for j in range(0, self.pop_size):
                if self.pop_females[i][self.ID_WEI] < pop[j][self.ID_WEI]:
                    dt = np.linalg.norm(pop[j][self.ID_POS] - self.pop_females[i][self.ID_POS]) / scale_distance
                    if dt < dist_min and dt != 0:
                        dist_min = dt
                        id_min = j
            x_s = np.zeros(self.problem.n_dims)
            vibs = 0
            if id_min is not None:
                vibs = 2 * (pop[id_min][self.ID_WEI] * np.exp(-(np.random.uniform() * dist_min ** 2)))  # Vib for the shortest
                x_s = pop[id_min][self.ID_POS]
            ## Find the position b
            dtb = np.linalg.norm(self.g_best[self.ID_POS] - self.pop_females[i][self.ID_POS]) / scale_distance
            vibb = 2 * (self.g_best[self.ID_WEI] * np.exp(-(np.random.uniform() * dtb ** 2)))
            ## Do attraction or repulsion
            beta = np.random.uniform(0, 1, self.problem.n_dims)
            gamma = np.random.uniform(0, 1, self.problem.n_dims)
            random = 2 * self.p_m[epoch] * (np.random.uniform(0, 1, self.problem.n_dims) - 0.5)
            if np.random.uniform() >= self.p_m[epoch]:  # Do an attraction
                pos_new = self.pop_females[i][self.ID_POS] + vibs * (x_s - self.pop_females[i][self.ID_POS]) * beta + \
                          vibb * (self.g_best[self.ID_POS] - self.pop_females[i][self.ID_POS]) * gamma + random
            else:  # Do a repulsion
                pos_new = self.pop_females[i][self.ID_POS] - vibs * (x_s - self.pop_females[i][self.ID_POS]) * beta - \
                          vibb * (self.g_best[self.ID_POS] - self.pop_females[i][self.ID_POS]) * gamma + random
            self.pop_females[i][self.ID_POS] = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
        self.pop_females = self.update_target_wrapper_population(self.pop_females)
        self.nfe_epoch += self.n_f
def _move_males(self, epoch=None):
scale_distance = np.sum(self.problem.ub - self.problem.lb)
my_median = np.median([it[self.ID_WEI] for it in self.pop_males])
pop = self.pop_females + self.pop_males
all_pos = np.array([it[self.ID_POS] for it in pop])
all_wei = np.array([it[self.ID_WEI] for it in pop]).reshape((self.pop_size, 1))
total_wei = np.sum(all_wei)
if total_wei == 0:
mean = np.mean(all_pos, axis=0)
else:
mean = np.sum(all_wei * all_pos, axis=0) / total_wei
for i in range(0, self.n_m):
delta = 2 * np.random.uniform(0, 1, self.problem.n_dims) - 0.5
random = 2 * self.p_m[epoch] * (np.random.uniform(0, 1, self.problem.n_dims) - 0.5)
if self.pop_males[i][self.ID_WEI] >= my_median: # Spider above the median
# Start looking for a female with stronger vibration
id_min = None
dist_min = 99999999
for j in range(0, self.n_f):
if self.pop_females[j][self.ID_WEI] > self.pop_males[i][self.ID_WEI]:
dt = np.linalg.norm(self.pop_females[j][self.ID_POS] - self.pop_males[i][self.ID_POS]) / scale_distance
if dt < dist_min and dt != 0:
dist_min = dt
id_min = j
x_s = np.zeros(self.problem.n_dims)
vibs = 0
if id_min != None:
# Vib for the shortest
vibs = 2 * (self.pop_females[id_min][self.ID_WEI] * np.exp(-(np.random.uniform() * dist_min ** 2)))
x_s = self.pop_females[id_min][self.ID_POS]
pos_new = self.pop_males[i][self.ID_POS] + vibs * (x_s - self.pop_males[i][self.ID_POS]) * delta + random
else:
# Spider below median, go to weighted mean
pos_new = self.pop_males[i][self.ID_POS] + delta * (mean - self.pop_males[i][self.ID_POS]) + random
self.pop_males[i][self.ID_POS] = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
self.pop_males = self.update_target_wrapper_population(self.pop_males)
self.nfe_epoch += self.n_m
### Crossover
def _crossover__(self, mom=None, dad=None, id=0):
child1 = np.zeros(self.problem.n_dims)
child2 = np.zeros(self.problem.n_dims)
if id == 0: # arithmetic recombination
r = np.random.uniform(0.5, 1) # w1 = w2 when r =0.5
child1 = np.multiply(r, mom) + np.multiply((1 - r), dad)
child2 = np.multiply(r, dad) + np.multiply((1 - r), mom)
elif id == 1:
id1 = np.random.randint(1, int(self.problem.n_dims / 2))
id2 = int(id1 + self.problem.n_dims / 2)
child1[:id1] = mom[:id1]
child1[id1:id2] = dad[id1:id2]
child1[id2:] = mom[id2:]
child2[:id1] = dad[:id1]
child2[id1:id2] = mom[id1:id2]
child2[id2:] = dad[id2:]
elif id == 2:
temp = int(self.problem.n_dims / 2)
child1[:temp] = mom[:temp]
child1[temp:] = dad[temp:]
child2[:temp] = dad[:temp]
child2[temp:] = mom[temp:]
return child1, child2
def _mating(self):
# Check whether a spider is good or not (above median)
my_median = np.median([it[self.ID_WEI] for it in self.pop_males])
pop_males_new = [self.pop_males[i] for i in range(self.n_m) if self.pop_males[i][self.ID_WEI] > my_median]
# Calculate the radio
pop = self.pop_females + self.pop_males
all_pos = np.array([it[self.ID_POS] for it in pop])
rad = np.max(all_pos, axis=1) - np.min(all_pos, axis=1)
r = np.sum(rad) / (2 * self.problem.n_dims)
# Start looking if there's a good female near
list_child = []
couples = []
for i in range(0, len(pop_males_new)):
for j in range(0, self.n_f):
dist = np.linalg.norm(pop_males_new[i][self.ID_POS] - self.pop_females[j][self.ID_POS])
if dist < r:
couples.append([pop_males_new[i], self.pop_females[j]])
if couples:
n_child = len(couples)
for k in range(n_child):
child1, child2 = self._crossover__(couples[k][0][self.ID_POS], couples[k][1][self.ID_POS], 0)
pos1 = self.amend_position(child1, self.problem.lb, self.problem.ub)
pos2 = self.amend_position(child2, self.problem.lb, self.problem.ub)
target1 = self.get_target_wrapper(pos1)
target2 = self.get_target_wrapper(pos2)
list_child.append([pos1, target1, 0.0])
list_child.append([pos2, target2, 0.0])
else:
list_child = self.create_population(self.pop_size)
self.nfe_epoch += len(list_child)
return list_child
def _survive(self, pop=None, pop_child=None):
n_child = len(pop)
pop_child = self.get_sorted_strim_population(pop_child, n_child)
for i in range(0, n_child):
if self.compare_agent(pop_child[i], pop[i]):
pop[i] = deepcopy(pop_child[i])
return pop
def _recalculate_weights(self, pop=None):
fit_total, fit_best, fit_worst = self.get_special_fitness(pop)
for i in range(len(pop)):
if fit_best == fit_worst:
pop[i][self.ID_WEI] = np.random.uniform(0.2, 0.8)
else:
pop[i][self.ID_WEI] = 0.001 + (pop[i][self.ID_TAR][self.ID_FIT] - fit_worst) / (fit_best - fit_worst)
return pop
def evolve(self, epoch):
"""
The main operations (equations) of algorithm. Inherit from Optimizer class
Args:
epoch (int): The current iteration
"""
self.nfe_epoch = 0
### Movement of spiders
self._move_females(epoch)
self._move_males(epoch)
# Recalculate weights
pop = self.pop_females + self.pop_males
pop = self._recalculate_weights(pop)
# Mating Operator
pop_child = self._mating()
pop = self._survive(pop, pop_child)
self.pop = self._recalculate_weights(pop)
self.nfe_per_epoch = self.nfe_epoch
| 44.969283 | 127 | 0.571949 |
import numpy as np
from copy import deepcopy
from mealpy.optimizer import Optimizer
class BaseSSpiderO(Optimizer):
ID_POS = 0
ID_TAR = 1
ID_WEI = 2
def __init__(self, problem, epoch=10000, pop_size=100, fp=(0.65, 0.9), **kwargs):
super().__init__(problem, kwargs)
self.epoch = self.validator.check_int("epoch", epoch, [1, 100000])
self.pop_size = self.validator.check_int("pop_size", pop_size, [10, 10000])
fp = self.validator.check_tuple_float("fp (min, max)", fp, ((0, 1.0), (0, 1.0)))
self.fp = (min(fp), max(fp))
def create_solution(self, lb=None, ub=None):
position = np.random.uniform(lb, ub)
position = self.amend_position(position, lb, ub)
target = self.get_target_wrapper(position)
weight = 0.0
return [position, target, weight]
def amend_position(self, position=None, lb=None, ub=None):
return np.where(np.logical_and(lb <= position, position <= ub), position, np.random.uniform(lb, ub))
def initialization(self):
fp_temp = self.fp[0] + (self.fp[1] - self.fp[0]) * np.random.uniform()
self.n_f = int(self.pop_size * fp_temp)
self.n_m = self.pop_size - self.n_f
self.p_m = (self.epoch + 1 - np.array(range(1, self.epoch + 1))) / (self.epoch + 1)
self.pop_males = self.create_population(self.n_m)
self.pop_females = self.create_population(self.n_f)
pop = deepcopy(self.pop_females) + deepcopy(self.pop_males)
self.pop = self._recalculate_weights(pop)
_, self.g_best = self.get_global_best_solution(self.pop)
def _move_females(self, epoch=None):
scale_distance = np.sum(self.problem.ub - self.problem.lb)
pop = self.pop_females + self.pop_males
for i in range(0, self.n_f):
None
dist_min = 2 ** 16
for j in range(0, self.pop_size):
if self.pop_females[i][self.ID_WEI] < pop[j][self.ID_WEI]:
dt = np.linalg.norm(pop[j][self.ID_POS] - self.pop_females[i][self.ID_POS]) / scale_distance
if dt < dist_min and dt != 0:
dist_min = dt
id_min = j
x_s = np.zeros(self.problem.n_dims)
vibs = 0
if id_min is not None:
vibs = 2 * (pop[id_min][self.ID_WEI] * np.exp(-(np.random.uniform() * dist_min ** 2)))
x_s = pop[id_min][self.ID_POS]
.linalg.norm(self.g_best[self.ID_POS] - self.pop_females[i][self.ID_POS]) / scale_distance
vibb = 2 * (self.g_best[self.ID_WEI] * np.exp(-(np.random.uniform() * dtb ** 2)))
m.uniform(0, 1, self.problem.n_dims)
gamma = np.random.uniform(0, 1, self.problem.n_dims)
random = 2 * self.p_m[epoch] * (np.random.uniform(0, 1, self.problem.n_dims) - 0.5)
if np.random.uniform() >= self.p_m[epoch]:
pos_new = self.pop_females[i][self.ID_POS] + vibs * (x_s - self.pop_females[i][self.ID_POS]) * beta + \
vibb * (self.g_best[self.ID_POS] - self.pop_females[i][self.ID_POS]) * gamma + random
else:
pos_new = self.pop_females[i][self.ID_POS] - vibs * (x_s - self.pop_females[i][self.ID_POS]) * beta - \
vibb * (self.g_best[self.ID_POS] - self.pop_females[i][self.ID_POS]) * gamma + random
self.pop_females[i][self.ID_POS] = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
self.pop_females = self.update_target_wrapper_population(self.pop_females)
self.nfe_epoch += self.n_f
def _move_males(self, epoch=None):
scale_distance = np.sum(self.problem.ub - self.problem.lb)
my_median = np.median([it[self.ID_WEI] for it in self.pop_males])
pop = self.pop_females + self.pop_males
all_pos = np.array([it[self.ID_POS] for it in pop])
all_wei = np.array([it[self.ID_WEI] for it in pop]).reshape((self.pop_size, 1))
total_wei = np.sum(all_wei)
if total_wei == 0:
mean = np.mean(all_pos, axis=0)
else:
mean = np.sum(all_wei * all_pos, axis=0) / total_wei
for i in range(0, self.n_m):
delta = 2 * np.random.uniform(0, 1, self.problem.n_dims) - 0.5
random = 2 * self.p_m[epoch] * (np.random.uniform(0, 1, self.problem.n_dims) - 0.5)
if self.pop_males[i][self.ID_WEI] >= my_median:
id_min = None
dist_min = 99999999
for j in range(0, self.n_f):
if self.pop_females[j][self.ID_WEI] > self.pop_males[i][self.ID_WEI]:
dt = np.linalg.norm(self.pop_females[j][self.ID_POS] - self.pop_males[i][self.ID_POS]) / scale_distance
if dt < dist_min and dt != 0:
dist_min = dt
id_min = j
x_s = np.zeros(self.problem.n_dims)
vibs = 0
if id_min != None:
vibs = 2 * (self.pop_females[id_min][self.ID_WEI] * np.exp(-(np.random.uniform() * dist_min ** 2)))
x_s = self.pop_females[id_min][self.ID_POS]
pos_new = self.pop_males[i][self.ID_POS] + vibs * (x_s - self.pop_males[i][self.ID_POS]) * delta + random
else:
pos_new = self.pop_males[i][self.ID_POS] + delta * (mean - self.pop_males[i][self.ID_POS]) + random
self.pop_males[i][self.ID_POS] = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
self.pop_males = self.update_target_wrapper_population(self.pop_males)
self.nfe_epoch += self.n_m
elf, mom=None, dad=None, id=0):
child1 = np.zeros(self.problem.n_dims)
child2 = np.zeros(self.problem.n_dims)
if id == 0:
r = np.random.uniform(0.5, 1)
child1 = np.multiply(r, mom) + np.multiply((1 - r), dad)
child2 = np.multiply(r, dad) + np.multiply((1 - r), mom)
elif id == 1:
id1 = np.random.randint(1, int(self.problem.n_dims / 2))
id2 = int(id1 + self.problem.n_dims / 2)
child1[:id1] = mom[:id1]
child1[id1:id2] = dad[id1:id2]
child1[id2:] = mom[id2:]
child2[:id1] = dad[:id1]
child2[id1:id2] = mom[id1:id2]
child2[id2:] = dad[id2:]
elif id == 2:
temp = int(self.problem.n_dims / 2)
child1[:temp] = mom[:temp]
child1[temp:] = dad[temp:]
child2[:temp] = dad[:temp]
child2[temp:] = mom[temp:]
return child1, child2
def _mating(self):
my_median = np.median([it[self.ID_WEI] for it in self.pop_males])
pop_males_new = [self.pop_males[i] for i in range(self.n_m) if self.pop_males[i][self.ID_WEI] > my_median]
pop = self.pop_females + self.pop_males
all_pos = np.array([it[self.ID_POS] for it in pop])
rad = np.max(all_pos, axis=1) - np.min(all_pos, axis=1)
r = np.sum(rad) / (2 * self.problem.n_dims)
list_child = []
couples = []
for i in range(0, len(pop_males_new)):
for j in range(0, self.n_f):
dist = np.linalg.norm(pop_males_new[i][self.ID_POS] - self.pop_females[j][self.ID_POS])
if dist < r:
couples.append([pop_males_new[i], self.pop_females[j]])
if couples:
n_child = len(couples)
for k in range(n_child):
child1, child2 = self._crossover__(couples[k][0][self.ID_POS], couples[k][1][self.ID_POS], 0)
pos1 = self.amend_position(child1, self.problem.lb, self.problem.ub)
pos2 = self.amend_position(child2, self.problem.lb, self.problem.ub)
target1 = self.get_target_wrapper(pos1)
target2 = self.get_target_wrapper(pos2)
list_child.append([pos1, target1, 0.0])
list_child.append([pos2, target2, 0.0])
else:
list_child = self.create_population(self.pop_size)
self.nfe_epoch += len(list_child)
return list_child
def _survive(self, pop=None, pop_child=None):
n_child = len(pop)
pop_child = self.get_sorted_strim_population(pop_child, n_child)
for i in range(0, n_child):
if self.compare_agent(pop_child[i], pop[i]):
pop[i] = deepcopy(pop_child[i])
return pop
def _recalculate_weights(self, pop=None):
fit_total, fit_best, fit_worst = self.get_special_fitness(pop)
for i in range(len(pop)):
if fit_best == fit_worst:
pop[i][self.ID_WEI] = np.random.uniform(0.2, 0.8)
else:
pop[i][self.ID_WEI] = 0.001 + (pop[i][self.ID_TAR][self.ID_FIT] - fit_worst) / (fit_best - fit_worst)
return pop
def evolve(self, epoch):
self.nfe_epoch = 0
### Movement of spiders
self._move_females(epoch)
self._move_males(epoch)
# Recalculate weights
pop = self.pop_females + self.pop_males
pop = self._recalculate_weights(pop)
# Mating Operator
pop_child = self._mating()
pop = self._survive(pop, pop_child)
self.pop = self._recalculate_weights(pop)
self.nfe_per_epoch = self.nfe_epoch
| true | true |
f7f70a5ecfd42971596974d39c584f7e3a1ad0e9 | 196 | py | Python | syslog_parse/message.py | BingerYang/syslog_parse | c5d88c6792d67c4a5cf92e0344475a7f84f59447 | [
"MIT"
] | 2 | 2019-08-25T13:37:35.000Z | 2020-03-19T13:58:34.000Z | syslog_parse/message.py | BingerYang/syslog_parse | c5d88c6792d67c4a5cf92e0344475a7f84f59447 | [
"MIT"
] | 1 | 2020-10-24T04:50:08.000Z | 2020-10-24T04:50:08.000Z | syslog_parse/message.py | BingerYang/syslog_parse | c5d88c6792d67c4a5cf92e0344475a7f84f59447 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from collections import namedtuple
Message = namedtuple('Message',
['facility', 'severity', 'timestamp', 'hostname', 'module', 'digest', 'content'])
| 28 | 102 | 0.596939 |
from collections import namedtuple
Message = namedtuple('Message',
['facility', 'severity', 'timestamp', 'hostname', 'module', 'digest', 'content'])
| true | true |
f7f70a6c061b0a1b0fd0b4a0972ff9ce651c67fd | 518 | py | Python | mysite/storehouse/migrations/0007_user_create_date.py | othmankurdi/storehouse | c702abac6ad7bceef59913485ae7ead4f0f884d7 | [
"MIT"
] | null | null | null | mysite/storehouse/migrations/0007_user_create_date.py | othmankurdi/storehouse | c702abac6ad7bceef59913485ae7ead4f0f884d7 | [
"MIT"
] | 3 | 2021-11-28T10:18:00.000Z | 2021-11-28T10:39:55.000Z | mysite/storehouse/migrations/0007_user_create_date.py | othmankurdi/storehouse | c702abac6ad7bceef59913485ae7ead4f0f884d7 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-11-27 20:17
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('storehouse', '0006_rename_update_time_product_update_date'),
]
operations = [
migrations.AddField(
model_name='user',
name='create_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
| 24.666667 | 93 | 0.65251 |
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('storehouse', '0006_rename_update_time_product_update_date'),
]
operations = [
migrations.AddField(
model_name='user',
name='create_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
| true | true |
f7f70ae3e3cc30f82f990dc17bb654eea7024c34 | 8,868 | py | Python | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/module_utils/network/exos/exos.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/module_utils/network/exos/exos.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/module_utils/network/exos/exos.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.connection import Connection, ConnectionError
_DEVICE_CONNECTION = None
class Cli:
def __init__(self, module):
self._module = module
self._device_configs = {}
self._connection = None
def get_capabilities(self):
"""Returns platform info of the remove device
"""
connection = self._get_connection()
return json.loads(connection.get_capabilities())
def _get_connection(self):
if not self._connection:
self._connection = Connection(self._module._socket_path)
return self._connection
def get_config(self, flags=None):
"""Retrieves the current config from the device or cache
"""
flags = [] if flags is None else flags
if self._device_configs == {}:
connection = self._get_connection()
try:
out = connection.get_config(flags=flags)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
self._device_configs = to_text(out, errors='surrogate_then_replace').strip()
return self._device_configs
def run_commands(self, commands, check_rc=True):
"""Runs list of commands on remote device and returns results
"""
connection = self._get_connection()
try:
response = connection.run_commands(commands=commands, check_rc=check_rc)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return response
def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
conn = self._get_connection()
try:
diff = conn.get_diff(candidate=candidate, running=running, diff_match=diff_match,
diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return diff
class HttpApi:
def __init__(self, module):
self._module = module
self._device_configs = {}
self._connection_obj = None
def get_capabilities(self):
"""Returns platform info of the remove device
"""
try:
capabilities = self._connection.get_capabilities()
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return json.loads(capabilities)
@property
def _connection(self):
if not self._connection_obj:
self._connection_obj = Connection(self._module._socket_path)
return self._connection_obj
def get_config(self, flags=None):
"""Retrieves the current config from the device or cache
"""
flags = [] if flags is None else flags
if self._device_configs == {}:
try:
out = self._connection.get_config(flags=flags)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
self._device_configs = to_text(out, errors='surrogate_then_replace').strip()
return self._device_configs
def run_commands(self, commands, check_rc=True):
"""Runs list of commands on remote device and returns results
"""
try:
response = self._connection.run_commands(commands=commands, check_rc=check_rc)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return response
def send_requests(self, requests):
"""Send a list of http requests to remote device and return results
"""
if requests is None:
raise ValueError("'requests' value is required")
responses = list()
for req in to_list(requests):
try:
response = self._connection.send_request(**req)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
responses.append(response)
return responses
def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
try:
diff = self._connection.get_diff(candidate=candidate, running=running, diff_match=diff_match,
diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return diff
def get_capabilities(module):
conn = get_connection(module)
return conn.get_capabilities()
def get_connection(module):
global _DEVICE_CONNECTION
if not _DEVICE_CONNECTION:
connection_proxy = Connection(module._socket_path)
cap = json.loads(connection_proxy.get_capabilities())
if cap['network_api'] == 'cliconf':
conn = Cli(module)
elif cap['network_api'] == 'exosapi':
conn = HttpApi(module)
else:
module.fail_json(msg='Invalid connection type %s' % cap['network_api'])
_DEVICE_CONNECTION = conn
return _DEVICE_CONNECTION
def get_config(module, flags=None):
flags = None if flags is None else flags
conn = get_connection(module)
return conn.get_config(flags)
def load_config(module, commands):
conn = get_connection(module)
return conn.run_commands(to_command(module, commands))
def run_commands(module, commands, check_rc=True):
conn = get_connection(module)
return conn.run_commands(to_command(module, commands), check_rc=check_rc)
def to_command(module, commands):
transform = ComplexList(dict(
command=dict(key=True),
output=dict(default='text'),
prompt=dict(type='list'),
answer=dict(type='list'),
sendonly=dict(type='bool', default=False),
check_all=dict(type='bool', default=False),
), module)
return transform(to_list(commands))
def send_requests(module, requests):
conn = get_connection(module)
return conn.send_requests(to_request(module, requests))
def to_request(module, requests):
transform = ComplexList(dict(
path=dict(key=True),
method=dict(),
data=dict(type='dict'),
), module)
return transform(to_list(requests))
def get_diff(module, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
conn = get_connection(module)
return conn.get_diff(candidate=candidate, running=running, diff_match=diff_match, diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
| 40.309091 | 160 | 0.689671 |
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.connection import Connection, ConnectionError
_DEVICE_CONNECTION = None
class Cli:
def __init__(self, module):
self._module = module
self._device_configs = {}
self._connection = None
def get_capabilities(self):
connection = self._get_connection()
return json.loads(connection.get_capabilities())
def _get_connection(self):
if not self._connection:
self._connection = Connection(self._module._socket_path)
return self._connection
def get_config(self, flags=None):
flags = [] if flags is None else flags
if self._device_configs == {}:
connection = self._get_connection()
try:
out = connection.get_config(flags=flags)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
self._device_configs = to_text(out, errors='surrogate_then_replace').strip()
return self._device_configs
def run_commands(self, commands, check_rc=True):
connection = self._get_connection()
try:
response = connection.run_commands(commands=commands, check_rc=check_rc)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return response
def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
conn = self._get_connection()
try:
diff = conn.get_diff(candidate=candidate, running=running, diff_match=diff_match,
diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return diff
class HttpApi:
def __init__(self, module):
self._module = module
self._device_configs = {}
self._connection_obj = None
def get_capabilities(self):
try:
capabilities = self._connection.get_capabilities()
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return json.loads(capabilities)
@property
def _connection(self):
if not self._connection_obj:
self._connection_obj = Connection(self._module._socket_path)
return self._connection_obj
def get_config(self, flags=None):
flags = [] if flags is None else flags
if self._device_configs == {}:
try:
out = self._connection.get_config(flags=flags)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
self._device_configs = to_text(out, errors='surrogate_then_replace').strip()
return self._device_configs
def run_commands(self, commands, check_rc=True):
try:
response = self._connection.run_commands(commands=commands, check_rc=check_rc)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return response
def send_requests(self, requests):
if requests is None:
raise ValueError("'requests' value is required")
responses = list()
for req in to_list(requests):
try:
response = self._connection.send_request(**req)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
responses.append(response)
return responses
def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
try:
diff = self._connection.get_diff(candidate=candidate, running=running, diff_match=diff_match,
diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return diff
def get_capabilities(module):
conn = get_connection(module)
return conn.get_capabilities()
def get_connection(module):
global _DEVICE_CONNECTION
if not _DEVICE_CONNECTION:
connection_proxy = Connection(module._socket_path)
cap = json.loads(connection_proxy.get_capabilities())
if cap['network_api'] == 'cliconf':
conn = Cli(module)
elif cap['network_api'] == 'exosapi':
conn = HttpApi(module)
else:
module.fail_json(msg='Invalid connection type %s' % cap['network_api'])
_DEVICE_CONNECTION = conn
return _DEVICE_CONNECTION
def get_config(module, flags=None):
flags = None if flags is None else flags
conn = get_connection(module)
return conn.get_config(flags)
def load_config(module, commands):
conn = get_connection(module)
return conn.run_commands(to_command(module, commands))
def run_commands(module, commands, check_rc=True):
conn = get_connection(module)
return conn.run_commands(to_command(module, commands), check_rc=check_rc)
def to_command(module, commands):
transform = ComplexList(dict(
command=dict(key=True),
output=dict(default='text'),
prompt=dict(type='list'),
answer=dict(type='list'),
sendonly=dict(type='bool', default=False),
check_all=dict(type='bool', default=False),
), module)
return transform(to_list(commands))
def send_requests(module, requests):
conn = get_connection(module)
return conn.send_requests(to_request(module, requests))
def to_request(module, requests):
transform = ComplexList(dict(
path=dict(key=True),
method=dict(),
data=dict(type='dict'),
), module)
return transform(to_list(requests))
def get_diff(module, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
conn = get_connection(module)
return conn.get_diff(candidate=candidate, running=running, diff_match=diff_match, diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
| true | true |
f7f70b7e1e0c6f557a53a74eb09efafeb4968d24 | 9,438 | py | Python | main.py | ArkAngeL43/DBROWSER-V2 | 2b753d919314d5c3bab8eb8d04257fd352da1d44 | [
"MIT"
] | 6 | 2021-07-30T17:42:32.000Z | 2021-09-28T17:48:38.000Z | main.py | ThomTiber/DBROWSER-V2 | 2b753d919314d5c3bab8eb8d04257fd352da1d44 | [
"MIT"
] | null | null | null | main.py | ThomTiber/DBROWSER-V2 | 2b753d919314d5c3bab8eb8d04257fd352da1d44 | [
"MIT"
] | 1 | 2021-10-04T21:46:41.000Z | 2021-10-04T21:46:41.000Z | import os
import sys
import time
import colorama
import pyfiglet
from colorama import Fore
import json
import subprocess
os.system(' clear ')  # start with a clean terminal before the intro animation
def restart_program():
    """Re-exec the current interpreter with the original argv.

    Replaces this process image entirely, so the script starts over
    from the top; this call does not return.
    """
    exe = sys.executable
    os.execl(exe, exe, *sys.argv)
# Directory the script was launched from.
# NOTE(review): curdir is never read in this chunk — confirm it is used
# elsewhere before removing.
curdir = os.getcwd()
def load_animation():
    """Play the "Loading DBROWSER." intro animation, then clear the screen.

    Each frame flips the case of one character of the text (cycling left to
    right) while a 4-character spinner turns at the end of the line.  Runs
    for 30 frames at 75 ms per frame after an initial 1 s pause.
    """
    text = "Loading DBROWSER."
    length = len(text)
    time.sleep(1)
    spinner = "|/-\\|/-\|/-/"  # only indexes 0-3 are ever used
    frame = 0
    for tick in range(30):
        time.sleep(0.075)
        chars = list(text)
        pos = tick % length  # which character to flip this frame
        # Leave the space (32) and the dot (46) alone; toggle letter case.
        if chars[pos] not in (' ', '.'):
            chars[pos] = chars[pos].swapcase()
        text = ''.join(chars)
        sys.stdout.write("\r" + text + spinner[frame])
        sys.stdout.flush()
        frame = (frame + 1) % 4
    os.system("clear")
# Driver program: play the intro animation when executed as a script.
if __name__ == '__main__':
    load_animation()
# NOTE(review): exact duplicate of the restart_program()/curdir pair defined
# earlier in this file; this redefinition shadows the first and could be
# removed.
def restart_program():
    """Re-exec the current interpreter with the original argv (does not return)."""
    python = sys.executable
    os.execl(python, python, * sys.argv)
# Launch directory; appears unused in this chunk — TODO confirm before removing.
curdir = os.getcwd()
def screen_clear():
    """Clear the terminal using the right command for the platform.

    Bug fix: the original referenced bare ``name`` and ``system``, which are
    not defined in this module (``from os import *`` was never done), so any
    call raised NameError.  Qualify both through ``os`` and also handle
    non-Windows platforms, which the original silently skipped.
    """
    if os.name == 'nt':
        os.system('cls')    # Windows
    else:
        os.system('clear')  # POSIX (Linux/macOS)
def CS(X):
    """Clear-sleep helper: pause for ``X`` seconds, then wipe the terminal.

    Used throughout the menu to hold a screen briefly before redrawing.
    """
    time.sleep(X)
    os.system("clear")
# ---------------------------------------------------------------------------
# Interactive menu loop for the DBROW "hacker browser".  Everything below is
# top-level script code: it prints the banner, reads a menu choice into N,
# and then shells out (os.system) to helper scripts and external tools
# (tor, proxychains, netsniff-ng via newterm.sh, wireshark).  The flow is
# built on exact statement order — input() prompts interleaved with
# os.system calls — so only comments were added.  User-facing typos
# ("vserions", "ncie", "fixxed", "packte", "tunned") are runtime strings and
# were deliberately left untouched.
# ---------------------------------------------------------------------------
print(Fore.RED+"")
banner = pyfiglet.figlet_format("DBROW", font = "isometric1" )
CS(2)
print(banner)
print(Fore.RED+" WelC0me to DBROW the hackers browser ")
print(" V2.0")
print(" [1] Just browse the net ")
print(" [2] go to my index for my website ")
print(" [3] just browse google ")
print(" [4] view supported websites ")
print(" [5] see whats new for vserions 2.0 ")
print("--------------------------------------------------------------------------------------------")
# run.py = index
# rung = run google
# run1 = run duckduckgo
N = str(input(" Options ===> "))
####### BROWSE DUCKDUCKGO ###########
# Option 5: print the v2.0 changelog one line per second, then restart.
if '5' == N:
    CS(2)
    print(banner)
    print("-"*40)
    time.sleep(1)
    print(" fixxed web bugs and index bugs ")
    time.sleep(1)
    print(" added netsniff-ng as a packte monitor ")
    time.sleep(1)
    print(" added bash scripts for setup ")
    time.sleep(1)
    print(" added automation ")
    time.sleep(1)
    print(" added json files for easier loads and time ")
    time.sleep(1)
    print(" added newer websites that are supported for the browser itself ")
    time.sleep(1)
    print(" updated CSS ")
    time.sleep(1)
    print(" added bash script that checks for required packages ")
    time.sleep(1)
    print(" added new terminal for gnome where it opens a new term for netsniff ")
    time.sleep(1)
    print(" added termcolor ")
    time.sleep(1)
    print(" added more proxies into the browser ")
    time.sleep(1)
    print(" added a few more lines and took away input for if proxychains is installed ")
    time.sleep(1)
    print(" [!] stay tunned for further updates on the browser [!] ")
    restart_program()
# Option 4: dump the supported-site list stored under the "prints" key of
# links.json, then restart.  NOTE(review): the file handle F is never closed.
if '4' == N:
    F = open('links.json','r+',encoding='utf-8') # open encoding
    data = json.load(F) #load the file
    for x in data['prints']:
        print(x) # print value x in this case the fi;e
        time.sleep(0.1)
    restart_program()
# Option 1: DuckDuckGo browser (run1.py), optionally behind tor+proxychains
# with a netsniff-ng capture terminal (newterm.sh); afterwards offers to open
# the capture file pack.pcap in wireshark.
if '1' == N:
    time.sleep(1)
    print(" [!] running Dark browser [!] ")
    time.sleep(3)
    Yn = str(input(" would you like to run proxys along side Netsniff-ng Y/n? "))
    time.sleep(1)
    # NOTE(review): substring tests ('n' in Yn) mean e.g. "no" and "ny" both
    # match; a reply containing neither 'n' nor exactly 'Y' does nothing.
    if 'n' in Yn:
        time.sleep(1)
        print(" alright then running browser ")
        CS(2)
        print(banner)
        os.system(' sudo python3 run1.py ')
        print(" [!] Stopping tor service and breaking connections [!] ")
        os.system(' sudo service tor stop && clear ')
        print(" would you like to view the cap file from netsniff? ")
        V = str(input(" Y/n: ==> "))
        if 'y' in V:
            time.sleep(1)
            os.system(' sudo wireshark pack.pcap ')
            time.sleep(1)
            print(" have a nice one :D [!] ")
            sys.exit()
        elif 'Y' in V:
            time.sleep(1)
            os.system(' clear' )
            os.system(' sudo wireshark pack.pcap ')
            print(" Have a ncie one ")
            sys.exit()
        # Second chain: only reached when neither 'y' nor 'Y' matched above.
        if 'n' in V:
            CS(2)
            print(" have a nice one :D [!] ")
            sys.exit()
        elif 'N' in V:
            CS(2)
            print(" Have a ncie one ")
            sys.exit()
    if 'Y' == Yn:
        time.sleep(1)
        print(" [=] alright then running browser with proxychains and tor service ")
        CS(2)
        print(banner)
        # newterm.sh opens the netsniff-ng capture terminal.
        os.system(" chmod +x ./newterm.sh && ./newterm.sh ")
        os.system(' sudo service tor start && proxychains python3 run1.py')
        print(" [!] Stopping tor service and breaking connections [!] ")
        os.system(' sudo service tor stop && clear ')
        print(" would you like to view the cap file from netsniff? ")
        V = str(input(" Y/n: ==> "))
        if 'y' in V:
            time.sleep(1)
            os.system(' sudo wireshark pack.pcap ')
            time.sleep(1)
            print(" have a nice one :D [!] ")
            sys.exit()
        elif 'Y' in V:
            time.sleep(1)
            os.system(' clear' )
            os.system(' sudo wireshark pack.pcap ')
            print(" Have a ncie one ")
            sys.exit()
        if 'n' in V:
            CS(2)
            print(" have a nice one :D [!] ")
            sys.exit()
        elif 'N' in V:
            CS(2)
            print(" Have a ncie one ")
            sys.exit()
##############################################
###########BROWSE GOOGLE
# Option 3: Google browser (rung.py), with or without tor+proxychains.
elif '3' == N:
    time.sleep(1)
    Yn = str(input(" Would you like to use proxies Y/n? "))
    if 'Y' in Yn:
        CS(2)
        time.sleep(1)
        print(banner)
        print(" [=] alright then running browser [=] ")
        os.system(" chmod +x ./newterm.sh && ./newterm.sh ")
        os.system(' sudo service tor start && proxychains python3 rung.py ')
        print(" [!] Stopping tor service and breaking connections [!] ")
        os.system(' sudo service tor stop && clear ')
        CS(2)
        print(" would you like to view the cap file from netsniff? ")
        V = str(input(" Y/n: ==> "))
        if 'y' in V:
            time.sleep(1)
            os.system(' sudo wireshark pack.pcap ')
            time.sleep(1)
            print(" have a nice one :D [!] ")
            sys.exit()
        elif 'Y' in V:
            time.sleep(1)
            os.system(' clear' )
            os.system(' sudo wireshark pack.pcap ')
            print(" Have a ncie one ")
            sys.exit()
        if 'n' in V:
            CS(2)
            print(" have a nice one :D [!] ")
            sys.exit()
        elif 'N' in V:
            CS(2)
            print(" Have a ncie one ")
            sys.exit()
    if 'n' in Yn:
        CS(2)
        time.sleep(1)
        print(banner)
        os.system(' python3 rung.py ')
        os.system(' clear ')
        print(" [!] Stopping tor service and breaking connections [!] ")
        os.system(' sudo service tor stop && clear ')
        sys.exit()
###################BROWSE MTY INDEX################
# Option 2: the author's index page (run.py).
elif '2' == N:
    time.sleep(1)
    Yn = str(input(" Would you like to use proxies Y/n? "))
    print(" [!] running Dark browser [!] ")
    if 'Y' in Yn:
        CS(2)
        print(banner)
        print(" [=] alright then running script [=] ")
        time.sleep(1)
        os.system(" chmod +x ./newterm.sh && ./newterm.sh ")
        os.system(' sudo service tor start && proxychains python3 run.py ')
        print(" [!] Stopping tor service and breaking connections [!] ")
        os.system(' sudo service tor stop && clear ')
        CS(2)
        print(" would you like to view the cap file from netsniff? ")
        V = str(input(" You: ==> "))
        if 'y' in V:
            time.sleep(1)
            os.system(' sudo wireshark pack.pcap ')
            time.sleep(1)
            print(" have a nice one :D [!] ")
            sys.exit()
        elif 'Y' in V:
            time.sleep(1)
            os.system(' sudo wireshark pack.pcap ')
            time.sleep(1)
            print(" Thanks for stopping by :D [+] ")
            sys.exit()
        elif 'n' in V:
            time.sleep(1)
            print(" have a nice one :D [+] ")
            sys.exit()
    # NOTE(review): exact-equality here ('n' == Yn) unlike the substring
    # tests used elsewhere; this branch falls through to the end of the
    # script (no restart/exit, and tor is not actually stopped).
    if 'n' == Yn:
        CS(2)
        print(" [=] alright then running script [=] ")
        print(banner)
        time.sleep(1)
        os.system(' python3 run.py ')
        print(" [!] Stopping tor service and breaking connections [!] ")
# Any other input: complain and restart the whole script.
else:
    print(" [!] that doesnt seem to be a command ")
    restart_program()
| 28.862385 | 101 | 0.475207 | import os
import sys
import time
import colorama
import pyfiglet
from colorama import Fore
import json
import subprocess
os.system(' clear ')
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
curdir = os.getcwd()
def load_animation():
load_str = "Loading DBROWSER."
ls_len = len(load_str)
time.sleep(1)
animation = "|/-\\|/-\|/-/"
anicount = 0
counttime = 0
i = 0
while (counttime != 30):
time.sleep(0.075)
load_str_list = list(load_str)
x = ord(load_str_list[i])
y = 0
if x != 32 and x != 46:
if x>90:
y = x-32
else:
y = x + 32
load_str_list[i]= chr(y)
res =''
for j in range(ls_len):
res = res + load_str_list[j]
sys.stdout.write("\r"+res + animation[anicount])
sys.stdout.flush()
load_str = res
anicount = (anicount + 1)% 4
i =(i + 1)% ls_len
counttime = counttime + 1
else:
os.system("clear")
if __name__ == '__main__':
load_animation()
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
curdir = os.getcwd()
def screen_clear():
if name == 'nt':
_ = system('cls')
def CS(X):
time.sleep(X)
os.system("clear")
print(Fore.RED+"")
banner = pyfiglet.figlet_format("DBROW", font = "isometric1" )
CS(2)
print(banner)
print(Fore.RED+" WelC0me to DBROW the hackers browser ")
print(" V2.0")
print(" [1] Just browse the net ")
print(" [2] go to my index for my website ")
print(" [3] just browse google ")
print(" [4] view supported websites ")
print(" [5] see whats new for vserions 2.0 ")
print("--------------------------------------------------------------------------------------------")
N = str(input(" Options ===> "))
.sleep(1)
print(" added automation ")
time.sleep(1)
print(" added json files for easier loads and time ")
time.sleep(1)
print(" added newer websites that are supported for the browser itself ")
time.sleep(1)
print(" updated CSS ")
time.sleep(1)
print(" added bash script that checks for required packages ")
time.sleep(1)
print(" added new terminal for gnome where it opens a new term for netsniff ")
time.sleep(1)
print(" added termcolor ")
time.sleep(1)
print(" added more proxies into the browser ")
time.sleep(1)
print(" added a few more lines and took away input for if proxychains is installed ")
time.sleep(1)
print(" [!] stay tunned for further updates on the browser [!] ")
restart_program()
if '4' == N:
F = open('links.json','r+',encoding='utf-8')
data = json.load(F)
for x in data['prints']:
print(x)
time.sleep(0.1)
restart_program()
if '1' == N:
time.sleep(1)
print(" [!] running Dark browser [!] ")
time.sleep(3)
Yn = str(input(" would you like to run proxys along side Netsniff-ng Y/n? "))
time.sleep(1)
if 'n' in Yn:
time.sleep(1)
print(" alright then running browser ")
CS(2)
print(banner)
os.system(' sudo python3 run1.py ')
print(" [!] Stopping tor service and breaking connections [!] ")
os.system(' sudo service tor stop && clear ')
print(" would you like to view the cap file from netsniff? ")
V = str(input(" Y/n: ==> "))
if 'y' in V:
time.sleep(1)
os.system(' sudo wireshark pack.pcap ')
time.sleep(1)
print(" have a nice one :D [!] ")
sys.exit()
elif 'Y' in V:
time.sleep(1)
os.system(' clear' )
os.system(' sudo wireshark pack.pcap ')
print(" Have a ncie one ")
sys.exit()
if 'n' in V:
CS(2)
print(" have a nice one :D [!] ")
sys.exit()
elif 'N' in V:
CS(2)
print(" Have a ncie one ")
sys.exit()
if 'Y' == Yn:
time.sleep(1)
print(" [=] alright then running browser with proxychains and tor service ")
CS(2)
print(banner)
os.system(" chmod +x ./newterm.sh && ./newterm.sh ")
os.system(' sudo service tor start && proxychains python3 run1.py')
print(" [!] Stopping tor service and breaking connections [!] ")
os.system(' sudo service tor stop && clear ')
print(" would you like to view the cap file from netsniff? ")
V = str(input(" Y/n: ==> "))
if 'y' in V:
time.sleep(1)
os.system(' sudo wireshark pack.pcap ')
time.sleep(1)
print(" have a nice one :D [!] ")
sys.exit()
elif 'Y' in V:
time.sleep(1)
os.system(' clear' )
os.system(' sudo wireshark pack.pcap ')
print(" Have a ncie one ")
sys.exit()
if 'n' in V:
CS(2)
print(" have a nice one :D [!] ")
sys.exit()
elif 'N' in V:
CS(2)
print(" Have a ncie one ")
sys.exit()
elif 'N' in V:
CS(2)
print(" Have a ncie one ")
sys.exit()
if 'n' in Yn:
CS(2)
time.sleep(1)
print(banner)
os.system(' python3 rung.py ')
os.system(' clear ')
print(" [!] Stopping tor service and breaking connections [!] ")
os.system(' sudo service tor stop && clear ')
sys.exit()
elif 'Y' in V:
time.sleep(1)
os.system(' sudo wireshark pack.pcap ')
time.sleep(1)
print(" Thanks for stopping by :D [+] ")
sys.exit()
elif 'n' in V:
time.sleep(1)
print(" have a nice one :D [+] ")
sys.exit()
if 'n' == Yn:
CS(2)
print(" [=] alright then running script [=] ")
print(banner)
time.sleep(1)
os.system(' python3 run.py ')
print(" [!] Stopping tor service and breaking connections [!] ")
else:
print(" [!] that doesnt seem to be a command ")
restart_program()
| true | true |
f7f70bba530c2576066b46b6f29fec1d0183d8fb | 2,533 | py | Python | modules/frame.py | manparvesh/chenab | b91d4fe9a3295526d46a8424ec22ed8a4f5e110f | [
"MIT"
] | 1 | 2018-06-16T20:19:07.000Z | 2018-06-16T20:19:07.000Z | modules/frame.py | manparvesh/chenab | b91d4fe9a3295526d46a8424ec22ed8a4f5e110f | [
"MIT"
] | null | null | null | modules/frame.py | manparvesh/chenab | b91d4fe9a3295526d46a8424ec22ed8a4f5e110f | [
"MIT"
] | null | null | null | import collections
Block = collections.namedtuple("Block", "type, handler, stack_height")
class Frame(object):
"""
collection of attributes with no methods
the attributes include the code object created by the compiler;
the local, global, and builtin namespaces; a reference to the previous frame;
a data stack; a block stack; and the last instruction executed
"""
def __init__(self, code_object, global_names, local_names, previous_frame):
self.code_obj = code_object
self.global_names = global_names
self.local_names = local_names
self.prev_frame = previous_frame
self.stack = []
if previous_frame:
self.builtin_names = previous_frame.builtin_names
else:
self.builtin_names = local_names['__builtins__']
if hasattr(self.builtin_names, '__dict__'):
self.builtin_names = self.builtin_names.__dict__
self.last_instruction = 0
self.block_stack = []
"""
Data stack manipulation
"""
def top(self):
"""
top
:return:
"""
return self.stack[-1]
def pop(self):
"""
pop
:return:
"""
return self.stack.pop()
def push(self, *values):
"""
push
:param values:
"""
self.stack.extend(values)
def pop_n(self, n):
"""Pop a number of values from the value stack.
A list of `n` values is returned, the deepest value first.
"""
if n:
ret = self.stack[-n:]
self.stack[-n:] = []
return ret
else:
return []
# Block stack manipulation
def push_block(self, b_type, handler=None):
"""
push block
:param b_type:
:param handler:
"""
stack_height = len(self.stack)
self.block_stack.append(Block(b_type, handler, stack_height))
def pop_block(self):
"""
pop block
:return:
"""
return self.block_stack.pop()
def unwind_block(self, block):
"""Unwind the values on the data stack when a given block is finished."""
if block.type == 'except-handler':
offset = 3
else:
offset = 0
while len(self.stack) > block.stack_height + offset:
self.pop()
if block.type == 'except-handler':
traceback, value, exctype = self.pop_n(3)
return exctype, value, traceback
| 26.113402 | 81 | 0.564153 | import collections
Block = collections.namedtuple("Block", "type, handler, stack_height")
class Frame(object):
def __init__(self, code_object, global_names, local_names, previous_frame):
self.code_obj = code_object
self.global_names = global_names
self.local_names = local_names
self.prev_frame = previous_frame
self.stack = []
if previous_frame:
self.builtin_names = previous_frame.builtin_names
else:
self.builtin_names = local_names['__builtins__']
if hasattr(self.builtin_names, '__dict__'):
self.builtin_names = self.builtin_names.__dict__
self.last_instruction = 0
self.block_stack = []
def top(self):
return self.stack[-1]
def pop(self):
return self.stack.pop()
def push(self, *values):
self.stack.extend(values)
def pop_n(self, n):
if n:
ret = self.stack[-n:]
self.stack[-n:] = []
return ret
else:
return []
def push_block(self, b_type, handler=None):
stack_height = len(self.stack)
self.block_stack.append(Block(b_type, handler, stack_height))
def pop_block(self):
return self.block_stack.pop()
def unwind_block(self, block):
if block.type == 'except-handler':
offset = 3
else:
offset = 0
while len(self.stack) > block.stack_height + offset:
self.pop()
if block.type == 'except-handler':
traceback, value, exctype = self.pop_n(3)
return exctype, value, traceback
| true | true |
f7f70c1eae77eab98bb230631a9efba5e604e4ad | 19,347 | py | Python | ml-agents/mlagents/trainers/ppo/trainer.py | yueqiw/ml-agents | 499120a45c6a0203ff39770ded9a9dc6069ffa46 | [
"Apache-2.0"
] | null | null | null | ml-agents/mlagents/trainers/ppo/trainer.py | yueqiw/ml-agents | 499120a45c6a0203ff39770ded9a9dc6069ffa46 | [
"Apache-2.0"
] | null | null | null | ml-agents/mlagents/trainers/ppo/trainer.py | yueqiw/ml-agents | 499120a45c6a0203ff39770ded9a9dc6069ffa46 | [
"Apache-2.0"
] | null | null | null | # # Unity ML-Agents Toolkit
# ## ML-Agent Learning (PPO)
# Contains an implementation of PPO as described (https://arxiv.org/abs/1707.06347).
import logging
import os
from collections import deque
import numpy as np
import tensorflow as tf
from mlagents.envs import AllBrainInfo, BrainInfo
from mlagents.trainers.buffer import Buffer
from mlagents.trainers.ppo.policy import PPOPolicy
from mlagents.trainers.trainer import UnityTrainerException, Trainer
logger = logging.getLogger("mlagents.envs")
class PPOTrainer(Trainer):
    """The PPOTrainer is an implementation of the PPO algorithm.

    Collects experiences per agent into a Buffer, computes GAE advantages
    when an agent's episode ends (or its horizon is exceeded), and
    periodically runs mini-batch PPO updates through its PPOPolicy.
    """

    def __init__(self, sess, brain, reward_buff_cap, trainer_parameters, training, seed, run_id):
        """
        Responsible for collecting experiences and training PPO model.
        :param sess: Tensorflow session.
        :param trainer_parameters: The parameters for the trainer (dictionary).
        :param training: Whether the trainer is set for training.
        """
        super(PPOTrainer, self).__init__(sess, brain.brain_name, trainer_parameters, training, run_id)
        # Every key listed here must be present in trainer_parameters.
        self.param_keys = ['batch_size', 'beta', 'buffer_size', 'epsilon', 'gamma', 'hidden_units', 'lambd',
                           'learning_rate', 'max_steps', 'normalize', 'num_epoch', 'num_layers',
                           'time_horizon', 'sequence_length', 'summary_freq', 'use_recurrent',
                           'graph_scope', 'summary_path', 'memory_size', 'use_curiosity', 'curiosity_strength',
                           'curiosity_enc_size', 'only_vec']
        #print(self.param_keys)
        #print(trainer_parameters)
        for k in self.param_keys:
            if k not in trainer_parameters:
                raise UnityTrainerException("The hyperparameter {0} could not be found for the PPO trainer of "
                                            "brain {1}.".format(k, brain.brain_name))
        self.use_curiosity = bool(trainer_parameters['use_curiosity'])
        self.step = 0
        self.policy = PPOPolicy(seed, brain, trainer_parameters,
                                sess, self.is_training)
        # Rolling per-summary-period statistics, appended to by the methods
        # below and presumably flushed by the base Trainer.
        stats = {'cumulative_reward': [], 'episode_length': [], 'value_estimate': [],
                 'entropy': [], 'value_loss': [], 'policy_loss': [], 'learning_rate': []}
        if self.use_curiosity:
            stats['forward_loss'] = []
            stats['inverse_loss'] = []
            stats['intrinsic_reward'] = []
            # NOTE(review): intrinsic_rewards only exists when use_curiosity
            # is set; every later access is guarded by the same flag.
            self.intrinsic_rewards = {}
        self.stats = stats
        self.training_buffer = Buffer()
        self.cumulative_rewards = {}    # per-agent running episode reward
        self._reward_buffer = deque(maxlen=reward_buff_cap)  # recent finished-episode rewards
        self.episode_steps = {}         # per-agent running episode length
        self.summary_path = trainer_parameters['summary_path']
        if not os.path.exists(self.summary_path):
            os.makedirs(self.summary_path)
        self.summary_writer = tf.summary.FileWriter(self.summary_path)

    def __str__(self):
        return '''Hyperparameters for the PPO Trainer of brain {0}: \n{1}'''.format(
            self.brain_name, '\n'.join(['\t{0}:\t{1}'.format(x, self.trainer_parameters[x]) for x in self.param_keys]))

    @property
    def parameters(self):
        """
        Returns the trainer parameters of the trainer.
        """
        return self.trainer_parameters

    @property
    def get_max_steps(self):
        """
        Returns the maximum number of steps. Is used to know when the trainer should be stopped.
        :return: The maximum number of steps of the trainer
        """
        return float(self.trainer_parameters['max_steps'])

    @property
    def get_step(self):
        """
        Returns the number of steps the trainer has performed
        :return: the step count of the trainer
        """
        return self.step

    @property
    def reward_buffer(self):
        """
        Returns the reward buffer. The reward buffer contains the cumulative
        rewards of the most recent episodes completed by agents using this
        trainer.
        :return: the reward buffer.
        """
        return self._reward_buffer

    def increment_step_and_update_last_reward(self):
        """
        Increment the step count of the trainer and Updates the last reward
        """
        # Pushes the mean recent episode reward into the policy graph (when
        # any episodes have finished), then advances the policy step counter.
        if len(self.stats['cumulative_reward']) > 0:
            mean_reward = np.mean(self.stats['cumulative_reward'])
            self.policy.update_reward(mean_reward)
        self.policy.increment_step()
        self.step = self.policy.get_current_step()

    def take_action(self, all_brain_info: AllBrainInfo):
        """
        Decides actions given observations information, and takes them in environment.
        :param all_brain_info: A dictionary of brain names and BrainInfo from environment.
        :return: a tuple containing action, memories, values and an object
        to be passed to add experiences
        """
        curr_brain_info = all_brain_info[self.brain_name]
        if len(curr_brain_info.agents) == 0:
            # No agents for this brain right now: empty 5-tuple.
            return [], [], [], None, None
        run_out = self.policy.evaluate(curr_brain_info)
        self.stats['value_estimate'].append(run_out['value'].mean())
        self.stats['entropy'].append(run_out['entropy'].mean())
        self.stats['learning_rate'].append(run_out['learning_rate'])
        if self.policy.use_recurrent:
            return run_out['action'], run_out['memory_out'], None, \
                   run_out['value'], run_out
        else:
            return run_out['action'], None, None, run_out['value'], run_out

    def construct_curr_info(self, next_info: BrainInfo) -> BrainInfo:
        """
        Constructs a BrainInfo which contains the most recent previous experiences for all agents info
        which correspond to the agents in a provided next_info.
        :BrainInfo next_info: A t+1 BrainInfo.
        :return: curr_info: Reconstructed BrainInfo to match agents of next_info.
        """
        # NOTE(review): initialized with a single inner list but indexed with
        # [i] below — this raises IndexError if a brain has more than one
        # visual observation.  Confirm against the brains this trainer serves.
        visual_observations = [[]]
        vector_observations = []
        text_observations = []
        memories = []
        rewards = []
        local_dones = []
        max_reacheds = []
        agents = []
        prev_vector_actions = []
        prev_text_actions = []
        for agent_id in next_info.agents:
            agent_brain_info = self.training_buffer[agent_id].last_brain_info
            if agent_brain_info is None:
                # First sighting of this agent: fall back to next_info itself.
                agent_brain_info = next_info
            agent_index = agent_brain_info.agents.index(agent_id)
            for i in range(len(next_info.visual_observations)):
                visual_observations[i].append(agent_brain_info.visual_observations[i][agent_index])
            vector_observations.append(agent_brain_info.vector_observations[agent_index])
            text_observations.append(agent_brain_info.text_observations[agent_index])
            if self.policy.use_recurrent:
                # NOTE(review): `len(x > 0)` takes the length of an
                # element-wise comparison array, which equals len(x) — it
                # behaves as a "memories non-empty" test by accident;
                # `len(x) > 0` was presumably intended.
                if len(agent_brain_info.memories > 0):
                    memories.append(agent_brain_info.memories[agent_index])
                else:
                    memories.append(self.policy.make_empty_memory(1))
            rewards.append(agent_brain_info.rewards[agent_index])
            local_dones.append(agent_brain_info.local_done[agent_index])
            max_reacheds.append(agent_brain_info.max_reached[agent_index])
            agents.append(agent_brain_info.agents[agent_index])
            prev_vector_actions.append(agent_brain_info.previous_vector_actions[agent_index])
            prev_text_actions.append(agent_brain_info.previous_text_actions[agent_index])
        if self.policy.use_recurrent:
            memories = np.vstack(memories)
        curr_info = BrainInfo(visual_observations, vector_observations, text_observations,
                              memories, rewards, agents, local_dones, prev_vector_actions,
                              prev_text_actions, max_reacheds)
        return curr_info

    def add_experiences(self, curr_all_info: AllBrainInfo, next_all_info: AllBrainInfo, take_action_outputs):
        """
        Adds experiences to each agent's experience history.
        :param curr_all_info: Dictionary of all current brains and corresponding BrainInfo.
        :param next_all_info: Dictionary of all current brains and corresponding BrainInfo.
        :param take_action_outputs: The outputs of the take action method.
        """
        curr_info = curr_all_info[self.brain_name]
        next_info = next_all_info[self.brain_name]
        # Remember the latest (state, policy output) pair per agent so the
        # next call can pair it with its successor state.
        for agent_id in curr_info.agents:
            self.training_buffer[agent_id].last_brain_info = curr_info
            self.training_buffer[agent_id].last_take_action_outputs = take_action_outputs
        if curr_info.agents != next_info.agents:
            # Agent sets differ between steps: rebuild a matching "current"
            # view from each agent's stored last_brain_info.
            curr_to_use = self.construct_curr_info(next_info)
        else:
            curr_to_use = curr_info
        intrinsic_rewards = self.policy.get_intrinsic_rewards(curr_to_use, next_info)
        for agent_id in next_info.agents:
            stored_info = self.training_buffer[agent_id].last_brain_info
            stored_take_action_outputs = self.training_buffer[agent_id].last_take_action_outputs
            if stored_info is not None:
                idx = stored_info.agents.index(agent_id)
                next_idx = next_info.agents.index(agent_id)
                if not stored_info.local_done[idx]:
                    # Record the transition (obs, action, reward, value, ...)
                    # into this agent's per-agent buffer.
                    for i, _ in enumerate(stored_info.visual_observations):
                        self.training_buffer[agent_id]['visual_obs%d' % i].append(
                            stored_info.visual_observations[i][idx])
                        self.training_buffer[agent_id]['next_visual_obs%d' % i].append(
                            next_info.visual_observations[i][next_idx])
                    if self.policy.use_vec_obs:
                        self.training_buffer[agent_id]['vector_obs'].append(stored_info.vector_observations[idx])
                        self.training_buffer[agent_id]['next_vector_in'].append(
                            next_info.vector_observations[next_idx])
                    if self.policy.use_recurrent:
                        if stored_info.memories.shape[1] == 0:
                            stored_info.memories = np.zeros((len(stored_info.agents), self.policy.m_size))
                        self.training_buffer[agent_id]['memory'].append(stored_info.memories[idx])
                    actions = stored_take_action_outputs['action']
                    if self.policy.use_continuous_act:
                        actions_pre = stored_take_action_outputs['pre_action']
                        self.training_buffer[agent_id]['actions_pre'].append(actions_pre[idx])
                    else:
                        self.training_buffer[agent_id]['action_mask'].append(
                            stored_info.action_masks[idx])
                    a_dist = stored_take_action_outputs['log_probs']
                    value = stored_take_action_outputs['value']
                    self.training_buffer[agent_id]['actions'].append(actions[idx])
                    self.training_buffer[agent_id]['prev_action'].append(stored_info.previous_vector_actions[idx])
                    self.training_buffer[agent_id]['masks'].append(1.0)
                    if self.use_curiosity:
                        # Stored reward = extrinsic + curiosity bonus.
                        self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx] +
                                                                         intrinsic_rewards[next_idx])
                    else:
                        self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx])
                    self.training_buffer[agent_id]['action_probs'].append(a_dist[idx])
                    self.training_buffer[agent_id]['value_estimates'].append(value[idx][0])
                # Episode bookkeeping (runs even when the stored step was
                # terminal): cumulative extrinsic reward, curiosity reward,
                # and step count per agent.
                if agent_id not in self.cumulative_rewards:
                    self.cumulative_rewards[agent_id] = 0
                self.cumulative_rewards[agent_id] += next_info.rewards[next_idx]
                if self.use_curiosity:
                    if agent_id not in self.intrinsic_rewards:
                        self.intrinsic_rewards[agent_id] = 0
                    self.intrinsic_rewards[agent_id] += intrinsic_rewards[next_idx]
                if not next_info.local_done[next_idx]:
                    if agent_id not in self.episode_steps:
                        self.episode_steps[agent_id] = 0
                    self.episode_steps[agent_id] += 1

    def process_experiences(self, current_info: AllBrainInfo, new_info: AllBrainInfo):
        """
        Checks agent histories for processing condition, and processes them as necessary.
        Processing involves calculating value and advantage targets for model updating step.
        :param current_info: Dictionary of all current brains and corresponding BrainInfo.
        :param new_info: Dictionary of all next brains and corresponding BrainInfo.
        """
        info = new_info[self.brain_name]
        for l in range(len(info.agents)):
            agent_actions = self.training_buffer[info.agents[l]]['actions']
            # Trigger when the agent finished an episode OR exceeded the
            # rollout horizon, and actually has collected steps.
            if ((info.local_done[l] or len(agent_actions) > self.trainer_parameters['time_horizon'])
                    and len(agent_actions) > 0):
                agent_id = info.agents[l]
                if info.local_done[l] and not info.max_reached[l]:
                    # True terminal state: no bootstrap value.
                    value_next = 0.0
                else:
                    # Horizon cut or max-step interruption: bootstrap with
                    # the value estimate of the last observed state.
                    if info.max_reached[l]:
                        bootstrapping_info = self.training_buffer[agent_id].last_brain_info
                        idx = bootstrapping_info.agents.index(agent_id)
                    else:
                        bootstrapping_info = info
                        idx = l
                    value_next = self.policy.get_value_estimate(bootstrapping_info, idx)
                # GAE advantages and lambda-returns for this trajectory.
                self.training_buffer[agent_id]['advantages'].set(
                    get_gae(
                        rewards=self.training_buffer[agent_id]['rewards'].get_batch(),
                        value_estimates=self.training_buffer[agent_id]['value_estimates'].get_batch(),
                        value_next=value_next,
                        gamma=self.trainer_parameters['gamma'],
                        lambd=self.trainer_parameters['lambd']))
                self.training_buffer[agent_id]['discounted_returns'].set(
                    self.training_buffer[agent_id]['advantages'].get_batch()
                    + self.training_buffer[agent_id]['value_estimates'].get_batch())
                # Move the finished trajectory into the shared update buffer
                # and clear the per-agent buffer.
                self.training_buffer.append_update_buffer(agent_id, batch_size=None,
                                                          training_length=self.policy.sequence_length)
                self.training_buffer[agent_id].reset_agent()
                if info.local_done[l]:
                    # Episode finished: record stats and reset counters.
                    self.stats['cumulative_reward'].append(
                        self.cumulative_rewards.get(agent_id, 0))
                    self.reward_buffer.appendleft(self.cumulative_rewards.get(agent_id, 0))
                    self.stats['episode_length'].append(
                        self.episode_steps.get(agent_id, 0))
                    self.cumulative_rewards[agent_id] = 0
                    self.episode_steps[agent_id] = 0
                    if self.use_curiosity:
                        self.stats['intrinsic_reward'].append(
                            self.intrinsic_rewards.get(agent_id, 0))
                        self.intrinsic_rewards[agent_id] = 0

    def end_episode(self):
        """
        A signal that the Episode has ended. The buffer must be reset.
        Get only called when the academy resets.
        """
        self.training_buffer.reset_all()
        for agent_id in self.cumulative_rewards:
            self.cumulative_rewards[agent_id] = 0
        for agent_id in self.episode_steps:
            self.episode_steps[agent_id] = 0
        if self.use_curiosity:
            for agent_id in self.intrinsic_rewards:
                self.intrinsic_rewards[agent_id] = 0

    def is_ready_update(self):
        """
        Returns whether or not the trainer has enough elements to run update model
        :return: A boolean corresponding to whether or not update_model() can be run
        """
        # Ready once the update buffer holds at least buffer_size steps,
        # expressed in units of recurrent sequences.
        size_of_buffer = len(self.training_buffer.update_buffer['actions'])
        return size_of_buffer > max(int(self.trainer_parameters['buffer_size'] / self.policy.sequence_length), 1)

    def update_policy(self):
        """
        Uses training_buffer to update the policy.
        """
        # Mini-batch size in sequences (>=1).
        n_sequences = max(int(self.trainer_parameters['batch_size'] / self.policy.sequence_length), 1)
        value_total, policy_total, forward_total, inverse_total = [], [], [], []
        # Normalize advantages in-place across the whole update buffer.
        advantages = self.training_buffer.update_buffer['advantages'].get_batch()
        self.training_buffer.update_buffer['advantages'].set(
            (advantages - advantages.mean()) / (advantages.std() + 1e-10))
        num_epoch = self.trainer_parameters['num_epoch']
        for k in range(num_epoch):
            # Fresh shuffle each epoch, then sequential mini-batches.
            self.training_buffer.update_buffer.shuffle()
            buffer = self.training_buffer.update_buffer
            for l in range(len(self.training_buffer.update_buffer['actions']) // n_sequences):
                start = l * n_sequences
                end = (l + 1) * n_sequences
                run_out = self.policy.update(buffer.make_mini_batch(start, end), n_sequences)
                value_total.append(run_out['value_loss'])
                policy_total.append(np.abs(run_out['policy_loss']))
                if self.use_curiosity:
                    inverse_total.append(run_out['inverse_loss'])
                    forward_total.append(run_out['forward_loss'])
        self.stats['value_loss'].append(np.mean(value_total))
        self.stats['policy_loss'].append(np.mean(policy_total))
        if self.use_curiosity:
            self.stats['forward_loss'].append(np.mean(forward_total))
            self.stats['inverse_loss'].append(np.mean(inverse_total))
        # Buffer is consumed by an update; start collecting afresh.
        self.training_buffer.reset_update_buffer()
def discount_rewards(r, gamma=0.99, value_next=0.0):
    """
    Computes discounted sum of future rewards for use in updating value estimate.
    :param r: List of rewards.
    :param gamma: Discount factor.
    :param value_next: T+1 value estimate for returns calculation.
    :return: discounted sum of future rewards as list.
    """
    returns = np.zeros_like(r)
    # Walk the rewards backwards, folding in the bootstrap value first:
    # G_t = r_t + gamma * G_{t+1}, with G_T seeded by value_next.
    acc = value_next
    for t in range(r.size - 1, -1, -1):
        acc = r[t] + gamma * acc
        returns[t] = acc
    return returns
def get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):
    """
    Computes generalized advantage estimate for use in updating policy.
    :param rewards: list of rewards for time-steps t to T.
    :param value_next: Value estimate for time-step T+1.
    :param value_estimates: list of value estimates for time-steps t to T.
    :param gamma: Discount factor.
    :param lambd: GAE weighing factor.
    :return: list of advantage estimates for time-steps t to T.
    """
    # Append the bootstrap value, form the TD residuals
    # delta_t = r_t + gamma * V(s_{t+1}) - V(s_t), then discount them with
    # gamma*lambda to obtain the GAE advantages.
    values = np.asarray(value_estimates.tolist() + [value_next])
    deltas = rewards + gamma * values[1:] - values[:-1]
    return discount_rewards(r=deltas, gamma=gamma * lambd)
| 49.735219 | 119 | 0.622732 | np
import tensorflow as tf
from mlagents.envs import AllBrainInfo, BrainInfo
from mlagents.trainers.buffer import Buffer
from mlagents.trainers.ppo.policy import PPOPolicy
from mlagents.trainers.trainer import UnityTrainerException, Trainer
logger = logging.getLogger("mlagents.envs")
class PPOTrainer(Trainer):
    """PPO trainer for a single Unity brain.

    Collects experience through its PPOPolicy, accumulates it per-agent in a
    Buffer, computes GAE advantages at episode end / horizon cutoff, and
    periodically runs minibatch PPO updates.
    """

    def __init__(self, sess, brain, reward_buff_cap, trainer_parameters, training, seed, run_id):
        """Validate hyperparameters, build the policy and bookkeeping state.

        Raises:
            UnityTrainerException: if any required key is missing from
                ``trainer_parameters``.
        """
        super(PPOTrainer, self).__init__(sess, brain.brain_name, trainer_parameters, training, run_id)
        # Every key listed here must be present in trainer_parameters.
        self.param_keys = ['batch_size', 'beta', 'buffer_size', 'epsilon', 'gamma', 'hidden_units', 'lambd',
                           'learning_rate', 'max_steps', 'normalize', 'num_epoch', 'num_layers',
                           'time_horizon', 'sequence_length', 'summary_freq', 'use_recurrent',
                           'graph_scope', 'summary_path', 'memory_size', 'use_curiosity', 'curiosity_strength',
                           'curiosity_enc_size', 'only_vec']
        for k in self.param_keys:
            if k not in trainer_parameters:
                raise UnityTrainerException("The hyperparameter {0} could not be found for the PPO trainer of "
                                            "brain {1}.".format(k, brain.brain_name))
        self.use_curiosity = bool(trainer_parameters['use_curiosity'])
        self.step = 0
        self.policy = PPOPolicy(seed, brain, trainer_parameters,
                                sess, self.is_training)
        # Running statistics, appended to during rollout and update phases.
        stats = {'cumulative_reward': [], 'episode_length': [], 'value_estimate': [],
                 'entropy': [], 'value_loss': [], 'policy_loss': [], 'learning_rate': []}
        if self.use_curiosity:
            stats['forward_loss'] = []
            stats['inverse_loss'] = []
            stats['intrinsic_reward'] = []
            # agent_id -> accumulated intrinsic (curiosity) reward this episode
            self.intrinsic_rewards = {}
        self.stats = stats
        self.training_buffer = Buffer()
        # agent_id -> accumulated environment reward this episode
        self.cumulative_rewards = {}
        # Bounded history of finished-episode rewards.
        self._reward_buffer = deque(maxlen=reward_buff_cap)
        # agent_id -> steps taken in the current episode
        self.episode_steps = {}
        self.summary_path = trainer_parameters['summary_path']
        if not os.path.exists(self.summary_path):
            os.makedirs(self.summary_path)
        self.summary_writer = tf.summary.FileWriter(self.summary_path)

    def __str__(self):
        """Human-readable dump of this trainer's hyperparameters."""
        return '''Hyperparameters for the PPO Trainer of brain {0}: \n{1}'''.format(
            self.brain_name, '\n'.join(['\t{0}:\t{1}'.format(x, self.trainer_parameters[x]) for x in self.param_keys]))

    @property
    def parameters(self):
        """The trainer-parameter dict this trainer was configured with."""
        return self.trainer_parameters

    @property
    def get_max_steps(self):
        """Maximum number of training steps, as a float."""
        return float(self.trainer_parameters['max_steps'])

    @property
    def get_step(self):
        """Number of steps the policy has been trained for."""
        return self.step

    @property
    def reward_buffer(self):
        """Deque of the most recent finished-episode cumulative rewards."""
        return self._reward_buffer

    def increment_step_and_update_last_reward(self):
        """Advance the policy step counter and report the mean episode reward."""
        if len(self.stats['cumulative_reward']) > 0:
            mean_reward = np.mean(self.stats['cumulative_reward'])
            self.policy.update_reward(mean_reward)
        self.policy.increment_step()
        self.step = self.policy.get_current_step()

    def take_action(self, all_brain_info: AllBrainInfo):
        """Evaluate the policy for this brain's agents.

        Returns a 5-tuple: (actions, memories, texts, values, run_out dict).
        Memories are None for non-recurrent policies; texts are always None.
        """
        curr_brain_info = all_brain_info[self.brain_name]
        if len(curr_brain_info.agents) == 0:
            return [], [], [], None, None
        run_out = self.policy.evaluate(curr_brain_info)
        self.stats['value_estimate'].append(run_out['value'].mean())
        self.stats['entropy'].append(run_out['entropy'].mean())
        self.stats['learning_rate'].append(run_out['learning_rate'])
        if self.policy.use_recurrent:
            return run_out['action'], run_out['memory_out'], None, \
                   run_out['value'], run_out
        else:
            return run_out['action'], None, None, run_out['value'], run_out

    def construct_curr_info(self, next_info: BrainInfo) -> BrainInfo:
        """Rebuild a "current" BrainInfo aligned with the agents in next_info.

        Used when the agent set changed between steps: for each agent in
        next_info, pull its last stored BrainInfo (falling back to next_info
        itself when none was stored) and re-assemble the fields in order.
        """
        visual_observations = [[]]
        vector_observations = []
        text_observations = []
        memories = []
        rewards = []
        local_dones = []
        max_reacheds = []
        agents = []
        prev_vector_actions = []
        prev_text_actions = []
        for agent_id in next_info.agents:
            agent_brain_info = self.training_buffer[agent_id].last_brain_info
            if agent_brain_info is None:
                agent_brain_info = next_info
            agent_index = agent_brain_info.agents.index(agent_id)
            for i in range(len(next_info.visual_observations)):
                visual_observations[i].append(agent_brain_info.visual_observations[i][agent_index])
            vector_observations.append(agent_brain_info.vector_observations[agent_index])
            text_observations.append(agent_brain_info.text_observations[agent_index])
            if self.policy.use_recurrent:
                # NOTE(review): `len(memories > 0)` likely intended
                # `len(memories) > 0`; it only works when memories is an
                # array whose row count matches its truthiness — confirm.
                if len(agent_brain_info.memories > 0):
                    memories.append(agent_brain_info.memories[agent_index])
                else:
                    memories.append(self.policy.make_empty_memory(1))
            rewards.append(agent_brain_info.rewards[agent_index])
            local_dones.append(agent_brain_info.local_done[agent_index])
            max_reacheds.append(agent_brain_info.max_reached[agent_index])
            agents.append(agent_brain_info.agents[agent_index])
            prev_vector_actions.append(agent_brain_info.previous_vector_actions[agent_index])
            prev_text_actions.append(agent_brain_info.previous_text_actions[agent_index])
        if self.policy.use_recurrent:
            memories = np.vstack(memories)
        curr_info = BrainInfo(visual_observations, vector_observations, text_observations,
                              memories, rewards, agents, local_dones, prev_vector_actions,
                              prev_text_actions, max_reacheds)
        return curr_info

    def add_experiences(self, curr_all_info: AllBrainInfo, next_all_info: AllBrainInfo, take_action_outputs):
        """Record one environment transition per agent into the training buffer.

        Stores observations, actions, log-probs, values and rewards
        (environment reward plus curiosity intrinsic reward when enabled),
        and updates per-agent cumulative reward / step counters.
        """
        curr_info = curr_all_info[self.brain_name]
        next_info = next_all_info[self.brain_name]
        for agent_id in curr_info.agents:
            self.training_buffer[agent_id].last_brain_info = curr_info
            self.training_buffer[agent_id].last_take_action_outputs = take_action_outputs
        # If the agent set changed, realign the "current" info per agent.
        if curr_info.agents != next_info.agents:
            curr_to_use = self.construct_curr_info(next_info)
        else:
            curr_to_use = curr_info
        intrinsic_rewards = self.policy.get_intrinsic_rewards(curr_to_use, next_info)
        for agent_id in next_info.agents:
            stored_info = self.training_buffer[agent_id].last_brain_info
            stored_take_action_outputs = self.training_buffer[agent_id].last_take_action_outputs
            if stored_info is not None:
                idx = stored_info.agents.index(agent_id)
                next_idx = next_info.agents.index(agent_id)
                # Only record a transition if the previous step did not end
                # the episode for this agent.
                if not stored_info.local_done[idx]:
                    for i, _ in enumerate(stored_info.visual_observations):
                        self.training_buffer[agent_id]['visual_obs%d' % i].append(
                            stored_info.visual_observations[i][idx])
                        self.training_buffer[agent_id]['next_visual_obs%d' % i].append(
                            next_info.visual_observations[i][next_idx])
                    if self.policy.use_vec_obs:
                        self.training_buffer[agent_id]['vector_obs'].append(stored_info.vector_observations[idx])
                        self.training_buffer[agent_id]['next_vector_in'].append(
                            next_info.vector_observations[next_idx])
                    if self.policy.use_recurrent:
                        if stored_info.memories.shape[1] == 0:
                            stored_info.memories = np.zeros((len(stored_info.agents), self.policy.m_size))
                        self.training_buffer[agent_id]['memory'].append(stored_info.memories[idx])
                    actions = stored_take_action_outputs['action']
                    if self.policy.use_continuous_act:
                        actions_pre = stored_take_action_outputs['pre_action']
                        self.training_buffer[agent_id]['actions_pre'].append(actions_pre[idx])
                    else:
                        self.training_buffer[agent_id]['action_mask'].append(
                            stored_info.action_masks[idx])
                    a_dist = stored_take_action_outputs['log_probs']
                    value = stored_take_action_outputs['value']
                    self.training_buffer[agent_id]['actions'].append(actions[idx])
                    self.training_buffer[agent_id]['prev_action'].append(stored_info.previous_vector_actions[idx])
                    self.training_buffer[agent_id]['masks'].append(1.0)
                    if self.use_curiosity:
                        # Training reward = environment reward + curiosity bonus.
                        self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx] +
                                                                         intrinsic_rewards[next_idx])
                    else:
                        self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx])
                    self.training_buffer[agent_id]['action_probs'].append(a_dist[idx])
                    self.training_buffer[agent_id]['value_estimates'].append(value[idx][0])
                    if agent_id not in self.cumulative_rewards:
                        self.cumulative_rewards[agent_id] = 0
                    self.cumulative_rewards[agent_id] += next_info.rewards[next_idx]
                    if self.use_curiosity:
                        if agent_id not in self.intrinsic_rewards:
                            self.intrinsic_rewards[agent_id] = 0
                        self.intrinsic_rewards[agent_id] += intrinsic_rewards[next_idx]
                if not next_info.local_done[next_idx]:
                    if agent_id not in self.episode_steps:
                        self.episode_steps[agent_id] = 0
                    self.episode_steps[agent_id] += 1

    def process_experiences(self, current_info: AllBrainInfo, new_info: AllBrainInfo):
        """Close out agent trajectories that ended or hit the time horizon.

        Computes GAE advantages and discounted returns for each finished
        trajectory, appends it to the update buffer, and resets per-agent
        episode statistics for agents that terminated.
        """
        info = new_info[self.brain_name]
        for l in range(len(info.agents)):
            agent_actions = self.training_buffer[info.agents[l]]['actions']
            if ((info.local_done[l] or len(agent_actions) > self.trainer_parameters['time_horizon'])
                    and len(agent_actions) > 0):
                agent_id = info.agents[l]
                # Bootstrap value is 0 for true terminations; otherwise
                # estimate V(s) at the cutoff state.
                if info.local_done[l] and not info.max_reached[l]:
                    value_next = 0.0
                else:
                    if info.max_reached[l]:
                        bootstrapping_info = self.training_buffer[agent_id].last_brain_info
                        idx = bootstrapping_info.agents.index(agent_id)
                    else:
                        bootstrapping_info = info
                        idx = l
                    value_next = self.policy.get_value_estimate(bootstrapping_info, idx)
                self.training_buffer[agent_id]['advantages'].set(
                    get_gae(
                        rewards=self.training_buffer[agent_id]['rewards'].get_batch(),
                        value_estimates=self.training_buffer[agent_id]['value_estimates'].get_batch(),
                        value_next=value_next,
                        gamma=self.trainer_parameters['gamma'],
                        lambd=self.trainer_parameters['lambd']))
                # Returns used as the value-function target: advantage + V(s).
                self.training_buffer[agent_id]['discounted_returns'].set(
                    self.training_buffer[agent_id]['advantages'].get_batch()
                    + self.training_buffer[agent_id]['value_estimates'].get_batch())
                self.training_buffer.append_update_buffer(agent_id, batch_size=None,
                                                          training_length=self.policy.sequence_length)
                self.training_buffer[agent_id].reset_agent()
                if info.local_done[l]:
                    self.stats['cumulative_reward'].append(
                        self.cumulative_rewards.get(agent_id, 0))
                    self.reward_buffer.appendleft(self.cumulative_rewards.get(agent_id, 0))
                    self.stats['episode_length'].append(
                        self.episode_steps.get(agent_id, 0))
                    self.cumulative_rewards[agent_id] = 0
                    self.episode_steps[agent_id] = 0
                    if self.use_curiosity:
                        self.stats['intrinsic_reward'].append(
                            self.intrinsic_rewards.get(agent_id, 0))
                        self.intrinsic_rewards[agent_id] = 0

    def end_episode(self):
        """Reset all per-agent buffers and episode counters."""
        self.training_buffer.reset_all()
        for agent_id in self.cumulative_rewards:
            self.cumulative_rewards[agent_id] = 0
        for agent_id in self.episode_steps:
            self.episode_steps[agent_id] = 0
        if self.use_curiosity:
            for agent_id in self.intrinsic_rewards:
                self.intrinsic_rewards[agent_id] = 0

    def is_ready_update(self):
        """Whether the update buffer holds enough sequences to train on."""
        size_of_buffer = len(self.training_buffer.update_buffer['actions'])
        return size_of_buffer > max(int(self.trainer_parameters['buffer_size'] / self.policy.sequence_length), 1)

    def update_policy(self):
        """Run num_epoch epochs of minibatch PPO updates, then clear the buffer.

        Advantages are normalized (zero mean, unit variance) before training;
        mean losses for the whole update are appended to self.stats.
        """
        n_sequences = max(int(self.trainer_parameters['batch_size'] / self.policy.sequence_length), 1)
        value_total, policy_total, forward_total, inverse_total = [], [], [], []
        advantages = self.training_buffer.update_buffer['advantages'].get_batch()
        self.training_buffer.update_buffer['advantages'].set(
            (advantages - advantages.mean()) / (advantages.std() + 1e-10))
        num_epoch = self.trainer_parameters['num_epoch']
        for k in range(num_epoch):
            self.training_buffer.update_buffer.shuffle()
            buffer = self.training_buffer.update_buffer
            for l in range(len(self.training_buffer.update_buffer['actions']) // n_sequences):
                start = l * n_sequences
                end = (l + 1) * n_sequences
                run_out = self.policy.update(buffer.make_mini_batch(start, end), n_sequences)
                value_total.append(run_out['value_loss'])
                policy_total.append(np.abs(run_out['policy_loss']))
                if self.use_curiosity:
                    inverse_total.append(run_out['inverse_loss'])
                    forward_total.append(run_out['forward_loss'])
        self.stats['value_loss'].append(np.mean(value_total))
        self.stats['policy_loss'].append(np.mean(policy_total))
        if self.use_curiosity:
            self.stats['forward_loss'].append(np.mean(forward_total))
            self.stats['inverse_loss'].append(np.mean(inverse_total))
        self.training_buffer.reset_update_buffer()
def discount_rewards(r, gamma=0.99, value_next=0.0):
    """Discounted cumulative sum of rewards, scanned from the end.

    out[t] = r[t] + gamma * out[t + 1], with `value_next` used as the
    bootstrap value past the final step. Returns an array like `r`.
    """
    out = np.zeros_like(r)
    acc = value_next
    for idx in range(r.size - 1, -1, -1):
        acc = gamma * acc + r[idx]
        out[idx] = acc
    return out
def get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):
    """Generalized Advantage Estimate (GAE).

    Appends the bootstrap value to the value estimates, forms the TD errors
    delta_t = r_t + gamma * V(s_{t+1}) - V(s_t), and discounts them with
    gamma * lambd via discount_rewards.
    """
    values = np.asarray(value_estimates.tolist() + [value_next])
    td_errors = rewards + gamma * values[1:] - values[:-1]
    return discount_rewards(r=td_errors, gamma=gamma * lambd)
| true | true |
f7f70c5704b0f62eb733995a6c895799f78fe3b1 | 4,148 | py | Python | paddlespeech/s2t/training/optimizer.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | [
"Apache-2.0"
] | 1,540 | 2017-11-14T13:26:33.000Z | 2021-11-09T14:05:08.000Z | paddlespeech/s2t/training/optimizer.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | [
"Apache-2.0"
] | 599 | 2017-11-14T13:19:12.000Z | 2021-11-09T01:58:26.000Z | paddlespeech/s2t/training/optimizer.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | [
"Apache-2.0"
] | 449 | 2017-11-14T12:48:46.000Z | 2021-11-06T09:34:33.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
from typing import Any
from typing import Dict
from typing import Text
import paddle
from paddle.optimizer import Optimizer
from paddle.regularizer import L2Decay
from paddlespeech.s2t.training.gradclip import ClipGradByGlobalNormWithLog
from paddlespeech.s2t.utils.dynamic_import import dynamic_import
from paddlespeech.s2t.utils.dynamic_import import instance_class
from paddlespeech.s2t.utils.log import Log
__all__ = ["OptimizerFactory"]
logger = Log(__name__).getlog()
OPTIMIZER_DICT = {
"sgd": "paddle.optimizer:SGD",
"momentum": "paddle.optimizer:Momentum",
"adadelta": "paddle.optimizer:Adadelta",
"adam": "paddle.optimizer:Adam",
"adamw": "paddle.optimizer:AdamW",
}
def register_optimizer(cls):
    """Class decorator that registers *cls* in ``OPTIMIZER_DICT``.

    The key is the lowercased class name; the value is the
    ``module:ClassName`` path consumed by ``dynamic_import``.
    Returns the class unchanged so it can be used as a decorator.
    """
    alias = cls.__name__.lower()
    # Use the alias computed above instead of recomputing the key
    # (the original computed `alias` and then ignored it).
    OPTIMIZER_DICT[alias] = cls.__module__ + ":" + cls.__name__
    return cls
@register_optimizer
class Noam(paddle.optimizer.Adam):
    """Adam variant used with the Noam learning-rate schedule.

    Seem to: espnet/nets/pytorch_backend/transformer/optimizer.py
    """

    def __init__(self,
                 learning_rate=0,
                 beta1=0.9,
                 beta2=0.98,
                 epsilon=1e-9,
                 parameters=None,
                 weight_decay=None,
                 grad_clip=None,
                 lazy_mode=False,
                 multi_precision=False,
                 name=None):
        super().__init__(
            learning_rate=learning_rate,
            beta1=beta1,
            beta2=beta2,
            epsilon=epsilon,
            parameters=parameters,
            weight_decay=weight_decay,
            grad_clip=grad_clip,
            lazy_mode=lazy_mode,
            multi_precision=multi_precision,
            name=name)

    def __repr__(self):
        echo = f"<{self.__class__.__module__}.{self.__class__.__name__} object at {hex(id(self))}> "
        echo += f"learning_rate: {self._learning_rate}, "
        echo += f"(beta1: {self._beta1} beta2: {self._beta2}), "
        echo += f"epsilon: {self._epsilon}"
        # Bug fix: the string was built but never returned, so __repr__
        # returned None and repr(opt) raised TypeError.
        return echo
def dynamic_import_optimizer(module):
    """Resolve an Optimizer class from its name or import path.

    Args:
        module (str): ``module_name:class_name`` or an alias registered
            in ``OPTIMIZER_DICT``.

    Returns:
        type: the resolved Optimizer subclass.
    """
    resolved = dynamic_import(module, OPTIMIZER_DICT)
    # Guard against registering something that is not an Optimizer.
    assert issubclass(resolved,
                      Optimizer), f"{module} does not implement Optimizer"
    return resolved
class OptimizerFactory():
    """Builds a paddle Optimizer instance from a name plus a kwargs dict."""

    @classmethod
    def from_args(cls, name: str, args: Dict[Text, Any]):
        """Instantiate the optimizer registered under `name`.

        `args` must contain "parameters" and "learning_rate"; optional
        "grad_clip" / "weight_decay" entries are converted into the
        corresponding paddle objects before construction.
        """
        assert "parameters" in args, "parameters not in args."
        assert "learning_rate" in args, "learning_rate not in args."
        if "grad_clip" in args:
            grad_clip = ClipGradByGlobalNormWithLog(args['grad_clip'])
        else:
            grad_clip = None
        if "weight_decay" in args:
            weight_decay = L2Decay(args['weight_decay'])
        else:
            weight_decay = None
        if weight_decay:
            logger.info(f'<WeightDecay - {weight_decay}>')
        if grad_clip:
            logger.info(f'<GradClip - {grad_clip}>')
        module_class = dynamic_import_optimizer(name.lower())
        args.update({"grad_clip": grad_clip, "weight_decay": weight_decay})
        opt = instance_class(module_class, args)
        # NOTE(review): vars(opt) is the *instance* __dict__, so "__repr__"
        # (defined on the class) is almost never found here — verify intent.
        if "__repr__" in vars(opt):
            logger.info(f"{opt}")
        else:
            logger.info(
                f"<Optimizer {module_class.__module__}.{module_class.__name__}> LR: {args['learning_rate']}"
            )
        return opt
| 33.723577 | 108 | 0.652604 |
from typing import Any
from typing import Dict
from typing import Text
import paddle
from paddle.optimizer import Optimizer
from paddle.regularizer import L2Decay
from paddlespeech.s2t.training.gradclip import ClipGradByGlobalNormWithLog
from paddlespeech.s2t.utils.dynamic_import import dynamic_import
from paddlespeech.s2t.utils.dynamic_import import instance_class
from paddlespeech.s2t.utils.log import Log
__all__ = ["OptimizerFactory"]
logger = Log(__name__).getlog()
OPTIMIZER_DICT = {
"sgd": "paddle.optimizer:SGD",
"momentum": "paddle.optimizer:Momentum",
"adadelta": "paddle.optimizer:Adadelta",
"adam": "paddle.optimizer:Adam",
"adamw": "paddle.optimizer:AdamW",
}
def register_optimizer(cls):
    """Class decorator that registers *cls* in ``OPTIMIZER_DICT``.

    The key is the lowercased class name; the value is the
    ``module:ClassName`` path consumed by ``dynamic_import``.
    """
    alias = cls.__name__.lower()
    # Use the alias computed above instead of recomputing the key.
    OPTIMIZER_DICT[alias] = cls.__module__ + ":" + cls.__name__
    return cls
@register_optimizer
class Noam(paddle.optimizer.Adam):
    """Adam variant used with the Noam learning-rate schedule."""

    def __init__(self,
                 learning_rate=0,
                 beta1=0.9,
                 beta2=0.98,
                 epsilon=1e-9,
                 parameters=None,
                 weight_decay=None,
                 grad_clip=None,
                 lazy_mode=False,
                 multi_precision=False,
                 name=None):
        super().__init__(
            learning_rate=learning_rate,
            beta1=beta1,
            beta2=beta2,
            epsilon=epsilon,
            parameters=parameters,
            weight_decay=weight_decay,
            grad_clip=grad_clip,
            lazy_mode=lazy_mode,
            multi_precision=multi_precision,
            name=name)

    def __repr__(self):
        echo = f"<{self.__class__.__module__}.{self.__class__.__name__} object at {hex(id(self))}> "
        echo += f"learning_rate: {self._learning_rate}, "
        echo += f"(beta1: {self._beta1} beta2: {self._beta2}), "
        echo += f"epsilon: {self._epsilon}"
        # Bug fix: the string was built but never returned, so __repr__
        # returned None and repr(opt) raised TypeError.
        return echo
def dynamic_import_optimizer(module):
    """Resolve an Optimizer class from ``module_name:class_name`` or an
    alias registered in ``OPTIMIZER_DICT``; asserts it subclasses Optimizer."""
    module_class = dynamic_import(module, OPTIMIZER_DICT)
    assert issubclass(module_class,
                      Optimizer), f"{module} does not implement Optimizer"
    return module_class
class OptimizerFactory():
    """Builds a paddle Optimizer instance from a name plus a kwargs dict."""

    @classmethod
    def from_args(cls, name: str, args: Dict[Text, Any]):
        """Instantiate the optimizer registered under `name`.

        `args` must contain "parameters" and "learning_rate"; optional
        "grad_clip" / "weight_decay" entries are converted into the
        corresponding paddle objects before construction.
        """
        assert "parameters" in args, "parameters not in args."
        assert "learning_rate" in args, "learning_rate not in args."
        grad_clip = ClipGradByGlobalNormWithLog(
            args['grad_clip']) if "grad_clip" in args else None
        weight_decay = L2Decay(
            args['weight_decay']) if "weight_decay" in args else None
        if weight_decay:
            logger.info(f'<WeightDecay - {weight_decay}>')
        if grad_clip:
            logger.info(f'<GradClip - {grad_clip}>')
        module_class = dynamic_import_optimizer(name.lower())
        args.update({"grad_clip": grad_clip, "weight_decay": weight_decay})
        opt = instance_class(module_class, args)
        # NOTE(review): vars(opt) is the *instance* __dict__, so "__repr__"
        # (defined on the class) is almost never found here — verify intent.
        if "__repr__" in vars(opt):
            logger.info(f"{opt}")
        else:
            logger.info(
                f"<Optimizer {module_class.__module__}.{module_class.__name__}> LR: {args['learning_rate']}"
            )
        return opt
| true | true |
f7f70f3e857c7fdd7abe636dc4ad200a8c0402c9 | 2,054 | py | Python | metriche.py | ferr26/toolMusicPlagiarism | cc6985d9847ca5589b5a0792845b7e85ce80e634 | [
"MIT"
] | null | null | null | metriche.py | ferr26/toolMusicPlagiarism | cc6985d9847ca5589b5a0792845b7e85ce80e634 | [
"MIT"
] | null | null | null | metriche.py | ferr26/toolMusicPlagiarism | cc6985d9847ca5589b5a0792845b7e85ce80e634 | [
"MIT"
] | null | null | null | import textdistance as td
import math
import numpy as np
global y_predNorm
global y_trueNorm
# Token-based similarity measures (thin wrappers around textdistance).
def jaccard(y_predNorm,y_trueNorm):
    """Jaccard similarity of the two normalized sequences (via textdistance)."""
    return td.jaccard.similarity(y_predNorm, y_trueNorm)
def coseno(y_predNorm,y_trueNorm):
    """Cosine similarity of the two normalized sequences (via textdistance)."""
    return td.cosine.similarity(y_predNorm, y_trueNorm)
# NOTE: original comment "da fare" (Italian for "to do") — marked unfinished.
def cityblock_distance(A, B):
    """City-block (Manhattan) distance: sum of |a - b| over paired elements."""
    abs_diffs = [abs(x - y) for (x, y) in zip(A, B)]
    return np.sum(abs_diffs)
def dice_coef(y_predNorm,y_trueNorm):
    """Sorensen-Dice coefficient of the two sequences (via textdistance)."""
    return td.sorensen_dice.similarity(y_predNorm, y_trueNorm)
def overlap_coe(y_predNorm,y_trueNorm):
    """Overlap coefficient of the two sequences (via textdistance)."""
    return td.overlap.similarity(y_trueNorm, y_predNorm)
def tanimoto(y_predNorm,y_trueNorm):
    """Tanimoto similarity of the two sequences (via textdistance)."""
    return td.tanimoto.similarity(y_trueNorm, y_predNorm)
#def ngrams(X,Y):
# return NGram.compare(X,Y)
def lcs(X, Y):
    """Length of the longest common subsequence of sequences X and Y.

    Classic DP recurrence, but with a rolling one-row table: O(len(Y))
    memory instead of the full O(len(X) * len(Y)) matrix, same result.
    """
    m = len(X)
    n = len(Y)
    prev = [0] * (n + 1)
    for i in range(1, m + 1):
        curr = [0] * (n + 1)
        for j in range(1, n + 1):
            if X[i - 1] == Y[j - 1]:
                curr[j] = prev[j - 1] + 1
            else:
                curr[j] = max(prev[j], curr[j - 1])
        prev = curr
    # prev[n] holds the LCS length of X[0..m-1] and Y[0..n-1].
    return prev[n]
# end of function lcs
#edit based
def hamming(y_predNorm,y_trueNorm):
    """Hamming similarity of the two sequences (via textdistance)."""
    return td.hamming.similarity(y_trueNorm, y_predNorm)
def Lev(y_predNorm,y_trueNorm):
    """Levenshtein similarity of the two sequences (via textdistance)."""
    return td.levenshtein.similarity(y_trueNorm, y_predNorm)
def jaro_winkler(y_predNorm,y_trueNorm):
    """Jaro-Winkler similarity of the two sequences (via textdistance)."""
    return td.jaro_winkler.similarity(y_trueNorm, y_predNorm)
def jaro(y_predNorm,y_trueNorm):
    """Jaro similarity of the two sequences (via textdistance)."""
    return td.jaro.similarity(y_trueNorm, y_predNorm)
def damerau_levenshtein(y_predNorm,y_trueNorm):
    """Damerau-Levenshtein similarity of the two sequences (via textdistance)."""
    return td.damerau_levenshtein.similarity(y_trueNorm, y_predNorm)
def needleman_wunsch(y_predNorm,y_trueNorm):
    """Needleman-Wunsch alignment similarity (via textdistance)."""
    return td.needleman_wunsch.similarity(y_trueNorm, y_predNorm)
def smith_waterman(y_predNorm,y_trueNorm):
    """Smith-Waterman local alignment similarity (via textdistance)."""
    return td.smith_waterman.similarity(y_trueNorm, y_predNorm)
| 24.746988 | 68 | 0.684518 | import textdistance as td
import math
import numpy as np
global y_predNorm
global y_trueNorm
# Token-based similarity measures (thin wrappers around textdistance).
def jaccard(y_predNorm,y_trueNorm):
    """Jaccard similarity of the two normalized sequences (via textdistance)."""
    return td.jaccard.similarity(y_predNorm, y_trueNorm)
def coseno(y_predNorm,y_trueNorm):
    """Cosine similarity of the two normalized sequences (via textdistance)."""
    return td.cosine.similarity(y_predNorm, y_trueNorm)
def cityblock_distance(A, B):
    """City-block (Manhattan) distance: sum of |a - b| over paired elements."""
    result = np.sum([abs(a - b) for (a, b) in zip(A, B)])
    return result
def dice_coef(y_predNorm,y_trueNorm):
    """Sorensen-Dice coefficient of the two sequences (via textdistance)."""
    return td.sorensen_dice.similarity(y_predNorm, y_trueNorm)
def overlap_coe(y_predNorm,y_trueNorm):
    """Overlap coefficient of the two sequences (via textdistance)."""
    return td.overlap.similarity(y_trueNorm, y_predNorm)
def tanimoto(y_predNorm,y_trueNorm):
    """Tanimoto similarity of the two sequences (via textdistance)."""
    return td.tanimoto.similarity(y_trueNorm, y_predNorm)
def lcs(X, Y):
    """Length of the longest common subsequence of sequences X and Y.

    Classic DP recurrence, but with a rolling one-row table: O(len(Y))
    memory instead of the full O(len(X) * len(Y)) matrix, same result.
    """
    m = len(X)
    n = len(Y)
    prev = [0] * (n + 1)
    for i in range(1, m + 1):
        curr = [0] * (n + 1)
        for j in range(1, n + 1):
            if X[i - 1] == Y[j - 1]:
                curr[j] = prev[j - 1] + 1
            else:
                curr[j] = max(prev[j], curr[j - 1])
        prev = curr
    return prev[n]
# Edit-based similarity measures (thin wrappers around textdistance).
def hamming(y_predNorm,y_trueNorm):
    """Hamming similarity of the two sequences (via textdistance)."""
    return td.hamming.similarity(y_trueNorm, y_predNorm)
def Lev(y_predNorm,y_trueNorm):
    """Levenshtein similarity of the two sequences (via textdistance)."""
    return td.levenshtein.similarity(y_trueNorm, y_predNorm)
def jaro_winkler(y_predNorm,y_trueNorm):
    """Jaro-Winkler similarity of the two sequences (via textdistance)."""
    return td.jaro_winkler.similarity(y_trueNorm, y_predNorm)
def jaro(y_predNorm,y_trueNorm):
    """Jaro similarity of the two sequences (via textdistance)."""
    return td.jaro.similarity(y_trueNorm, y_predNorm)
def damerau_levenshtein(y_predNorm,y_trueNorm):
    """Damerau-Levenshtein similarity of the two sequences (via textdistance)."""
    return td.damerau_levenshtein.similarity(y_trueNorm, y_predNorm)
def needleman_wunsch(y_predNorm,y_trueNorm):
    """Needleman-Wunsch alignment similarity (via textdistance)."""
    return td.needleman_wunsch.similarity(y_trueNorm, y_predNorm)
def smith_waterman(y_predNorm,y_trueNorm):
    """Smith-Waterman local alignment similarity (via textdistance)."""
    return td.smith_waterman.similarity(y_trueNorm, y_predNorm)
| true | true |
f7f710dfe8ec9eba4b1abe779369d7eae6491957 | 8,770 | py | Python | ddsp/training/ddsp_run.py | Tiamat-Tech/ddsp | cd57829b9caa8643d37994caf6e1ee0a7fc11ad8 | [
"Apache-2.0"
] | 2,148 | 2020-01-14T20:08:54.000Z | 2022-03-29T22:17:08.000Z | ddsp/training/ddsp_run.py | Tiamat-Tech/ddsp | cd57829b9caa8643d37994caf6e1ee0a7fc11ad8 | [
"Apache-2.0"
] | 197 | 2020-01-15T15:22:29.000Z | 2022-03-24T22:50:00.000Z | ddsp/training/ddsp_run.py | Tiamat-Tech/ddsp | cd57829b9caa8643d37994caf6e1ee0a7fc11ad8 | [
"Apache-2.0"
] | 261 | 2020-01-15T08:16:03.000Z | 2022-03-28T08:42:07.000Z | # Copyright 2021 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Train, evaluate, or sample (from) a ddsp model.
Usage:
================================================================================
For training, you need to specify --gin_file for both the model and the dataset.
You can optionally specify additional params with --gin_param.
The pip install installs a `ddsp_run` script that can be called directly.
================================================================================
ddsp_run \
--mode=train \
--alsologtostderr \
--save_dir=/tmp/$USER-ddsp-0 \
--gin_file=models/ae.gin \
--gin_file=datasets/nsynth.gin \
--gin_param=batch_size=16
================================================================================
For evaluation and sampling, only the dataset file is required.
================================================================================
ddsp_run \
--mode=eval \
--alsologtostderr \
--save_dir=/tmp/$USER-ddsp-0 \
--gin_file=datasets/nsynth.gin
ddsp_run \
--mode=sample \
--alsologtostderr \
--save_dir=/tmp/$USER-ddsp-0 \
--gin_file=datasets/nsynth.gin
================================================================================
The directory `gin/papers/` stores configs that give the specific models and
datasets used for a paper's experiments, so only require one gin file to train.
================================================================================
ddsp_run \
--mode=train \
--alsologtostderr \
--save_dir=/tmp/$USER-ddsp-0 \
--gin_file=papers/iclr2020/nsynth_ae.gin
"""
import os
import time
from absl import app
from absl import flags
from absl import logging
from ddsp.training import cloud
from ddsp.training import eval_util
from ddsp.training import models
from ddsp.training import train_util
from ddsp.training import trainers
import gin
import pkg_resources
import tensorflow.compat.v2 as tf
gfile = tf.io.gfile
FLAGS = flags.FLAGS
# Program flags.
flags.DEFINE_enum('mode', 'train', ['train', 'eval', 'sample'],
'Whether to train, evaluate, or sample from the model.')
flags.DEFINE_string('save_dir', '/tmp/ddsp',
'Path where checkpoints and summary events will be saved '
'during training and evaluation.')
flags.DEFINE_string('restore_dir', '',
'Path from which checkpoints will be restored before '
'training. Can be different than the save_dir.')
flags.DEFINE_string('tpu', '', 'Address of the TPU. No TPU if left blank.')
flags.DEFINE_string('cluster_config', '',
'Worker-specific JSON string for multiworker setup. '
'For more information see train_util.get_strategy().')
flags.DEFINE_boolean('allow_memory_growth', False,
'Whether to grow the GPU memory usage as is needed by the '
'process. Prevents crashes on GPUs with smaller memory.')
flags.DEFINE_boolean('hypertune', False,
'Enable metric reporting for hyperparameter tuning, such '
'as on Google Cloud AI-Platform.')
flags.DEFINE_float('early_stop_loss_value', None,
'Stops training early when the `total_loss` reaches below '
'this value during training.')
# Gin config flags.
flags.DEFINE_multi_string('gin_search_path', [],
'Additional gin file search paths.')
flags.DEFINE_multi_string('gin_file', [],
'List of paths to the config files. If file '
'in gstorage bucket specify whole gstorage path: '
'gs://bucket-name/dir/in/bucket/file.gin.')
flags.DEFINE_multi_string('gin_param', [],
'Newline separated list of Gin parameter bindings.')
# Evaluation/sampling specific flags.
flags.DEFINE_boolean('run_once', False, 'Whether evaluation will run once.')
flags.DEFINE_integer('initial_delay_secs', None,
'Time to wait before evaluation starts')
GIN_PATH = pkg_resources.resource_filename(__name__, 'gin')
def delay_start():
  """Optionally delay the start of the run."""
  wait_secs = FLAGS.initial_delay_secs
  if not wait_secs:
    return
  logging.info('Waiting for %i second(s)', wait_secs)
  time.sleep(wait_secs)
def parse_gin(restore_dir):
  """Parse gin config from --gin_file, --gin_param, and the model directory.

  Later parse calls override earlier ones, so precedence (lowest to
  highest) is: optimization/eval defaults, the saved operative config in
  `restore_dir` (if any), then the user's --gin_file / --gin_param flags.
  """
  # Enable parsing gin files on Google Cloud.
  gin.config.register_file_reader(tf.io.gfile.GFile, tf.io.gfile.exists)
  # Add user folders to the gin search path.
  for gin_search_path in [GIN_PATH] + FLAGS.gin_search_path:
    gin.add_config_file_search_path(gin_search_path)
  # Parse gin configs, later calls override earlier ones.
  with gin.unlock_config():
    # Optimization defaults.
    use_tpu = bool(FLAGS.tpu)
    opt_default = 'base.gin' if not use_tpu else 'base_tpu.gin'
    gin.parse_config_file(os.path.join('optimization', opt_default))
    eval_default = 'eval/basic.gin'
    gin.parse_config_file(eval_default)
    # Load operative_config if it exists (model has already trained).
    try:
      operative_config = train_util.get_latest_operative_config(restore_dir)
      logging.info('Using operative config: %s', operative_config)
      operative_config = cloud.make_file_paths_local(operative_config, GIN_PATH)
      gin.parse_config_file(operative_config, skip_unknown=True)
    except FileNotFoundError:
      logging.info('Operative config not found in %s', restore_dir)
    # User gin config and user hyperparameters from flags.
    gin_file = cloud.make_file_paths_local(FLAGS.gin_file, GIN_PATH)
    gin.parse_config_files_and_bindings(
        gin_file, FLAGS.gin_param, skip_unknown=True)
def allow_memory_growth():
  """Sets the GPUs to grow the memory usage as is needed by the process."""
  physical_gpus = tf.config.experimental.list_physical_devices('GPU')
  if not physical_gpus:
    return
  try:
    # Currently, memory growth needs to be the same across GPUs, and must
    # be set before the GPUs are initialized.
    for gpu in physical_gpus:
      tf.config.experimental.set_memory_growth(gpu, True)
  except RuntimeError as e:
    print(e)
def main(unused_argv):
  """Parse gin config and run ddsp training, evaluation, or sampling.

  The dispatch is controlled by --mode ('train', 'eval', or 'sample').
  """
  restore_dir = os.path.expanduser(FLAGS.restore_dir)
  save_dir = os.path.expanduser(FLAGS.save_dir)
  # If no separate restore directory is given, use the save directory.
  restore_dir = save_dir if not restore_dir else restore_dir
  logging.info('Restore Dir: %s', restore_dir)
  logging.info('Save Dir: %s', save_dir)

  gfile.makedirs(restore_dir)  # Only makes dirs if they don't exist.
  parse_gin(restore_dir)
  logging.info('Operative Gin Config:\n%s', gin.config.config_str())
  train_util.gin_register_keras_layers()

  if FLAGS.allow_memory_growth:
    allow_memory_growth()

  # Training.
  if FLAGS.mode == 'train':
    strategy = train_util.get_strategy(tpu=FLAGS.tpu,
                                       cluster_config=FLAGS.cluster_config)
    with strategy.scope():
      model = models.get_model()
      trainer = trainers.get_trainer_class()(model, strategy)

    train_util.train(data_provider=gin.REQUIRED,
                     trainer=trainer,
                     save_dir=save_dir,
                     restore_dir=restore_dir,
                     early_stop_loss_value=FLAGS.early_stop_loss_value,
                     report_loss_to_hypertune=FLAGS.hypertune)

  # Evaluation.
  elif FLAGS.mode == 'eval':
    model = models.get_model()
    delay_start()
    eval_util.evaluate(data_provider=gin.REQUIRED,
                       model=model,
                       save_dir=save_dir,
                       restore_dir=restore_dir,
                       run_once=FLAGS.run_once)

  # Sampling.
  elif FLAGS.mode == 'sample':
    model = models.get_model()
    delay_start()
    eval_util.sample(data_provider=gin.REQUIRED,
                     model=model,
                     save_dir=save_dir,
                     restore_dir=restore_dir,
                     run_once=FLAGS.run_once)
def console_entry_point():
  """From pip installed script."""
  # absl's app.run parses flags and then calls main().
  app.run(main)


if __name__ == '__main__':
  console_entry_point()
| 37.478632 | 80 | 0.648233 |
import os
import time
from absl import app
from absl import flags
from absl import logging
from ddsp.training import cloud
from ddsp.training import eval_util
from ddsp.training import models
from ddsp.training import train_util
from ddsp.training import trainers
import gin
import pkg_resources
import tensorflow.compat.v2 as tf
gfile = tf.io.gfile
FLAGS = flags.FLAGS
flags.DEFINE_enum('mode', 'train', ['train', 'eval', 'sample'],
'Whether to train, evaluate, or sample from the model.')
flags.DEFINE_string('save_dir', '/tmp/ddsp',
'Path where checkpoints and summary events will be saved '
'during training and evaluation.')
flags.DEFINE_string('restore_dir', '',
'Path from which checkpoints will be restored before '
'training. Can be different than the save_dir.')
flags.DEFINE_string('tpu', '', 'Address of the TPU. No TPU if left blank.')
flags.DEFINE_string('cluster_config', '',
'Worker-specific JSON string for multiworker setup. '
'For more information see train_util.get_strategy().')
flags.DEFINE_boolean('allow_memory_growth', False,
'Whether to grow the GPU memory usage as is needed by the '
'process. Prevents crashes on GPUs with smaller memory.')
flags.DEFINE_boolean('hypertune', False,
'Enable metric reporting for hyperparameter tuning, such '
'as on Google Cloud AI-Platform.')
flags.DEFINE_float('early_stop_loss_value', None,
'Stops training early when the `total_loss` reaches below '
'this value during training.')
flags.DEFINE_multi_string('gin_search_path', [],
'Additional gin file search paths.')
flags.DEFINE_multi_string('gin_file', [],
'List of paths to the config files. If file '
'in gstorage bucket specify whole gstorage path: '
'gs://bucket-name/dir/in/bucket/file.gin.')
flags.DEFINE_multi_string('gin_param', [],
'Newline separated list of Gin parameter bindings.')
flags.DEFINE_boolean('run_once', False, 'Whether evaluation will run once.')
flags.DEFINE_integer('initial_delay_secs', None,
'Time to wait before evaluation starts')
GIN_PATH = pkg_resources.resource_filename(__name__, 'gin')
def delay_start():
  """Optionally delay the start of the run by --initial_delay_secs."""
  delay_time = FLAGS.initial_delay_secs
  if delay_time:
    logging.info('Waiting for %i second(s)', delay_time)
    time.sleep(delay_time)
def parse_gin(restore_dir):
  """Parse gin config from --gin_file, --gin_param, and the model directory.

  Later parse calls override earlier ones: defaults first, then the saved
  operative config in `restore_dir` (if any), then the user's flags.
  """
  # Enable parsing gin files on Google Cloud.
  gin.config.register_file_reader(tf.io.gfile.GFile, tf.io.gfile.exists)
  # Add user folders to the gin search path.
  for gin_search_path in [GIN_PATH] + FLAGS.gin_search_path:
    gin.add_config_file_search_path(gin_search_path)
  with gin.unlock_config():
    # Optimization and evaluation defaults.
    use_tpu = bool(FLAGS.tpu)
    opt_default = 'base.gin' if not use_tpu else 'base_tpu.gin'
    gin.parse_config_file(os.path.join('optimization', opt_default))
    eval_default = 'eval/basic.gin'
    gin.parse_config_file(eval_default)
    # Load operative_config if it exists (model has already trained).
    try:
      operative_config = train_util.get_latest_operative_config(restore_dir)
      logging.info('Using operative config: %s', operative_config)
      operative_config = cloud.make_file_paths_local(operative_config, GIN_PATH)
      gin.parse_config_file(operative_config, skip_unknown=True)
    except FileNotFoundError:
      logging.info('Operative config not found in %s', restore_dir)
    # User gin config and user hyperparameters from flags.
    gin_file = cloud.make_file_paths_local(FLAGS.gin_file, GIN_PATH)
    gin.parse_config_files_and_bindings(
        gin_file, FLAGS.gin_param, skip_unknown=True)
def allow_memory_growth():
  """Enable GPU memory growth so TensorFlow does not grab all GPU memory.

  Must run before any GPU has been initialized by TensorFlow; if it runs too
  late, `set_memory_growth` raises RuntimeError, which is logged (via absl
  logging, consistent with the rest of this module, instead of bare print)
  and otherwise ignored so the process can continue with default allocation.
  """
  gpus = tf.config.experimental.list_physical_devices('GPU')
  if not gpus:
    return
  try:
    for gpu in gpus:
      tf.config.experimental.set_memory_growth(gpu, True)
  except RuntimeError as e:
    # Raised when GPUs were already initialized; non-fatal by design.
    logging.error('Could not enable GPU memory growth: %s', e)
def main(unused_argv):
  """Parse configs, then dispatch to training, evaluation, or sampling.

  The action is selected by --mode ('train', 'eval', or 'sample'); model and
  data configuration come from gin files/bindings parsed by `parse_gin`.
  """
  restore_dir = os.path.expanduser(FLAGS.restore_dir)
  save_dir = os.path.expanduser(FLAGS.save_dir)
  # If no explicit restore dir was given, restore from the save dir.
  restore_dir = save_dir if not restore_dir else restore_dir
  logging.info('Restore Dir: %s', restore_dir)
  logging.info('Save Dir: %s', save_dir)
  gfile.makedirs(restore_dir)  # Ensure the restore directory exists.
  parse_gin(restore_dir)
  logging.info('Operative Gin Config:\n%s', gin.config.config_str())
  train_util.gin_register_keras_layers()

  if FLAGS.allow_memory_growth:
    allow_memory_growth()

  # Training.
  if FLAGS.mode == 'train':
    strategy = train_util.get_strategy(tpu=FLAGS.tpu,
                                       cluster_config=FLAGS.cluster_config)
    # Model and trainer must be created under the distribution strategy scope.
    with strategy.scope():
      model = models.get_model()
      trainer = trainers.get_trainer_class()(model, strategy)

    train_util.train(data_provider=gin.REQUIRED,
                     trainer=trainer,
                     save_dir=save_dir,
                     restore_dir=restore_dir,
                     early_stop_loss_value=FLAGS.early_stop_loss_value,
                     report_loss_to_hypertune=FLAGS.hypertune)

  # Evaluation.
  elif FLAGS.mode == 'eval':
    model = models.get_model()
    delay_start()  # Optionally wait for the first checkpoint to appear.
    eval_util.evaluate(data_provider=gin.REQUIRED,
                       model=model,
                       save_dir=save_dir,
                       restore_dir=restore_dir,
                       run_once=FLAGS.run_once)

  # Sampling.
  elif FLAGS.mode == 'sample':
    model = models.get_model()
    delay_start()  # Optionally wait for the first checkpoint to appear.
    eval_util.sample(data_provider=gin.REQUIRED,
                     model=model,
                     save_dir=save_dir,
                     restore_dir=restore_dir,
                     run_once=FLAGS.run_once)
def console_entry_point():
  """Entry point for the console script; absl parses flags then runs main()."""
  app.run(main)
# Allow running this module directly (e.g. `python <module>.py --mode=train`).
if __name__ == '__main__':
  console_entry_point()
| true | true |
f7f711562faf6e3d8f4d9e1dd446c228d4ab9b50 | 9,468 | py | Python | test/functional/tool_wallet.py | cruro/cruro | 80aa93365db5e6653bb8235fb61914ee4aa087e8 | [
"MIT"
] | null | null | null | test/functional/tool_wallet.py | cruro/cruro | 80aa93365db5e6653bb8235fb61914ee4aa087e8 | [
"MIT"
] | null | null | null | test/functional/tool_wallet.py | cruro/cruro | 80aa93365db5e6653bb8235fb61914ee4aa087e8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Cruro Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-wallet."""
import hashlib
import os
import stat
import subprocess
import textwrap
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
BUFFER_SIZE = 16 * 1024
class ToolWalletTest(BitcoinTestFramework):
    """Functional tests for the offline bitcoin-wallet command-line tool."""

    def set_test_params(self):
        # One node with a fresh chain; the tool is run against its wallet file.
        self.num_nodes = 1
        self.setup_clean_chain = True

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def bitcoin_wallet_process(self, *args):
        """Start a bitcoin-wallet subprocess pointed at node 0's datadir."""
        binary = self.config["environment"]["BUILDDIR"] + '/src/bitcoin-wallet' + self.config["environment"]["EXEEXT"]
        args = ['-datadir={}'.format(self.nodes[0].datadir), '-regtest'] + list(args)
        return subprocess.Popen([binary] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)

    def assert_raises_tool_error(self, error, *args):
        """Assert the tool exits with code 1, no stdout, and `error` on stderr."""
        p = self.bitcoin_wallet_process(*args)
        stdout, stderr = p.communicate()
        assert_equal(p.poll(), 1)
        assert_equal(stdout, '')
        assert_equal(stderr.strip(), error)

    def assert_tool_output(self, output, *args):
        """Assert the tool exits cleanly with exactly `output` on stdout."""
        p = self.bitcoin_wallet_process(*args)
        stdout, stderr = p.communicate()
        assert_equal(stderr, '')
        assert_equal(stdout, output)
        assert_equal(p.poll(), 0)

    def wallet_shasum(self):
        """Return the SHA-1 hex digest of the wallet file, hashed in chunks."""
        h = hashlib.sha1()
        mv = memoryview(bytearray(BUFFER_SIZE))
        with open(self.wallet_path, 'rb', buffering=0) as f:
            for n in iter(lambda : f.readinto(mv), 0):
                h.update(mv[:n])
        return h.hexdigest()

    def wallet_timestamp(self):
        # mtime is used to detect unexpected writes to the wallet file.
        return os.path.getmtime(self.wallet_path)

    def wallet_permissions(self):
        # Last three octal digits of the file mode, e.g. '600'.
        return oct(os.lstat(self.wallet_path).st_mode)[-3:]

    def log_wallet_timestamp_comparison(self, old, new):
        result = 'unchanged' if new == old else 'increased!'
        self.log.debug('Wallet file timestamp {}'.format(result))

    def test_invalid_tool_commands_and_args(self):
        self.log.info('Testing that various invalid commands raise with specific error messages')
        self.assert_raises_tool_error('Invalid command: foo', 'foo')
        # `bitcoin-wallet help` raises an error. Use `bitcoin-wallet -help`.
        self.assert_raises_tool_error('Invalid command: help', 'help')
        self.assert_raises_tool_error('Error: two methods provided (info and create). Only one method should be provided.', 'info', 'create')
        self.assert_raises_tool_error('Error parsing command line arguments: Invalid parameter -foo', '-foo')
        # The node is still running here, so its wallet lock blocks the tool.
        self.assert_raises_tool_error('Error loading wallet.dat. Is wallet being used by other process?', '-wallet=wallet.dat', 'info')
        self.assert_raises_tool_error('Error: no wallet file at nonexistent.dat', '-wallet=nonexistent.dat', 'info')

    def test_tool_wallet_info(self):
        """Run `bitcoin-wallet info` on a freshly created wallet and check output."""
        # Stop the node to close the wallet to call the info command.
        self.stop_node(0)
        self.log.info('Calling wallet tool info, testing output')
        #
        # TODO: Wallet tool info should work with wallet file permissions set to
        # read-only without raising:
        # "Error loading wallet.dat. Is wallet being used by another process?"
        # The following lines should be uncommented and the tests still succeed:
        #
        # self.log.debug('Setting wallet file permissions to 400 (read-only)')
        # os.chmod(self.wallet_path, stat.S_IRUSR)
        # assert(self.wallet_permissions() in ['400', '666']) # Sanity check. 666 because Appveyor.
        # shasum_before = self.wallet_shasum()
        timestamp_before = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp before calling info: {}'.format(timestamp_before))
        out = textwrap.dedent('''\
            Wallet info
            ===========
            Encrypted: no
            HD (hd seed available): yes
            Keypool Size: 2
            Transactions: 0
            Address Book: 3
        ''')
        self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
        timestamp_after = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp after calling info: {}'.format(timestamp_after))
        self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
        self.log.debug('Setting wallet file permissions back to 600 (read/write)')
        os.chmod(self.wallet_path, stat.S_IRUSR | stat.S_IWUSR)
        assert(self.wallet_permissions() in ['600', '666'])  # Sanity check. 666 because Appveyor.
        #
        # TODO: Wallet tool info should not write to the wallet file.
        # The following lines should be uncommented and the tests still succeed:
        #
        # assert_equal(timestamp_before, timestamp_after)
        # shasum_after = self.wallet_shasum()
        # assert_equal(shasum_before, shasum_after)
        # self.log.debug('Wallet file shasum unchanged\n')

    def test_tool_wallet_info_after_transaction(self):
        """
        Mutate the wallet with a transaction to verify that the info command
        output changes accordingly.
        """
        self.start_node(0)
        self.log.info('Generating transaction to mutate wallet')
        self.nodes[0].generate(1)
        self.stop_node(0)

        self.log.info('Calling wallet tool info after generating a transaction, testing output')
        shasum_before = self.wallet_shasum()
        timestamp_before = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp before calling info: {}'.format(timestamp_before))
        out = textwrap.dedent('''\
            Wallet info
            ===========
            Encrypted: no
            HD (hd seed available): yes
            Keypool Size: 2
            Transactions: 1
            Address Book: 3
        ''')
        self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
        shasum_after = self.wallet_shasum()
        timestamp_after = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp after calling info: {}'.format(timestamp_after))
        self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
        #
        # TODO: Wallet tool info should not write to the wallet file.
        # This assertion should be uncommented and succeed:
        # assert_equal(timestamp_before, timestamp_after)
        assert_equal(shasum_before, shasum_after)
        self.log.debug('Wallet file shasum unchanged\n')

    def test_tool_wallet_create_on_existing_wallet(self):
        """Create a new wallet ('foo') and verify the original is untouched."""
        self.log.info('Calling wallet tool create on an existing wallet, testing output')
        shasum_before = self.wallet_shasum()
        timestamp_before = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp before calling create: {}'.format(timestamp_before))
        out = textwrap.dedent('''\
            Topping up keypool...
            Wallet info
            ===========
            Encrypted: no
            HD (hd seed available): yes
            Keypool Size: 2000
            Transactions: 0
            Address Book: 0
        ''')
        self.assert_tool_output(out, '-wallet=foo', 'create')
        shasum_after = self.wallet_shasum()
        timestamp_after = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp after calling create: {}'.format(timestamp_after))
        self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
        assert_equal(timestamp_before, timestamp_after)
        assert_equal(shasum_before, shasum_after)
        self.log.debug('Wallet file shasum unchanged\n')

    def test_getwalletinfo_on_different_wallet(self):
        """RPC getwalletinfo on wallet 'foo' must not touch the default wallet file."""
        self.log.info('Starting node with arg -wallet=foo')
        self.start_node(0, ['-wallet=foo'])

        self.log.info('Calling getwalletinfo on a different wallet ("foo"), testing output')
        shasum_before = self.wallet_shasum()
        timestamp_before = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp before calling getwalletinfo: {}'.format(timestamp_before))
        out = self.nodes[0].getwalletinfo()
        self.stop_node(0)

        shasum_after = self.wallet_shasum()
        timestamp_after = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp after calling getwalletinfo: {}'.format(timestamp_after))

        assert_equal(0, out['txcount'])
        assert_equal(1000, out['keypoolsize'])
        assert_equal(1000, out['keypoolsize_hd_internal'])
        assert_equal(True, 'hdseedid' in out)

        self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
        assert_equal(timestamp_before, timestamp_after)
        assert_equal(shasum_after, shasum_before)
        self.log.debug('Wallet file shasum unchanged\n')

    def run_test(self):
        self.wallet_path = os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat')
        self.test_invalid_tool_commands_and_args()
        # Warning: The following tests are order-dependent.
        self.test_tool_wallet_info()
        self.test_tool_wallet_info_after_transaction()
        self.test_tool_wallet_create_on_existing_wallet()
        self.test_getwalletinfo_on_different_wallet()
# Run the functional test when executed directly.
if __name__ == '__main__':
    ToolWalletTest().main()
| 45.085714 | 144 | 0.668145 |
import hashlib
import os
import stat
import subprocess
import textwrap
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
BUFFER_SIZE = 16 * 1024
class ToolWalletTest(BitcoinTestFramework):
    """Functional tests for the offline bitcoin-wallet command-line tool."""

    def set_test_params(self):
        # One node with a fresh chain; the tool is run against its wallet file.
        self.num_nodes = 1
        self.setup_clean_chain = True

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def bitcoin_wallet_process(self, *args):
        """Start a bitcoin-wallet subprocess pointed at node 0's datadir."""
        binary = self.config["environment"]["BUILDDIR"] + '/src/bitcoin-wallet' + self.config["environment"]["EXEEXT"]
        args = ['-datadir={}'.format(self.nodes[0].datadir), '-regtest'] + list(args)
        return subprocess.Popen([binary] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)

    def assert_raises_tool_error(self, error, *args):
        """Assert the tool exits with code 1, no stdout, and `error` on stderr."""
        p = self.bitcoin_wallet_process(*args)
        stdout, stderr = p.communicate()
        assert_equal(p.poll(), 1)
        assert_equal(stdout, '')
        assert_equal(stderr.strip(), error)

    def assert_tool_output(self, output, *args):
        """Assert the tool exits cleanly with exactly `output` on stdout."""
        p = self.bitcoin_wallet_process(*args)
        stdout, stderr = p.communicate()
        assert_equal(stderr, '')
        assert_equal(stdout, output)
        assert_equal(p.poll(), 0)

    def wallet_shasum(self):
        """Return the SHA-1 hex digest of the wallet file, hashed in chunks."""
        h = hashlib.sha1()
        mv = memoryview(bytearray(BUFFER_SIZE))
        with open(self.wallet_path, 'rb', buffering=0) as f:
            for n in iter(lambda : f.readinto(mv), 0):
                h.update(mv[:n])
        return h.hexdigest()

    def wallet_timestamp(self):
        # mtime is used to detect unexpected writes to the wallet file.
        return os.path.getmtime(self.wallet_path)

    def wallet_permissions(self):
        # Last three octal digits of the file mode, e.g. '600'.
        return oct(os.lstat(self.wallet_path).st_mode)[-3:]

    def log_wallet_timestamp_comparison(self, old, new):
        result = 'unchanged' if new == old else 'increased!'
        self.log.debug('Wallet file timestamp {}'.format(result))

    def test_invalid_tool_commands_and_args(self):
        self.log.info('Testing that various invalid commands raise with specific error messages')
        self.assert_raises_tool_error('Invalid command: foo', 'foo')
        # `bitcoin-wallet help` raises an error; `-help` is the valid form.
        self.assert_raises_tool_error('Invalid command: help', 'help')
        self.assert_raises_tool_error('Error: two methods provided (info and create). Only one method should be provided.', 'info', 'create')
        self.assert_raises_tool_error('Error parsing command line arguments: Invalid parameter -foo', '-foo')
        # The node is still running here, so its wallet lock blocks the tool.
        self.assert_raises_tool_error('Error loading wallet.dat. Is wallet being used by other process?', '-wallet=wallet.dat', 'info')
        self.assert_raises_tool_error('Error: no wallet file at nonexistent.dat', '-wallet=nonexistent.dat', 'info')

    def test_tool_wallet_info(self):
        """Run `bitcoin-wallet info` on a freshly created wallet and check output."""
        # Stop the node so the wallet file is closed and the tool can open it.
        self.stop_node(0)
        self.log.info('Calling wallet tool info, testing output')
        # Bug fix: capture the timestamp before running the tool. Previously
        # the return value of wallet_timestamp() was discarded here, so the
        # references to `timestamp_before` below raised a NameError.
        timestamp_before = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp before calling info: {}'.format(timestamp_before))
        out = textwrap.dedent('''\
            Wallet info
            ===========
            Encrypted: no
            HD (hd seed available): yes
            Keypool Size: 2
            Transactions: 0
            Address Book: 3
        ''')
        self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
        timestamp_after = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp after calling info: {}'.format(timestamp_after))
        self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
        self.log.debug('Setting wallet file permissions back to 600 (read/write)')
        os.chmod(self.wallet_path, stat.S_IRUSR | stat.S_IWUSR)
        assert(self.wallet_permissions() in ['600', '666'])  # 666 on Appveyor/Windows.

    def test_tool_wallet_info_after_transaction(self):
        """Mutate the wallet with a transaction and verify the info output changes."""
        self.start_node(0)
        self.log.info('Generating transaction to mutate wallet')
        self.nodes[0].generate(1)
        self.stop_node(0)

        self.log.info('Calling wallet tool info after generating a transaction, testing output')
        shasum_before = self.wallet_shasum()
        timestamp_before = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp before calling info: {}'.format(timestamp_before))
        out = textwrap.dedent('''\
            Wallet info
            ===========
            Encrypted: no
            HD (hd seed available): yes
            Keypool Size: 2
            Transactions: 1
            Address Book: 3
        ''')
        self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
        shasum_after = self.wallet_shasum()
        timestamp_after = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp after calling info: {}'.format(timestamp_after))
        self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
        assert_equal(shasum_before, shasum_after)
        self.log.debug('Wallet file shasum unchanged\n')

    def test_tool_wallet_create_on_existing_wallet(self):
        """Create a new wallet ('foo') and verify the original is untouched."""
        self.log.info('Calling wallet tool create on an existing wallet, testing output')
        shasum_before = self.wallet_shasum()
        timestamp_before = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp before calling create: {}'.format(timestamp_before))
        out = textwrap.dedent('''\
            Topping up keypool...
            Wallet info
            ===========
            Encrypted: no
            HD (hd seed available): yes
            Keypool Size: 2000
            Transactions: 0
            Address Book: 0
        ''')
        self.assert_tool_output(out, '-wallet=foo', 'create')
        shasum_after = self.wallet_shasum()
        timestamp_after = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp after calling create: {}'.format(timestamp_after))
        self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
        assert_equal(timestamp_before, timestamp_after)
        assert_equal(shasum_before, shasum_after)
        self.log.debug('Wallet file shasum unchanged\n')

    def test_getwalletinfo_on_different_wallet(self):
        """RPC getwalletinfo on wallet 'foo' must not touch the default wallet file."""
        self.log.info('Starting node with arg -wallet=foo')
        self.start_node(0, ['-wallet=foo'])

        self.log.info('Calling getwalletinfo on a different wallet ("foo"), testing output')
        shasum_before = self.wallet_shasum()
        timestamp_before = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp before calling getwalletinfo: {}'.format(timestamp_before))
        out = self.nodes[0].getwalletinfo()
        self.stop_node(0)

        shasum_after = self.wallet_shasum()
        timestamp_after = self.wallet_timestamp()
        self.log.debug('Wallet file timestamp after calling getwalletinfo: {}'.format(timestamp_after))

        assert_equal(0, out['txcount'])
        assert_equal(1000, out['keypoolsize'])
        assert_equal(1000, out['keypoolsize_hd_internal'])
        assert_equal(True, 'hdseedid' in out)

        self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
        assert_equal(timestamp_before, timestamp_after)
        assert_equal(shasum_after, shasum_before)
        self.log.debug('Wallet file shasum unchanged\n')

    def run_test(self):
        self.wallet_path = os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat')
        self.test_invalid_tool_commands_and_args()
        # Warning: The following tests are order-dependent.
        self.test_tool_wallet_info()
        self.test_tool_wallet_info_after_transaction()
        self.test_tool_wallet_create_on_existing_wallet()
        self.test_getwalletinfo_on_different_wallet()
| true | true |
f7f711b68471ce5e5da3e55ce3108bab95f2f8f1 | 14,774 | py | Python | supcon/losses_test.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | supcon/losses_test.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | supcon/losses_test.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for supcon.losses."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
from supcon import enums
from supcon import losses
class LossesTest(tf.test.TestCase, parameterized.TestCase):
  """Unit tests for supcon.losses contrastive_loss and ContrastiveLoss.

  Fix applied: in testLossForSummationLocationsAndDenominatorModes the second
  half of the assertion `msg` string was missing its f-prefix, so the
  placeholders were printed verbatim rather than interpolated (compare the
  correct form in testLossForOneView).
  """

  def testKerasImplementation(self):
    features = np.random.uniform(0., 1., (12, 2, 20))
    labels = np.eye(12, 15, dtype=np.int32)
    loss = losses.ContrastiveLoss()(labels, features)
    self.assertEqual(loss.shape, ())
    self.assertFalse(np.isnan(loss.numpy()))

  def testKerasLossVsNonKerasLoss(self):
    features = np.random.uniform(0., 1., size=(12, 2, 20))
    labels = np.eye(12, 15, dtype=np.int32)
    loss_keras = losses.ContrastiveLoss()(labels, features)
    loss_direct = tf.reduce_mean(
        losses.contrastive_loss(features, labels=labels))
    self.assertFalse(np.isnan(loss_direct.numpy()))
    self.assertFalse(np.isnan(loss_keras.numpy()))
    self.assertEqual(loss_direct.numpy(), loss_keras.numpy())

  def testIncorrectFeaturesRank(self):
    features = np.zeros([1, 1])
    with self.assertRaisesRegex(ValueError, 'Invalid features rank'):
      losses.contrastive_loss(features)

  def testUnknownBatchSizeDimension(self):
    features = tf.keras.layers.Input(
        dtype=tf.float32, batch_size=None, shape=(2, 20))
    with self.assertRaisesRegex(ValueError, 'features has unknown batch_size'):
      losses.contrastive_loss(features)

  def testUnknownNumViewsDimension(self):
    features = tf.keras.layers.Input(
        dtype=tf.float32, batch_size=1, shape=(None, 20))
    with self.assertRaisesRegex(ValueError, 'features has unknown num_views'):
      losses.contrastive_loss(features)

  def testIncorrectLabelsShape(self):
    features = np.random.uniform(0., 1., size=(10, 3, 20))
    labels = np.random.randint(1, size=(5))
    with self.assertRaisesRegex(ValueError, 'Invalid labels shape'):
      losses.contrastive_loss(features, labels=labels)

  def testIncorrectLabelsRank(self):
    features = np.random.uniform(0., 1., size=(10, 3, 20))
    labels = np.random.randint(5, size=(4, 4))
    with self.assertRaisesRegex(ValueError, 'Invalid labels shape'):
      losses.contrastive_loss(features, labels=labels)

  def testUnknownContrastMode(self):
    features = np.random.uniform(size=(10, 3, 20))
    labels = np.eye(10, dtype=np.int32)
    with self.assertRaisesRegex(ValueError, 'Invalid contrast_mode'):
      losses.contrastive_loss(features, labels, contrast_mode='invalid')

  def testUnknownSummationLocation(self):
    features = np.random.uniform(size=(10, 3, 20))
    labels = np.eye(10, dtype=np.int32)
    with self.assertRaisesRegex(ValueError, 'Invalid summation_location'):
      losses.contrastive_loss(features, labels, summation_location='invalid')

  def testUnknownDenominatorMode(self):
    features = np.random.uniform(size=(10, 3, 20))
    labels = np.eye(10, dtype=np.int32)
    with self.assertRaisesRegex(ValueError, 'Invalid denominator_mode'):
      losses.contrastive_loss(features, labels, denominator_mode='invalid')

  def testDefaultBehaviourSameAsAllLabelsDifferent(self):
    features = np.random.uniform(size=(10, 3, 20))
    labels = np.eye(10, dtype=np.int64)
    loss = tf.reduce_mean(losses.contrastive_loss(features))
    loss_without_labels = tf.reduce_mean(
        losses.contrastive_loss(features, labels))
    self.assertFalse(np.isnan(loss.numpy()))
    self.assertFalse(np.isnan(loss_without_labels.numpy()))
    self.assertEqual(loss.numpy(), loss_without_labels.numpy())

  def testContrastModeOneVsAll(self):
    # shape (2, 2, 3)
    features = np.array([[[0, 0, 1], [0, 1, 0]], [[1., 0., 0.], [0., -1., 0.]]])
    loss_one = tf.reduce_mean(
        losses.contrastive_loss(
            features, contrast_mode=enums.LossContrastMode.ONE_VIEW))
    self.assertFalse(np.isnan(loss_one.numpy()))
    expected_loss = 1.098612  # np.log(3.)
    self.assertAlmostEqual(np.mean(loss_one.numpy()), expected_loss, places=6)
    loss_all = tf.reduce_mean(
        losses.contrastive_loss(
            features, contrast_mode=enums.LossContrastMode.ALL_VIEWS))
    self.assertFalse(np.isnan(loss_all.numpy()))
    self.assertNotAlmostEqual(
        np.mean(loss_all.numpy()), expected_loss, places=6)

  def testLossValue(self):
    sqrt2 = np.sqrt(2.)
    sqrt6 = np.sqrt(6.)
    features = np.array([[[0, 0, 1], [0, (2. * sqrt2) / 3., -1 / 3.]],
                         [[sqrt6 / 3., -sqrt2 / 3., -1. / 3],
                          [-sqrt6 / 3., -sqrt2 / 3., -1. / 3]]])
    loss = losses.contrastive_loss(features)
    self.assertFalse(np.isnan(loss.numpy()).any())
    expected_loss = 1.098612  # np.log(3.)
    self.assertAlmostEqual(np.mean(loss.numpy()), expected_loss, places=6)

  def testLossValueWithLabels(self):
    sqrt2 = np.sqrt(2.)
    sqrt6 = np.sqrt(6.)
    features = np.array([[[0, 0, 1], [0, (2. * sqrt2) / 3., -1 / 3.]],
                         [[sqrt6 / 3., -sqrt2 / 3., -1. / 3],
                          [-sqrt6 / 3., -sqrt2 / 3., -1. / 3]]])
    labels = np.eye(2, dtype=np.int32)
    loss = losses.contrastive_loss(features, labels=labels)
    self.assertFalse(np.isnan(loss.numpy()).any())
    expected_loss = 1.098612  # np.log(3.)
    self.assertAlmostEqual(np.mean(loss.numpy()), expected_loss, places=6)

  def testLossValueWithLabelsAndPositives(self):
    features = np.array([[[0, 0, 1], [0, 0, 1]], [[0, 1, 0], [0, 1, 0]],
                         [[1, 0, 0], [1, 0, 0]]])
    labels = np.eye(3, dtype=np.int32)
    # Make the label of sample 1 and 2 the same (= label 0)
    labels[1] = labels[0]
    loss = losses.contrastive_loss(features, labels).numpy()
    self.assertFalse(np.isnan(loss).any())
    expected_loss = [
        1.57149910,  # (3. * np.log(np.e + 4) - 1) / 3.
        1.57149910,  # (3. * np.log(np.e + 4) - 1) / 3.
        0.90483244,  # np.log(np.e + 4) - 1
    ]
    self.assertAlmostEqual(loss[0], expected_loss[0], places=6)
    self.assertAlmostEqual(loss[1], expected_loss[1], places=6)
    self.assertAlmostEqual(loss[2], expected_loss[2], places=6)

  def testLossValueWithTemp(self):
    sqrt2 = np.sqrt(2.)
    sqrt6 = np.sqrt(6.)
    features = np.array([[[0, 0, 1], [0, (2. * sqrt2) / 3., -1 / 3.]],
                         [[sqrt6 / 3., -sqrt2 / 3., -1. / 3],
                          [-sqrt6 / 3., -sqrt2 / 3., -1. / 3]]])
    loss = losses.contrastive_loss(features, temperature=0.1)
    self.assertFalse(np.isnan(loss.numpy()).any())
    expected_loss = 0.1098612  # 0.1 * np.log(3.)
    self.assertAlmostEqual(np.mean(loss.numpy()), expected_loss, places=5)

  def testLossValueWithTempNoScaleByTemp(self):
    sqrt2 = np.sqrt(2.)
    sqrt6 = np.sqrt(6.)
    features = np.array([[[0, 0, 1], [0, (2. * sqrt2) / 3., -1 / 3.]],
                         [[sqrt6 / 3., -sqrt2 / 3., -1. / 3],
                          [-sqrt6 / 3., -sqrt2 / 3., -1. / 3]]])
    loss = losses.contrastive_loss(
        features, temperature=0.1, scale_by_temperature=False)
    self.assertFalse(np.isnan(loss.numpy()).any())
    expected_loss = 1.098612  # np.log(3.)
    self.assertAlmostEqual(np.mean(loss.numpy()), expected_loss, places=5)

  @parameterized.named_parameters(('1x1 features', (10, 3, 1, 1, 64)),
                                  ('3x3 features', (10, 3, 3, 3, 8)),
                                  ('16x16 features', (10, 3, 16, 16, 4)),
                                  ('rank-3 features', (10, 3, 16, 8)))
  def testConvFeatures(self, features_shape):
    features_shape = tf.TensorShape(features_shape)
    features = tf.random.uniform(shape=features_shape)
    # Normalize embeddings to ensure the Loss does not return NaN values
    # for large feature sizes.
    normalization_axes = list(range(2, features_shape.rank))
    normalized_features = tf.nn.l2_normalize(features, axis=normalization_axes)
    loss = tf.reduce_mean(losses.contrastive_loss(normalized_features))
    self.assertFalse(np.isnan(loss.numpy()))

  @parameterized.named_parameters(
      # The following values have all been manually checked to be the correct
      # outputs given the inputs in the test.
      ('out_and_all', enums.LossSummationLocation.OUTSIDE,
       enums.LossDenominatorMode.ALL, -1, [1.6755852, 1.7973773, 1.58471]),
      ('out_and_one', enums.LossSummationLocation.OUTSIDE,
       enums.LossDenominatorMode.ONE_POSITIVE, -1,
       [1.1782627, 1.3435497, 1.58471]),
      ('out_and_none', enums.LossSummationLocation.OUTSIDE,
       enums.LossDenominatorMode.ONLY_NEGATIVES, -1,
       [0.79652834, 1.0154991, 1.3529458]),
      ('out_and_large_cap', enums.LossSummationLocation.OUTSIDE,
       enums.LossDenominatorMode.ALL, 4, [1.6755852, 1.7973773, 1.58471]),
      ('out_and_small_cap', enums.LossSummationLocation.OUTSIDE,
       enums.LossDenominatorMode.ALL, 2, [1.1385298, 1.1824025, 1.1557417],
       (0, 0, 0)),
      ('out_and_zero_cap', enums.LossSummationLocation.OUTSIDE,
       enums.LossDenominatorMode.ALL, 0, [1.3769293, 1.0785717, 1.58471]),
      ('in_and_all', enums.LossSummationLocation.INSIDE,
       enums.LossDenominatorMode.ALL, -1, [1.6356678, 1.7135872, 1.58471]),
      ('in_and_one', enums.LossSummationLocation.INSIDE,
       enums.LossDenominatorMode.ONE_POSITIVE, -1,
       [1.1571673, 1.2941568, 1.58471]),
      ('in_and_none', enums.LossSummationLocation.INSIDE,
       enums.LossDenominatorMode.ONLY_NEGATIVES, -1,
       [0.756611, 0.93170905, 1.3529458]),
      ('in_and_large_cap', enums.LossSummationLocation.INSIDE,
       enums.LossDenominatorMode.ALL, 4, [1.6356678, 1.7135872, 1.58471]),
      ('in_and_small_cap', enums.LossSummationLocation.INSIDE,
       enums.LossDenominatorMode.ALL, 2, [1.0986123, 1.0986123, 1.0986123],
       (0, 0, 0)),
      ('in_and_zero_cap', enums.LossSummationLocation.INSIDE,
       enums.LossDenominatorMode.ALL, 0, [1.3769293, 1.0785717, 1.58471]))
  def testLossForSummationLocationsAndDenominatorModes(self,
                                                       summation_location,
                                                       denominator_mode,
                                                       positives_cap,
                                                       expected_loss,
                                                       labels=(0, 0, 1)):
    features = np.array([
        [[0.01, 0.02, 0.14], [0.38, 0.61, 0.50]],
        [[0.86, 0.97, 0.33], [0.26, 0.68, 0.45]],
        [[0.32, 0.64, 0.28], [0.45, 0.74, 0.73]],
    ])
    labels = tf.one_hot(labels, 2)
    loss = losses.contrastive_loss(
        features,
        labels=labels,
        summation_location=summation_location,
        denominator_mode=denominator_mode,
        positives_cap=positives_cap)
    self.assertTupleEqual(loss.numpy().shape, (len(expected_loss),))
    for index, (val1, val2) in enumerate(zip(loss.numpy(), expected_loss)):
      self.assertAlmostEqual(
          val1,
          val2,
          places=5,
          # Both literals must be f-strings so the values are interpolated.
          msg=f'Lists not almost equal at index {index}: '
          f'{loss.numpy()} != {expected_loss}')

  def testLossForOneView(self):
    features = np.array([
        [[0.01, 0.02, 0.14]],
        [[0.86, 0.97, 0.33]],
        [[0.32, 0.64, 0.28]],
    ])
    labels = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]],
                      dtype=np.int32)
    loss = losses.contrastive_loss(features, labels=labels, temperature=1.0)
    # Manually compute the expected loss for the two positive-pair samples;
    # the third sample has no positives, so its loss is zero.
    pos0 = np.exp(np.dot(features[0, 0, :], features[1, 0, :]))
    neg0 = np.exp(np.dot(features[0, 0, :], features[2, 0, :]))
    loss0 = -np.log(pos0 / (pos0 + neg0))
    pos1 = np.exp(np.dot(features[1, 0, :], features[0, 0, :]))
    neg1 = np.exp(np.dot(features[1, 0, :], features[2, 0, :]))
    loss1 = -np.log(pos1 / (pos1 + neg1))
    expected_loss = np.array([loss0, loss1, 0.0])
    self.assertTupleEqual(loss.numpy().shape, expected_loss.shape)
    for index, (val1, val2) in enumerate(zip(loss.numpy(), expected_loss)):
      self.assertAlmostEqual(
          val1,
          val2,
          places=5,
          msg=f'Lists not almost equal at index {index}: '
          f'{loss.numpy()} != {expected_loss}')

  def testLossOnTPU(self):
    # Calling tpu.replicate in Eager mode doesn't work. Wrapping in a graph
    # implicitly disables Eager mode within its scope.
    with tf.Graph().as_default():
      features = tf.constant([
          [[0.01, 0.02, 0.14], [0.38, 0.61, 0.50]],
          [[0.86, 0.97, 0.33], [0.26, 0.68, 0.45]],
          [[0.32, 0.64, 0.28], [0.45, 0.74, 0.73]],
          [[0.45, 0.62, 0.07], [0.13, 0.28, 0.91]],
      ])
      labels = tf.one_hot((0, 0, 1, 1), 2)
      tpu_result = tf.compat.v1.tpu.replicate(
          losses.contrastive_loss,
          [[features[:2], labels[:2]], [features[2:], labels[2:]]])
      # tpu_result should be a list of 2 lists, each containing a single float
      # Tensor with shape [2].
      self.assertLen(tpu_result, 2)
      self.assertLen(tpu_result[0], 1)
      self.assertLen(tpu_result[1], 1)
      self.assertEqual([2], tpu_result[0][0].shape.as_list())
      self.assertEqual([2], tpu_result[1][0].shape.as_list())
      tpu_loss = tf.reshape(tpu_result, [4])
      cpu_loss = losses.contrastive_loss(features, labels=labels)
      cpu_partial_loss_1 = losses.contrastive_loss(
          features[:2], labels=labels[:2])
      cpu_partial_loss_2 = losses.contrastive_loss(
          features[2:], labels=labels[2:])
      cpu_partial_loss = tf.concat([cpu_partial_loss_1, cpu_partial_loss_2],
                                   axis=0)
      with self.cached_session() as sess:
        sess.run(tf.compat.v1.tpu.initialize_system())
        tpu_loss, cpu_loss, cpu_partial_loss = sess.run(
            (tpu_loss, cpu_loss, cpu_partial_loss))
      print(tpu_loss)
      print(cpu_loss)
      # Numerical precision isn't so high on TPU.
      self.assertAllClose(tpu_loss, cpu_loss, atol=1e-2)
      # Verify that the TPU computation is different than independently
      # computing the two "local batches" on CPU, because of the internal
      # cross_replica_concat.
      self.assertNotAllClose(tpu_loss, cpu_partial_loss, atol=1e-2)
# Run the test suite when executed directly.
if __name__ == '__main__':
  tf.test.main()
| 44.634441 | 80 | 0.627657 |
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
from supcon import enums
from supcon import losses
class LossesTest(tf.test.TestCase, parameterized.TestCase):
def testKerasImplementation(self):
features = np.random.uniform(0., 1., (12, 2, 20))
labels = np.eye(12, 15, dtype=np.int32)
loss = losses.ContrastiveLoss()(labels, features)
self.assertEqual(loss.shape, ())
self.assertFalse(np.isnan(loss.numpy()))
def testKerasLossVsNonKerasLoss(self):
features = np.random.uniform(0., 1., size=(12, 2, 20))
labels = np.eye(12, 15, dtype=np.int32)
loss_keras = losses.ContrastiveLoss()(labels, features)
loss_direct = tf.reduce_mean(
losses.contrastive_loss(features, labels=labels))
self.assertFalse(np.isnan(loss_direct.numpy()))
self.assertFalse(np.isnan(loss_keras.numpy()))
self.assertEqual(loss_direct.numpy(), loss_keras.numpy())
def testIncorrectFeaturesRank(self):
features = np.zeros([1, 1])
with self.assertRaisesRegex(ValueError, 'Invalid features rank'):
losses.contrastive_loss(features)
def testUnknownBatchSizeDimension(self):
features = tf.keras.layers.Input(
dtype=tf.float32, batch_size=None, shape=(2, 20))
with self.assertRaisesRegex(ValueError, 'features has unknown batch_size'):
losses.contrastive_loss(features)
def testUnknownNumViewsDimension(self):
features = tf.keras.layers.Input(
dtype=tf.float32, batch_size=1, shape=(None, 20))
with self.assertRaisesRegex(ValueError, 'features has unknown num_views'):
losses.contrastive_loss(features)
def testIncorrectLabelsShape(self):
features = np.random.uniform(0., 1., size=(10, 3, 20))
labels = np.random.randint(1, size=(5))
with self.assertRaisesRegex(ValueError, 'Invalid labels shape'):
losses.contrastive_loss(features, labels=labels)
def testIncorrectLabelsRank(self):
features = np.random.uniform(0., 1., size=(10, 3, 20))
labels = np.random.randint(5, size=(4, 4))
with self.assertRaisesRegex(ValueError, 'Invalid labels shape'):
losses.contrastive_loss(features, labels=labels)
def testUnknownContrastMode(self):
features = np.random.uniform(size=(10, 3, 20))
labels = np.eye(10, dtype=np.int32)
with self.assertRaisesRegex(ValueError, 'Invalid contrast_mode'):
losses.contrastive_loss(features, labels, contrast_mode='invalid')
def testUnknownSummationLocation(self):
features = np.random.uniform(size=(10, 3, 20))
labels = np.eye(10, dtype=np.int32)
with self.assertRaisesRegex(ValueError, 'Invalid summation_location'):
losses.contrastive_loss(features, labels, summation_location='invalid')
def testUnknownDenominatorMode(self):
features = np.random.uniform(size=(10, 3, 20))
labels = np.eye(10, dtype=np.int32)
with self.assertRaisesRegex(ValueError, 'Invalid denominator_mode'):
losses.contrastive_loss(features, labels, denominator_mode='invalid')
def testDefaultBehaviourSameAsAllLabelsDifferent(self):
features = np.random.uniform(size=(10, 3, 20))
labels = np.eye(10, dtype=np.int64)
loss = tf.reduce_mean(losses.contrastive_loss(features))
loss_without_labels = tf.reduce_mean(
losses.contrastive_loss(features, labels))
self.assertFalse(np.isnan(loss.numpy()))
self.assertFalse(np.isnan(loss_without_labels.numpy()))
self.assertEqual(loss.numpy(), loss_without_labels.numpy())
def testContrastModeOneVsAll(self):
features = np.array([[[0, 0, 1], [0, 1, 0]], [[1., 0., 0.], [0., -1., 0.]]])
loss_one = tf.reduce_mean(
losses.contrastive_loss(
features, contrast_mode=enums.LossContrastMode.ONE_VIEW))
self.assertFalse(np.isnan(loss_one.numpy()))
expected_loss = 1.098612
self.assertAlmostEqual(np.mean(loss_one.numpy()), expected_loss, places=6)
loss_all = tf.reduce_mean(
losses.contrastive_loss(
features, contrast_mode=enums.LossContrastMode.ALL_VIEWS))
self.assertFalse(np.isnan(loss_all.numpy()))
self.assertNotAlmostEqual(
np.mean(loss_all.numpy()), expected_loss, places=6)
def testLossValue(self):
sqrt2 = np.sqrt(2.)
sqrt6 = np.sqrt(6.)
features = np.array([[[0, 0, 1], [0, (2. * sqrt2) / 3., -1 / 3.]],
[[sqrt6 / 3., -sqrt2 / 3., -1. / 3],
[-sqrt6 / 3., -sqrt2 / 3., -1. / 3]]])
loss = losses.contrastive_loss(features)
self.assertFalse(np.isnan(loss.numpy()).any())
expected_loss = 1.098612
self.assertAlmostEqual(np.mean(loss.numpy()), expected_loss, places=6)
def testLossValueWithLabels(self):
sqrt2 = np.sqrt(2.)
sqrt6 = np.sqrt(6.)
features = np.array([[[0, 0, 1], [0, (2. * sqrt2) / 3., -1 / 3.]],
[[sqrt6 / 3., -sqrt2 / 3., -1. / 3],
[-sqrt6 / 3., -sqrt2 / 3., -1. / 3]]])
labels = np.eye(2, dtype=np.int32)
loss = losses.contrastive_loss(features, labels=labels)
self.assertFalse(np.isnan(loss.numpy()).any())
expected_loss = 1.098612
self.assertAlmostEqual(np.mean(loss.numpy()), expected_loss, places=6)
def testLossValueWithLabelsAndPositives(self):
features = np.array([[[0, 0, 1], [0, 0, 1]], [[0, 1, 0], [0, 1, 0]],
[[1, 0, 0], [1, 0, 0]]])
labels = np.eye(3, dtype=np.int32)
labels[1] = labels[0]
loss = losses.contrastive_loss(features, labels).numpy()
self.assertFalse(np.isnan(loss).any())
expected_loss = [
1.57149910,
1.57149910,
0.90483244,
]
self.assertAlmostEqual(loss[0], expected_loss[0], places=6)
self.assertAlmostEqual(loss[1], expected_loss[1], places=6)
self.assertAlmostEqual(loss[2], expected_loss[2], places=6)
def testLossValueWithTemp(self):
sqrt2 = np.sqrt(2.)
sqrt6 = np.sqrt(6.)
features = np.array([[[0, 0, 1], [0, (2. * sqrt2) / 3., -1 / 3.]],
[[sqrt6 / 3., -sqrt2 / 3., -1. / 3],
[-sqrt6 / 3., -sqrt2 / 3., -1. / 3]]])
loss = losses.contrastive_loss(features, temperature=0.1)
self.assertFalse(np.isnan(loss.numpy()).any())
expected_loss = 0.1098612
self.assertAlmostEqual(np.mean(loss.numpy()), expected_loss, places=5)
def testLossValueWithTempNoScaleByTemp(self):
sqrt2 = np.sqrt(2.)
sqrt6 = np.sqrt(6.)
features = np.array([[[0, 0, 1], [0, (2. * sqrt2) / 3., -1 / 3.]],
[[sqrt6 / 3., -sqrt2 / 3., -1. / 3],
[-sqrt6 / 3., -sqrt2 / 3., -1. / 3]]])
loss = losses.contrastive_loss(
features, temperature=0.1, scale_by_temperature=False)
self.assertFalse(np.isnan(loss.numpy()).any())
expected_loss = 1.098612
self.assertAlmostEqual(np.mean(loss.numpy()), expected_loss, places=5)
@parameterized.named_parameters(('1x1 features', (10, 3, 1, 1, 64)),
('3x3 features', (10, 3, 3, 3, 8)),
('16x16 features', (10, 3, 16, 16, 4)),
('rank-3 features', (10, 3, 16, 8)))
def testConvFeatures(self, features_shape):
features_shape = tf.TensorShape(features_shape)
features = tf.random.uniform(shape=features_shape)
normalization_axes = list(range(2, features_shape.rank))
normalized_features = tf.nn.l2_normalize(features, axis=normalization_axes)
loss = tf.reduce_mean(losses.contrastive_loss(normalized_features))
self.assertFalse(np.isnan(loss.numpy()))
@parameterized.named_parameters(
('out_and_all', enums.LossSummationLocation.OUTSIDE,
enums.LossDenominatorMode.ALL, -1, [1.6755852, 1.7973773, 1.58471]),
('out_and_one', enums.LossSummationLocation.OUTSIDE,
enums.LossDenominatorMode.ONE_POSITIVE, -1,
[1.1782627, 1.3435497, 1.58471]),
('out_and_none', enums.LossSummationLocation.OUTSIDE,
enums.LossDenominatorMode.ONLY_NEGATIVES, -1,
[0.79652834, 1.0154991, 1.3529458]),
('out_and_large_cap', enums.LossSummationLocation.OUTSIDE,
enums.LossDenominatorMode.ALL, 4, [1.6755852, 1.7973773, 1.58471]),
('out_and_small_cap', enums.LossSummationLocation.OUTSIDE,
enums.LossDenominatorMode.ALL, 2, [1.1385298, 1.1824025, 1.1557417],
(0, 0, 0)),
('out_and_zero_cap', enums.LossSummationLocation.OUTSIDE,
enums.LossDenominatorMode.ALL, 0, [1.3769293, 1.0785717, 1.58471]),
('in_and_all', enums.LossSummationLocation.INSIDE,
enums.LossDenominatorMode.ALL, -1, [1.6356678, 1.7135872, 1.58471]),
('in_and_one', enums.LossSummationLocation.INSIDE,
enums.LossDenominatorMode.ONE_POSITIVE, -1,
[1.1571673, 1.2941568, 1.58471]),
('in_and_none', enums.LossSummationLocation.INSIDE,
enums.LossDenominatorMode.ONLY_NEGATIVES, -1,
[0.756611, 0.93170905, 1.3529458]),
('in_and_large_cap', enums.LossSummationLocation.INSIDE,
enums.LossDenominatorMode.ALL, 4, [1.6356678, 1.7135872, 1.58471]),
('in_and_small_cap', enums.LossSummationLocation.INSIDE,
enums.LossDenominatorMode.ALL, 2, [1.0986123, 1.0986123, 1.0986123],
(0, 0, 0)),
('in_and_zero_cap', enums.LossSummationLocation.INSIDE,
enums.LossDenominatorMode.ALL, 0, [1.3769293, 1.0785717, 1.58471]))
def testLossForSummationLocationsAndDenominatorModes(self,
summation_location,
denominator_mode,
positives_cap,
expected_loss,
labels=(0, 0, 1)):
features = np.array([
[[0.01, 0.02, 0.14], [0.38, 0.61, 0.50]],
[[0.86, 0.97, 0.33], [0.26, 0.68, 0.45]],
[[0.32, 0.64, 0.28], [0.45, 0.74, 0.73]],
])
labels = tf.one_hot(labels, 2)
loss = losses.contrastive_loss(
features,
labels=labels,
summation_location=summation_location,
denominator_mode=denominator_mode,
positives_cap=positives_cap)
self.assertTupleEqual(loss.numpy().shape, (len(expected_loss),))
for index, (val1, val2) in enumerate(zip(loss.numpy(), expected_loss)):
self.assertAlmostEqual(
val1,
val2,
places=5,
msg=f'Lists not almost equal at index {index}: '
'{loss.numpy()} != {expected_loss}')
def testLossForOneView(self):
features = np.array([
[[0.01, 0.02, 0.14]],
[[0.86, 0.97, 0.33]],
[[0.32, 0.64, 0.28]],
])
labels = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]],
dtype=np.int32)
loss = losses.contrastive_loss(features, labels=labels, temperature=1.0)
pos0 = np.exp(np.dot(features[0, 0, :], features[1, 0, :]))
neg0 = np.exp(np.dot(features[0, 0, :], features[2, 0, :]))
loss0 = -np.log(pos0 / (pos0 + neg0))
pos1 = np.exp(np.dot(features[1, 0, :], features[0, 0, :]))
neg1 = np.exp(np.dot(features[1, 0, :], features[2, 0, :]))
loss1 = -np.log(pos1 / (pos1 + neg1))
expected_loss = np.array([loss0, loss1, 0.0])
self.assertTupleEqual(loss.numpy().shape, expected_loss.shape)
for index, (val1, val2) in enumerate(zip(loss.numpy(), expected_loss)):
self.assertAlmostEqual(
val1,
val2,
places=5,
msg=f'Lists not almost equal at index {index}: '
f'{loss.numpy()} != {expected_loss}')
def testLossOnTPU(self):
# implicitly disables Eager mode within its scope.
with tf.Graph().as_default():
features = tf.constant([
[[0.01, 0.02, 0.14], [0.38, 0.61, 0.50]],
[[0.86, 0.97, 0.33], [0.26, 0.68, 0.45]],
[[0.32, 0.64, 0.28], [0.45, 0.74, 0.73]],
[[0.45, 0.62, 0.07], [0.13, 0.28, 0.91]],
])
labels = tf.one_hot((0, 0, 1, 1), 2)
tpu_result = tf.compat.v1.tpu.replicate(
losses.contrastive_loss,
[[features[:2], labels[:2]], [features[2:], labels[2:]]])
# tpu_result should be a list of 2 lists, each containing a single float
# Tensor with shape [2].
self.assertLen(tpu_result, 2)
self.assertLen(tpu_result[0], 1)
self.assertLen(tpu_result[1], 1)
self.assertEqual([2], tpu_result[0][0].shape.as_list())
self.assertEqual([2], tpu_result[1][0].shape.as_list())
tpu_loss = tf.reshape(tpu_result, [4])
cpu_loss = losses.contrastive_loss(features, labels=labels)
cpu_partial_loss_1 = losses.contrastive_loss(
features[:2], labels=labels[:2])
cpu_partial_loss_2 = losses.contrastive_loss(
features[2:], labels=labels[2:])
cpu_partial_loss = tf.concat([cpu_partial_loss_1, cpu_partial_loss_2],
axis=0)
with self.cached_session() as sess:
sess.run(tf.compat.v1.tpu.initialize_system())
tpu_loss, cpu_loss, cpu_partial_loss = sess.run(
(tpu_loss, cpu_loss, cpu_partial_loss))
print(tpu_loss)
print(cpu_loss)
# Numerical precision isn't so high on TPU.
self.assertAllClose(tpu_loss, cpu_loss, atol=1e-2)
self.assertNotAllClose(tpu_loss, cpu_partial_loss, atol=1e-2)
if __name__ == '__main__':
tf.test.main()
| true | true |
f7f711c01c02b0177cefeee4a613b3e4f7162a98 | 288 | py | Python | Module_system 1.171/header_map_icons.py | Sea-Monster/WarbandModuleSystem | 66c67147692707b85c457db10a112627118733a5 | [
"MIT"
] | 14 | 2018-09-20T23:01:27.000Z | 2021-05-25T11:05:09.000Z | Module_system 1.171/header_map_icons.py | Sea-Monster/WarbandModuleSystem | 66c67147692707b85c457db10a112627118733a5 | [
"MIT"
] | 44 | 2018-09-15T03:05:50.000Z | 2022-03-22T02:46:24.000Z | Module_system 1.171/header_map_icons.py | Sea-Monster/WarbandModuleSystem | 66c67147692707b85c457db10a112627118733a5 | [
"MIT"
] | 13 | 2018-10-02T11:45:24.000Z | 2021-08-22T18:41:44.000Z | ###################################################
# header_map_icons.py
# This file contains declarations for module_map_icons
# DO NOT EDIT THIS FILE!
###################################################
from header_common import *
## map icon flags
mcn_no_shadow = 0x00000001
| 22.153846 | 54 | 0.486111 | true | true | |
f7f7121942955db3d7a66be39b8f8eff225cc10a | 1,665 | py | Python | project1.py | AideenByrne/python-project | 90100295360db810d49cda3f0377fcca3866e33a | [
"Apache-2.0"
] | null | null | null | project1.py | AideenByrne/python-project | 90100295360db810d49cda3f0377fcca3866e33a | [
"Apache-2.0"
] | null | null | null | project1.py | AideenByrne/python-project | 90100295360db810d49cda3f0377fcca3866e33a | [
"Apache-2.0"
] | null | null | null |
#Aideen Byrne 26th March 2018
#Code for investigating Iris Data Set for Programming & Scripting module project
#Select only the rows of the Virginica flowers and assign it to virginica
import pandas as pd #import pandas library
df1 = pd.read_csv("data/iris.csv") #label contents of iris.csv file as dataframe
my_columns = ["Sepal Length", "Sepal Width", "Petal Length", "Petal Width", "Species"] #borrowed from Jeff Tratner at https://stackoverflow.com/questions/17018638/assigning-column-names-from-a-list-to-a-table
df1.columns = my_columns
species = df1[["Species"]] #to select column named Species
virginica = df1.loc[(df1["Species"] == "Iris-virginica")] #select rows that contain virginica flowers in species column
print (virginica)#prints only rows that contain virginica flowers in species column
#Select only the Sepal Length of the Virginica flowers and assign it
vsepallength = virginica["Sepal Length"]
print (vsepallength)
#Calculate the mean, median, variance and standard deviation of the Virginica Sepal Length
print ("The mean of Virginica Sepal Length is", vsepallength.mean())
print ("The median of Virginica Sepal Length is", vsepallength.median())
print ("The variance of Virginica Sepal Length is", vsepallength.var())
print ("The standard deviation of Virginica Sepal Length is", vsepallength.std())
#Select only the numerical columns
selectnumcol = df1[["Sepal Length", "Sepal Width", "Petal Length", "Petal Width"]]
print (selectnumcol)
#Calculate the mean of all the numerical variables
print ("The mean per numerical column is", selectnumcol.mean())
print ("The mean of all numerical columns is", selectnumcol.mean().mean())
| 53.709677 | 208 | 0.771171 |
import pandas as pd
df1 = pd.read_csv("data/iris.csv")
my_columns = ["Sepal Length", "Sepal Width", "Petal Length", "Petal Width", "Species"]
df1.columns = my_columns
species = df1[["Species"]]
virginica = df1.loc[(df1["Species"] == "Iris-virginica")]
print (virginica)
vsepallength = virginica["Sepal Length"]
print (vsepallength)
print ("The mean of Virginica Sepal Length is", vsepallength.mean())
print ("The median of Virginica Sepal Length is", vsepallength.median())
print ("The variance of Virginica Sepal Length is", vsepallength.var())
print ("The standard deviation of Virginica Sepal Length is", vsepallength.std())
selectnumcol = df1[["Sepal Length", "Sepal Width", "Petal Length", "Petal Width"]]
print (selectnumcol)
print ("The mean per numerical column is", selectnumcol.mean())
print ("The mean of all numerical columns is", selectnumcol.mean().mean())
| true | true |
f7f712a4bfdda3ce48839e69af5b1f695fc2bfe5 | 877 | py | Python | farmer/urls.py | manulangat1/farmer | b71041fe8d961d815c1a70fb2be4357008d3a05f | [
"MIT"
] | 2 | 2020-02-06T20:54:00.000Z | 2020-02-10T08:37:28.000Z | farmer/urls.py | manulangat1/farmer | b71041fe8d961d815c1a70fb2be4357008d3a05f | [
"MIT"
] | 7 | 2020-06-05T19:24:13.000Z | 2022-03-11T23:32:22.000Z | farmer/urls.py | manulangat1/farmer | b71041fe8d961d815c1a70fb2be4357008d3a05f | [
"MIT"
] | null | null | null | """farmer URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'',include('farmer1.urls')),
url(r'^accounts/',include('registration.backends.simple.urls'))
]
| 36.541667 | 79 | 0.701254 | from django.conf.urls import url,include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'',include('farmer1.urls')),
url(r'^accounts/',include('registration.backends.simple.urls'))
]
| true | true |
f7f7136342fed3982ffb0acb7a12a76faa329167 | 4,916 | py | Python | nhentai/cmdline.py | violetdarkness/nhentai | c95ecdded4d15e47e520a7371f62fc8820e95418 | [
"MIT"
] | null | null | null | nhentai/cmdline.py | violetdarkness/nhentai | c95ecdded4d15e47e520a7371f62fc8820e95418 | [
"MIT"
] | null | null | null | nhentai/cmdline.py | violetdarkness/nhentai | c95ecdded4d15e47e520a7371f62fc8820e95418 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import print_function
import sys
from optparse import OptionParser
from nhentai import __version__
try:
from itertools import ifilter as filter
except ImportError:
pass
import nhentai.constant as constant
from nhentai.utils import urlparse, generate_html
from nhentai.logger import logger
try:
if sys.version_info < (3, 0, 0):
import codecs
import locale
sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
sys.stderr = codecs.getwriter(locale.getpreferredencoding())(sys.stderr)
except NameError:
# python3
pass
def banner():
logger.info(u'''nHentai ver %s: あなたも変態。 いいね?
_ _ _ _
_ __ | | | | ___ _ __ | |_ __ _(_)
| '_ \| |_| |/ _ \ '_ \| __/ _` | |
| | | | _ | __/ | | | || (_| | |
|_| |_|_| |_|\___|_| |_|\__\__,_|_|
''' % __version__)
def cmd_parser():
parser = OptionParser('\n nhentai --search [keyword] --download'
'\n NHENTAI=http://h.loli.club nhentai --id [ID ...]'
'\n\nEnvironment Variable:\n'
' NHENTAI nhentai mirror url')
parser.add_option('--download', dest='is_download', action='store_true',
help='download doujinshi (for search results)')
parser.add_option('--show-info', dest='is_show', action='store_true', help='just show the doujinshi information')
parser.add_option('--id', type='string', dest='id', action='store', help='doujinshi ids set, e.g. 1,2,3')
parser.add_option('--search', type='string', dest='keyword', action='store', help='search doujinshi by keyword')
parser.add_option('--page', type='int', dest='page', action='store', default=1,
help='page number of search results')
parser.add_option('--tag', type='string', dest='tag', action='store', help='download doujinshi by tag')
parser.add_option('--max-page', type='int', dest='max_page', action='store', default=1,
help='The max page when recursive download tagged doujinshi')
parser.add_option('--output', type='string', dest='output_dir', action='store', default='',
help='output dir')
parser.add_option('--threads', '-t', type='int', dest='threads', action='store', default=5,
help='thread count for downloading doujinshi')
parser.add_option('--timeout', type='int', dest='timeout', action='store', default=30,
help='timeout for downloading doujinshi')
parser.add_option('--proxy', type='string', dest='proxy', action='store', default='',
help='uses a proxy, for example: http://127.0.0.1:1080')
parser.add_option('--html', dest='html_viewer', action='store_true',
help='generate a html viewer at current directory')
parser.add_option('--login', '-l', type='str', dest='login', action='store',
help='username:password pair of nhentai account')
parser.add_option('--nohtml', dest='is_nohtml', action='store_true',
help='Don\'t generate HTML')
parser.add_option('--cbz', dest='is_cbz', action='store_true',
help='Generate Comic Book CBZ File')
try:
sys.argv = list(map(lambda x: unicode(x.decode(sys.stdin.encoding)), sys.argv))
except (NameError, TypeError):
pass
except UnicodeDecodeError:
exit(0)
args, _ = parser.parse_args(sys.argv[1:])
if args.html_viewer:
generate_html()
exit(0)
if args.login:
try:
_, _ = args.login.split(':', 1)
except ValueError:
logger.error('Invalid `username:password` pair.')
exit(1)
if not args.is_download:
logger.warning('YOU DO NOT SPECIFY `--download` OPTION !!!')
if args.id:
_ = map(lambda id: id.strip(), args.id.split(','))
args.id = set(map(int, filter(lambda id_: id_.isdigit(), _)))
if (args.is_download or args.is_show) and not args.id and not args.keyword and \
not args.login and not args.tag:
logger.critical('Doujinshi id(s) are required for downloading')
parser.print_help()
exit(1)
if not args.keyword and not args.id and not args.login and not args.tag:
parser.print_help()
exit(1)
if args.threads <= 0:
args.threads = 1
elif args.threads > 15:
logger.critical('Maximum number of used threads is 15')
exit(1)
if args.proxy:
proxy_url = urlparse(args.proxy)
if proxy_url.scheme not in ('http', 'https'):
logger.error('Invalid protocol \'{0}\' of proxy, ignored'.format(proxy_url.scheme))
else:
constant.PROXY = {'http': args.proxy, 'https': args.proxy}
return args
| 39.645161 | 117 | 0.591538 |
from __future__ import print_function
import sys
from optparse import OptionParser
from nhentai import __version__
try:
from itertools import ifilter as filter
except ImportError:
pass
import nhentai.constant as constant
from nhentai.utils import urlparse, generate_html
from nhentai.logger import logger
try:
if sys.version_info < (3, 0, 0):
import codecs
import locale
sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
sys.stderr = codecs.getwriter(locale.getpreferredencoding())(sys.stderr)
except NameError:
pass
def banner():
logger.info(u'''nHentai ver %s: あなたも変態。 いいね?
_ _ _ _
_ __ | | | | ___ _ __ | |_ __ _(_)
| '_ \| |_| |/ _ \ '_ \| __/ _` | |
| | | | _ | __/ | | | || (_| | |
|_| |_|_| |_|\___|_| |_|\__\__,_|_|
''' % __version__)
def cmd_parser():
parser = OptionParser('\n nhentai --search [keyword] --download'
'\n NHENTAI=http://h.loli.club nhentai --id [ID ...]'
'\n\nEnvironment Variable:\n'
' NHENTAI nhentai mirror url')
parser.add_option('--download', dest='is_download', action='store_true',
help='download doujinshi (for search results)')
parser.add_option('--show-info', dest='is_show', action='store_true', help='just show the doujinshi information')
parser.add_option('--id', type='string', dest='id', action='store', help='doujinshi ids set, e.g. 1,2,3')
parser.add_option('--search', type='string', dest='keyword', action='store', help='search doujinshi by keyword')
parser.add_option('--page', type='int', dest='page', action='store', default=1,
help='page number of search results')
parser.add_option('--tag', type='string', dest='tag', action='store', help='download doujinshi by tag')
parser.add_option('--max-page', type='int', dest='max_page', action='store', default=1,
help='The max page when recursive download tagged doujinshi')
parser.add_option('--output', type='string', dest='output_dir', action='store', default='',
help='output dir')
parser.add_option('--threads', '-t', type='int', dest='threads', action='store', default=5,
help='thread count for downloading doujinshi')
parser.add_option('--timeout', type='int', dest='timeout', action='store', default=30,
help='timeout for downloading doujinshi')
parser.add_option('--proxy', type='string', dest='proxy', action='store', default='',
help='uses a proxy, for example: http://127.0.0.1:1080')
parser.add_option('--html', dest='html_viewer', action='store_true',
help='generate a html viewer at current directory')
parser.add_option('--login', '-l', type='str', dest='login', action='store',
help='username:password pair of nhentai account')
parser.add_option('--nohtml', dest='is_nohtml', action='store_true',
help='Don\'t generate HTML')
parser.add_option('--cbz', dest='is_cbz', action='store_true',
help='Generate Comic Book CBZ File')
try:
sys.argv = list(map(lambda x: unicode(x.decode(sys.stdin.encoding)), sys.argv))
except (NameError, TypeError):
pass
except UnicodeDecodeError:
exit(0)
args, _ = parser.parse_args(sys.argv[1:])
if args.html_viewer:
generate_html()
exit(0)
if args.login:
try:
_, _ = args.login.split(':', 1)
except ValueError:
logger.error('Invalid `username:password` pair.')
exit(1)
if not args.is_download:
logger.warning('YOU DO NOT SPECIFY `--download` OPTION !!!')
if args.id:
_ = map(lambda id: id.strip(), args.id.split(','))
args.id = set(map(int, filter(lambda id_: id_.isdigit(), _)))
if (args.is_download or args.is_show) and not args.id and not args.keyword and \
not args.login and not args.tag:
logger.critical('Doujinshi id(s) are required for downloading')
parser.print_help()
exit(1)
if not args.keyword and not args.id and not args.login and not args.tag:
parser.print_help()
exit(1)
if args.threads <= 0:
args.threads = 1
elif args.threads > 15:
logger.critical('Maximum number of used threads is 15')
exit(1)
if args.proxy:
proxy_url = urlparse(args.proxy)
if proxy_url.scheme not in ('http', 'https'):
logger.error('Invalid protocol \'{0}\' of proxy, ignored'.format(proxy_url.scheme))
else:
constant.PROXY = {'http': args.proxy, 'https': args.proxy}
return args
| true | true |
f7f7137e588ca6e9ed60088050026169f4a815e3 | 269 | py | Python | sms_extras/sms_extras/doctype/sms_recipient_list_number/sms_recipient_list_number.py | libermatic/sms_extras | 8e94013b26970edda2c80c6d4e9ecd515e9a1fa7 | [
"MIT"
] | 4 | 2020-08-26T23:08:26.000Z | 2022-02-12T13:20:30.000Z | sms_extras/sms_extras/doctype/sms_recipient_list_number/sms_recipient_list_number.py | libermatic/sms_extras | 8e94013b26970edda2c80c6d4e9ecd515e9a1fa7 | [
"MIT"
] | null | null | null | sms_extras/sms_extras/doctype/sms_recipient_list_number/sms_recipient_list_number.py | libermatic/sms_extras | 8e94013b26970edda2c80c6d4e9ecd515e9a1fa7 | [
"MIT"
] | 3 | 2020-09-13T16:12:35.000Z | 2022-03-03T05:14:43.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Libermatic and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class SMSRecipientListNumber(Document):
pass
| 24.454545 | 49 | 0.791822 |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class SMSRecipientListNumber(Document):
pass
| true | true |
f7f713875b309e22d774f50051288268b0691c73 | 620 | py | Python | Task2B.py | PerryLewis01/FloodWarning | 6bdce9277755ef0826789eb193ddf3f7f3457ab7 | [
"MIT"
] | null | null | null | Task2B.py | PerryLewis01/FloodWarning | 6bdce9277755ef0826789eb193ddf3f7f3457ab7 | [
"MIT"
] | null | null | null | Task2B.py | PerryLewis01/FloodWarning | 6bdce9277755ef0826789eb193ddf3f7f3457ab7 | [
"MIT"
] | null | null | null | from floodsystem.flood import stations_level_over_threshold
from floodsystem.stationdata import build_station_list, update_water_levels
def run():
# Build list of stations
stations = build_station_list()
# Update latest level data for all stations
update_water_levels(stations)
for i in stations_level_over_threshold(stations, 0.8):
print("{} {}".format(i[0].name, i[1]))
if len(stations_level_over_threshold(stations, 0.8)) == 0 :
print("No stations over tolerance")
if __name__ == "__main__":
print("*** Task 2B: CUED Part IA Flood Warning System ***")
run()
| 29.52381 | 75 | 0.7 | from floodsystem.flood import stations_level_over_threshold
from floodsystem.stationdata import build_station_list, update_water_levels
def run():
stations = build_station_list()
update_water_levels(stations)
for i in stations_level_over_threshold(stations, 0.8):
print("{} {}".format(i[0].name, i[1]))
if len(stations_level_over_threshold(stations, 0.8)) == 0 :
print("No stations over tolerance")
if __name__ == "__main__":
print("*** Task 2B: CUED Part IA Flood Warning System ***")
run()
| true | true |
f7f71465bf2f2a6598ac225bae59bb7d73a52b14 | 22,672 | py | Python | manila/tests/share/drivers/hdfs/test_hdfs_native.py | mail2nsrajesh/manila | ec052a6cff2ddc722e3b78ab208801e6b94ab55a | [
"Apache-2.0"
] | null | null | null | manila/tests/share/drivers/hdfs/test_hdfs_native.py | mail2nsrajesh/manila | ec052a6cff2ddc722e3b78ab208801e6b94ab55a | [
"Apache-2.0"
] | null | null | null | manila/tests/share/drivers/hdfs/test_hdfs_native.py | mail2nsrajesh/manila | ec052a6cff2ddc722e3b78ab208801e6b94ab55a | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Intel, Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for HDFS native protocol driver module."""
import socket
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
import six
from manila import context
from manila import exception
import manila.share.configuration as config
import manila.share.drivers.hdfs.hdfs_native as hdfs_native
from manila import test
from manila.tests import fake_share
from manila import utils
CONF = cfg.CONF
class HDFSNativeShareDriverTestCase(test.TestCase):
"""Tests HDFSNativeShareDriver."""
def setUp(self):
super(HDFSNativeShareDriverTestCase, self).setUp()
self._context = context.get_admin_context()
self._hdfs_execute = mock.Mock(return_value=('', ''))
self.local_ip = '192.168.1.1'
CONF.set_default('driver_handles_share_servers', False)
CONF.set_default('hdfs_namenode_ip', self.local_ip)
CONF.set_default('hdfs_ssh_name', 'fake_sshname')
CONF.set_default('hdfs_ssh_pw', 'fake_sshpw')
CONF.set_default('hdfs_ssh_private_key', 'fake_sshkey')
self.fake_conf = config.Configuration(None)
self._driver = hdfs_native.HDFSNativeShareDriver(
execute=self._hdfs_execute,
configuration=self.fake_conf)
self.hdfs_bin = 'hdfs'
self._driver._hdfs_bin = 'fake_hdfs_bin'
self.share = fake_share.fake_share(share_proto='HDFS')
self.snapshot = fake_share.fake_snapshot(share_proto='HDFS')
self.access = fake_share.fake_access(access_type='user')
self.fakesharepath = 'hdfs://1.2.3.4:5/share-0'
self.fakesnapshotpath = '/share-0/.snapshot/snapshot-0'
socket.gethostname = mock.Mock(return_value='testserver')
socket.gethostbyname_ex = mock.Mock(return_value=(
'localhost',
['localhost.localdomain', 'testserver'],
['127.0.0.1', self.local_ip]))
def test_do_setup(self):
self._driver.do_setup(self._context)
self.assertEqual(self._driver._hdfs_bin, self.hdfs_bin)
def test_create_share(self):
self._driver._create_share = mock.Mock()
self._driver._get_share_path = mock.Mock(
return_value=self.fakesharepath)
result = self._driver.create_share(self._context, self.share,
share_server=None)
self._driver._create_share.assert_called_once_with(self.share)
self._driver._get_share_path.assert_called_once_with(self.share)
self.assertEqual(self.fakesharepath, result)
def test_create_share_unsupported_proto(self):
self._driver._get_share_path = mock.Mock()
self.assertRaises(exception.HDFSException,
self._driver.create_share,
self._context,
fake_share.fake_share(),
share_server=None)
self.assertFalse(self._driver._get_share_path.called)
def test__set_share_size(self):
share_dir = '/' + self.share['name']
sizestr = six.text_type(self.share['size']) + 'g'
self._driver._hdfs_execute = mock.Mock(return_value=True)
self._driver._set_share_size(self.share)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfsadmin', '-setSpaceQuota', sizestr, share_dir)
def test__set_share_size_exception(self):
share_dir = '/' + self.share['name']
sizestr = six.text_type(self.share['size']) + 'g'
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver._set_share_size, self.share)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfsadmin', '-setSpaceQuota', sizestr, share_dir)
def test__set_share_size_with_new_size(self):
share_dir = '/' + self.share['name']
new_size = 'fake_size'
sizestr = new_size + 'g'
self._driver._hdfs_execute = mock.Mock(return_value=True)
self._driver._set_share_size(self.share, new_size)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfsadmin', '-setSpaceQuota', sizestr, share_dir)
def test__create_share(self):
share_dir = '/' + self.share['name']
self._driver._hdfs_execute = mock.Mock(return_value=True)
self._driver._set_share_size = mock.Mock()
self._driver._create_share(self.share)
self._driver._hdfs_execute.assert_any_call(
'fake_hdfs_bin', 'dfs', '-mkdir', share_dir)
self._driver._set_share_size.assert_called_once_with(self.share)
self._driver._hdfs_execute.assert_any_call(
'fake_hdfs_bin', 'dfsadmin', '-allowSnapshot', share_dir)
def test__create_share_exception(self):
share_dir = '/' + self.share['name']
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver._create_share, self.share)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-mkdir', share_dir)
def test_create_share_from_empty_snapshot(self):
return_hdfs_execute = (None, None)
self._driver._hdfs_execute = mock.Mock(
return_value=return_hdfs_execute)
self._driver._create_share = mock.Mock(return_value=True)
self._driver._get_share_path = mock.Mock(return_value=self.
fakesharepath)
self._driver._get_snapshot_path = mock.Mock(return_value=self.
fakesnapshotpath)
result = self._driver.create_share_from_snapshot(self._context,
self.share,
self.snapshot,
share_server=None)
self._driver._create_share.assert_called_once_with(self.share)
self._driver._get_snapshot_path.assert_called_once_with(
self.snapshot)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-ls', self.fakesnapshotpath)
self._driver._get_share_path.assert_called_once_with(self.share)
self.assertEqual(self.fakesharepath, result)
def test_create_share_from_snapshot(self):
return_hdfs_execute = ("fake_content", None)
self._driver._hdfs_execute = mock.Mock(
return_value=return_hdfs_execute)
self._driver._create_share = mock.Mock(return_value=True)
self._driver._get_share_path = mock.Mock(return_value=self.
fakesharepath)
self._driver._get_snapshot_path = mock.Mock(return_value=self.
fakesnapshotpath)
result = self._driver.create_share_from_snapshot(self._context,
self.share,
self.snapshot,
share_server=None)
self._driver._create_share.assert_called_once_with(self.share)
self._driver._get_snapshot_path.assert_called_once_with(
self.snapshot)
calls = [mock.call('fake_hdfs_bin', 'dfs',
'-ls', self.fakesnapshotpath),
mock.call('fake_hdfs_bin', 'dfs', '-cp',
self.fakesnapshotpath + '/*',
'/' + self.share['name'])]
self._driver._hdfs_execute.assert_has_calls(calls)
self._driver._get_share_path.assert_called_once_with(self.share)
self.assertEqual(self.fakesharepath, result)
def test_create_share_from_snapshot_exception(self):
self._driver._create_share = mock.Mock(return_value=True)
self._driver._get_snapshot_path = mock.Mock(return_value=self.
fakesnapshotpath)
self._driver._get_share_path = mock.Mock(return_value=self.
fakesharepath)
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver.create_share_from_snapshot,
self._context, self.share,
self.snapshot, share_server=None)
self._driver._create_share.assert_called_once_with(self.share)
self._driver._get_snapshot_path.assert_called_once_with(self.snapshot)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-ls', self.fakesnapshotpath)
self.assertFalse(self._driver._get_share_path.called)
def test_create_snapshot(self):
self._driver._hdfs_execute = mock.Mock(return_value=True)
self._driver.create_snapshot(self._context, self.snapshot,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-createSnapshot',
'/' + self.snapshot['share_name'], self.snapshot['name'])
def test_create_snapshot_exception(self):
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver.create_snapshot, self._context,
self.snapshot, share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-createSnapshot',
'/' + self.snapshot['share_name'], self.snapshot['name'])
def test_delete_share(self):
self._driver._hdfs_execute = mock.Mock(return_value=True)
self._driver.delete_share(self._context,
self.share,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-rm', '-r',
'/' + self.share['name'])
def test_delete_share_exception(self):
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver.delete_share,
self._context,
self.share,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-rm', '-r',
'/' + self.share['name'])
def test_delete_snapshot(self):
self._driver._hdfs_execute = mock.Mock(return_value=True)
self._driver.delete_snapshot(self._context,
self.snapshot,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-deleteSnapshot',
'/' + self.snapshot['share_name'], self.snapshot['name'])
def test_delete_snapshot_exception(self):
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver.delete_snapshot,
self._context,
self.snapshot,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-deleteSnapshot',
'/' + self.snapshot['share_name'], self.snapshot['name'])
def test_allow_access(self):
self._driver._hdfs_execute = mock.Mock(
return_value=['', ''])
share_dir = '/' + self.share['name']
user_access = ':'.join([self.access['access_type'],
self.access['access_to'],
'rwx'])
cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-m', '-R',
user_access, share_dir]
self._driver.allow_access(self._context, self.share, self.access,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
*cmd, check_exit_code=True)
def test_allow_access_invalid_access_type(self):
self.assertRaises(exception.InvalidShareAccess,
self._driver.allow_access,
self._context,
self.share,
fake_share.fake_access(
access_type='invalid_access_type'),
share_server=None)
def test_allow_access_invalid_access_level(self):
self.assertRaises(exception.InvalidShareAccess,
self._driver.allow_access,
self._context,
self.share,
fake_share.fake_access(
access_level='invalid_access_level'),
share_server=None)
def test_allow_access_exception(self):
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
share_dir = '/' + self.share['name']
user_access = ':'.join([self.access['access_type'],
self.access['access_to'],
'rwx'])
cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-m', '-R',
user_access, share_dir]
self.assertRaises(exception.HDFSException,
self._driver.allow_access,
self._context,
self.share,
self.access,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
*cmd, check_exit_code=True)
def test_deny_access(self):
self._driver._hdfs_execute = mock.Mock(return_value=['', ''])
share_dir = '/' + self.share['name']
access_name = ':'.join([self.access['access_type'],
self.access['access_to']])
cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-x', '-R',
access_name, share_dir]
self._driver.deny_access(self._context,
self.share,
self.access,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
*cmd, check_exit_code=True)
def test_deny_access_exception(self):
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
share_dir = '/' + self.share['name']
access_name = ':'.join([self.access['access_type'],
self.access['access_to']])
cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-x', '-R',
access_name, share_dir]
self.assertRaises(exception.HDFSException,
self._driver.deny_access,
self._context,
self.share,
self.access,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
*cmd, check_exit_code=True)
def test_extend_share(self):
new_size = "fake_size"
self._driver._set_share_size = mock.Mock()
self._driver.extend_share(self.share, new_size)
self._driver._set_share_size.assert_called_once_with(
self.share, new_size)
def test__check_hdfs_state_healthy(self):
fake_out = "fakeinfo\n...Status: HEALTHY"
self._driver._hdfs_execute = mock.Mock(return_value=(fake_out, ''))
result = self._driver._check_hdfs_state()
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'fsck', '/')
self.assertTrue(result)
def test__check_hdfs_state_down(self):
fake_out = "fakeinfo\n...Status: DOWN"
self._driver._hdfs_execute = mock.Mock(return_value=(fake_out, ''))
result = self._driver._check_hdfs_state()
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'fsck', '/')
self.assertFalse(result)
def test__check_hdfs_state_exception(self):
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver._check_hdfs_state)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'fsck', '/')
def test__get_available_capacity(self):
fake_out = ('Configured Capacity: 2.4\n' +
'Total Capacity: 2\n' +
'DFS free: 1')
self._driver._hdfs_execute = mock.Mock(return_value=(fake_out, ''))
total, free = self._driver._get_available_capacity()
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfsadmin', '-report')
self.assertEqual(2, total)
self.assertEqual(1, free)
def test__get_available_capacity_exception(self):
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver._get_available_capacity)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfsadmin', '-report')
def test_get_share_stats_refresh_false(self):
self._driver._stats = {'fake_key': 'fake_value'}
result = self._driver.get_share_stats(False)
self.assertEqual(self._driver._stats, result)
def test_get_share_stats_refresh_true(self):
self._driver._get_available_capacity = mock.Mock(
return_value=(11111.0, 12345.0))
result = self._driver.get_share_stats(True)
expected_keys = [
'qos', 'driver_version', 'share_backend_name',
'free_capacity_gb', 'total_capacity_gb',
'driver_handles_share_servers',
'reserved_percentage', 'vendor_name', 'storage_protocol',
]
for key in expected_keys:
self.assertIn(key, result)
self.assertEqual('HDFS', result['storage_protocol'])
self._driver._get_available_capacity.assert_called_once_with()
def test__hdfs_local_execute(self):
cmd = 'testcmd'
self.mock_object(utils, 'execute', mock.Mock(return_value=True))
self._driver._hdfs_local_execute(cmd)
utils.execute.assert_called_once_with(cmd, run_as_root=False)
def test__hdfs_remote_execute(self):
self._driver._run_ssh = mock.Mock(return_value=True)
cmd = 'testcmd'
self._driver._hdfs_remote_execute(cmd, check_exit_code=True)
self._driver._run_ssh.assert_called_once_with(
self.local_ip, tuple([cmd]), True)
def test__run_ssh(self):
ssh_output = 'fake_ssh_output'
cmd_list = ['fake', 'cmd']
ssh = mock.Mock()
ssh.get_transport = mock.Mock()
ssh.get_transport().is_active = mock.Mock(return_value=True)
ssh_pool = mock.Mock()
ssh_pool.create = mock.Mock(return_value=ssh)
self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool))
self.mock_object(processutils, 'ssh_execute',
mock.Mock(return_value=ssh_output))
result = self._driver._run_ssh(self.local_ip, cmd_list)
utils.SSHPool.assert_called_once_with(
self._driver.configuration.hdfs_namenode_ip,
self._driver.configuration.hdfs_ssh_port,
self._driver.configuration.ssh_conn_timeout,
self._driver.configuration.hdfs_ssh_name,
password=self._driver.configuration.hdfs_ssh_pw,
privatekey=self._driver.configuration.hdfs_ssh_private_key,
min_size=self._driver.configuration.ssh_min_pool_conn,
max_size=self._driver.configuration.ssh_max_pool_conn)
ssh_pool.create.assert_called_once_with()
ssh.get_transport().is_active.assert_called_once_with()
processutils.ssh_execute.assert_called_once_with(
ssh, 'fake cmd', check_exit_code=False)
self.assertEqual(ssh_output, result)
def test__run_ssh_exception(self):
cmd_list = ['fake', 'cmd']
ssh = mock.Mock()
ssh.get_transport = mock.Mock()
ssh.get_transport().is_active = mock.Mock(return_value=True)
ssh_pool = mock.Mock()
ssh_pool.create = mock.Mock(return_value=ssh)
self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool))
self.mock_object(processutils, 'ssh_execute',
mock.Mock(side_effect=Exception))
self.assertRaises(exception.HDFSException,
self._driver._run_ssh,
self.local_ip,
cmd_list)
utils.SSHPool.assert_called_once_with(
self._driver.configuration.hdfs_namenode_ip,
self._driver.configuration.hdfs_ssh_port,
self._driver.configuration.ssh_conn_timeout,
self._driver.configuration.hdfs_ssh_name,
password=self._driver.configuration.hdfs_ssh_pw,
privatekey=self._driver.configuration.hdfs_ssh_private_key,
min_size=self._driver.configuration.ssh_min_pool_conn,
max_size=self._driver.configuration.ssh_max_pool_conn)
ssh_pool.create.assert_called_once_with()
ssh.get_transport().is_active.assert_called_once_with()
processutils.ssh_execute.assert_called_once_with(
ssh, 'fake cmd', check_exit_code=False)
| 46.746392 | 78 | 0.615914 |
import socket
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
import six
from manila import context
from manila import exception
import manila.share.configuration as config
import manila.share.drivers.hdfs.hdfs_native as hdfs_native
from manila import test
from manila.tests import fake_share
from manila import utils
CONF = cfg.CONF
class HDFSNativeShareDriverTestCase(test.TestCase):
def setUp(self):
super(HDFSNativeShareDriverTestCase, self).setUp()
self._context = context.get_admin_context()
self._hdfs_execute = mock.Mock(return_value=('', ''))
self.local_ip = '192.168.1.1'
CONF.set_default('driver_handles_share_servers', False)
CONF.set_default('hdfs_namenode_ip', self.local_ip)
CONF.set_default('hdfs_ssh_name', 'fake_sshname')
CONF.set_default('hdfs_ssh_pw', 'fake_sshpw')
CONF.set_default('hdfs_ssh_private_key', 'fake_sshkey')
self.fake_conf = config.Configuration(None)
self._driver = hdfs_native.HDFSNativeShareDriver(
execute=self._hdfs_execute,
configuration=self.fake_conf)
self.hdfs_bin = 'hdfs'
self._driver._hdfs_bin = 'fake_hdfs_bin'
self.share = fake_share.fake_share(share_proto='HDFS')
self.snapshot = fake_share.fake_snapshot(share_proto='HDFS')
self.access = fake_share.fake_access(access_type='user')
self.fakesharepath = 'hdfs://1.2.3.4:5/share-0'
self.fakesnapshotpath = '/share-0/.snapshot/snapshot-0'
socket.gethostname = mock.Mock(return_value='testserver')
socket.gethostbyname_ex = mock.Mock(return_value=(
'localhost',
['localhost.localdomain', 'testserver'],
['127.0.0.1', self.local_ip]))
def test_do_setup(self):
self._driver.do_setup(self._context)
self.assertEqual(self._driver._hdfs_bin, self.hdfs_bin)
def test_create_share(self):
self._driver._create_share = mock.Mock()
self._driver._get_share_path = mock.Mock(
return_value=self.fakesharepath)
result = self._driver.create_share(self._context, self.share,
share_server=None)
self._driver._create_share.assert_called_once_with(self.share)
self._driver._get_share_path.assert_called_once_with(self.share)
self.assertEqual(self.fakesharepath, result)
def test_create_share_unsupported_proto(self):
self._driver._get_share_path = mock.Mock()
self.assertRaises(exception.HDFSException,
self._driver.create_share,
self._context,
fake_share.fake_share(),
share_server=None)
self.assertFalse(self._driver._get_share_path.called)
def test__set_share_size(self):
share_dir = '/' + self.share['name']
sizestr = six.text_type(self.share['size']) + 'g'
self._driver._hdfs_execute = mock.Mock(return_value=True)
self._driver._set_share_size(self.share)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfsadmin', '-setSpaceQuota', sizestr, share_dir)
def test__set_share_size_exception(self):
share_dir = '/' + self.share['name']
sizestr = six.text_type(self.share['size']) + 'g'
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver._set_share_size, self.share)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfsadmin', '-setSpaceQuota', sizestr, share_dir)
def test__set_share_size_with_new_size(self):
share_dir = '/' + self.share['name']
new_size = 'fake_size'
sizestr = new_size + 'g'
self._driver._hdfs_execute = mock.Mock(return_value=True)
self._driver._set_share_size(self.share, new_size)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfsadmin', '-setSpaceQuota', sizestr, share_dir)
def test__create_share(self):
share_dir = '/' + self.share['name']
self._driver._hdfs_execute = mock.Mock(return_value=True)
self._driver._set_share_size = mock.Mock()
self._driver._create_share(self.share)
self._driver._hdfs_execute.assert_any_call(
'fake_hdfs_bin', 'dfs', '-mkdir', share_dir)
self._driver._set_share_size.assert_called_once_with(self.share)
self._driver._hdfs_execute.assert_any_call(
'fake_hdfs_bin', 'dfsadmin', '-allowSnapshot', share_dir)
def test__create_share_exception(self):
share_dir = '/' + self.share['name']
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver._create_share, self.share)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-mkdir', share_dir)
def test_create_share_from_empty_snapshot(self):
return_hdfs_execute = (None, None)
self._driver._hdfs_execute = mock.Mock(
return_value=return_hdfs_execute)
self._driver._create_share = mock.Mock(return_value=True)
self._driver._get_share_path = mock.Mock(return_value=self.
fakesharepath)
self._driver._get_snapshot_path = mock.Mock(return_value=self.
fakesnapshotpath)
result = self._driver.create_share_from_snapshot(self._context,
self.share,
self.snapshot,
share_server=None)
self._driver._create_share.assert_called_once_with(self.share)
self._driver._get_snapshot_path.assert_called_once_with(
self.snapshot)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-ls', self.fakesnapshotpath)
self._driver._get_share_path.assert_called_once_with(self.share)
self.assertEqual(self.fakesharepath, result)
def test_create_share_from_snapshot(self):
return_hdfs_execute = ("fake_content", None)
self._driver._hdfs_execute = mock.Mock(
return_value=return_hdfs_execute)
self._driver._create_share = mock.Mock(return_value=True)
self._driver._get_share_path = mock.Mock(return_value=self.
fakesharepath)
self._driver._get_snapshot_path = mock.Mock(return_value=self.
fakesnapshotpath)
result = self._driver.create_share_from_snapshot(self._context,
self.share,
self.snapshot,
share_server=None)
self._driver._create_share.assert_called_once_with(self.share)
self._driver._get_snapshot_path.assert_called_once_with(
self.snapshot)
calls = [mock.call('fake_hdfs_bin', 'dfs',
'-ls', self.fakesnapshotpath),
mock.call('fake_hdfs_bin', 'dfs', '-cp',
self.fakesnapshotpath + '/*',
'/' + self.share['name'])]
self._driver._hdfs_execute.assert_has_calls(calls)
self._driver._get_share_path.assert_called_once_with(self.share)
self.assertEqual(self.fakesharepath, result)
def test_create_share_from_snapshot_exception(self):
self._driver._create_share = mock.Mock(return_value=True)
self._driver._get_snapshot_path = mock.Mock(return_value=self.
fakesnapshotpath)
self._driver._get_share_path = mock.Mock(return_value=self.
fakesharepath)
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver.create_share_from_snapshot,
self._context, self.share,
self.snapshot, share_server=None)
self._driver._create_share.assert_called_once_with(self.share)
self._driver._get_snapshot_path.assert_called_once_with(self.snapshot)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-ls', self.fakesnapshotpath)
self.assertFalse(self._driver._get_share_path.called)
def test_create_snapshot(self):
self._driver._hdfs_execute = mock.Mock(return_value=True)
self._driver.create_snapshot(self._context, self.snapshot,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-createSnapshot',
'/' + self.snapshot['share_name'], self.snapshot['name'])
def test_create_snapshot_exception(self):
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver.create_snapshot, self._context,
self.snapshot, share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-createSnapshot',
'/' + self.snapshot['share_name'], self.snapshot['name'])
def test_delete_share(self):
self._driver._hdfs_execute = mock.Mock(return_value=True)
self._driver.delete_share(self._context,
self.share,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-rm', '-r',
'/' + self.share['name'])
def test_delete_share_exception(self):
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver.delete_share,
self._context,
self.share,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-rm', '-r',
'/' + self.share['name'])
def test_delete_snapshot(self):
self._driver._hdfs_execute = mock.Mock(return_value=True)
self._driver.delete_snapshot(self._context,
self.snapshot,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-deleteSnapshot',
'/' + self.snapshot['share_name'], self.snapshot['name'])
def test_delete_snapshot_exception(self):
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver.delete_snapshot,
self._context,
self.snapshot,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfs', '-deleteSnapshot',
'/' + self.snapshot['share_name'], self.snapshot['name'])
def test_allow_access(self):
self._driver._hdfs_execute = mock.Mock(
return_value=['', ''])
share_dir = '/' + self.share['name']
user_access = ':'.join([self.access['access_type'],
self.access['access_to'],
'rwx'])
cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-m', '-R',
user_access, share_dir]
self._driver.allow_access(self._context, self.share, self.access,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
*cmd, check_exit_code=True)
def test_allow_access_invalid_access_type(self):
self.assertRaises(exception.InvalidShareAccess,
self._driver.allow_access,
self._context,
self.share,
fake_share.fake_access(
access_type='invalid_access_type'),
share_server=None)
def test_allow_access_invalid_access_level(self):
self.assertRaises(exception.InvalidShareAccess,
self._driver.allow_access,
self._context,
self.share,
fake_share.fake_access(
access_level='invalid_access_level'),
share_server=None)
def test_allow_access_exception(self):
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
share_dir = '/' + self.share['name']
user_access = ':'.join([self.access['access_type'],
self.access['access_to'],
'rwx'])
cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-m', '-R',
user_access, share_dir]
self.assertRaises(exception.HDFSException,
self._driver.allow_access,
self._context,
self.share,
self.access,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
*cmd, check_exit_code=True)
def test_deny_access(self):
self._driver._hdfs_execute = mock.Mock(return_value=['', ''])
share_dir = '/' + self.share['name']
access_name = ':'.join([self.access['access_type'],
self.access['access_to']])
cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-x', '-R',
access_name, share_dir]
self._driver.deny_access(self._context,
self.share,
self.access,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
*cmd, check_exit_code=True)
def test_deny_access_exception(self):
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
share_dir = '/' + self.share['name']
access_name = ':'.join([self.access['access_type'],
self.access['access_to']])
cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-x', '-R',
access_name, share_dir]
self.assertRaises(exception.HDFSException,
self._driver.deny_access,
self._context,
self.share,
self.access,
share_server=None)
self._driver._hdfs_execute.assert_called_once_with(
*cmd, check_exit_code=True)
def test_extend_share(self):
new_size = "fake_size"
self._driver._set_share_size = mock.Mock()
self._driver.extend_share(self.share, new_size)
self._driver._set_share_size.assert_called_once_with(
self.share, new_size)
def test__check_hdfs_state_healthy(self):
fake_out = "fakeinfo\n...Status: HEALTHY"
self._driver._hdfs_execute = mock.Mock(return_value=(fake_out, ''))
result = self._driver._check_hdfs_state()
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'fsck', '/')
self.assertTrue(result)
def test__check_hdfs_state_down(self):
fake_out = "fakeinfo\n...Status: DOWN"
self._driver._hdfs_execute = mock.Mock(return_value=(fake_out, ''))
result = self._driver._check_hdfs_state()
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'fsck', '/')
self.assertFalse(result)
def test__check_hdfs_state_exception(self):
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver._check_hdfs_state)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'fsck', '/')
def test__get_available_capacity(self):
fake_out = ('Configured Capacity: 2.4\n' +
'Total Capacity: 2\n' +
'DFS free: 1')
self._driver._hdfs_execute = mock.Mock(return_value=(fake_out, ''))
total, free = self._driver._get_available_capacity()
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfsadmin', '-report')
self.assertEqual(2, total)
self.assertEqual(1, free)
def test__get_available_capacity_exception(self):
self._driver._hdfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.HDFSException,
self._driver._get_available_capacity)
self._driver._hdfs_execute.assert_called_once_with(
'fake_hdfs_bin', 'dfsadmin', '-report')
def test_get_share_stats_refresh_false(self):
self._driver._stats = {'fake_key': 'fake_value'}
result = self._driver.get_share_stats(False)
self.assertEqual(self._driver._stats, result)
def test_get_share_stats_refresh_true(self):
self._driver._get_available_capacity = mock.Mock(
return_value=(11111.0, 12345.0))
result = self._driver.get_share_stats(True)
expected_keys = [
'qos', 'driver_version', 'share_backend_name',
'free_capacity_gb', 'total_capacity_gb',
'driver_handles_share_servers',
'reserved_percentage', 'vendor_name', 'storage_protocol',
]
for key in expected_keys:
self.assertIn(key, result)
self.assertEqual('HDFS', result['storage_protocol'])
self._driver._get_available_capacity.assert_called_once_with()
def test__hdfs_local_execute(self):
cmd = 'testcmd'
self.mock_object(utils, 'execute', mock.Mock(return_value=True))
self._driver._hdfs_local_execute(cmd)
utils.execute.assert_called_once_with(cmd, run_as_root=False)
def test__hdfs_remote_execute(self):
self._driver._run_ssh = mock.Mock(return_value=True)
cmd = 'testcmd'
self._driver._hdfs_remote_execute(cmd, check_exit_code=True)
self._driver._run_ssh.assert_called_once_with(
self.local_ip, tuple([cmd]), True)
def test__run_ssh(self):
ssh_output = 'fake_ssh_output'
cmd_list = ['fake', 'cmd']
ssh = mock.Mock()
ssh.get_transport = mock.Mock()
ssh.get_transport().is_active = mock.Mock(return_value=True)
ssh_pool = mock.Mock()
ssh_pool.create = mock.Mock(return_value=ssh)
self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool))
self.mock_object(processutils, 'ssh_execute',
mock.Mock(return_value=ssh_output))
result = self._driver._run_ssh(self.local_ip, cmd_list)
utils.SSHPool.assert_called_once_with(
self._driver.configuration.hdfs_namenode_ip,
self._driver.configuration.hdfs_ssh_port,
self._driver.configuration.ssh_conn_timeout,
self._driver.configuration.hdfs_ssh_name,
password=self._driver.configuration.hdfs_ssh_pw,
privatekey=self._driver.configuration.hdfs_ssh_private_key,
min_size=self._driver.configuration.ssh_min_pool_conn,
max_size=self._driver.configuration.ssh_max_pool_conn)
ssh_pool.create.assert_called_once_with()
ssh.get_transport().is_active.assert_called_once_with()
processutils.ssh_execute.assert_called_once_with(
ssh, 'fake cmd', check_exit_code=False)
self.assertEqual(ssh_output, result)
def test__run_ssh_exception(self):
cmd_list = ['fake', 'cmd']
ssh = mock.Mock()
ssh.get_transport = mock.Mock()
ssh.get_transport().is_active = mock.Mock(return_value=True)
ssh_pool = mock.Mock()
ssh_pool.create = mock.Mock(return_value=ssh)
self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool))
self.mock_object(processutils, 'ssh_execute',
mock.Mock(side_effect=Exception))
self.assertRaises(exception.HDFSException,
self._driver._run_ssh,
self.local_ip,
cmd_list)
utils.SSHPool.assert_called_once_with(
self._driver.configuration.hdfs_namenode_ip,
self._driver.configuration.hdfs_ssh_port,
self._driver.configuration.ssh_conn_timeout,
self._driver.configuration.hdfs_ssh_name,
password=self._driver.configuration.hdfs_ssh_pw,
privatekey=self._driver.configuration.hdfs_ssh_private_key,
min_size=self._driver.configuration.ssh_min_pool_conn,
max_size=self._driver.configuration.ssh_max_pool_conn)
ssh_pool.create.assert_called_once_with()
ssh.get_transport().is_active.assert_called_once_with()
processutils.ssh_execute.assert_called_once_with(
ssh, 'fake cmd', check_exit_code=False)
| true | true |
f7f7159c9914610b6bf75c401a71383dc8f50e28 | 1,267 | py | Python | mlplaygrounds/datasets/tests/test_serializers/test_mlmodel_lite_serializer.py | rennym19/ml-playgrounds | a3927c0b5b73588745f07527eb6c1a542fac8afb | [
"MIT"
] | null | null | null | mlplaygrounds/datasets/tests/test_serializers/test_mlmodel_lite_serializer.py | rennym19/ml-playgrounds | a3927c0b5b73588745f07527eb6c1a542fac8afb | [
"MIT"
] | 7 | 2020-06-12T03:13:50.000Z | 2021-09-22T19:01:10.000Z | mlplaygrounds/datasets/tests/test_serializers/test_mlmodel_lite_serializer.py | rennym19/ml-playgrounds | a3927c0b5b73588745f07527eb6c1a542fac8afb | [
"MIT"
] | null | null | null | from unittest import TestCase
from unittest.mock import patch
from django_mock_queries.query import MockModel
from bson import ObjectId
from mlplaygrounds.datasets.serializers.models import MLModelLiteSerializer
class TestMLModelLiteSerializer(TestCase):
def setUp(self):
self.valid_instance = MockModel(uid=ObjectId(),
name='test model',
user_id='test_user',
dataset_id='test_dataset',
algorithm='testalg')
self.expected_data = {
'uid': str(self.valid_instance.uid),
'name': self.valid_instance.name,
'algorithm': self.valid_instance.algorithm,
}
def test_serialize_instance(self):
serialized_data = MLModelLiteSerializer(self.valid_instance).data
self.assertDictEqual(serialized_data, self.expected_data)
def test_serialize_many(self):
expected_list = [self.expected_data, self.expected_data]
serialized_data = MLModelLiteSerializer(
[self.valid_instance, self.valid_instance], many=True).data
self.assertListEqual(serialized_data, expected_list)
| 35.194444 | 75 | 0.624309 | from unittest import TestCase
from unittest.mock import patch
from django_mock_queries.query import MockModel
from bson import ObjectId
from mlplaygrounds.datasets.serializers.models import MLModelLiteSerializer
class TestMLModelLiteSerializer(TestCase):
def setUp(self):
self.valid_instance = MockModel(uid=ObjectId(),
name='test model',
user_id='test_user',
dataset_id='test_dataset',
algorithm='testalg')
self.expected_data = {
'uid': str(self.valid_instance.uid),
'name': self.valid_instance.name,
'algorithm': self.valid_instance.algorithm,
}
def test_serialize_instance(self):
serialized_data = MLModelLiteSerializer(self.valid_instance).data
self.assertDictEqual(serialized_data, self.expected_data)
def test_serialize_many(self):
expected_list = [self.expected_data, self.expected_data]
serialized_data = MLModelLiteSerializer(
[self.valid_instance, self.valid_instance], many=True).data
self.assertListEqual(serialized_data, expected_list)
| true | true |
f7f71655698243e910fdac17b3b7914ec21baa13 | 85 | py | Python | store/__init__.py | Semior001/tg2pub | cdd0474ec27b061b52ac26011126eec66276c0ca | [
"MIT"
] | null | null | null | store/__init__.py | Semior001/tg2pub | cdd0474ec27b061b52ac26011126eec66276c0ca | [
"MIT"
] | null | null | null | store/__init__.py | Semior001/tg2pub | cdd0474ec27b061b52ac26011126eec66276c0ca | [
"MIT"
] | null | null | null | from .store import Store
from .vedisdb import VedisDB
__all__ = ['Store', 'VedisDB'] | 21.25 | 30 | 0.741176 | from .store import Store
from .vedisdb import VedisDB
__all__ = ['Store', 'VedisDB'] | true | true |
f7f7165a4cf43f704d703f203482929bb09a0bda | 2,347 | py | Python | Advance_Python/Database_Connectivity/Prepared_Statement.py | siddharth-143/Python | 293f4643a3a13e3b82d23fd8922db54dbb0f12bc | [
"MIT"
] | null | null | null | Advance_Python/Database_Connectivity/Prepared_Statement.py | siddharth-143/Python | 293f4643a3a13e3b82d23fd8922db54dbb0f12bc | [
"MIT"
] | null | null | null | Advance_Python/Database_Connectivity/Prepared_Statement.py | siddharth-143/Python | 293f4643a3a13e3b82d23fd8922db54dbb0f12bc | [
"MIT"
] | null | null | null | # Prepared Statement
import mysql.connector
def student_data(fetch): # def student_data(nm, ro, fe)
try:
conn = mysql.connector.connect(
user="root",
password="password",
host="localhost",
database="pdb",
port=3306
)
if conn.is_connected(): # check the connection
print("Connect Successfully")
except:
print("Unable To Connect")
# sql = 'INSERT INTO student(name, roll, fees) VALUES(%s, %s, %s)' # tuple
# sql = 'INSERT INTO student(name, roll, fees) VALUES(%?, %?, %?)'
# sql = 'DELETE FROM student WHERE stu_id=%s'
# sql = 'UPDATE student SET fees=%s WHERE stu_id=%s'
# sql = 'SELECT * FROM student WHERE stu_id=%s'
sql = 'SELECT * FROM student WHERE fees=%s'
myc = conn.cursor(prepared=True) # cursor prepared method
# n = nm
# r = ro
# f = fe
# disp_val = (nm, ro, fe)
# d = delete_data # delete data
# del_val = (delete_data, ) # tuple
# f = fe
# i = id
# update_val = (fe, id) # update data
fat = fetch
fetch_data = (fetch,)
try:
myc.execute(sql, fetch_data) # execute query
# conn.commit() # committing the changes (while fetching data no need of committing the change)
# row = myc.fetchone() # to display single row
row = myc.fetchone()
while row is not None:
print(row)
row = myc.fetchone()
print(myc.rowcount, "Row inserted")
except:
conn.rollback() # rollback the changes
print("Unable to process data")
myc.close() # close cursor
conn.close() # close connection
while True:
# nm = input("Enter Name : ")
# ro = int(input("Enter Roll No : "))
# fe = int(input("Enter Fees : "))
# student_data(nm, ro, fe) # call function student_data()
# delete_data = int(input("Enter student id : "))
# student_data(delete_data)
# fe = int(input("Enter fees : "))
# id = int(input("Enter Id : "))
# student_data(fe, id)
# fetch = int(input("Enter id to display data : "))
# student_data(fetch)
fetch = int(input("Enter fees : "))
student_data(fetch)
ans = input("Do you want to exit (y/n) : ")
if ans == "y":
break
| 27.290698 | 108 | 0.551768 |
import mysql.connector
def student_data(fetch):
try:
conn = mysql.connector.connect(
user="root",
password="password",
host="localhost",
database="pdb",
port=3306
)
if conn.is_connected():
print("Connect Successfully")
except:
print("Unable To Connect")
sql = 'SELECT * FROM student WHERE fees=%s'
myc = conn.cursor(prepared=True)
etch
fetch_data = (fetch,)
try:
myc.execute(sql, fetch_data)
row = myc.fetchone()
print(myc.rowcount, "Row inserted")
except:
conn.rollback()
print("Unable to process data")
myc.close()
conn.close()
while True:
fetch = int(input("Enter fees : "))
student_data(fetch)
ans = input("Do you want to exit (y/n) : ")
if ans == "y":
break
| true | true |
f7f7191bb1bd9fa1d266f49d60d0fde8ffb3ac7d | 475 | py | Python | integration_tests/test_transforms_torchvision.py | andreaconti/torch_k | a5bf09b22d3bef9092d7313dda529af83da15dc6 | [
"MIT"
] | 6 | 2021-06-26T18:19:50.000Z | 2022-02-04T14:16:56.000Z | integration_tests/test_transforms_torchvision.py | andreaconti/torch_k | a5bf09b22d3bef9092d7313dda529af83da15dc6 | [
"MIT"
] | 3 | 2020-11-19T14:41:53.000Z | 2020-11-20T12:26:02.000Z | integration_tests/test_transforms_torchvision.py | andreaconti/torch_kitti | a5bf09b22d3bef9092d7313dda529af83da15dc6 | [
"MIT"
] | null | null | null | """
testing integration between torchvision and transformations
"""
import torch
import torchvision.transforms as V
import torch_kitti.transforms as K
def test_random_crop():
# simulate input
fake_img_1 = torch.randn(1, 600, 600)
fake_img_2 = fake_img_1.clone()
x = {"img_left": fake_img_1, "img_right": fake_img_2}
output = K.functional.apply_to_features(V.RandomCrop([200, 200]), x)
assert torch.all(output["img_left"] == output["img_right"])
| 23.75 | 72 | 0.72 |
import torch
import torchvision.transforms as V
import torch_kitti.transforms as K
def test_random_crop():
fake_img_1 = torch.randn(1, 600, 600)
fake_img_2 = fake_img_1.clone()
x = {"img_left": fake_img_1, "img_right": fake_img_2}
output = K.functional.apply_to_features(V.RandomCrop([200, 200]), x)
assert torch.all(output["img_left"] == output["img_right"])
| true | true |
f7f71bed6073f64979f789e6ef5964c27e7d1eb0 | 1,941 | py | Python | TemplateTest/templatetest/config/environment.py | mohseenrm/pylons | 3a21c5399832a1d47f12646598c4d82d4c15da9c | [
"MIT"
] | null | null | null | TemplateTest/templatetest/config/environment.py | mohseenrm/pylons | 3a21c5399832a1d47f12646598c4d82d4c15da9c | [
"MIT"
] | null | null | null | TemplateTest/templatetest/config/environment.py | mohseenrm/pylons | 3a21c5399832a1d47f12646598c4d82d4c15da9c | [
"MIT"
] | null | null | null | """Pylons environment configuration"""
import os
from mako.lookup import TemplateLookup
from pylons.configuration import PylonsConfig
from pylons.error import handle_mako_error
from sqlalchemy import engine_from_config
import templatetest.lib.app_globals as app_globals
import templatetest.lib.helpers
from templatetest.config.routing import make_map
from templatetest.model import init_model
def load_environment(global_conf, app_conf):
"""Configure the Pylons environment via the ``pylons.config``
object
"""
config = PylonsConfig()
# Pylons paths
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
paths = dict(root=root,
controllers=os.path.join(root, 'controllers'),
static_files=os.path.join(root, 'public'),
templates=[os.path.join(root, 'templates')])
# Initialize config with the basic options
config.init_app(global_conf, app_conf, package='templatetest', paths=paths)
config['routes.map'] = make_map(config)
config['pylons.app_globals'] = app_globals.Globals(config)
config['pylons.h'] = templatetest.lib.helpers
# Setup cache object as early as possible
import pylons
pylons.cache._push_object(config['pylons.app_globals'].cache)
# Create the Mako TemplateLookup, with the default auto-escaping
config['pylons.app_globals'].mako_lookup = TemplateLookup(
directories=paths['templates'],
error_handler=handle_mako_error,
module_directory=os.path.join(app_conf['cache_dir'], 'templates'),
input_encoding='utf-8', default_filters=['escape'],
imports=['from markupsafe import escape'])
# Setup the SQLAlchemy database engine
engine = engine_from_config(config, 'sqlalchemy.')
init_model(engine)
# CONFIGURATION OPTIONS HERE (note: all config options will override
# any Pylons config options)
return config
| 35.290909 | 79 | 0.717156 | import os
from mako.lookup import TemplateLookup
from pylons.configuration import PylonsConfig
from pylons.error import handle_mako_error
from sqlalchemy import engine_from_config
import templatetest.lib.app_globals as app_globals
import templatetest.lib.helpers
from templatetest.config.routing import make_map
from templatetest.model import init_model
def load_environment(global_conf, app_conf):
config = PylonsConfig()
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
paths = dict(root=root,
controllers=os.path.join(root, 'controllers'),
static_files=os.path.join(root, 'public'),
templates=[os.path.join(root, 'templates')])
config.init_app(global_conf, app_conf, package='templatetest', paths=paths)
config['routes.map'] = make_map(config)
config['pylons.app_globals'] = app_globals.Globals(config)
config['pylons.h'] = templatetest.lib.helpers
import pylons
pylons.cache._push_object(config['pylons.app_globals'].cache)
config['pylons.app_globals'].mako_lookup = TemplateLookup(
directories=paths['templates'],
error_handler=handle_mako_error,
module_directory=os.path.join(app_conf['cache_dir'], 'templates'),
input_encoding='utf-8', default_filters=['escape'],
imports=['from markupsafe import escape'])
engine = engine_from_config(config, 'sqlalchemy.')
init_model(engine)
return config
| true | true |
f7f71ca8b451dd13607185cac141abc57a6b68a3 | 7,676 | py | Python | u2flib_host/hid_transport.py | o-/python-u2flib-host | 27dc866368f96f071c9edb3ba90bd628f14ad21c | [
"BSD-2-Clause"
] | 43 | 2015-01-12T18:27:25.000Z | 2021-11-16T21:11:56.000Z | u2flib_host/hid_transport.py | o-/python-u2flib-host | 27dc866368f96f071c9edb3ba90bd628f14ad21c | [
"BSD-2-Clause"
] | 35 | 2015-04-07T18:01:43.000Z | 2019-12-05T12:25:30.000Z | u2flib_host/hid_transport.py | o-/python-u2flib-host | 27dc866368f96f071c9edb3ba90bd628f14ad21c | [
"BSD-2-Clause"
] | 26 | 2015-02-25T15:31:04.000Z | 2021-11-16T21:11:40.000Z | # Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import os
try:
import hidraw as hid # Prefer hidraw
except ImportError:
import hid
from time import time, sleep
from u2flib_host.device import U2FDevice
from u2flib_host.yubicommon.compat import byte2int, int2byte
from u2flib_host import exc
DEVICES = [
(0x1050, 0x0200), # Gnubby
(0x1050, 0x0113), # YubiKey NEO U2F
(0x1050, 0x0114), # YubiKey NEO OTP+U2F
(0x1050, 0x0115), # YubiKey NEO U2F+CCID
(0x1050, 0x0116), # YubiKey NEO OTP+U2F+CCID
(0x1050, 0x0120), # Security Key by Yubico
(0x1050, 0x0410), # YubiKey Plus
(0x1050, 0x0402), # YubiKey 4 U2F
(0x1050, 0x0403), # YubiKey 4 OTP+U2F
(0x1050, 0x0406), # YubiKey 4 U2F+CCID
(0x1050, 0x0407), # YubiKey 4 OTP+U2F+CCID
(0x2581, 0xf1d0), # Plug-Up U2F Security Key
(0x096e, 0x0850), # Feitian Technologies, Inc. ePass FIDO
(0x096e, 0x0858), # FT U2F
(0x096e, 0x085b), # FS ePass FIDO
(0x24dc, 0x0501), # JaCarta U2F
(0x1ea8, 0xf025), # Thetis U2F
(0x1d50, 0x60fc), # OnlyKey U2F
(0x1209, 0x53c1), # Trezor U2F/FIDO2
]
HID_RPT_SIZE = 64
TYPE_INIT = 0x80
U2F_VENDOR_FIRST = 0x40
# USB Commands
CMD_INIT = 0x06
CMD_WINK = 0x08
CMD_PING = 0x01
CMD_APDU = 0x03
CMD_LOCK = 0x04
U2FHID_YUBIKEY_DEVICE_CONFIG = U2F_VENDOR_FIRST
STAT_ERR = 0xbf
def list_devices(dev_class=None):
dev_class = dev_class or HIDDevice
devices = []
for d in hid.enumerate(0, 0):
usage_page = d['usage_page']
if usage_page == 0xf1d0 and d['usage'] == 1:
devices.append(dev_class(d['path']))
# Usage page doesn't work on Linux
elif (d['vendor_id'], d['product_id']) in DEVICES:
device = HIDDevice(d['path'])
try:
device.open()
device.close()
devices.append(dev_class(d['path']))
except (exc.DeviceError, IOError, OSError):
pass
return devices
def _read_timeout(dev, size, timeout=2.0):
timeout += time()
while time() < timeout:
resp = dev.read(size)
if resp:
return resp
return []
class U2FHIDError(Exception):
def __init__(self, code):
super(Exception, self).__init__("U2FHIDError: 0x%02x" % code)
self.code = code
class HIDDevice(U2FDevice):
"""
U2FDevice implementation using the HID transport.
"""
def __init__(self, path):
self.path = path
self.cid = b"\xff\xff\xff\xff"
self.capabilities = 0x00
def open(self):
self.handle = hid.device()
self.handle.open_path(self.path)
self.handle.set_nonblocking(True)
self.init()
def close(self):
if hasattr(self, 'handle'):
self.handle.close()
del self.handle
def init(self):
nonce = os.urandom(8)
resp = self.call(CMD_INIT, nonce)
timeout = time() + 2.0
while (len(resp) != 17 or resp[:8] != nonce):
if timeout < time():
raise exc.DeviceError('Wrong INIT response from device')
sleep(0.1)
resp = self._read_resp(self.cid, CMD_INIT)
self.cid = resp[8:12]
self.capabilities = byte2int(resp[16])
def ctap2_enabled(self):
return (self.capabilities >> 2) & 0x01
def set_mode(self, mode):
data = mode + b"\x0f\x00\x00"
self.call(U2FHID_YUBIKEY_DEVICE_CONFIG, data)
def _do_send_apdu(self, apdu_data):
return self.call(CMD_APDU, apdu_data)
def wink(self):
self.call(CMD_WINK)
def ping(self, msg=b'Hello U2F'):
resp = self.call(CMD_PING, msg)
if resp != msg:
raise exc.DeviceError("Incorrect PING readback")
return resp
def lock(self, lock_time=10):
self.call(CMD_LOCK, lock_time)
def _write_to_device(self, to_send, timeout=2.0):
expected = len(to_send)
actual = 0
stop_at = time() + timeout
while actual != expected:
if (time() > stop_at):
raise exc.DeviceError("Unable to send data to the device")
actual = self.handle.write(to_send)
sleep(0.025)
def _send_req(self, cid, cmd, data):
size = len(data)
bc_l = int2byte(size & 0xff)
bc_h = int2byte(size >> 8 & 0xff)
payload = cid + int2byte(TYPE_INIT | cmd) + bc_h + bc_l + \
data[:HID_RPT_SIZE - 7]
payload += b'\0' * (HID_RPT_SIZE - len(payload))
self._write_to_device([0] + [byte2int(c) for c in payload])
data = data[HID_RPT_SIZE - 7:]
seq = 0
while len(data) > 0:
payload = cid + int2byte(0x7f & seq) + data[:HID_RPT_SIZE - 5]
payload += b'\0' * (HID_RPT_SIZE - len(payload))
self._write_to_device([0] + [byte2int(c) for c in payload])
data = data[HID_RPT_SIZE - 5:]
seq += 1
def _read_resp(self, cid, cmd):
resp = b'.'
header = cid + int2byte(TYPE_INIT | cmd)
while resp and resp[:5] != header:
resp_vals = _read_timeout(self.handle, HID_RPT_SIZE)
resp = b''.join(int2byte(v) for v in resp_vals)
if resp[:5] == cid + int2byte(STAT_ERR):
raise U2FHIDError(byte2int(resp[7]))
if not resp:
raise exc.DeviceError("Invalid response from device!")
data_len = (byte2int(resp[5]) << 8) + byte2int(resp[6])
data = resp[7:min(7 + data_len, HID_RPT_SIZE)]
data_len -= len(data)
seq = 0
while data_len > 0:
resp_vals = _read_timeout(self.handle, HID_RPT_SIZE)
resp = b''.join(int2byte(v) for v in resp_vals)
if resp[:4] != cid:
raise exc.DeviceError("Wrong CID from device!")
if byte2int(resp[4]) != seq & 0x7f:
raise exc.DeviceError("Wrong SEQ from device!")
seq += 1
new_data = resp[5:min(5 + data_len, HID_RPT_SIZE)]
data_len -= len(new_data)
data += new_data
return data
def call(self, cmd, data=b''):
if isinstance(data, int):
data = int2byte(data)
self._send_req(self.cid, cmd, data)
return self._read_resp(self.cid, cmd)
| 32.803419 | 74 | 0.614513 |
from __future__ import print_function
import os
try:
import hidraw as hid
except ImportError:
import hid
from time import time, sleep
from u2flib_host.device import U2FDevice
from u2flib_host.yubicommon.compat import byte2int, int2byte
from u2flib_host import exc
DEVICES = [
(0x1050, 0x0200),
(0x1050, 0x0113),
(0x1050, 0x0114),
(0x1050, 0x0115),
(0x1050, 0x0116),
(0x1050, 0x0120),
(0x1050, 0x0410),
(0x1050, 0x0402),
(0x1050, 0x0403),
(0x1050, 0x0406),
(0x1050, 0x0407),
(0x2581, 0xf1d0),
(0x096e, 0x0850),
(0x096e, 0x0858),
(0x096e, 0x085b),
(0x24dc, 0x0501),
(0x1ea8, 0xf025),
(0x1d50, 0x60fc),
(0x1209, 0x53c1),
]
HID_RPT_SIZE = 64
TYPE_INIT = 0x80
U2F_VENDOR_FIRST = 0x40
CMD_INIT = 0x06
CMD_WINK = 0x08
CMD_PING = 0x01
CMD_APDU = 0x03
CMD_LOCK = 0x04
U2FHID_YUBIKEY_DEVICE_CONFIG = U2F_VENDOR_FIRST
STAT_ERR = 0xbf
def list_devices(dev_class=None):
dev_class = dev_class or HIDDevice
devices = []
for d in hid.enumerate(0, 0):
usage_page = d['usage_page']
if usage_page == 0xf1d0 and d['usage'] == 1:
devices.append(dev_class(d['path']))
elif (d['vendor_id'], d['product_id']) in DEVICES:
device = HIDDevice(d['path'])
try:
device.open()
device.close()
devices.append(dev_class(d['path']))
except (exc.DeviceError, IOError, OSError):
pass
return devices
def _read_timeout(dev, size, timeout=2.0):
timeout += time()
while time() < timeout:
resp = dev.read(size)
if resp:
return resp
return []
class U2FHIDError(Exception):
def __init__(self, code):
super(Exception, self).__init__("U2FHIDError: 0x%02x" % code)
self.code = code
class HIDDevice(U2FDevice):
def __init__(self, path):
self.path = path
self.cid = b"\xff\xff\xff\xff"
self.capabilities = 0x00
def open(self):
self.handle = hid.device()
self.handle.open_path(self.path)
self.handle.set_nonblocking(True)
self.init()
def close(self):
if hasattr(self, 'handle'):
self.handle.close()
del self.handle
def init(self):
nonce = os.urandom(8)
resp = self.call(CMD_INIT, nonce)
timeout = time() + 2.0
while (len(resp) != 17 or resp[:8] != nonce):
if timeout < time():
raise exc.DeviceError('Wrong INIT response from device')
sleep(0.1)
resp = self._read_resp(self.cid, CMD_INIT)
self.cid = resp[8:12]
self.capabilities = byte2int(resp[16])
def ctap2_enabled(self):
return (self.capabilities >> 2) & 0x01
def set_mode(self, mode):
data = mode + b"\x0f\x00\x00"
self.call(U2FHID_YUBIKEY_DEVICE_CONFIG, data)
def _do_send_apdu(self, apdu_data):
return self.call(CMD_APDU, apdu_data)
def wink(self):
self.call(CMD_WINK)
def ping(self, msg=b'Hello U2F'):
resp = self.call(CMD_PING, msg)
if resp != msg:
raise exc.DeviceError("Incorrect PING readback")
return resp
def lock(self, lock_time=10):
self.call(CMD_LOCK, lock_time)
def _write_to_device(self, to_send, timeout=2.0):
expected = len(to_send)
actual = 0
stop_at = time() + timeout
while actual != expected:
if (time() > stop_at):
raise exc.DeviceError("Unable to send data to the device")
actual = self.handle.write(to_send)
sleep(0.025)
def _send_req(self, cid, cmd, data):
size = len(data)
bc_l = int2byte(size & 0xff)
bc_h = int2byte(size >> 8 & 0xff)
payload = cid + int2byte(TYPE_INIT | cmd) + bc_h + bc_l + \
data[:HID_RPT_SIZE - 7]
payload += b'\0' * (HID_RPT_SIZE - len(payload))
self._write_to_device([0] + [byte2int(c) for c in payload])
data = data[HID_RPT_SIZE - 7:]
seq = 0
while len(data) > 0:
payload = cid + int2byte(0x7f & seq) + data[:HID_RPT_SIZE - 5]
payload += b'\0' * (HID_RPT_SIZE - len(payload))
self._write_to_device([0] + [byte2int(c) for c in payload])
data = data[HID_RPT_SIZE - 5:]
seq += 1
def _read_resp(self, cid, cmd):
resp = b'.'
header = cid + int2byte(TYPE_INIT | cmd)
while resp and resp[:5] != header:
resp_vals = _read_timeout(self.handle, HID_RPT_SIZE)
resp = b''.join(int2byte(v) for v in resp_vals)
if resp[:5] == cid + int2byte(STAT_ERR):
raise U2FHIDError(byte2int(resp[7]))
if not resp:
raise exc.DeviceError("Invalid response from device!")
data_len = (byte2int(resp[5]) << 8) + byte2int(resp[6])
data = resp[7:min(7 + data_len, HID_RPT_SIZE)]
data_len -= len(data)
seq = 0
while data_len > 0:
resp_vals = _read_timeout(self.handle, HID_RPT_SIZE)
resp = b''.join(int2byte(v) for v in resp_vals)
if resp[:4] != cid:
raise exc.DeviceError("Wrong CID from device!")
if byte2int(resp[4]) != seq & 0x7f:
raise exc.DeviceError("Wrong SEQ from device!")
seq += 1
new_data = resp[5:min(5 + data_len, HID_RPT_SIZE)]
data_len -= len(new_data)
data += new_data
return data
def call(self, cmd, data=b''):
if isinstance(data, int):
data = int2byte(data)
self._send_req(self.cid, cmd, data)
return self._read_resp(self.cid, cmd)
| true | true |
f7f71d7ea12eaf040de22ec23d2beb9d36c13c15 | 399 | py | Python | intraviewAI/wsgi.py | underflow101/intraviewAI | 0775fe532d650c53d3248a3ccf5b6d67b948ebbe | [
"MIT"
] | null | null | null | intraviewAI/wsgi.py | underflow101/intraviewAI | 0775fe532d650c53d3248a3ccf5b6d67b948ebbe | [
"MIT"
] | null | null | null | intraviewAI/wsgi.py | underflow101/intraviewAI | 0775fe532d650c53d3248a3ccf5b6d67b948ebbe | [
"MIT"
] | null | null | null | """
WSGI config for intraviewAI project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'intraviewAI.settings')
application = get_wsgi_application()
| 23.470588 | 78 | 0.789474 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'intraviewAI.settings')
application = get_wsgi_application()
| true | true |
f7f71da609b1fc21903cf0e2256ad87f6168887c | 36 | py | Python | src/genderizerQc/__init__.py | jaytouz/genderizeQc | 79449634a15023c1ddcab51bce77eb3d61b51689 | [
"MIT"
] | null | null | null | src/genderizerQc/__init__.py | jaytouz/genderizeQc | 79449634a15023c1ddcab51bce77eb3d61b51689 | [
"MIT"
] | null | null | null | src/genderizerQc/__init__.py | jaytouz/genderizeQc | 79449634a15023c1ddcab51bce77eb3d61b51689 | [
"MIT"
] | null | null | null | from .genderizer import GenderizerQc | 36 | 36 | 0.888889 | from .genderizer import GenderizerQc | true | true |
f7f71dceb0f115a3cfbbe40ea73ccc793514d974 | 25,788 | py | Python | corankco/experiments/stats/bootstrap_experiment.py | pierreandrieu/corankcolight | dce9d05c62f25faae5f73f150f44cc20bfa87b33 | [
"MIT"
] | null | null | null | corankco/experiments/stats/bootstrap_experiment.py | pierreandrieu/corankcolight | dce9d05c62f25faae5f73f150f44cc20bfa87b33 | [
"MIT"
] | null | null | null | corankco/experiments/stats/bootstrap_experiment.py | pierreandrieu/corankcolight | dce9d05c62f25faae5f73f150f44cc20bfa87b33 | [
"MIT"
] | null | null | null | from corankco.algorithms.median_ranking import MedianRanking
from corankco.dataset import DatasetSelector, Dataset
from corankco.scoringscheme import ScoringScheme
from corankco.experiments.experiment import ExperimentFromDataset, ExperimentFromOrphanetDataset
from corankco.experiments.stats.bootstrap import bootstrap_dataset
from numpy import asarray, std, mean, quantile, fromstring, round, isnan
from corankco.utils import create_dir
from random import shuffle
import matplotlib.pyplot as plt
import pandas as pd
from corankco.algorithms.algorithmChoice import get_algorithm, Algorithm
# from corankco.rankings generation.rankings generate import create_rankings
class BootstrapExperiment(ExperimentFromDataset):
    """Bootstrap experiment over ranking datasets.

    For each dataset, computes an initial consensus ranking and the Copeland
    score of every element, then re-runs the consensus on ``nb_bootstrap``
    bootstrap resamplings of the rankings.  ``_run_raw_data`` serializes one
    CSV-like line per element (semicolon-separated); ``_run_final_data``
    parses those lines back, draws a per-element histogram of bootstrap
    Copeland scores and a per-dataset confidence-interval plot.

    NOTE(review): output directories are hard-coded absolute paths
    (``/home/pierre/Bureau/...``) — this only runs on the author's machine.
    """

    def __init__(self,
                 dataset_folder: str,
                 algo: MedianRanking,
                 scoring_scheme: ScoringScheme,
                 nb_bootstrap: int = 1000,
                 dataset_selector: DatasetSelector = None,
                 ):
        """Store the consensus algorithm, scoring scheme and bootstrap count.

        :param dataset_folder: folder of datasets, handled by the parent class
        :param algo: median-ranking algorithm used to compute each consensus
        :param scoring_scheme: scoring scheme passed to the algorithm
        :param nb_bootstrap: number of bootstrap resamplings per dataset
        :param dataset_selector: optional filter on which datasets are loaded
        """
        super().__init__(dataset_folder=dataset_folder, dataset_selector=dataset_selector)
        self._algo = algo
        # NOTE(review): "_scoring_cheme" is a typo but load-bearing — the
        # subclass references the same misspelled attribute; do not rename
        # in isolation.
        self._scoring_cheme = scoring_scheme
        self._nb_bootstrap = nb_bootstrap
        # Name-mangled (class-private) output locations; the subclass defines
        # its own pair with different values.
        self.__path_hist = "/home/pierre/Bureau/bootstrap_converge/hist"
        self.__path_ic = "/home/pierre/Bureau/bootstrap_converge/ic"
        # Minimum presence rate for elements (0. = keep all elements).
        self.__rate_presence_min = 0.
        # Two-sided confidence-interval rate (0.05 -> 95% CI).
        self.__ic_rate = 0.05

    def _get_algo(self) -> MedianRanking:
        """Return the consensus algorithm (backs the ``algo`` property)."""
        return self._algo

    def _get_scoring_scheme(self) -> ScoringScheme:
        """Return the scoring scheme (backs the ``scoring_scheme`` property)."""
        return self._scoring_cheme

    def _get_nb_bootstrap(self) -> int:
        """Return the number of bootstrap iterations (backs ``nb_bootstrap``)."""
        return self._nb_bootstrap

    # Read-only properties over the private attributes above.
    algo = property(_get_algo)
    scoring_scheme = property(_get_scoring_scheme)
    nb_bootstrap = property(_get_nb_bootstrap)

    def _run_raw_data(self) -> str:
        """Run the bootstrap on every dataset and return one line per element.

        Line format (semicolon-separated): dataset name; nb elements;
        nb rankings; element id; "1"; initial Copeland score; max Copeland
        score seen for the dataset; mean bootstrap victories / equalities /
        defeats; position in the initial consensus; comma-separated list of
        bootstrap Copeland scores.
        """
        res = ""
        position_consensus = {}
        h_max_copeland_score = {}
        create_dir(self.__path_hist)
        create_dir(self.__path_ic)
        for dataset in self.datasets:
            max_score = 0
            # element -> [initial Copeland score, bootstrap scores...]
            h_dataset = {}
            # element -> victories/equalities/defeats bookkeeping
            h_dataset_victories = {}
            dataset.remove_empty_rankings()
            if self.__rate_presence_min > 0.:
                # Optionally drop rarely-ranked elements, then re-clean.
                dataset.remove_elements_rate_presence_lower_than(self.__rate_presence_min)
                dataset.remove_empty_rankings()
            # One histogram folder per dataset, named after its size.
            create_dir(self.__path_hist + "/" + dataset.name.replace(" ", "_").replace(".txt", "")
                       + "_nb_elements=" + str(dataset.nb_elements) + "_nb_rankings=" + str(dataset.nb_rankings))
            print(dataset.name + " " + str(dataset.nb_elements) + " " + str(dataset.nb_rankings))
            # Initial consensus on the full (non-resampled) dataset.
            consensus_initial = self.algo.compute_consensus_rankings(dataset, self.scoring_scheme, True)
            consensus_ranking = consensus_initial.consensus_rankings[0]
            # Rank of each element in the consensus; tied elements (same
            # bucket) share the same position.
            position_element = 1
            for bucket_i in consensus_ranking:
                for element in bucket_i:
                    position_consensus[element] = position_element
                position_element += len(bucket_i)
            score_copeland_elements = consensus_initial.copeland_scores
            victories_copeland_elements = consensus_initial.copeland_victories
            for gene in dataset.elements:
                score_gene = score_copeland_elements[gene]
                victories_gene = victories_copeland_elements[gene]
                h_dataset[gene] = [score_gene]
                h_dataset_victories[gene] = {}
                h_dataset_victories[gene]["initial"] = victories_gene
                h_dataset_victories[gene]["bootstrap_victories"] = []
                h_dataset_victories[gene]["bootstrap_equalities"] = []
                h_dataset_victories[gene]["bootstrap_defeats"] = []
                if score_gene > max_score:
                    max_score = score_gene
            for i in range(self._nb_bootstrap):
                # Progress report every 10% of the iterations.
                if (i + 1) % (self._nb_bootstrap / 10) == 0:
                    print(i + 1)
                dataset_bootstrap = bootstrap_dataset(dataset)
                # Elements absent from the resampling ("lost") shift scores.
                lost_elements = dataset.nb_elements - dataset_bootstrap.nb_elements
                consensus = self._algo.compute_consensus_rankings(dataset_bootstrap, self._scoring_cheme, True)
                cop_scores = consensus.copeland_scores
                cop_victories = consensus.copeland_victories
                for element in dataset.elements:
                    if element in cop_scores:
                        # Offset by lost_elements to keep scores comparable
                        # across bootstraps of different effective sizes.
                        h_dataset[element].append(cop_scores[element] + lost_elements)
                        victories_equalities_defeat = cop_victories[element]
                        h_dataset_victories[element]["bootstrap_victories"].append(victories_equalities_defeat[0])
                        h_dataset_victories[element]["bootstrap_equalities"].append(victories_equalities_defeat[1])
                        h_dataset_victories[element]["bootstrap_defeats"].append(victories_equalities_defeat[2])
                        # h_dataset[gene].append(cop_scores[gene])
                        if cop_scores[element] > max_score:
                            max_score = cop_scores[element]
                    else:
                        # Element missing from the bootstrap sample: credit it
                        # with a neutral score (half a point per tied element).
                        h_dataset[element].append((lost_elements - 1) * 0.5)
                        # h_dataset[gene].append(nan)
            for element in dataset.elements:
                # Drop NaNs defensively (only relevant when the nan branch
                # above is re-enabled).
                array_scores = asarray(h_dataset[element])
                array_scores_without_nan = array_scores[~isnan(array_scores)]
                h_dataset[element] = list(array_scores_without_nan)
            h_max_copeland_score[dataset.name] = max_score
            h_elements_mean_score = {}
            for element in dataset.elements:
                # Mean over bootstrap scores only (index 0 is the initial one).
                h_elements_mean_score[element] = mean(asarray(h_dataset[element][1:]))
            # Emit lines sorted by mean bootstrap score, ascending.
            for element, value in sorted(h_elements_mean_score.items(), key=lambda item: item[1]):
                res += dataset.name + ";" \
                       + str(dataset.nb_elements) + ";" \
                       + str(dataset.nb_rankings) + ";" \
                       + str(element) + ";1;" \
                       + str(h_dataset[element][0]) + ";" \
                       + str(h_max_copeland_score[dataset.name]) + ";" \
                       + str(mean(asarray(h_dataset_victories[element]["bootstrap_victories"]))) + ";" \
                       + str(mean(asarray(h_dataset_victories[element]["bootstrap_equalities"]))) + ";" \
                       + str(mean(asarray(h_dataset_victories[element]["bootstrap_defeats"]))) + ";" \
                       + str(position_consensus[element]) + ";" \
                       + str(h_dataset[element][1:])[1:-1] + "\n"
        print(res)
        return res

    def _run_final_data(self, raw_data: str) -> str:
        """Parse ``raw_data`` lines and produce the plots.

        For each element: a histogram of its bootstrap Copeland scores with
        the initial score marked.  For each dataset: a plot of the top-25
        elements' bootstrap confidence intervals.  Returns an empty string.
        """
        ic_rate = self.__ic_rate
        # disease -> {element id -> rank in the initial consensus}
        h_rank_genes = {}
        # disease -> {element id -> (CI lower bound, CI upper bound)}
        h_ic_genes = {}
        for line in raw_data.split("\n"):
            if len(line) > 1:
                # Column layout mirrors what _run_raw_data emits
                # (cols[4], the gold-standard flag, is unused here).
                cols = line.split(";")
                disease = cols[0]
                nb_elements = cols[1]
                nb_rankings = cols[2]
                id_gene = cols[3]
                score_cop_initial = cols[5]
                max_copeland_dataset = cols[6]
                victories = cols[7]
                equalities = cols[8]
                defeats = cols[9]
                rank_gene_consensus = cols[10]
                scores_bootstrap = cols[11]
                path_output = self.__path_hist + "/" + cols[0].replace(" ", "_") + "_nb_elements=" \
                              + str(nb_elements) + "_nb_rankings=" + str(nb_rankings) + "/"
                col = "blue"
                score_bootstrap = fromstring(scores_bootstrap, sep=",")
                score_initial = float(score_cop_initial)
                # Basic (reverse-percentile) bootstrap CI: center the
                # bootstrap distribution on the initial score, then flip
                # the quantiles around it.
                scores_boot_centered = score_bootstrap - score_initial
                quantiles_ic = quantile(scores_boot_centered, [ic_rate/2, 1-ic_rate/2])
                ic_gene = score_initial - quantiles_ic[1], score_initial - quantiles_ic[0]
                if disease not in h_rank_genes:
                    h_rank_genes[disease] = {}
                    h_ic_genes[disease] = {}
                h_rank_genes[disease][id_gene] = int(rank_gene_consensus)
                h_ic_genes[disease][id_gene] = ic_gene
                # Histogram of bootstrap scores; green line = initial score.
                plt.hist(score_bootstrap, color=col)
                plt.xlim(xmin=0, xmax=round(int(float(max_copeland_dataset))) + 1)
                plt.axvline(float(score_cop_initial), color="green")
                plt.xlabel("Cop score for elem " + str(int(id_gene)))
                plt.title("quartiles:" + str(quantile(score_bootstrap, [0.25, 0.5, 0.75])) + ";mean=" + str(
                    round(mean(score_bootstrap), 2)) + ";std=" + str(round(std(score_bootstrap), 2))
                          +"\nvict-eq-def=" + str(victories) + " "+ str(equalities) + " " + str(defeats))
                # File name is zero-padded consensus rank + element id, so
                # histograms sort by rank in the folder listing.
                plt.savefig(fname=path_output + '{0:03}'.format(int(rank_gene_consensus)) +"_elem_" + str(int(id_gene)), format="png")
                plt.clf()
        data_dict = {}
        for disease in h_rank_genes.keys():
            # Top 25 elements by consensus rank.
            h_disease_ranks = sorted(h_rank_genes[disease].items(), key=lambda item: item[1])[:25]
            print(h_disease_ranks)
            h_disease_ic = h_ic_genes[disease]
            data_dict.clear()
            data_dict['category'] = []
            data_dict['lower'] = []
            data_dict['upper'] = []
            min_x = 0
            max_x = 0
            for gene_with_pos in h_disease_ranks:
                gene = gene_with_pos[0]
                data_dict['category'].append(gene)
                data_dict['lower'].append(h_disease_ic[gene][0])
                data_dict['upper'].append(h_disease_ic[gene][1])
                if h_disease_ic[gene][0] < min_x:
                    min_x = h_disease_ic[gene][0]
                if h_disease_ic[gene][1] > max_x:
                    max_x = h_disease_ic[gene][1]
            dataset = pd.DataFrame(data_dict)
            # One horizontal segment per element: its confidence interval.
            for lower, upper, y in zip(dataset['lower'], dataset['upper'], range(len(dataset))):
                plt.xlim(min_x, max_x)
                plt.plot((lower, upper), (y, y), 'ro-')
            path_output = self.__path_ic + "/" + disease.replace(" ", "_")
            plt.yticks(range(len(dataset)), list(dataset['category']))
            plt.savefig(fname=path_output, format="png")
            plt.clf()
        return ""
class BootstrapExperimentBiologicalIC(BootstrapExperiment, ExperimentFromOrphanetDataset):
def __init__(self,
dataset_folder: str,
algo: MedianRanking,
scoring_scheme: ScoringScheme,
nb_bootstrap: int = 1000,
dataset_selector: DatasetSelector = None,
ic_rate: float = 0.05,
rate_presence_minimal: float = 0.
):
super().__init__(dataset_folder, algo, scoring_scheme, nb_bootstrap, dataset_selector)
super()._remove_datasets_empty_goldstandard()
self.__ic_rate = ic_rate
self.__rate_presence_min = rate_presence_minimal
self.__path_hist = "/home/pierre/Bureau/expe_bootstrap_bio/ic=" \
+ str(self.__ic_rate) + "_pres=" + str(self.__rate_presence_min) + "_hist"
self.__path_ic = "/home/pierre/Bureau/expe_bootstrap_bio/ic=" \
+ str(self.__ic_rate) + "_pres=" + str(self.__rate_presence_min) + "_ic"
    def _run_raw_data(self) -> str:
        """Bootstrap run with gold-standard labelling.

        Same pipeline as the parent class, with two differences: the fifth
        column of each output line is "1" when the element belongs to the
        dataset's gold standard (``self.datasets_gs``) and "0" otherwise,
        and bootstrap victory counts are offset by the number of elements
        lost in the resampling.
        """
        res = ""
        position_consensus = {}
        h_max_copeland_score = {}
        create_dir(self.__path_hist)
        create_dir(self.__path_ic)
        for dataset in self.datasets:
            max_score = 0
            # gene -> [initial Copeland score, bootstrap scores...]
            h_dataset = {}
            # gene -> victories/equalities/defeats bookkeeping
            h_dataset_victories = {}
            dataset.remove_empty_rankings()
            if self.__rate_presence_min > 0.:
                # Optionally drop rarely-ranked genes, then re-clean.
                dataset.remove_elements_rate_presence_lower_than(self.__rate_presence_min)
                dataset.remove_empty_rankings()
            create_dir(self.__path_hist + "/" + dataset.name.replace(" ", "_") + "_nb_genes=" + str(
                dataset.nb_elements) + "_nb_rankings=" + str(dataset.nb_rankings))
            print(dataset.name + " " + str(dataset.nb_elements) + " " + str(dataset.nb_rankings))
            # Initial consensus on the full (non-resampled) dataset.
            consensus_initial = self.algo.compute_consensus_rankings(dataset, self.scoring_scheme, True)
            consensus_ranking = consensus_initial.consensus_rankings[0]
            # Rank of each gene in the consensus; tied genes share a position.
            position_element = 1
            for bucket_i in consensus_ranking:
                for element in bucket_i:
                    position_consensus[element] = position_element
                position_element += len(bucket_i)
            score_copeland_elements = consensus_initial.copeland_scores
            # NOTE(review): unlike the parent class, this is the whole
            # victories mapping, not one gene's entry — every gene's
            # "initial" slot below receives the same dict. Harmless here
            # since "initial" is never read, but worth confirming.
            victories_gene = consensus_initial.copeland_victories
            for gene in dataset.elements:
                score_gene = score_copeland_elements[gene]
                h_dataset[gene] = [score_gene]
                h_dataset_victories[gene] = {}
                h_dataset_victories[gene]["initial"] = victories_gene
                h_dataset_victories[gene]["bootstrap_victories"] = []
                h_dataset_victories[gene]["bootstrap_equalities"] = []
                h_dataset_victories[gene]["bootstrap_defeats"] = []
                if score_gene > max_score:
                    max_score = score_gene
            for i in range(self._nb_bootstrap):
                # Progress report every 10% of the iterations.
                if (i + 1) % (self._nb_bootstrap / 10) == 0:
                    print(i + 1)
                dataset_bootstrap = bootstrap_dataset(dataset)
                # Genes absent from the resampling ("lost") shift scores.
                lost_elements = dataset.nb_elements - dataset_bootstrap.nb_elements
                consensus = self._algo.compute_consensus_rankings(dataset_bootstrap, self._scoring_cheme, True)
                cop_scores = consensus.copeland_scores
                cop_victories = consensus.copeland_victories
                for gene in dataset.elements:
                    if gene in cop_scores:
                        # Offset by lost_elements to keep scores comparable
                        # across bootstraps of different effective sizes.
                        h_dataset[gene].append(cop_scores[gene] + lost_elements)
                        # h_dataset[gene].append(cop_scores[gene])
                        victories_equalities_defeat = cop_victories[gene]
                        h_dataset_victories[gene]["bootstrap_victories"].append(victories_equalities_defeat[0]+lost_elements)
                        h_dataset_victories[gene]["bootstrap_equalities"].append(victories_equalities_defeat[1])
                        h_dataset_victories[gene]["bootstrap_defeats"].append(victories_equalities_defeat[2])
                        if cop_scores[gene] > max_score:
                            max_score = cop_scores[gene]
                    else:
                        # Gene missing from the bootstrap sample: neutral
                        # score, zero victories, ties with the other lost
                        # genes, defeats against every sampled gene.
                        h_dataset[gene].append((lost_elements - 1) * 0.5)
                        # h_dataset[gene].append(nan)
                        h_dataset_victories[gene]["bootstrap_victories"].append(0)
                        h_dataset_victories[gene]["bootstrap_equalities"].append(lost_elements - 1)
                        h_dataset_victories[gene]["bootstrap_defeats"].append(dataset_bootstrap.nb_elements)
            for gene in dataset.elements:
                # Drop NaNs defensively (only relevant when the nan branch
                # above is re-enabled).
                array_scores = asarray(h_dataset[gene])
                array_scores_without_nan = array_scores[~isnan(array_scores)]
                h_dataset[gene] = list(array_scores_without_nan)
            h_max_copeland_score[dataset.name] = max_score
            h_elements_mean_score = {}
            for gene in dataset.elements:
                # Mean over bootstrap scores only (index 0 is the initial one).
                h_elements_mean_score[gene] = mean(asarray(h_dataset[gene][1:]))
            # Emit lines sorted by mean bootstrap score; the 5th column flags
            # gold-standard membership.
            for gene, value in sorted(h_elements_mean_score.items(), key=lambda item: item[1]):
                if gene in self.datasets_gs[dataset.name]:
                    res += dataset.name + ";" \
                           + str(dataset.nb_elements) + ";" \
                           + str(dataset.nb_rankings) + ";" \
                           + str(gene) + ";1;" \
                           + str(h_dataset[gene][0]) + ";" \
                           + str(h_max_copeland_score[dataset.name]) + ";" \
                           + str(mean(asarray(h_dataset_victories[gene]["bootstrap_victories"]))) + ";" \
                           + str(mean(asarray(h_dataset_victories[gene]["bootstrap_equalities"]))) + ";" \
                           + str(mean(asarray(h_dataset_victories[gene]["bootstrap_defeats"]))) + ";" \
                           + str(position_consensus[gene]) + ";" \
                           + str(h_dataset[gene][1:])[1:-1] + "\n"
                else:
                    res += dataset.name + ";" \
                           + str(dataset.nb_elements) + ";" \
                           + str(dataset.nb_rankings) + ";" \
                           + str(gene) + ";0;" \
                           + str(h_dataset[gene][0]) + ";" \
                           + str(h_max_copeland_score[dataset.name]) + ";" \
                           + str(mean(asarray(h_dataset_victories[gene]["bootstrap_victories"]))) + ";" \
                           + str(mean(asarray(h_dataset_victories[gene]["bootstrap_equalities"]))) + ";" \
                           + str(mean(asarray(h_dataset_victories[gene]["bootstrap_defeats"]))) + ";" \
                           + str(position_consensus[gene]) + ";" \
                           + str(h_dataset[gene][1:])[1:-1] + "\n"
        return res
def _run_final_data(self, raw_data: str) -> str:
ic_rate = self.__ic_rate
h_rank_genes = {}
h_ic_genes = {}
for line in raw_data.split("\n"):
if len(line) > 1:
cols = line.split(";")
disease = cols[0]
nb_elements = cols[1]
nb_rankings = cols[2]
id_gene = cols[3]
score_cop_initial = cols[5]
max_copeland_dataset = cols[6]
victories = cols[7]
equalities = cols[8]
defeats = cols[9]
rank_gene_consensus = cols[10]
scores_bootstrap = cols[11]
path_output = self.__path_hist + "/" + cols[0].replace(" ", "_") + "_nb_genes=" \
+ str(nb_elements) + "_nb_rankings=" + str(nb_rankings) + "/"
col = "blue"
score_bootstrap = fromstring(scores_bootstrap, sep=",")
score_initial = float(score_cop_initial)
scores_boot_centered = score_bootstrap - score_initial
quantiles_ic = quantile(scores_boot_centered, [ic_rate/2, 1-ic_rate/2])
ic_gene = score_initial - quantiles_ic[1], score_initial - quantiles_ic[0]
if disease not in h_rank_genes:
h_rank_genes[disease] = {}
h_ic_genes[disease] = {}
h_rank_genes[disease][id_gene] = int(rank_gene_consensus)
h_ic_genes[disease][id_gene] = ic_gene
plt.hist(score_bootstrap, color=col)
plt.xlim(xmin=0, xmax=round(int(float(max_copeland_dataset))) + 1)
plt.axvline(float(score_cop_initial), color="green")
plt.xlabel("Cop score for gene " + str(int(id_gene)))
plt.title("quartiles:" + str(quantile(score_bootstrap, [0.25, 0.5, 0.75])) + ";mean=" + str(
round(mean(score_bootstrap), 2)) + ";std=" + str(round(std(score_bootstrap), 2))
+"\nvict-eq-def=" + str(victories) + " "+ str(equalities) + " " + str(defeats))
plt.savefig(fname=path_output + '{0:03}'.format(int(rank_gene_consensus)) +"_elem_" + str(int(id_gene)), format="png")
plt.clf()
data_dict = {}
for disease in h_rank_genes.keys():
h_disease_ranks = sorted(h_rank_genes[disease].items(), key=lambda item: item[1])
h_disease_ic = h_ic_genes[disease]
data_dict.clear()
data_dict['category'] = []
data_dict['lower'] = []
data_dict['upper'] = []
for gene_with_pos in h_disease_ranks:
gene = gene_with_pos[0]
data_dict['category'].append(gene)
data_dict['lower'].append(h_disease_ic[gene][0])
data_dict['upper'].append(h_disease_ic[gene][1])
dataset = pd.DataFrame(data_dict)
for lower, upper, y in zip(dataset['lower'], dataset['upper'], range(len(dataset))):
plt.plot((lower, upper), (y, y), 'ro-')
path_output = self.__path_ic + "/" + disease.replace(" ", "_")
plt.yticks(range(len(dataset)), list(dataset['category']))
plt.savefig(fname=path_output, format="png")
plt.clf()
return ""
class EvolutionNbRankings(ExperimentFromDataset):
def __init__(self,
dataset_folder: str,
algo: MedianRanking,
scoring_scheme: ScoringScheme,
dataset_selector: DatasetSelector = None,
):
super().__init__(dataset_folder=dataset_folder, dataset_selector=dataset_selector)
self._algo = algo
self._scoring_cheme = scoring_scheme
def _run_final_data(self, raw_data: str) -> str:
return raw_data
def _run_raw_data(self) -> str:
to_test = list(range(10, 100, 10))
to_test.extend(list(range(100, 1001, 100)))
res = ""
for dataset in self.datasets:
print(dataset.name)
h_gene_list_scores = {}
for element in dataset.elements:
h_gene_list_scores[element] = []
shuffle(dataset.rankings)
for i in to_test:
dataset_new = Dataset(dataset.rankings[0:i])
dataset_new.name = dataset.name
consensus = self._algo.compute_consensus_rankings(dataset_new, self._scoring_cheme, True)
copeland_scores = consensus.copeland_scores
for element in dataset_new.elements:
cop_score_element = copeland_scores.get(element)
h_gene_list_scores[element].append(cop_score_element)
for element in dataset.elements:
res += dataset.name + ";" + str(element) + ";" + str(h_gene_list_scores[element]) + "\n"
return res
algor = get_algorithm(Algorithm.CopelandMethod)
scoring_scheme_exp = ScoringScheme.get_pseudodistance_scoring_scheme_p(1.)
"""
rates_presence_min = [0.2]
ic_rates = [0.05]
for rate_presence_minimal in rates_presence_min:
for ic_rate in ic_rates:
print(ic_rate)
print(rate_presence_minimal)
b = BootstrapExperimentBiologicalIC(dataset_folder="/home/pierre/Bureau/vldb_data/datasets/biological_dataset",
algo=algor,
scoring_scheme=scoring_scheme_exp,
nb_bootstrap=10000,
dataset_selector=DatasetSelector(
nb_rankings_min=20, nb_elem_min=200, nb_elem_max=219),
rate_presence_minimal=rate_presence_minimal,
ic_rate=ic_rate)
b.run(display_all=True, figures=True)
seed(1)
rdm.seed(1)
repeat = 1
nb_steps = [0, 50, 100, 150, 200, 250, 300, 600, 900, 1200, 1500, 3000, 6000, 9000, 12000, 15000, 18000, 21000, 30000, 40000, 50000, 60000]
nb_elem = 50
nb_rankings_to_generate = 10
for step in nb_steps:
for i in range(repeat):
new_rankings = create_rankings(nb_elements=nb_elem, nb_rankings=nb_rankings, steps=step, complete=True)
f = open("/home/pierre/Bureau/datasets_bootstrap_permutations/"+"n=" + str(nb_elem) + "_m=" + str(nb_rankings)
+ "_s=" + str(step) +"_" + '{0:03}'.format(i), "w")
for ranking_new in new_rankings:
f.write(str(ranking_new))
f.write("\n")
f.close()
from corankco.dataset import Dataset, EmptyDatasetException
e = ExperimentFromDataset("/home/pierre/Bureau/vldb_data/datasets/biological_dataset")
for d in e.datasets:
print(d)
changes = True
d_bis = Dataset(d.rankings)
d_bis.remove_empty_rankings()
print(d_bis)
str_save = str(d_bis)
try:
while changes:
print("encore un tour")
d_bis.remove_elements_rate_presence_lower_than(0.333)
print(d_bis)
rankings_dataset = d_bis.rankings
rankings_copy = []
for ranking_dataset in rankings_dataset:
s = set()
for bucket_dataset in ranking_dataset:
s.update(bucket_dataset)
if len(s) >= 10:
rankings_copy.append(ranking_dataset)
d_copy = Dataset(rankings_copy)
print("copie")
print(d_copy)
if str(d_copy) == str_save:
changes = False
else:
str_save = str(d_copy)
d_bis = d_copy
except EmptyDatasetException:
continue
if d_bis.nb_rankings >= 10:
d_bis.write("/home/pierre/Bureau/data_converge/" + d.name)
"""
for i in range(60, 150):
b = BootstrapExperiment(
dataset_folder="/home/pierre/Bureau/data_converge",
algo=algor,
scoring_scheme=scoring_scheme_exp,
nb_bootstrap=1000,
dataset_selector=DatasetSelector(nb_elem_min=i, nb_elem_max=i),
)
b.run()
""""
b = EvolutionNbRankings(
dataset_folder="/home/pierre/Bureau/datasets_bootstrap_permutations",
algo=algor,
scoring_scheme=scoring_scheme_exp,
)
b.run(display_all=True) """ | 46.464865 | 139 | 0.569567 | from corankco.algorithms.median_ranking import MedianRanking
from corankco.dataset import DatasetSelector, Dataset
from corankco.scoringscheme import ScoringScheme
from corankco.experiments.experiment import ExperimentFromDataset, ExperimentFromOrphanetDataset
from corankco.experiments.stats.bootstrap import bootstrap_dataset
from numpy import asarray, std, mean, quantile, fromstring, round, isnan
from corankco.utils import create_dir
from random import shuffle
import matplotlib.pyplot as plt
import pandas as pd
from corankco.algorithms.algorithmChoice import get_algorithm, Algorithm
class BootstrapExperiment(ExperimentFromDataset):
def __init__(self,
dataset_folder: str,
algo: MedianRanking,
scoring_scheme: ScoringScheme,
nb_bootstrap: int = 1000,
dataset_selector: DatasetSelector = None,
):
super().__init__(dataset_folder=dataset_folder, dataset_selector=dataset_selector)
self._algo = algo
self._scoring_cheme = scoring_scheme
self._nb_bootstrap = nb_bootstrap
self.__path_hist = "/home/pierre/Bureau/bootstrap_converge/hist"
self.__path_ic = "/home/pierre/Bureau/bootstrap_converge/ic"
self.__rate_presence_min = 0.
self.__ic_rate = 0.05
def _get_algo(self) -> MedianRanking:
return self._algo
def _get_scoring_scheme(self) -> ScoringScheme:
return self._scoring_cheme
def _get_nb_bootstrap(self) -> int:
return self._nb_bootstrap
algo = property(_get_algo)
scoring_scheme = property(_get_scoring_scheme)
nb_bootstrap = property(_get_nb_bootstrap)
def _run_raw_data(self) -> str:
res = ""
position_consensus = {}
h_max_copeland_score = {}
create_dir(self.__path_hist)
create_dir(self.__path_ic)
for dataset in self.datasets:
max_score = 0
h_dataset = {}
h_dataset_victories = {}
dataset.remove_empty_rankings()
if self.__rate_presence_min > 0.:
dataset.remove_elements_rate_presence_lower_than(self.__rate_presence_min)
dataset.remove_empty_rankings()
create_dir(self.__path_hist + "/" + dataset.name.replace(" ", "_").replace(".txt", "")
+ "_nb_elements=" + str(dataset.nb_elements) + "_nb_rankings=" + str(dataset.nb_rankings))
print(dataset.name + " " + str(dataset.nb_elements) + " " + str(dataset.nb_rankings))
consensus_initial = self.algo.compute_consensus_rankings(dataset, self.scoring_scheme, True)
consensus_ranking = consensus_initial.consensus_rankings[0]
position_element = 1
for bucket_i in consensus_ranking:
for element in bucket_i:
position_consensus[element] = position_element
position_element += len(bucket_i)
score_copeland_elements = consensus_initial.copeland_scores
victories_copeland_elements = consensus_initial.copeland_victories
for gene in dataset.elements:
score_gene = score_copeland_elements[gene]
victories_gene = victories_copeland_elements[gene]
h_dataset[gene] = [score_gene]
h_dataset_victories[gene] = {}
h_dataset_victories[gene]["initial"] = victories_gene
h_dataset_victories[gene]["bootstrap_victories"] = []
h_dataset_victories[gene]["bootstrap_equalities"] = []
h_dataset_victories[gene]["bootstrap_defeats"] = []
if score_gene > max_score:
max_score = score_gene
for i in range(self._nb_bootstrap):
if (i + 1) % (self._nb_bootstrap / 10) == 0:
print(i + 1)
dataset_bootstrap = bootstrap_dataset(dataset)
lost_elements = dataset.nb_elements - dataset_bootstrap.nb_elements
consensus = self._algo.compute_consensus_rankings(dataset_bootstrap, self._scoring_cheme, True)
cop_scores = consensus.copeland_scores
cop_victories = consensus.copeland_victories
for element in dataset.elements:
if element in cop_scores:
h_dataset[element].append(cop_scores[element] + lost_elements)
victories_equalities_defeat = cop_victories[element]
h_dataset_victories[element]["bootstrap_victories"].append(victories_equalities_defeat[0])
h_dataset_victories[element]["bootstrap_equalities"].append(victories_equalities_defeat[1])
h_dataset_victories[element]["bootstrap_defeats"].append(victories_equalities_defeat[2])
if cop_scores[element] > max_score:
max_score = cop_scores[element]
else:
h_dataset[element].append((lost_elements - 1) * 0.5)
for element in dataset.elements:
array_scores = asarray(h_dataset[element])
array_scores_without_nan = array_scores[~isnan(array_scores)]
h_dataset[element] = list(array_scores_without_nan)
h_max_copeland_score[dataset.name] = max_score
h_elements_mean_score = {}
for element in dataset.elements:
h_elements_mean_score[element] = mean(asarray(h_dataset[element][1:]))
for element, value in sorted(h_elements_mean_score.items(), key=lambda item: item[1]):
res += dataset.name + ";" \
+ str(dataset.nb_elements) + ";" \
+ str(dataset.nb_rankings) + ";" \
+ str(element) + ";1;" \
+ str(h_dataset[element][0]) + ";" \
+ str(h_max_copeland_score[dataset.name]) + ";" \
+ str(mean(asarray(h_dataset_victories[element]["bootstrap_victories"]))) + ";" \
+ str(mean(asarray(h_dataset_victories[element]["bootstrap_equalities"]))) + ";" \
+ str(mean(asarray(h_dataset_victories[element]["bootstrap_defeats"]))) + ";" \
+ str(position_consensus[element]) + ";" \
+ str(h_dataset[element][1:])[1:-1] + "\n"
print(res)
return res
def _run_final_data(self, raw_data: str) -> str:
ic_rate = self.__ic_rate
h_rank_genes = {}
h_ic_genes = {}
for line in raw_data.split("\n"):
if len(line) > 1:
cols = line.split(";")
disease = cols[0]
nb_elements = cols[1]
nb_rankings = cols[2]
id_gene = cols[3]
score_cop_initial = cols[5]
max_copeland_dataset = cols[6]
victories = cols[7]
equalities = cols[8]
defeats = cols[9]
rank_gene_consensus = cols[10]
scores_bootstrap = cols[11]
path_output = self.__path_hist + "/" + cols[0].replace(" ", "_") + "_nb_elements=" \
+ str(nb_elements) + "_nb_rankings=" + str(nb_rankings) + "/"
col = "blue"
score_bootstrap = fromstring(scores_bootstrap, sep=",")
score_initial = float(score_cop_initial)
scores_boot_centered = score_bootstrap - score_initial
quantiles_ic = quantile(scores_boot_centered, [ic_rate/2, 1-ic_rate/2])
ic_gene = score_initial - quantiles_ic[1], score_initial - quantiles_ic[0]
if disease not in h_rank_genes:
h_rank_genes[disease] = {}
h_ic_genes[disease] = {}
h_rank_genes[disease][id_gene] = int(rank_gene_consensus)
h_ic_genes[disease][id_gene] = ic_gene
plt.hist(score_bootstrap, color=col)
plt.xlim(xmin=0, xmax=round(int(float(max_copeland_dataset))) + 1)
plt.axvline(float(score_cop_initial), color="green")
plt.xlabel("Cop score for elem " + str(int(id_gene)))
plt.title("quartiles:" + str(quantile(score_bootstrap, [0.25, 0.5, 0.75])) + ";mean=" + str(
round(mean(score_bootstrap), 2)) + ";std=" + str(round(std(score_bootstrap), 2))
+"\nvict-eq-def=" + str(victories) + " "+ str(equalities) + " " + str(defeats))
plt.savefig(fname=path_output + '{0:03}'.format(int(rank_gene_consensus)) +"_elem_" + str(int(id_gene)), format="png")
plt.clf()
data_dict = {}
for disease in h_rank_genes.keys():
h_disease_ranks = sorted(h_rank_genes[disease].items(), key=lambda item: item[1])[:25]
print(h_disease_ranks)
h_disease_ic = h_ic_genes[disease]
data_dict.clear()
data_dict['category'] = []
data_dict['lower'] = []
data_dict['upper'] = []
min_x = 0
max_x = 0
for gene_with_pos in h_disease_ranks:
gene = gene_with_pos[0]
data_dict['category'].append(gene)
data_dict['lower'].append(h_disease_ic[gene][0])
data_dict['upper'].append(h_disease_ic[gene][1])
if h_disease_ic[gene][0] < min_x:
min_x = h_disease_ic[gene][0]
if h_disease_ic[gene][1] > max_x:
max_x = h_disease_ic[gene][1]
dataset = pd.DataFrame(data_dict)
for lower, upper, y in zip(dataset['lower'], dataset['upper'], range(len(dataset))):
plt.xlim(min_x, max_x)
plt.plot((lower, upper), (y, y), 'ro-')
path_output = self.__path_ic + "/" + disease.replace(" ", "_")
plt.yticks(range(len(dataset)), list(dataset['category']))
plt.savefig(fname=path_output, format="png")
plt.clf()
return ""
class BootstrapExperimentBiologicalIC(BootstrapExperiment, ExperimentFromOrphanetDataset):
def __init__(self,
dataset_folder: str,
algo: MedianRanking,
scoring_scheme: ScoringScheme,
nb_bootstrap: int = 1000,
dataset_selector: DatasetSelector = None,
ic_rate: float = 0.05,
rate_presence_minimal: float = 0.
):
super().__init__(dataset_folder, algo, scoring_scheme, nb_bootstrap, dataset_selector)
super()._remove_datasets_empty_goldstandard()
self.__ic_rate = ic_rate
self.__rate_presence_min = rate_presence_minimal
self.__path_hist = "/home/pierre/Bureau/expe_bootstrap_bio/ic=" \
+ str(self.__ic_rate) + "_pres=" + str(self.__rate_presence_min) + "_hist"
self.__path_ic = "/home/pierre/Bureau/expe_bootstrap_bio/ic=" \
+ str(self.__ic_rate) + "_pres=" + str(self.__rate_presence_min) + "_ic"
def _run_raw_data(self) -> str:
res = ""
position_consensus = {}
h_max_copeland_score = {}
create_dir(self.__path_hist)
create_dir(self.__path_ic)
for dataset in self.datasets:
max_score = 0
h_dataset = {}
h_dataset_victories = {}
dataset.remove_empty_rankings()
if self.__rate_presence_min > 0.:
dataset.remove_elements_rate_presence_lower_than(self.__rate_presence_min)
dataset.remove_empty_rankings()
create_dir(self.__path_hist + "/" + dataset.name.replace(" ", "_") + "_nb_genes=" + str(
dataset.nb_elements) + "_nb_rankings=" + str(dataset.nb_rankings))
print(dataset.name + " " + str(dataset.nb_elements) + " " + str(dataset.nb_rankings))
consensus_initial = self.algo.compute_consensus_rankings(dataset, self.scoring_scheme, True)
consensus_ranking = consensus_initial.consensus_rankings[0]
position_element = 1
for bucket_i in consensus_ranking:
for element in bucket_i:
position_consensus[element] = position_element
position_element += len(bucket_i)
score_copeland_elements = consensus_initial.copeland_scores
victories_gene = consensus_initial.copeland_victories
for gene in dataset.elements:
score_gene = score_copeland_elements[gene]
h_dataset[gene] = [score_gene]
h_dataset_victories[gene] = {}
h_dataset_victories[gene]["initial"] = victories_gene
h_dataset_victories[gene]["bootstrap_victories"] = []
h_dataset_victories[gene]["bootstrap_equalities"] = []
h_dataset_victories[gene]["bootstrap_defeats"] = []
if score_gene > max_score:
max_score = score_gene
for i in range(self._nb_bootstrap):
if (i + 1) % (self._nb_bootstrap / 10) == 0:
print(i + 1)
dataset_bootstrap = bootstrap_dataset(dataset)
lost_elements = dataset.nb_elements - dataset_bootstrap.nb_elements
consensus = self._algo.compute_consensus_rankings(dataset_bootstrap, self._scoring_cheme, True)
cop_scores = consensus.copeland_scores
cop_victories = consensus.copeland_victories
for gene in dataset.elements:
if gene in cop_scores:
h_dataset[gene].append(cop_scores[gene] + lost_elements)
victories_equalities_defeat = cop_victories[gene]
h_dataset_victories[gene]["bootstrap_victories"].append(victories_equalities_defeat[0]+lost_elements)
h_dataset_victories[gene]["bootstrap_equalities"].append(victories_equalities_defeat[1])
h_dataset_victories[gene]["bootstrap_defeats"].append(victories_equalities_defeat[2])
if cop_scores[gene] > max_score:
max_score = cop_scores[gene]
else:
h_dataset[gene].append((lost_elements - 1) * 0.5)
h_dataset_victories[gene]["bootstrap_victories"].append(0)
h_dataset_victories[gene]["bootstrap_equalities"].append(lost_elements - 1)
h_dataset_victories[gene]["bootstrap_defeats"].append(dataset_bootstrap.nb_elements)
for gene in dataset.elements:
array_scores = asarray(h_dataset[gene])
array_scores_without_nan = array_scores[~isnan(array_scores)]
h_dataset[gene] = list(array_scores_without_nan)
h_max_copeland_score[dataset.name] = max_score
h_elements_mean_score = {}
for gene in dataset.elements:
h_elements_mean_score[gene] = mean(asarray(h_dataset[gene][1:]))
for gene, value in sorted(h_elements_mean_score.items(), key=lambda item: item[1]):
if gene in self.datasets_gs[dataset.name]:
res += dataset.name + ";" \
+ str(dataset.nb_elements) + ";" \
+ str(dataset.nb_rankings) + ";" \
+ str(gene) + ";1;" \
+ str(h_dataset[gene][0]) + ";" \
+ str(h_max_copeland_score[dataset.name]) + ";" \
+ str(mean(asarray(h_dataset_victories[gene]["bootstrap_victories"]))) + ";" \
+ str(mean(asarray(h_dataset_victories[gene]["bootstrap_equalities"]))) + ";" \
+ str(mean(asarray(h_dataset_victories[gene]["bootstrap_defeats"]))) + ";" \
+ str(position_consensus[gene]) + ";" \
+ str(h_dataset[gene][1:])[1:-1] + "\n"
else:
res += dataset.name + ";" \
+ str(dataset.nb_elements) + ";" \
+ str(dataset.nb_rankings) + ";" \
+ str(gene) + ";0;" \
+ str(h_dataset[gene][0]) + ";" \
+ str(h_max_copeland_score[dataset.name]) + ";" \
+ str(mean(asarray(h_dataset_victories[gene]["bootstrap_victories"]))) + ";" \
+ str(mean(asarray(h_dataset_victories[gene]["bootstrap_equalities"]))) + ";" \
+ str(mean(asarray(h_dataset_victories[gene]["bootstrap_defeats"]))) + ";" \
+ str(position_consensus[gene]) + ";" \
+ str(h_dataset[gene][1:])[1:-1] + "\n"
return res
def _run_final_data(self, raw_data: str) -> str:
ic_rate = self.__ic_rate
h_rank_genes = {}
h_ic_genes = {}
for line in raw_data.split("\n"):
if len(line) > 1:
cols = line.split(";")
disease = cols[0]
nb_elements = cols[1]
nb_rankings = cols[2]
id_gene = cols[3]
score_cop_initial = cols[5]
max_copeland_dataset = cols[6]
victories = cols[7]
equalities = cols[8]
defeats = cols[9]
rank_gene_consensus = cols[10]
scores_bootstrap = cols[11]
path_output = self.__path_hist + "/" + cols[0].replace(" ", "_") + "_nb_genes=" \
+ str(nb_elements) + "_nb_rankings=" + str(nb_rankings) + "/"
col = "blue"
score_bootstrap = fromstring(scores_bootstrap, sep=",")
score_initial = float(score_cop_initial)
scores_boot_centered = score_bootstrap - score_initial
quantiles_ic = quantile(scores_boot_centered, [ic_rate/2, 1-ic_rate/2])
ic_gene = score_initial - quantiles_ic[1], score_initial - quantiles_ic[0]
if disease not in h_rank_genes:
h_rank_genes[disease] = {}
h_ic_genes[disease] = {}
h_rank_genes[disease][id_gene] = int(rank_gene_consensus)
h_ic_genes[disease][id_gene] = ic_gene
plt.hist(score_bootstrap, color=col)
plt.xlim(xmin=0, xmax=round(int(float(max_copeland_dataset))) + 1)
plt.axvline(float(score_cop_initial), color="green")
plt.xlabel("Cop score for gene " + str(int(id_gene)))
plt.title("quartiles:" + str(quantile(score_bootstrap, [0.25, 0.5, 0.75])) + ";mean=" + str(
round(mean(score_bootstrap), 2)) + ";std=" + str(round(std(score_bootstrap), 2))
+"\nvict-eq-def=" + str(victories) + " "+ str(equalities) + " " + str(defeats))
plt.savefig(fname=path_output + '{0:03}'.format(int(rank_gene_consensus)) +"_elem_" + str(int(id_gene)), format="png")
plt.clf()
data_dict = {}
for disease in h_rank_genes.keys():
h_disease_ranks = sorted(h_rank_genes[disease].items(), key=lambda item: item[1])
h_disease_ic = h_ic_genes[disease]
data_dict.clear()
data_dict['category'] = []
data_dict['lower'] = []
data_dict['upper'] = []
for gene_with_pos in h_disease_ranks:
gene = gene_with_pos[0]
data_dict['category'].append(gene)
data_dict['lower'].append(h_disease_ic[gene][0])
data_dict['upper'].append(h_disease_ic[gene][1])
dataset = pd.DataFrame(data_dict)
for lower, upper, y in zip(dataset['lower'], dataset['upper'], range(len(dataset))):
plt.plot((lower, upper), (y, y), 'ro-')
path_output = self.__path_ic + "/" + disease.replace(" ", "_")
plt.yticks(range(len(dataset)), list(dataset['category']))
plt.savefig(fname=path_output, format="png")
plt.clf()
return ""
class EvolutionNbRankings(ExperimentFromDataset):
def __init__(self,
dataset_folder: str,
algo: MedianRanking,
scoring_scheme: ScoringScheme,
dataset_selector: DatasetSelector = None,
):
super().__init__(dataset_folder=dataset_folder, dataset_selector=dataset_selector)
self._algo = algo
self._scoring_cheme = scoring_scheme
def _run_final_data(self, raw_data: str) -> str:
return raw_data
def _run_raw_data(self) -> str:
to_test = list(range(10, 100, 10))
to_test.extend(list(range(100, 1001, 100)))
res = ""
for dataset in self.datasets:
print(dataset.name)
h_gene_list_scores = {}
for element in dataset.elements:
h_gene_list_scores[element] = []
shuffle(dataset.rankings)
for i in to_test:
dataset_new = Dataset(dataset.rankings[0:i])
dataset_new.name = dataset.name
consensus = self._algo.compute_consensus_rankings(dataset_new, self._scoring_cheme, True)
copeland_scores = consensus.copeland_scores
for element in dataset_new.elements:
cop_score_element = copeland_scores.get(element)
h_gene_list_scores[element].append(cop_score_element)
for element in dataset.elements:
res += dataset.name + ";" + str(element) + ";" + str(h_gene_list_scores[element]) + "\n"
return res
algor = get_algorithm(Algorithm.CopelandMethod)
scoring_scheme_exp = ScoringScheme.get_pseudodistance_scoring_scheme_p(1.)
for i in range(60, 150):
b = BootstrapExperiment(
dataset_folder="/home/pierre/Bureau/data_converge",
algo=algor,
scoring_scheme=scoring_scheme_exp,
nb_bootstrap=1000,
dataset_selector=DatasetSelector(nb_elem_min=i, nb_elem_max=i),
)
b.run()
| true | true |
f7f71e0011d41427e8bdcc44986f9e10bc21d3cb | 11,681 | py | Python | ament_tools/helper.py | richmattes/ament_tools | 2a25cdcc273fcd73e81e8a47fe892a0b5963307d | [
"Apache-2.0"
] | 1 | 2020-05-19T14:33:49.000Z | 2020-05-19T14:33:49.000Z | ros2_mod_ws/install/lib/python3.7/site-packages/ament_tools/helper.py | mintforpeople/robobo-ros2-ios-port | 1a5650304bd41060925ebba41d6c861d5062bfae | [
"Apache-2.0"
] | null | null | null | ros2_mod_ws/install/lib/python3.7/site-packages/ament_tools/helper.py | mintforpeople/robobo-ros2-ios-port | 1a5650304bd41060925ebba41d6c861d5062bfae | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import filecmp
from multiprocessing import cpu_count
import os
import re
import shlex
import shutil
import stat
from ament_tools.package_types import package_exists_at
def argparse_existing_dir(path):
if not os.path.exists(path):
raise argparse.ArgumentTypeError("Path '%s' does not exist" % path)
if not os.path.isdir(path):
raise argparse.ArgumentTypeError("Path '%s' is not a directory" % path)
return path
def argparse_existing_package(path):
path = argparse_existing_dir(path)
if not package_exists_at(path):
raise argparse.ArgumentTypeError(
"Path '%s' does not contain a package" % path)
return path
def determine_path_argument(cwd, base_path, argument, default):
if argument is None:
# if no argument is passed the default is relative to the base_path
return os.path.join(base_path, default)
# if an argument is passed it is relative to cwd (or absolute)
return os.path.abspath(os.path.join(cwd, argument))
def extract_jobs_flags(arguments):
"""
Extract make job flags from a list of other make flags, i.e. -j8 -l8.
:param arguments: string of space separated arguments which may or may not
contain make job flags
:type arguments: str
:returns: list of make jobs flags as a space separated string
:rtype: str
"""
regex = (
r'(?:^|\s)(-?(?:j|l)(?:\s*[0-9]+|\s|$))'
r'|'
r'(?:^|\s)((?:--)?(?:jobs|load-average)(?:(?:=|\s+)[0-9]+|(?:\s|$)))'
)
matches = re.findall(regex, arguments) or []
matches = [m[0] or m[1] for m in matches]
return ' '.join([m.strip() for m in matches]) if matches else None
def combine_make_flags(make_flags, args, extras):
"""
Combine make flags and arg's make job flags with make_flags in extras.
:param list make_flags: existing make_flags, extracted from args already.
:param list args: command line args with ``--make-flags ...`` extracted.
:param dict extras: extras dict to which make flags are added/extended.
"""
# Add make_flags in extras, if they exist, to verb's --make-flags
make_flags += extras.get('make_flags', [])
# Extract make job arguments from main arguments and add to make_flags
make_job_flags = extract_jobs_flags(' '.join(args))
if make_job_flags:
args = re.sub(make_job_flags, '', ' '.join(args)).split()
make_flags.extend(make_job_flags.split())
# Ensure make args will have job flags and then store make_flags in extras
extras['make_flags'] = ensure_make_job_flags(make_flags)
return args
def ensure_make_job_flags(input_make_args):
"""
Ensure that make will get correct job flags, either from args or env.
If no job flags are present and there are none in the MAKEFLAGS environment
variable, then make flags are set to the cpu_count, e.g. -j4 -l4.
:param input_make_args: list of make arguments to be handled
:type input_make_args: list
:returns: copied list of make arguments, potentially with modifications
:rtype: list
"""
make_args = list(input_make_args)
# If no -j/--jobs/-l/--load-average flags are in make_args
if not extract_jobs_flags(' '.join(make_args)):
# If -j/--jobs/-l/--load-average are in MAKEFLAGS
if extract_jobs_flags(os.environ.get('MAKEFLAGS', '')):
# Do not extend make arguments, let MAKEFLAGS set things
pass
else:
# Else extend the make_arguments to include some jobs flags
# Use the number of CPU cores
try:
jobs = cpu_count()
make_args.append('-j{0}'.format(jobs))
make_args.append('-l{0}'.format(jobs))
except NotImplementedError:
# If the number of cores cannot be determined,
# then do not extend args
pass
return make_args
def extract_argument_group(args, delimiting_option):
"""
Extract a group of arguments from a list of arguments using a delimiter.
Here is an example:
.. code-block:: python
>>> extract_argument_group(['foo', '--args', 'bar', '--baz'], '--args')
(['foo'], ['bar', '--baz'])
The group can always be ended using the double hyphen ``--``.
In order to pass a double hyphen as arguments, use three hyphens ``---``.
Any set of hyphens encountered after the delimiter, and up to ``--``, which
have three or more hyphens and are isolated, will be captured and reduced
by one hyphen.
For example:
.. code-block:: python
>> extract_argument_group(['foo',
'--args', 'bar', '--baz', '---', '--',
'--foo-option'], '--args')
(['foo', '--foo-option'], ['bar', '--baz', '--'])
In the result the ``--`` comes from the ``---`` in the input.
The ``--args`` and the corresponding ``--`` are removed entirely.
The delimiter and ``--`` terminator combination can also happen multiple
times, in which case the bodies of arguments are combined and returned in
the order they appeared.
For example:
.. code-block:: python
>> extract_argument_group(['foo',
'--args', 'ping', '--',
'bar',
'--args', 'pong', '--',
'baz',
'--args', '--'], '--args')
(['foo', 'bar', 'baz'], ['ping', 'pong'])
Note: ``--`` cannot be used as the ``delimiting_option``.
:param list args: list of strings which are ordered arguments.
:param str delimiting_option: option which denotes where to split the args.
:returns: tuple of arguments before and after the delimiter.
:rtype: tuple
:raises: ValueError if the delimiting_option is ``--``.
"""
if delimiting_option == '--':
raise ValueError("Cannot use '--' as the delimiter")
if delimiting_option not in args:
return args, []
trimmed_args = args
extracted_args = []
# Loop through all arguments extracting groups of arguments
while True:
try:
next_delimiter = trimmed_args.index(delimiting_option)
except ValueError:
# No delimiter's left in the arguments, stop looking
break
# Capture and remove args after the delimiter
tail = trimmed_args[next_delimiter + 1:]
trimmed_args = trimmed_args[:next_delimiter]
# Look for a terminator, '--'
next_terminator = None
try:
next_terminator = tail.index('--')
except ValueError:
pass
if next_terminator is None:
# No terminator, put all args in extracted_args and stop looking
extracted_args.extend(tail)
break
else:
# Terminator found, put args up, but not including terminator
# in extracted_args
extracted_args.extend(tail[:next_terminator])
# And put arguments after the terminator back in trimmed_args
# then continue looking for additional delimiters
trimmed_args.extend(tail[next_terminator + 1:])
# Iterate through extracted args and shorted tokens with 3+ -'s only
for i, token in enumerate(extracted_args):
# '--' should have been removed from extracted_args in the above loop
assert token != '--', "this shouldn't happen"
# Skip single hyphens
if token == '-':
continue
# Check for non-hyphen characters
if [c for c in token if c != '-']:
# contains something other than -, continue
continue
# Must be only hyphens with more than two, Shorted by one -
extracted_args[i] = token[1:]
return trimmed_args, extracted_args
def compute_deploy_destination(context, filename, dst_subfolder=''):
return os.path.join(context.install_space, dst_subfolder, filename)
def deploy_file(
context,
source_base_path,
filename,
dst_subfolder='',
executable=False,
skip_if_exists=False,
):
# copy the file if not already there and identical
source_path = os.path.join(source_base_path, filename)
# create destination folder if necessary
destination_path = compute_deploy_destination(context, filename, dst_subfolder)
# If the file exists and we should skip if we didn't install it.
if (
(os.path.exists(destination_path) or os.path.islink(destination_path)) and
skip_if_exists
):
# If the dest is not a symlink or if it is but it doesn't point to our source.
if (
not os.path.islink(destination_path) or
not os.path.samefile(source_path, destination_path)
):
# Finally if the content is not the same.
if not filecmp.cmp(source_path, destination_path):
# We (probably) didn't install it and shouldn't overwrite it.
print('-- [ament] Skipping (would overwrite):', destination_path)
return
print('-- [ament] Deploying:', destination_path)
os.makedirs(os.path.dirname(destination_path), exist_ok=True)
# remove existing file / symlink if it is not already what is intended
if os.path.exists(destination_path):
if not context.symlink_install:
if os.path.islink(destination_path) or not filecmp.cmp(source_path, destination_path):
os.remove(destination_path)
else:
if not os.path.islink(destination_path) or \
not os.path.samefile(source_path, destination_path):
# try-catch to guard against a TOCTOU error that can happen during parallel build.
try:
os.remove(destination_path)
except OSError:
pass
if not os.path.exists(destination_path):
if not context.symlink_install:
shutil.copyfile(source_path, destination_path)
else:
# while the destination might not exist it can still be a symlink
if os.path.islink(destination_path):
os.remove(destination_path)
# try-catch to guard against a TOCTOU error that can happen during parallel build
try:
os.symlink(source_path, destination_path)
except OSError:
pass
# set executable bit if necessary
if executable and not context.symlink_install:
mode = os.stat(destination_path).st_mode
new_mode = mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
if new_mode != mode:
os.chmod(destination_path, new_mode)
def quote_shell_command(cmd):
if os.name != 'nt':
return ' '.join([(shlex.quote(c) if c != '&&' else c) for c in cmd])
quoted = []
for c in cmd:
if ' ' in c:
c = '"%s"' % (c.replace('"', r'\"'))
quoted.append(c)
return ' '.join(quoted)
| 37.925325 | 98 | 0.623491 |
import argparse
import filecmp
from multiprocessing import cpu_count
import os
import re
import shlex
import shutil
import stat
from ament_tools.package_types import package_exists_at
def argparse_existing_dir(path):
if not os.path.exists(path):
raise argparse.ArgumentTypeError("Path '%s' does not exist" % path)
if not os.path.isdir(path):
raise argparse.ArgumentTypeError("Path '%s' is not a directory" % path)
return path
def argparse_existing_package(path):
path = argparse_existing_dir(path)
if not package_exists_at(path):
raise argparse.ArgumentTypeError(
"Path '%s' does not contain a package" % path)
return path
def determine_path_argument(cwd, base_path, argument, default):
if argument is None:
return os.path.join(base_path, default)
return os.path.abspath(os.path.join(cwd, argument))
def extract_jobs_flags(arguments):
regex = (
r'(?:^|\s)(-?(?:j|l)(?:\s*[0-9]+|\s|$))'
r'|'
r'(?:^|\s)((?:--)?(?:jobs|load-average)(?:(?:=|\s+)[0-9]+|(?:\s|$)))'
)
matches = re.findall(regex, arguments) or []
matches = [m[0] or m[1] for m in matches]
return ' '.join([m.strip() for m in matches]) if matches else None
def combine_make_flags(make_flags, args, extras):
make_flags += extras.get('make_flags', [])
# Extract make job arguments from main arguments and add to make_flags
make_job_flags = extract_jobs_flags(' '.join(args))
if make_job_flags:
args = re.sub(make_job_flags, '', ' '.join(args)).split()
make_flags.extend(make_job_flags.split())
# Ensure make args will have job flags and then store make_flags in extras
extras['make_flags'] = ensure_make_job_flags(make_flags)
return args
def ensure_make_job_flags(input_make_args):
make_args = list(input_make_args)
# If no -j/--jobs/-l/--load-average flags are in make_args
if not extract_jobs_flags(' '.join(make_args)):
# If -j/--jobs/-l/--load-average are in MAKEFLAGS
if extract_jobs_flags(os.environ.get('MAKEFLAGS', '')):
# Do not extend make arguments, let MAKEFLAGS set things
pass
else:
# Else extend the make_arguments to include some jobs flags
# Use the number of CPU cores
try:
jobs = cpu_count()
make_args.append('-j{0}'.format(jobs))
make_args.append('-l{0}'.format(jobs))
except NotImplementedError:
# If the number of cores cannot be determined,
# then do not extend args
pass
return make_args
def extract_argument_group(args, delimiting_option):
if delimiting_option == '--':
raise ValueError("Cannot use '--' as the delimiter")
if delimiting_option not in args:
return args, []
trimmed_args = args
extracted_args = []
# Loop through all arguments extracting groups of arguments
while True:
try:
next_delimiter = trimmed_args.index(delimiting_option)
except ValueError:
# No delimiter's left in the arguments, stop looking
break
tail = trimmed_args[next_delimiter + 1:]
trimmed_args = trimmed_args[:next_delimiter]
next_terminator = None
try:
next_terminator = tail.index('--')
except ValueError:
pass
if next_terminator is None:
extracted_args.extend(tail)
break
else:
extracted_args.extend(tail[:next_terminator])
trimmed_args.extend(tail[next_terminator + 1:])
for i, token in enumerate(extracted_args):
# '--' should have been removed from extracted_args in the above loop
assert token != '--', "this shouldn't happen"
if token == '-':
continue
if [c for c in token if c != '-']:
continue
extracted_args[i] = token[1:]
return trimmed_args, extracted_args
def compute_deploy_destination(context, filename, dst_subfolder=''):
return os.path.join(context.install_space, dst_subfolder, filename)
def deploy_file(
context,
source_base_path,
filename,
dst_subfolder='',
executable=False,
skip_if_exists=False,
):
source_path = os.path.join(source_base_path, filename)
destination_path = compute_deploy_destination(context, filename, dst_subfolder)
if (
(os.path.exists(destination_path) or os.path.islink(destination_path)) and
skip_if_exists
):
# If the dest is not a symlink or if it is but it doesn't point to our source.
if (
not os.path.islink(destination_path) or
not os.path.samefile(source_path, destination_path)
):
if not filecmp.cmp(source_path, destination_path):
print('-- [ament] Skipping (would overwrite):', destination_path)
return
print('-- [ament] Deploying:', destination_path)
os.makedirs(os.path.dirname(destination_path), exist_ok=True)
if os.path.exists(destination_path):
if not context.symlink_install:
if os.path.islink(destination_path) or not filecmp.cmp(source_path, destination_path):
os.remove(destination_path)
else:
if not os.path.islink(destination_path) or \
not os.path.samefile(source_path, destination_path):
try:
os.remove(destination_path)
except OSError:
pass
if not os.path.exists(destination_path):
if not context.symlink_install:
shutil.copyfile(source_path, destination_path)
else:
if os.path.islink(destination_path):
os.remove(destination_path)
try:
os.symlink(source_path, destination_path)
except OSError:
pass
if executable and not context.symlink_install:
mode = os.stat(destination_path).st_mode
new_mode = mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
if new_mode != mode:
os.chmod(destination_path, new_mode)
def quote_shell_command(cmd):
if os.name != 'nt':
return ' '.join([(shlex.quote(c) if c != '&&' else c) for c in cmd])
quoted = []
for c in cmd:
if ' ' in c:
c = '"%s"' % (c.replace('"', r'\"'))
quoted.append(c)
return ' '.join(quoted)
| true | true |
f7f71e598a74aa91592c5e8c711b50241522cb79 | 2,028 | py | Python | otlplus/urls.py | victory-jooyon/otlplus | 88bf72972e5d63e29355a9e60f7a5fec247b5890 | [
"MIT"
] | null | null | null | otlplus/urls.py | victory-jooyon/otlplus | 88bf72972e5d63e29355a9e60f7a5fec247b5890 | [
"MIT"
] | null | null | null | otlplus/urls.py | victory-jooyon/otlplus | 88bf72972e5d63e29355a9e60f7a5fec247b5890 | [
"MIT"
] | null | null | null | """otlplus URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.http import HttpResponse, HttpResponseRedirect
from settings import BASE_DIR
import os
from django.conf.urls import (
handler400, handler403, handler404, handler500
)
from django.conf import settings
from django.conf.urls.static import static
from apps.review import views as review_views
from apps.timetable import views as timetable_views
from django import views as django_views
from django.views import static as django_static
handler400 = 'apps.review.views.bad_request'
handler403 = 'apps.review.views.permission_denied'
handler404 = 'apps.review.views.page_not_found'
handler500 = 'apps.review.views.server_error'
urlpatterns = [
# Admin Page
url(r'^admin/', include(admin.site.urls)),
# OTLplus Apps
url(r'^main/$', review_views.search_view),
url(r'^credits/$', review_views.credits),
url(r'^licenses/$', review_views.licenses),
url(r'^$', lambda x: HttpResponseRedirect('/main/')),
url(r'^session/', include('apps.session.urls')),
url(r'^review/', include('apps.review.urls')),
url(r'^subject/', include('apps.subject.urls')),
url(r'^timetable/', include('apps.timetable.urls')),
url(r'^api/status$', lambda request: HttpResponse()),
# Media Root
url(r'^media/(?P<path>.*)$',django_static.serve,{'document_root': os.path.join(BASE_DIR, 'static')}),
]
| 36.872727 | 105 | 0.720414 | from django.conf.urls import include, url
from django.contrib import admin
from django.http import HttpResponse, HttpResponseRedirect
from settings import BASE_DIR
import os
from django.conf.urls import (
handler400, handler403, handler404, handler500
)
from django.conf import settings
from django.conf.urls.static import static
from apps.review import views as review_views
from apps.timetable import views as timetable_views
from django import views as django_views
from django.views import static as django_static
handler400 = 'apps.review.views.bad_request'
handler403 = 'apps.review.views.permission_denied'
handler404 = 'apps.review.views.page_not_found'
handler500 = 'apps.review.views.server_error'
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^main/$', review_views.search_view),
url(r'^credits/$', review_views.credits),
url(r'^licenses/$', review_views.licenses),
url(r'^$', lambda x: HttpResponseRedirect('/main/')),
url(r'^session/', include('apps.session.urls')),
url(r'^review/', include('apps.review.urls')),
url(r'^subject/', include('apps.subject.urls')),
url(r'^timetable/', include('apps.timetable.urls')),
url(r'^api/status$', lambda request: HttpResponse()),
url(r'^media/(?P<path>.*)$',django_static.serve,{'document_root': os.path.join(BASE_DIR, 'static')}),
]
| true | true |
f7f71f473620bf4be8dbe525823f8a59b77bb274 | 2,347 | py | Python | util/eval_tools.py | QUVA-Lab/lang-tracker | 6cb3630471765565b6f2d34a160f0cd51d95a082 | [
"BSD-2-Clause-FreeBSD"
] | 31 | 2017-09-13T13:40:59.000Z | 2022-01-25T16:55:19.000Z | util/eval_tools.py | zhenyangli/lang-tracker | dddd808a22582573ab0a5e4c3dbf0ba054e42d61 | [
"BSD-3-Clause"
] | 4 | 2017-09-14T01:56:58.000Z | 2021-01-28T00:58:58.000Z | util/eval_tools.py | QUVA-Lab/lang-tracker | 6cb3630471765565b6f2d34a160f0cd51d95a082 | [
"BSD-2-Clause-FreeBSD"
] | 9 | 2017-09-28T03:22:08.000Z | 2021-01-19T10:56:44.000Z | from __future__ import absolute_import, division, print_function
import numpy as np
import pyximport; pyximport.install()
from util.nms import cpu_nms as nms
# all boxes are [xmin, ymin, xmax, ymax] format, 0-indexed, including xmax and ymax
def compute_bbox_iou(bboxes, target):
if isinstance(bboxes, list):
bboxes = np.array(bboxes)
bboxes = bboxes.reshape((-1, 4))
if isinstance(target, list):
target = np.array(target)
target = target.reshape((-1, 4))
A_bboxes = (bboxes[..., 2]-bboxes[..., 0]+1) * (bboxes[..., 3]-bboxes[..., 1]+1)
A_target = (target[..., 2]-target[..., 0]+1) * (target[..., 3]-target[..., 1]+1)
assert(np.all(A_bboxes >= 0))
assert(np.all(A_target >= 0))
I_x1 = np.maximum(bboxes[..., 0], target[..., 0])
I_y1 = np.maximum(bboxes[..., 1], target[..., 1])
I_x2 = np.minimum(bboxes[..., 2], target[..., 2])
I_y2 = np.minimum(bboxes[..., 3], target[..., 3])
A_I = np.maximum(I_x2 - I_x1 + 1, 0) * np.maximum(I_y2 - I_y1 + 1, 0)
IoUs = A_I / (A_bboxes + A_target - A_I)
assert(np.all(0 <= IoUs) and np.all(IoUs <= 1))
return IoUs
# # all boxes are [num, height, width] binary array
def compute_mask_IU(masks, target):
assert(target.shape[-2:] == masks.shape[-2:])
I = np.sum(np.logical_and(masks, target))
U = np.sum(np.logical_or(masks, target))
return I, U
def compute_bbox_max(bbox_file):
with open(bbox_file) as f:
for line in f:
items = [int(x) for x in line.strip().split()]
box1 = np.array(items[0::4]).T
box2 = np.array(items[1::4]).T
box3 = np.array(items[2::4]).T
box4 = np.array(items[3::4]).T
bboxes = np.array([box1, box2, box1+box3-1, box2+box4-1]).T
col1 = np.min(np.array([bboxes[:,0], bboxes[:,2]]), axis=0)
col2 = np.min(np.array([bboxes[:,1], bboxes[:,3]]), axis=0)
col3 = np.max(np.array([bboxes[:,0], bboxes[:,2]]), axis=0)
col4 = np.max(np.array([bboxes[:,1], bboxes[:,3]]), axis=0)
bboxes = np.array([col1, col2, col3, col4]).T
max_sz = 0
max_box = bboxes[0, :]
for i in range(bboxes.shape[0]): # for each bbox
pred_box = bboxes[i, :]
box_sz = (pred_box[2] - pred_box[0])*(pred_box[3] - pred_box[1])
if box_sz > max_sz:
max_sz = box_sz
max_box = pred_box
return max_box
| 36.107692 | 84 | 0.587133 | from __future__ import absolute_import, division, print_function
import numpy as np
import pyximport; pyximport.install()
from util.nms import cpu_nms as nms
def compute_bbox_iou(bboxes, target):
if isinstance(bboxes, list):
bboxes = np.array(bboxes)
bboxes = bboxes.reshape((-1, 4))
if isinstance(target, list):
target = np.array(target)
target = target.reshape((-1, 4))
A_bboxes = (bboxes[..., 2]-bboxes[..., 0]+1) * (bboxes[..., 3]-bboxes[..., 1]+1)
A_target = (target[..., 2]-target[..., 0]+1) * (target[..., 3]-target[..., 1]+1)
assert(np.all(A_bboxes >= 0))
assert(np.all(A_target >= 0))
I_x1 = np.maximum(bboxes[..., 0], target[..., 0])
I_y1 = np.maximum(bboxes[..., 1], target[..., 1])
I_x2 = np.minimum(bboxes[..., 2], target[..., 2])
I_y2 = np.minimum(bboxes[..., 3], target[..., 3])
A_I = np.maximum(I_x2 - I_x1 + 1, 0) * np.maximum(I_y2 - I_y1 + 1, 0)
IoUs = A_I / (A_bboxes + A_target - A_I)
assert(np.all(0 <= IoUs) and np.all(IoUs <= 1))
return IoUs
arget.shape[-2:] == masks.shape[-2:])
I = np.sum(np.logical_and(masks, target))
U = np.sum(np.logical_or(masks, target))
return I, U
def compute_bbox_max(bbox_file):
with open(bbox_file) as f:
for line in f:
items = [int(x) for x in line.strip().split()]
box1 = np.array(items[0::4]).T
box2 = np.array(items[1::4]).T
box3 = np.array(items[2::4]).T
box4 = np.array(items[3::4]).T
bboxes = np.array([box1, box2, box1+box3-1, box2+box4-1]).T
col1 = np.min(np.array([bboxes[:,0], bboxes[:,2]]), axis=0)
col2 = np.min(np.array([bboxes[:,1], bboxes[:,3]]), axis=0)
col3 = np.max(np.array([bboxes[:,0], bboxes[:,2]]), axis=0)
col4 = np.max(np.array([bboxes[:,1], bboxes[:,3]]), axis=0)
bboxes = np.array([col1, col2, col3, col4]).T
max_sz = 0
max_box = bboxes[0, :]
for i in range(bboxes.shape[0]):
pred_box = bboxes[i, :]
box_sz = (pred_box[2] - pred_box[0])*(pred_box[3] - pred_box[1])
if box_sz > max_sz:
max_sz = box_sz
max_box = pred_box
return max_box
| true | true |
f7f720c190c39423b35613c01e9bd1634592fdfa | 887 | py | Python | model/utils/losses.py | katsugeneration/ml-project-template | fe68c2f3fa6b6e51cc29b340cb2a1aeeca221322 | [
"MIT"
] | 5 | 2019-08-29T06:25:38.000Z | 2021-01-22T17:10:13.000Z | model/utils/losses.py | katsugeneration/ml-project-template | fe68c2f3fa6b6e51cc29b340cb2a1aeeca221322 | [
"MIT"
] | null | null | null | model/utils/losses.py | katsugeneration/ml-project-template | fe68c2f3fa6b6e51cc29b340cb2a1aeeca221322 | [
"MIT"
] | null | null | null | # Copyright 2020 Katsuya Shimabukuro. All rights reserved.
# Licensed under the MIT License.
import tensorflow as tf
class MaskedSparseCategoricalCrossentropy():
"""SparseCategoricalCrossentropy without padding mask."""
def __call__(self, label, pred, **kwargs):
"""Calculate loss.
Args:
label (tf.Tensor): sequence label with shape (B, Seq).
pred (tf.Tensor): sequence label prediction likelihood with shape (B, Seq, Token) in [0, 1].
Return:
loss (tf.Tensor): mean loss float value without padding mask.
"""
mask = tf.math.logical_not(tf.math.equal(label, 0))
loss = tf.keras.losses.sparse_categorical_crossentropy(label, pred)
mask = tf.cast(mask, dtype=loss.dtype)
loss *= mask
return tf.reduce_mean(tf.reduce_sum(loss, axis=1) / tf.reduce_sum(mask, axis=1))
| 32.851852 | 104 | 0.65389 |
import tensorflow as tf
class MaskedSparseCategoricalCrossentropy():
def __call__(self, label, pred, **kwargs):
mask = tf.math.logical_not(tf.math.equal(label, 0))
loss = tf.keras.losses.sparse_categorical_crossentropy(label, pred)
mask = tf.cast(mask, dtype=loss.dtype)
loss *= mask
return tf.reduce_mean(tf.reduce_sum(loss, axis=1) / tf.reduce_sum(mask, axis=1))
| true | true |
f7f722da438db87fea079f7a44fffb6a8bae4acd | 2,682 | py | Python | app/core/models.py | VPuosk/recipe-app-api | 777bcac607664aea04d62e08fcc7c0baf80dd061 | [
"MIT"
] | null | null | null | app/core/models.py | VPuosk/recipe-app-api | 777bcac607664aea04d62e08fcc7c0baf80dd061 | [
"MIT"
] | null | null | null | app/core/models.py | VPuosk/recipe-app-api | 777bcac607664aea04d62e08fcc7c0baf80dd061 | [
"MIT"
] | null | null | null | import uuid
import os
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
def recipe_image_file_path(instance, filename):
"""Generate file path for new recipe image"""
ext = filename.split('.')[-1]
filename = f'{uuid.uuid4()}.{ext}'
return os.path.join('uploads/recipe/', filename)
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
""""Creates and saves a new user"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new superuser"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
class Tag(models.Model):
"""Tag to be used for a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return f'{self.name}'
class Ingredient(models.Model):
"""Ingredient to be in a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return self.name
class Recipe(models.Model):
"""Recipe object"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
title = models.CharField(max_length=255)
time_minutes = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
link = models.CharField(max_length=255, blank=True)
ingredients = models.ManyToManyField(Ingredient)
tags = models.ManyToManyField(Tag)
# could be 'Tag' in which case the depency order would not matter
image = models.ImageField(null=True, upload_to=recipe_image_file_path)
def __str__(self):
return self.title
| 29.152174 | 76 | 0.673751 | import uuid
import os
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
def recipe_image_file_path(instance, filename):
ext = filename.split('.')[-1]
filename = f'{uuid.uuid4()}.{ext}'
return os.path.join('uploads/recipe/', filename)
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
class Tag(models.Model):
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return f'{self.name}'
class Ingredient(models.Model):
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return self.name
class Recipe(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
title = models.CharField(max_length=255)
time_minutes = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
link = models.CharField(max_length=255, blank=True)
ingredients = models.ManyToManyField(Ingredient)
tags = models.ManyToManyField(Tag)
image = models.ImageField(null=True, upload_to=recipe_image_file_path)
def __str__(self):
return self.title
| true | true |
f7f72571461d757b4ac3d41732fd608785d342ce | 2,957 | py | Python | core/keyvalue/tests.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 22 | 2015-01-16T01:36:32.000Z | 2020-06-08T00:46:18.000Z | core/keyvalue/tests.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 8 | 2015-12-28T18:56:19.000Z | 2019-04-01T17:33:48.000Z | core/keyvalue/tests.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 13 | 2015-01-13T20:56:22.000Z | 2022-02-23T06:01:17.000Z | from django.test import TestCase
from django.test.client import Client
from core.group.models import Group
from mozdns.tests.utils import create_fake_zone
from core.registration.static.models import StaticReg
from systems.tests.utils import create_fake_host
class KVApiTests(TestCase):
def setUp(self):
self.c = Client()
create_fake_zone('10.in-addr.arpa', suffix='')
root_domain = create_fake_zone('foobar.mozilla.com', suffix='')
system = create_fake_host(hostname="asdf.mozilla.com")
sreg = StaticReg.objects.create(
label='foo', domain=root_domain, system=system,
ip_type='4', ip_str='10.0.0.0'
)
g = Group.objects.create(name="foo")
self.test_objs = (
('groupkeyvalue', g),
('staticregkeyvalue', sreg),
('keyvalue', system),
)
def testCRUD(self):
for obj_class, o in self.test_objs:
self.do_stuff(obj_class, o)
def do_stuff(self, obj_class, o):
key = 'foo'
value = 'bar'
create = '/en-US/core/keyvalue/api/{kv_class}/{obj_pk}/create/'.format(
kv_class=obj_class, obj_pk=o.pk
)
detail = '/en-US/core/keyvalue/api/{kv_class}/{obj_pk}/list/'.format(
kv_class=obj_class, obj_pk=o.pk
)
resp1 = self.c.post(create, {'key': key, 'value': value})
self.assertEqual(resp1.status_code, 201)
resp2 = self.c.post(create, {'key': key, 'value': value})
self.assertEqual(resp2.status_code, 400)
resp3 = self.c.get(detail)
self.assertEqual(resp3.status_code, 200)
resp4 = self.c.get(detail)
self.assertEqual(resp4.status_code, 200)
self.assertTrue(1, len(o.keyvalue_set.all()))
kv = o.keyvalue_set.all()[0]
update = '/en-US/core/keyvalue/api/{kv_class}/{kv_pk}/update/'.format(
kv_class=obj_class, kv_pk=kv.pk
)
new_value = "happy magic"
resp5 = self.c.post(update, {'key': key, 'value': new_value})
self.assertEqual(resp5.status_code, 200)
kv = o.keyvalue_set.get(pk=kv.pk)
self.assertEqual(kv.value, new_value)
# Does bad update do what it's supposed to?
resp6 = self.c.post(update, {'key': key, 'value': ''})
self.assertEqual(resp6.status_code, 400)
kv = o.keyvalue_set.get(pk=kv.pk)
self.assertEqual(kv.value, new_value) # Should be no change
delete = '/en-US/core/keyvalue/api/{kv_class}/{kv_pk}/delete/'.format(
kv_class=obj_class, kv_pk=kv.pk
)
resp6 = self.c.post(delete, {'key': key, 'value': new_value})
self.assertEqual(resp6.status_code, 204)
self.assertEqual(0, len(o.keyvalue_set.all()))
class TestCaseUtils(object):
def localize_url(self, url):
if 'en-US' not in url:
url = url.replace('mozdns', 'en-US/mozdns')
return url
| 32.494505 | 79 | 0.607034 | from django.test import TestCase
from django.test.client import Client
from core.group.models import Group
from mozdns.tests.utils import create_fake_zone
from core.registration.static.models import StaticReg
from systems.tests.utils import create_fake_host
class KVApiTests(TestCase):
def setUp(self):
self.c = Client()
create_fake_zone('10.in-addr.arpa', suffix='')
root_domain = create_fake_zone('foobar.mozilla.com', suffix='')
system = create_fake_host(hostname="asdf.mozilla.com")
sreg = StaticReg.objects.create(
label='foo', domain=root_domain, system=system,
ip_type='4', ip_str='10.0.0.0'
)
g = Group.objects.create(name="foo")
self.test_objs = (
('groupkeyvalue', g),
('staticregkeyvalue', sreg),
('keyvalue', system),
)
def testCRUD(self):
for obj_class, o in self.test_objs:
self.do_stuff(obj_class, o)
def do_stuff(self, obj_class, o):
key = 'foo'
value = 'bar'
create = '/en-US/core/keyvalue/api/{kv_class}/{obj_pk}/create/'.format(
kv_class=obj_class, obj_pk=o.pk
)
detail = '/en-US/core/keyvalue/api/{kv_class}/{obj_pk}/list/'.format(
kv_class=obj_class, obj_pk=o.pk
)
resp1 = self.c.post(create, {'key': key, 'value': value})
self.assertEqual(resp1.status_code, 201)
resp2 = self.c.post(create, {'key': key, 'value': value})
self.assertEqual(resp2.status_code, 400)
resp3 = self.c.get(detail)
self.assertEqual(resp3.status_code, 200)
resp4 = self.c.get(detail)
self.assertEqual(resp4.status_code, 200)
self.assertTrue(1, len(o.keyvalue_set.all()))
kv = o.keyvalue_set.all()[0]
update = '/en-US/core/keyvalue/api/{kv_class}/{kv_pk}/update/'.format(
kv_class=obj_class, kv_pk=kv.pk
)
new_value = "happy magic"
resp5 = self.c.post(update, {'key': key, 'value': new_value})
self.assertEqual(resp5.status_code, 200)
kv = o.keyvalue_set.get(pk=kv.pk)
self.assertEqual(kv.value, new_value)
resp6 = self.c.post(update, {'key': key, 'value': ''})
self.assertEqual(resp6.status_code, 400)
kv = o.keyvalue_set.get(pk=kv.pk)
self.assertEqual(kv.value, new_value) # Should be no change
delete = '/en-US/core/keyvalue/api/{kv_class}/{kv_pk}/delete/'.format(
kv_class=obj_class, kv_pk=kv.pk
)
resp6 = self.c.post(delete, {'key': key, 'value': new_value})
self.assertEqual(resp6.status_code, 204)
self.assertEqual(0, len(o.keyvalue_set.all()))
class TestCaseUtils(object):
def localize_url(self, url):
if 'en-US' not in url:
url = url.replace('mozdns', 'en-US/mozdns')
return url
| true | true |
f7f726b31142890a4cf0035a5abfee7c2a9c0ecb | 5,421 | py | Python | img_proc/overlap_canny_deeplearning.py | norton-chris/MARS-Net | 6f671837d0629422680c78adf9b643894debae70 | [
"MIT"
] | 2 | 2021-09-09T14:20:16.000Z | 2022-03-28T15:02:33.000Z | img_proc/overlap_canny_deeplearning.py | norton-chris/MARS-Net | 6f671837d0629422680c78adf9b643894debae70 | [
"MIT"
] | 3 | 2021-11-10T16:33:56.000Z | 2022-03-30T11:56:52.000Z | img_proc/overlap_canny_deeplearning.py | norton-chris/MARS-Net | 6f671837d0629422680c78adf9b643894debae70 | [
"MIT"
] | 2 | 2022-03-28T01:33:09.000Z | 2022-03-28T01:38:33.000Z | '''
Author Junbong Jang
8/18/2020
1. extract edge from deep learning segmentation
2. overlap canny edge from raw image and result from 1.
3. fill the edge
4. overlay edge with raw image
'''
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import cv2
import numpy as np
from skimage import io
import matplotlib.pyplot as plt
import glob
import os
from PIL import Image
from scipy import ndimage
def auto_canny(image, canny_std_multiplier):
img_mean = np.average(image)
img_std = np.std(image)
lower = max(0, img_mean - (canny_std_multiplier+1)*img_std)
upper = max(0, img_mean - canny_std_multiplier*img_std)
# print('auto_canny:', img_mean, img_std, lower, upper)
edges = cv2.Canny(image, lower, upper, 3, L2gradient=True)
return edges
def extract_edge(img, img_name, saved_edge_path, canny_std_multiplier):
canny_edge = auto_canny(img, canny_std_multiplier)
canny_edge = pad_border_image(canny_edge)
im = Image.fromarray(canny_edge)
im.save(saved_edge_path + '/' + img_name)
def extract_edges(img_root_path, img_list, saved_edge_path, canny_std_multiplier):
# extract edge for each original image
for img_index in range(len(img_list)):
# Get an image and its name
img_path = img_list[img_index]
img = cv2.imread(img_path,cv2.IMREAD_GRAYSCALE)
img = crop_border_image(img)
img_name = img_path[len(img_root_path):]
print(img_name, img.shape)
extract_edge(img, img_name, saved_edge_path, canny_std_multiplier)
def crop_border_image(img):
return img[10:, 10:]
def pad_border_image(img):
img = np.pad(img, ((10,0),(10, 0)), 'constant') # ((top, bottom), (left, right))
return img
def overlap_edges(dl_canny_edge_list, img_canny_edge_list, img_list, saved_dl_edge_path, saved_overlap_path):
for img_index in range(len(dl_canny_edge_list)):
# Get an image and its name
dl_canny_edge = cv2.imread(dl_canny_edge_list[img_index], cv2.IMREAD_GRAYSCALE)
img_canny_edge = cv2.imread(img_canny_edge_list[img_index], cv2.IMREAD_GRAYSCALE)
img = cv2.imread(img_list[img_index], cv2.IMREAD_GRAYSCALE)
dl_canny_edge = crop_border_image(dl_canny_edge)
img_canny_edge = crop_border_image(img_canny_edge)
img = crop_border_image(img)
img_path = dl_canny_edge_list[img_index]
img_name = img_path[len(saved_dl_edge_path):]
print(img_name, img.shape, dl_canny_edge.shape, img_canny_edge.shape)
img3 = np.zeros((img.shape[0],img.shape[1],3), dtype=img.dtype)
#img3[:,:,0] = img
img3[:,:,1] = img
#img3[:,:,2] = img
img3[:,:,0] = img3[:,:,0] + dl_canny_edge
img3[:,:,2] = img3[:,:,2] + img_canny_edge
im = Image.fromarray(img3)
im.save(saved_overlap_path + '/' + img_name)
def overlay_edge_over_img(img, canny_edge, save_path):
# overlay with the original image
colorful_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
colorful_canny_edge = cv2.cvtColor(canny_edge, cv2.COLOR_GRAY2RGB)
colorful_canny_edge[:, :, 1:2] = 0
overlaid_img = cv2.addWeighted(colorful_img, 1, colorful_canny_edge, 0.3, 0)
if save_path != '':
im = Image.fromarray(overlaid_img)
im.save(save_path)
return overlaid_img
if __name__ == "__main__":
canny_std_multiplier = 1
constants = UserParams('predict')
for dataset_index in range(0, len(constants.dataset)):
a_dataset = constants.dataset[dataset_index]
img_root_path = constants.dataset_folder + a_dataset + constants.img_folder
saved_edge_path = f'generated/{a_dataset}/edge/'
saved_overlap_path = f'generated/{a_dataset}/overlap/'
if not os.path.exists(saved_edge_path):
os.makedirs(saved_edge_path)
if not os.path.exists(saved_overlap_path):
os.makedirs(saved_overlap_path)
# -----------------------------------
# get prediction images
# dl_prediction_root_path = f'../models/results/predict_wholeframe_round1_VGG16_trained/{a_dataset}/34_0_{a_dataset}/'
# dl_prediction_list = glob.glob(dl_prediction_root_path + '*' + '.png')
# print('number of images: ', len(dl_prediction_list))
# extract_edges(dl_prediction_root_path, dl_prediction_list, saved_edge_path, canny_std_multiplier)
# -----------------------------------
# get mask images
mask_list = glob.glob(constants.mask_folder + '*' + '.png')
extract_edges(mask_root_path, dl_prediction_list, saved_edge_path, canny_std_multiplier)
dl_canny_edge_list = glob.glob(saved_edge_path + '*' + '.png')
img_canny_root_path = f'../label_tool/generated_edge/{a_dataset}/'
img_canny_edge_list = glob.glob(img_canny_root_path + '*' + '.png')
img_list = glob.glob(img_root_path + '*' + '.png')
print(len(dl_canny_edge_list))
print(len(img_canny_edge_list))
overlap_edges(dl_canny_edge_list, img_canny_edge_list, img_list, saved_edge_path, saved_overlap_path)
# ------------------------------------
| 35.900662 | 127 | 0.646929 |
import matplotlib
matplotlib.use('Agg')
import cv2
import numpy as np
from skimage import io
import matplotlib.pyplot as plt
import glob
import os
from PIL import Image
from scipy import ndimage
def auto_canny(image, canny_std_multiplier):
img_mean = np.average(image)
img_std = np.std(image)
lower = max(0, img_mean - (canny_std_multiplier+1)*img_std)
upper = max(0, img_mean - canny_std_multiplier*img_std)
edges = cv2.Canny(image, lower, upper, 3, L2gradient=True)
return edges
def extract_edge(img, img_name, saved_edge_path, canny_std_multiplier):
canny_edge = auto_canny(img, canny_std_multiplier)
canny_edge = pad_border_image(canny_edge)
im = Image.fromarray(canny_edge)
im.save(saved_edge_path + '/' + img_name)
def extract_edges(img_root_path, img_list, saved_edge_path, canny_std_multiplier):
for img_index in range(len(img_list)):
img_path = img_list[img_index]
img = cv2.imread(img_path,cv2.IMREAD_GRAYSCALE)
img = crop_border_image(img)
img_name = img_path[len(img_root_path):]
print(img_name, img.shape)
extract_edge(img, img_name, saved_edge_path, canny_std_multiplier)
def crop_border_image(img):
    """Drop the first 10 rows and 10 columns (the padded border region)."""
    cropped = img[10:, 10:]
    return cropped
def pad_border_image(img):
    """Prepend 10 zero-valued rows on top and 10 zero-valued columns on the left."""
    return np.pad(img, ((10, 0), (10, 0)), 'constant')
def overlap_edges(dl_canny_edge_list, img_canny_edge_list, img_list, saved_dl_edge_path, saved_overlap_path):
    """Compose per-image RGB overlays of the two edge maps on the raw image.

    Channel layout of the saved composite: green = grayscale source image,
    red = DL-prediction edge map, blue = raw-image Canny edge map.

    NOTE(review): the three lists are matched purely by index; this assumes
    they are sorted identically and have equal length — confirm, since they
    come from independent glob() calls upstream.
    """
    for img_index in range(len(dl_canny_edge_list)):
        dl_canny_edge = cv2.imread(dl_canny_edge_list[img_index], cv2.IMREAD_GRAYSCALE)
        img_canny_edge = cv2.imread(img_canny_edge_list[img_index], cv2.IMREAD_GRAYSCALE)
        img = cv2.imread(img_list[img_index], cv2.IMREAD_GRAYSCALE)
        # All inputs carry a 10px padded border; strip it before composing.
        dl_canny_edge = crop_border_image(dl_canny_edge)
        img_canny_edge = crop_border_image(img_canny_edge)
        img = crop_border_image(img)
        # Output name mirrors the DL edge file name under the overlap dir.
        img_path = dl_canny_edge_list[img_index]
        img_name = img_path[len(saved_dl_edge_path):]
        print(img_name, img.shape, dl_canny_edge.shape, img_canny_edge.shape)
        img3 = np.zeros((img.shape[0],img.shape[1],3), dtype=img.dtype)
        img3[:,:,1] = img
        # Channels 0 and 2 start at zero, so the additions simply copy the
        # edge maps into the red and blue channels.
        img3[:,:,0] = img3[:,:,0] + dl_canny_edge
        img3[:,:,2] = img3[:,:,2] + img_canny_edge
        im = Image.fromarray(img3)
        im.save(saved_overlap_path + '/' + img_name)
def overlay_edge_over_img(img, canny_edge, save_path):
    """Blend a tinted Canny edge map over a grayscale image.

    Returns the blended RGB image; additionally writes it to *save_path*
    when the path is a non-empty string.
    """
    colorful_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    colorful_canny_edge = cv2.cvtColor(canny_edge, cv2.COLOR_GRAY2RGB)
    # NOTE(review): the slice 1:2 zeroes only channel 1 (G), leaving both R
    # and B set, i.e. a magenta tint. If a pure red overlay was intended
    # this should be [:, :, 1:3] — confirm.
    colorful_canny_edge[:, :, 1:2] = 0
    # 100% of the base image plus 30% of the tinted edges.
    overlaid_img = cv2.addWeighted(colorful_img, 1, colorful_canny_edge, 0.3, 0)
    if save_path != '':
        im = Image.fromarray(overlaid_img)
        im.save(save_path)
    return overlaid_img
if __name__ == "__main__":
    # Threshold aggressiveness for auto_canny (std-devs below the mean).
    canny_std_multiplier = 1
    # NOTE(review): UserParams is never imported in this module — this line
    # raises NameError as written; confirm the missing import.
    constants = UserParams('predict')
    for dataset_index in range(0, len(constants.dataset)):
        a_dataset = constants.dataset[dataset_index]
        img_root_path = constants.dataset_folder + a_dataset + constants.img_folder
        saved_edge_path = f'generated/{a_dataset}/edge/'
        saved_overlap_path = f'generated/{a_dataset}/overlap/'
        if not os.path.exists(saved_edge_path):
            os.makedirs(saved_edge_path)
        if not os.path.exists(saved_overlap_path):
            os.makedirs(saved_overlap_path)
        # Collect mask images for this dataset.
        mask_list = glob.glob(constants.mask_folder + '*' + '.png')
        # NOTE(review): mask_root_path and dl_prediction_list are undefined
        # here (the code that produced them appears to be commented out
        # upstream), so this call raises NameError — confirm the intended
        # inputs (possibly constants.mask_folder / mask_list).
        extract_edges(mask_root_path, dl_prediction_list, saved_edge_path, canny_std_multiplier)
        dl_canny_edge_list = glob.glob(saved_edge_path + '*' + '.png')
        img_canny_root_path = f'../label_tool/generated_edge/{a_dataset}/'
        img_canny_edge_list = glob.glob(img_canny_root_path + '*' + '.png')
        img_list = glob.glob(img_root_path + '*' + '.png')
        print(len(dl_canny_edge_list))
        print(len(img_canny_edge_list))
        overlap_edges(dl_canny_edge_list, img_canny_edge_list, img_list, saved_edge_path, saved_overlap_path)
| true | true |
f7f7271d14c4203d881dd61fee9b56bdf2239749 | 2,035 | py | Python | compare.py | trawick/emptyhammock-project-template | 4f102bbe314a8065e2b9dcaa627e8e03b34d9eab | [
"Apache-2.0"
] | null | null | null | compare.py | trawick/emptyhammock-project-template | 4f102bbe314a8065e2b9dcaa627e8e03b34d9eab | [
"Apache-2.0"
] | 14 | 2018-12-06T23:02:15.000Z | 2021-06-11T17:51:56.000Z | compare.py | trawick/emptyhammock-project-template | 4f102bbe314a8065e2b9dcaa627e8e03b34d9eab | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os
import re
import subprocess
import sys
def get_project_name(project_dir):
    """Read `project_name` from the project's shared Ansible vars file."""
    vars_path = os.path.join(project_dir, 'deploy', 'environments', 'all', 'vars.yml')
    with open(vars_path) as vars_file:
        for line in vars_file:
            match = re.match('^project_name: (.*)$', line)
            if match:
                return match.group(1)
    raise Exception('Could not find PROJECT_NAME in Ansible variables!')
def compare(template_dir, project_dir):
    """Recursively diff this project template against *project_dir*.

    Generated, local-only and per-project files are excluded. A second diff
    compares the template's `myproject` package against the project's
    renamed package. Output goes to stdout; diff exit codes are ignored.
    """
    project_name = get_project_name(project_dir)
    # stop ignoring templates if project template starts including that dir
    assert not os.path.exists(os.path.join(template_dir, 'templates'))
    excluded = [
        project_name,
        'apps',
        'compare.py',
        '.coverage',
        'docs',
        '.env',
        'env',
        'env-deploy',
        '.git',
        '.idea',
        '*.log',
        'media',
        'myproject',
        'node_modules',
        'package.json',
        'project.sql.gz',
        '__pycache__',
        '*.pyc',
        '*.retry',
        'secrets.yml',
        '*.sql',
        'standalone_tests',
        '.vagrant',
        '.vault_pass',
        'webpack.config.js',
        'webpack-stats.json',
    ]
    cmd = ['diff', '-ru']
    for pattern in excluded:
        cmd.extend(['--exclude', pattern])
    subprocess.call(cmd + [template_dir, project_dir])
    # The Django package directory is named per-project, so diff it
    # separately against the template's `myproject` package.
    subprocess.call([
        'diff', '-ru',
        '--exclude', '__pycache__',
        '--exclude', '*.pyc',
        os.path.join(template_dir, 'myproject'),
        os.path.join(project_dir, project_name),
    ])
def main():
    """CLI entry point: diff this template against the project on argv[1]."""
    argv = sys.argv
    if len(argv) == 2:
        compare('.', argv[1])
    else:
        print('Usage: %s path-to-other-project' % argv[0], file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
| 29.492754 | 91 | 0.540049 |
import os
import re
import subprocess
import sys
def get_project_name(project_dir):
    """Read `project_name` from the project's shared Ansible vars file."""
    vars_path = os.path.join(project_dir, 'deploy', 'environments', 'all', 'vars.yml')
    with open(vars_path) as vars_file:
        for line in vars_file:
            match = re.match('^project_name: (.*)$', line)
            if match:
                return match.group(1)
    raise Exception('Could not find PROJECT_NAME in Ansible variables!')
def compare(template_dir, project_dir):
    """Recursively diff this project template against *project_dir*.

    Generated, local-only and per-project files are excluded. A second diff
    compares the template's `myproject` package against the project's
    renamed package. Output goes to stdout; diff exit codes are ignored.
    """
    project_name = get_project_name(project_dir)
    assert not os.path.exists(os.path.join(template_dir, 'templates'))
    excluded = [
        project_name,
        'apps',
        'compare.py',
        '.coverage',
        'docs',
        '.env',
        'env',
        'env-deploy',
        '.git',
        '.idea',
        '*.log',
        'media',
        'myproject',
        'node_modules',
        'package.json',
        'project.sql.gz',
        '__pycache__',
        '*.pyc',
        '*.retry',
        'secrets.yml',
        '*.sql',
        'standalone_tests',
        '.vagrant',
        '.vault_pass',
        'webpack.config.js',
        'webpack-stats.json',
    ]
    cmd = ['diff', '-ru']
    for pattern in excluded:
        cmd.extend(['--exclude', pattern])
    subprocess.call(cmd + [template_dir, project_dir])
    # The Django package directory is named per-project, so diff it
    # separately against the template's `myproject` package.
    subprocess.call([
        'diff', '-ru',
        '--exclude', '__pycache__',
        '--exclude', '*.pyc',
        os.path.join(template_dir, 'myproject'),
        os.path.join(project_dir, project_name),
    ])
def main():
    """CLI entry point: diff this template against the project on argv[1]."""
    argv = sys.argv
    if len(argv) == 2:
        compare('.', argv[1])
    else:
        print('Usage: %s path-to-other-project' % argv[0], file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
| true | true |
f7f7286c577af9328a1e7d0026499528338d7bfe | 930 | py | Python | setup.py | LaurenceKuhl/rmageddon-cli | 5ea067ddab452cfa809f52495ca8232870cb2a54 | [
"MIT"
] | null | null | null | setup.py | LaurenceKuhl/rmageddon-cli | 5ea067ddab452cfa809f52495ca8232870cb2a54 | [
"MIT"
] | null | null | null | setup.py | LaurenceKuhl/rmageddon-cli | 5ea067ddab452cfa809f52495ca8232870cb2a54 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
import sys

version = '0.3.0'

# Long description, licence text and pinned requirements are read from the
# repository files at build time.
with open('README.md') as f:
    readme = f.read()

# Renamed from `license` so the builtin of that name is not shadowed.
with open('LICENSE') as f:
    license_text = f.read()

with open('requirements.txt') as f:
    required = f.read().splitlines()

setup(
    name='rmageddon',
    version=version,
    description='Small linting and building tool for R containers at QBiC',
    long_description=readme,
    keywords=['R', 'linting', 'lint', 'Docker', 'container'],
    author='Sven Fillinger',
    author_email='sven.fillinger@qbic.uni-tuebingen.de',
    # NOTE(review): setuptools expects a short licence identifier here, not
    # the full LICENSE file contents — consider license='MIT'; verify.
    license=license_text,
    scripts=['scripts/rmageddon'],
    install_requires=required,
    setup_requires=[
        'twine>=1.11.0',
        # BUG FIX: '38.6.' is not a valid PEP 440 version string; completed
        # the trailing component to '38.6.0'.
        'setuptools>=38.6.0',
    ] + ([] if sys.version_info.minor == 4 else ['wheel>=0.31.0']),
    packages=find_packages(exclude='docs'),
    include_package_data=True
)
| 25.833333 | 82 | 0.622581 |
from setuptools import setup, find_packages
import sys

version = '0.3.0'

# Long description, licence text and pinned requirements are read from the
# repository files at build time.
with open('README.md') as f:
    readme = f.read()

# Renamed from `license` so the builtin of that name is not shadowed.
with open('LICENSE') as f:
    license_text = f.read()

with open('requirements.txt') as f:
    required = f.read().splitlines()

setup(
    name='rmageddon',
    version=version,
    description='Small linting and building tool for R containers at QBiC',
    long_description=readme,
    keywords=['R', 'linting', 'lint', 'Docker', 'container'],
    author='Sven Fillinger',
    author_email='sven.fillinger@qbic.uni-tuebingen.de',
    # NOTE(review): setuptools expects a short licence identifier here, not
    # the full LICENSE file contents — consider license='MIT'; verify.
    license=license_text,
    scripts=['scripts/rmageddon'],
    install_requires=required,
    setup_requires=[
        'twine>=1.11.0',
        # BUG FIX: '38.6.' is not a valid PEP 440 version string; completed
        # the trailing component to '38.6.0'.
        'setuptools>=38.6.0',
    ] + ([] if sys.version_info.minor == 4 else ['wheel>=0.31.0']),
    packages=find_packages(exclude='docs'),
    include_package_data=True
)
| true | true |
f7f7291987fbcc394ca11997ce2000fa2fa962bd | 2,382 | py | Python | appengine/monorail/tracker/test/rerank_helpers_test.py | xinghun61/infra | b5d4783f99461438ca9e6a477535617fadab6ba3 | [
"BSD-3-Clause"
] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | appengine/monorail/tracker/test/rerank_helpers_test.py | asdfghjjklllllaaa/infra | 8f63af54e46194cd29291813f2790ff6e986804d | [
"BSD-3-Clause"
] | 21 | 2020-09-06T02:41:05.000Z | 2022-03-02T04:40:01.000Z | appengine/monorail/tracker/test/rerank_helpers_test.py | xinghun61/infra | b5d4783f99461438ca9e6a477535617fadab6ba3 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Unittests for monorail.tracker.rerank_helpers."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from tracker import rerank_helpers
rerank_helpers.MAX_RANKING = 10
class Rerank_HelpersTest(unittest.TestCase):
  """Tests for rerank_helpers.GetInsertRankings.

  The module-level patch above pins rerank_helpers.MAX_RANKING to 10 so
  the expected rank values below are stable.
  """

  def testGetInsertRankings(self):
    """A single id dropped between ranks 0 and 10 lands in the middle."""
    lower = [(1, 0)]
    higher = [(2, 10)]
    moved_ids = [3]
    ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
    self.assertEqual(ret, [(3, 5)])

  def testGetInsertRankings_Below(self):
    """Inserting below every existing item uses the space under it."""
    lower = []
    higher = [(1, 2)]
    moved_ids = [2]
    ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
    self.assertEqual(ret, [(2, 1)])

  def testGetInsertRankings_Above(self):
    """Inserting above every existing item uses the space above it."""
    lower = [(1, 0)]
    higher = []
    moved_ids = [2]
    ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
    self.assertEqual(ret, [(2, 5)])

  def testGetInsertRankings_Multiple(self):
    """Several ids are spread evenly across the gap."""
    lower = [(1, 0)]
    higher = [(2, 10)]
    moved_ids = [3, 4, 5]
    ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
    self.assertEqual(ret, [(3, 2), (4, 5), (5, 8)])

  def testGetInsertRankings_SplitLow(self):
    """A too-small gap also re-ranks the neighbour just below it."""
    lower = [(1, 0), (2, 5)]
    higher = [(3, 6), (4, 10)]
    moved_ids = [5]
    ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
    self.assertEqual(ret, [(2, 2), (5, 5)])

  def testGetInsertRankings_SplitHigh(self):
    """A too-small gap also re-ranks the neighbour just above it."""
    lower = [(1, 0), (2, 4)]
    higher = [(3, 5), (4, 10)]
    moved_ids = [5]
    ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
    self.assertEqual(ret, [(5, 6), (3, 9)])

  def testGetInsertRankings_NoLower(self):
    """With nothing below, the existing item above may be re-ranked too."""
    lower = []
    higher = [(1, 1)]
    moved_ids = [2]
    ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
    self.assertEqual(ret, [(2, 3), (1, 8)])

  def testGetInsertRankings_NoRoom(self):
    """Returns None when there is no ranking space at all."""
    # Shrink MAX_RANKING for this test only; restore it in a finally block
    # so a failing assertion cannot leak the patched value into later tests
    # (the original restore was skipped whenever the assertion raised).
    max_ranking, rerank_helpers.MAX_RANKING = rerank_helpers.MAX_RANKING, 1
    try:
      lower = [(1, 0)]
      higher = [(2, 1)]
      moved_ids = [3]
      ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
      self.assertIsNone(ret)
    finally:
      rerank_helpers.MAX_RANKING = max_ranking
| 30.538462 | 75 | 0.675483 |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from tracker import rerank_helpers
rerank_helpers.MAX_RANKING = 10
class Rerank_HelpersTest(unittest.TestCase):
  """Tests for rerank_helpers.GetInsertRankings.

  The module-level patch above pins rerank_helpers.MAX_RANKING to 10 so
  the expected rank values below are stable.
  """

  def testGetInsertRankings(self):
    """A single id dropped between ranks 0 and 10 lands in the middle."""
    lower = [(1, 0)]
    higher = [(2, 10)]
    moved_ids = [3]
    ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
    self.assertEqual(ret, [(3, 5)])

  def testGetInsertRankings_Below(self):
    """Inserting below every existing item uses the space under it."""
    lower = []
    higher = [(1, 2)]
    moved_ids = [2]
    ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
    self.assertEqual(ret, [(2, 1)])

  def testGetInsertRankings_Above(self):
    """Inserting above every existing item uses the space above it."""
    lower = [(1, 0)]
    higher = []
    moved_ids = [2]
    ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
    self.assertEqual(ret, [(2, 5)])

  def testGetInsertRankings_Multiple(self):
    """Several ids are spread evenly across the gap."""
    lower = [(1, 0)]
    higher = [(2, 10)]
    moved_ids = [3, 4, 5]
    ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
    self.assertEqual(ret, [(3, 2), (4, 5), (5, 8)])

  def testGetInsertRankings_SplitLow(self):
    """A too-small gap also re-ranks the neighbour just below it."""
    lower = [(1, 0), (2, 5)]
    higher = [(3, 6), (4, 10)]
    moved_ids = [5]
    ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
    self.assertEqual(ret, [(2, 2), (5, 5)])

  def testGetInsertRankings_SplitHigh(self):
    """A too-small gap also re-ranks the neighbour just above it."""
    lower = [(1, 0), (2, 4)]
    higher = [(3, 5), (4, 10)]
    moved_ids = [5]
    ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
    self.assertEqual(ret, [(5, 6), (3, 9)])

  def testGetInsertRankings_NoLower(self):
    """With nothing below, the existing item above may be re-ranked too."""
    lower = []
    higher = [(1, 1)]
    moved_ids = [2]
    ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
    self.assertEqual(ret, [(2, 3), (1, 8)])

  def testGetInsertRankings_NoRoom(self):
    """Returns None when there is no ranking space at all."""
    # Shrink MAX_RANKING for this test only; restore it in a finally block
    # so a failing assertion cannot leak the patched value into later tests
    # (the original restore was skipped whenever the assertion raised).
    max_ranking, rerank_helpers.MAX_RANKING = rerank_helpers.MAX_RANKING, 1
    try:
      lower = [(1, 0)]
      higher = [(2, 1)]
      moved_ids = [3]
      ret = rerank_helpers.GetInsertRankings(lower, higher, moved_ids)
      self.assertIsNone(ret)
    finally:
      rerank_helpers.MAX_RANKING = max_ranking
| true | true |
f7f7299adba17fa579a06603bcb4c423be0c7817 | 1,858 | py | Python | env/lib/python2.7/site-packages/django/conf/locale/ml/formats.py | diego-d5000/MisValesMd | b641782bc2546776e9f55f452ec7fb48100dc482 | [
"MIT"
] | null | null | null | env/lib/python2.7/site-packages/django/conf/locale/ml/formats.py | diego-d5000/MisValesMd | b641782bc2546776e9f55f452ec7fb48100dc482 | [
"MIT"
] | null | null | null | env/lib/python2.7/site-packages/django/conf/locale/ml/formats.py | diego-d5000/MisValesMd | b641782bc2546776e9f55f452ec7fb48100dc482 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| 42.227273 | 82 | 0.519914 |
from __future__ import unicode_literals
FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y',
-%m-%d',
'%m/%d/%Y %H:%M:%S',
'%m/%d/%Y %H:%M:%S.%f',
'%m/%d/%Y %H:%M',
'%m/%d/%Y',
'%m/%d/%y %H:%M:%S',
'%m/%d/%y %H:%M:%S.%f',
'%m/%d/%y %H:%M',
'%m/%d/%y',
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| true | true |
f7f729cadd4540117d08793efdc87c511a3c09f8 | 5,716 | py | Python | circ2/parser.py | kepbod/CIRCexplorer2 | a7df1003d260a525aac36f9e2f81e9db5c3f5ec2 | [
"MIT"
] | 1 | 2016-07-18T07:34:29.000Z | 2016-07-18T07:34:29.000Z | circ2/parser.py | kepbod/CIRCexplorer2 | a7df1003d260a525aac36f9e2f81e9db5c3f5ec2 | [
"MIT"
] | null | null | null | circ2/parser.py | kepbod/CIRCexplorer2 | a7df1003d260a525aac36f9e2f81e9db5c3f5ec2 | [
"MIT"
] | null | null | null | import os
import pysam
from collections import defaultdict
from itertools import groupby
from .genomic_interval import Interval
class Segment(object):
    '''
    Alignment segment described by a 1-based position and a CIGAR string.

    Modified from https://github.com/brentp/cigar
    '''
    def __init__(self, pos, cigar):
        # Convert the 1-based SAM position to a 0-based reference offset.
        self.ref_start = int(pos) - 1
        self.ref_end = self.ref_start
        self.read_start = 0
        self.read_end = 0
        # Split the CIGAR into (count, op) pairs, e.g. "3S10M" -> (3,"S"), (10,"M").
        grouped = groupby(cigar, lambda ch: ch.isdigit())
        for idx, (_, digits) in enumerate(grouped):
            count = int("".join(digits))
            op = "".join(next(grouped)[1])
            if idx == 0 and op == 'S':
                # A leading soft clip shifts the read window but not the reference.
                self.read_start += count
                self.read_end += count
            if op in ("M", "I"):  # ops that consume read bases
                self.read_end += count
            if op in ("M", "D"):  # ops that consume reference bases
                self.ref_end += count
def parse_fusion_bam(bam_f, pair_flag):
    """Yield candidate fusion fragments from a TopHat-Fusion BAM file.

    Each yielded item is [chrom, strand, ref_start, ref_end, xp_info,
    query_name].  Fragments are paired by (read name + XF breakpoint key);
    when the second fragment of a pair arrives, it is yielded first and the
    stored first fragment is yielded after it.
    """
    fusions = {}  # (query_name;XF key) -> first-seen fragment awaiting its mate
    bam = pysam.AlignmentFile(bam_f, 'rb')
    for read in bam:
        if read.is_secondary:  # not the primary alignment
            continue
        if not read.has_tag('XF'):  # not fusion junctions
            continue
        if pair_flag is True and not read.has_tag('XP'):
            continue
        # XF fields 1-3 identify the fusion breakpoint; combined with the
        # read name they key both mates of the same fragment pair.
        xf_tag = ' '.join(read.get_tag('XF').split()[1:4])
        query_name = "%s;%s" % (read.query_name, xf_tag)
        chr1, chr2 = read.get_tag('XF').split()[1].split('-')
        if chr1 != chr2:  # not on the same chromosome
            continue
        strand = '+' if not read.is_reverse else '-'
        if pair_flag is True:
            xp_info = read.get_tag('XP')
        else:
            xp_info = ''
        if query_name not in fusions:  # first fragment
            fusions[query_name] = [chr1, strand, read.reference_start,
                                   read.reference_end, xp_info]
        else:  # second fragment
            if chr1 == fusions[query_name][0] \
                    and strand == fusions[query_name][1]:
                yield [chr1, strand, read.reference_start, read.reference_end,
                       xp_info] + [query_name]
            # NOTE(review): this yield sits outside the chrom/strand check
            # above, so the stored first fragment is emitted even when the
            # mate check fails — verify the asymmetry is intentional.
            yield fusions[query_name] + [query_name]
    bam.close()
def parse_ref(ref_file, flag):
    """Parse a refFlat-style annotation file.

    With flag == 1, returns (genes, novel_genes, gene_info, chrom_info)
    where the per-chromosome gene lists are wrapped in Interval objects
    and Cufflinks ("CUFF*") isoforms are kept separate as novel genes.
    Any other flag returns {"gene\\tiso\\tchrom\\tstrand": [starts, ends]}.
    """
    build_intervals = (flag == 1)
    if build_intervals:
        genes = defaultdict(list)
        novel_genes = defaultdict(list)
        gene_info = {}
        chrom_info = set()
    else:
        genes = {}
    with open(ref_file, 'r') as ref:
        for line in ref:
            fields = line.split()
            gene_id, iso_id, chrom, strand = fields[:4]
            total_id = '\t'.join(['iso', gene_id, iso_id, chrom, strand])
            starts = [int(pos) for pos in fields[9].rstrip(',').split(',')]
            ends = [int(pos) for pos in fields[10].rstrip(',').split(',')]
            if build_intervals:
                entry = [starts[0], ends[-1], total_id]
                # Cufflinks-assembled isoforms go into the "novel" bucket.
                target = novel_genes if iso_id.startswith('CUFF') else genes
                target[chrom].append(entry)
                gene_info[total_id] = [starts, ends]
            else:
                key = '\t'.join([gene_id, iso_id, chrom, strand])
                genes[key] = [starts, ends]
    if not build_intervals:
        return genes
    # Wrap every per-chromosome list in an Interval and record the chroms.
    for group in (genes, novel_genes):
        for chrom in group:
            group[chrom] = Interval(group[chrom])
            chrom_info.add(chrom)
    return (genes, novel_genes, gene_info, chrom_info)
def parse_bed(fus):
    """Read fusion BED records into per-chromosome lists and a flat index.

    Returns (fusions, fusion_index): fusions maps chrom ->
    [[start, end, fusion_id], ...]; fusion_index maps fusion_id ->
    [start, end].
    """
    fusions = defaultdict(list)
    fusion_index = {}
    with open(fus, 'r') as bed:
        for line in bed:
            chrom, start, end, name = line.split()[:4]
            start, end = int(start), int(end)
            # The supporting read count is embedded after the first '/'.
            reads = name.split('/')[1]
            fusion_id = '%s\t%s' % (name, reads)
            fusions[chrom].append([start, end, fusion_id])
            fusion_index[fusion_id] = [start, end]
    return (fusions, fusion_index)
def parse_junc(junc_f, flag=0):
    """Parse a TopHat junctions.bed file into junction read counts.

    Always builds junc: "chrom\\tleft\\tright" -> total reads.  With
    flag == 1 it also returns left/right maps of [partner, reads] lists;
    with flag == 2 it returns left/right read-count totals instead.
    """
    junc = defaultdict(int)
    if flag == 1:
        left_junc = defaultdict(list)
        right_junc = defaultdict(list)
    elif flag == 2:
        left_junc = defaultdict(int)
        right_junc = defaultdict(int)
    with open(junc_f, 'r') as junc_file:
        junc_file.readline()  # skip the track header line
        for line in junc_file:
            fields = line.split()
            chrom = fields[0]
            start = int(fields[1])
            reads = int(fields[4])
            # First block size and second block offset locate the splice.
            size = int(fields[10].split(',')[0])
            offset = int(fields[11].split(',')[1])
            left = str(start + size)
            right = str(start + offset)
            junc['\t'.join([chrom, left, right])] += reads
            if flag == 1:
                left_junc['\t'.join([chrom, left])].append([right, reads])
                right_junc['\t'.join([chrom, right])].append([left, reads])
            if flag == 2:
                left_junc['\t'.join([chrom, left])] += reads
                right_junc['\t'.join([chrom, right])] += reads
    if flag:
        return (junc, left_junc, right_junc)
    else:
        return junc
def check_fasta(fa_f, pysam_flag=True):
    """Ensure *fa_f* has a faidx index; return a FastaFile or the path.

    With pysam_flag True (default) a pysam.FastaFile object is returned;
    otherwise the original path string is handed back.
    """
    if not os.path.isfile(fa_f + '.fai'):
        # Build the .fai index on first use.
        pysam.faidx(fa_f)
    return pysam.FastaFile(fa_f) if pysam_flag else fa_f
| 35.283951 | 78 | 0.528341 | import os
import pysam
from collections import defaultdict
from itertools import groupby
from .genomic_interval import Interval
class Segment(object):
    """Alignment segment described by a 1-based position and a CIGAR string."""

    def __init__(self, pos, cigar):
        # Convert the 1-based SAM position to a 0-based reference offset.
        self.ref_start = int(pos) - 1
        self.ref_end = self.ref_start
        self.read_start = 0
        self.read_end = 0
        # Split the CIGAR into (count, op) pairs, e.g. "3S10M" -> (3,"S"), (10,"M").
        grouped = groupby(cigar, lambda ch: ch.isdigit())
        for idx, (_, digits) in enumerate(grouped):
            count = int("".join(digits))
            op = "".join(next(grouped)[1])
            if idx == 0 and op == 'S':
                # A leading soft clip shifts the read window but not the reference.
                self.read_start += count
                self.read_end += count
            if op in ("M", "I"):  # ops that consume read bases
                self.read_end += count
            if op in ("M", "D"):  # ops that consume reference bases
                self.ref_end += count
def parse_fusion_bam(bam_f, pair_flag):
    """Yield candidate fusion fragments from a TopHat-Fusion BAM file.

    Each yielded item is [chrom, strand, ref_start, ref_end, xp_info,
    query_name].  Fragments are paired by (read name + XF breakpoint key);
    when the second fragment of a pair arrives, it is yielded first and the
    stored first fragment is yielded after it.
    """
    fusions = {}  # (query_name;XF key) -> first-seen fragment awaiting its mate
    bam = pysam.AlignmentFile(bam_f, 'rb')
    for read in bam:
        if read.is_secondary:  # skip non-primary alignments
            continue
        if not read.has_tag('XF'):  # not a fusion-junction read
            continue
        if pair_flag is True and not read.has_tag('XP'):
            continue
        # XF fields 1-3 identify the fusion breakpoint; combined with the
        # read name they key both mates of the same fragment pair.
        xf_tag = ' '.join(read.get_tag('XF').split()[1:4])
        query_name = "%s;%s" % (read.query_name, xf_tag)
        chr1, chr2 = read.get_tag('XF').split()[1].split('-')
        if chr1 != chr2:  # skip inter-chromosomal candidates
            continue
        strand = '+' if not read.is_reverse else '-'
        if pair_flag is True:
            xp_info = read.get_tag('XP')
        else:
            xp_info = ''
        if query_name not in fusions:  # first fragment of the pair
            fusions[query_name] = [chr1, strand, read.reference_start,
                                   read.reference_end, xp_info]
        else:  # second fragment of the pair
            if chr1 == fusions[query_name][0] \
                    and strand == fusions[query_name][1]:
                yield [chr1, strand, read.reference_start, read.reference_end,
                       xp_info] + [query_name]
            # NOTE(review): this yield sits outside the chrom/strand check
            # above, so the stored first fragment is emitted even when the
            # mate check fails — verify the asymmetry is intentional.
            yield fusions[query_name] + [query_name]
    bam.close()
def parse_ref(ref_file, flag):
    """Parse a refFlat-style annotation file.

    With flag == 1, returns (genes, novel_genes, gene_info, chrom_info)
    where the per-chromosome gene lists are wrapped in Interval objects
    and Cufflinks ("CUFF*") isoforms are kept separate as novel genes.
    Any other flag returns {"gene\\tiso\\tchrom\\tstrand": [starts, ends]}.
    """
    build_intervals = (flag == 1)
    if build_intervals:
        genes = defaultdict(list)
        novel_genes = defaultdict(list)
        gene_info = {}
        chrom_info = set()
    else:
        genes = {}
    with open(ref_file, 'r') as ref:
        for line in ref:
            fields = line.split()
            gene_id, iso_id, chrom, strand = fields[:4]
            total_id = '\t'.join(['iso', gene_id, iso_id, chrom, strand])
            starts = [int(pos) for pos in fields[9].rstrip(',').split(',')]
            ends = [int(pos) for pos in fields[10].rstrip(',').split(',')]
            if build_intervals:
                entry = [starts[0], ends[-1], total_id]
                # Cufflinks-assembled isoforms go into the "novel" bucket.
                target = novel_genes if iso_id.startswith('CUFF') else genes
                target[chrom].append(entry)
                gene_info[total_id] = [starts, ends]
            else:
                key = '\t'.join([gene_id, iso_id, chrom, strand])
                genes[key] = [starts, ends]
    if not build_intervals:
        return genes
    # Wrap every per-chromosome list in an Interval and record the chroms.
    for group in (genes, novel_genes):
        for chrom in group:
            group[chrom] = Interval(group[chrom])
            chrom_info.add(chrom)
    return (genes, novel_genes, gene_info, chrom_info)
def parse_bed(fus):
    """Read fusion BED records into per-chromosome lists and a flat index.

    Returns (fusions, fusion_index): fusions maps chrom ->
    [[start, end, fusion_id], ...]; fusion_index maps fusion_id ->
    [start, end].
    """
    fusions = defaultdict(list)
    fusion_index = {}
    with open(fus, 'r') as bed:
        for line in bed:
            chrom, start, end, name = line.split()[:4]
            start, end = int(start), int(end)
            # The supporting read count is embedded after the first '/'.
            reads = name.split('/')[1]
            fusion_id = '%s\t%s' % (name, reads)
            fusions[chrom].append([start, end, fusion_id])
            fusion_index[fusion_id] = [start, end]
    return (fusions, fusion_index)
def parse_junc(junc_f, flag=0):
    """Parse a TopHat junctions.bed file into junction read counts.

    Always builds junc: "chrom\\tleft\\tright" -> total reads.  With
    flag == 1 it also returns left/right maps of [partner, reads] lists;
    with flag == 2 it returns left/right read-count totals instead.
    """
    junc = defaultdict(int)
    if flag == 1:
        left_junc = defaultdict(list)
        right_junc = defaultdict(list)
    elif flag == 2:
        left_junc = defaultdict(int)
        right_junc = defaultdict(int)
    with open(junc_f, 'r') as junc_file:
        junc_file.readline()  # skip the track header line
        for line in junc_file:
            fields = line.split()
            chrom = fields[0]
            start = int(fields[1])
            reads = int(fields[4])
            # First block size and second block offset locate the splice.
            size = int(fields[10].split(',')[0])
            offset = int(fields[11].split(',')[1])
            left = str(start + size)
            right = str(start + offset)
            junc['\t'.join([chrom, left, right])] += reads
            if flag == 1:
                left_junc['\t'.join([chrom, left])].append([right, reads])
                right_junc['\t'.join([chrom, right])].append([left, reads])
            if flag == 2:
                left_junc['\t'.join([chrom, left])] += reads
                right_junc['\t'.join([chrom, right])] += reads
    if flag:
        return (junc, left_junc, right_junc)
    else:
        return junc
def check_fasta(fa_f, pysam_flag=True):
    """Ensure *fa_f* has a faidx index; return a FastaFile or the path.

    With pysam_flag True (default) a pysam.FastaFile object is returned;
    otherwise the original path string is handed back.
    """
    if not os.path.isfile(fa_f + '.fai'):
        # Build the .fai index on first use.
        pysam.faidx(fa_f)
    return pysam.FastaFile(fa_f) if pysam_flag else fa_f
| true | true |
f7f72a25b450822753412e7bd2164c22391555bf | 483 | py | Python | sim_scripts/SaveImage.py | avbotz/eva-public | 29996eba2c1c7df35025b8a2e7ab27169fa1fe10 | [
"MIT"
] | 2 | 2016-08-15T05:33:42.000Z | 2019-09-29T21:09:47.000Z | sim_scripts/SaveImage.py | avbotz/eva-public | 29996eba2c1c7df35025b8a2e7ab27169fa1fe10 | [
"MIT"
] | null | null | null | sim_scripts/SaveImage.py | avbotz/eva-public | 29996eba2c1c7df35025b8a2e7ab27169fa1fe10 | [
"MIT"
] | null | null | null | ## SaveImage.py
## Description: Saves Camera Images periodically (depending on the value set in the Logic Editor) for EVA.
## If SleepState is true, image saving is paused.
from bge import render
from bge import logic
controller = logic.getCurrentController()
object = controller.owner
path = logic.expandPath('//')
if(object['SleepState'] == 0):
render.setWindowSize(object['WindowWidth'], object['WindowHeight']*2)
render.makeScreenshot(path + 'Camera.png') | 34.5 | 106 | 0.726708 | render.setWindowSize(object['WindowWidth'], object['WindowHeight']*2)
render.makeScreenshot(path + 'Camera.png') | true | true |
f7f72ac6d4026f944a7a5728b4f9e1a4fa9a9425 | 547 | py | Python | backend/extensions.py | hpf0532/corona | 020f7766dbac808961cc8f7ebb921aac6825464f | [
"MIT"
] | 6 | 2020-03-09T13:05:04.000Z | 2021-12-14T08:57:22.000Z | backend/extensions.py | hpf0532/corona | 020f7766dbac808961cc8f7ebb921aac6825464f | [
"MIT"
] | 7 | 2021-04-07T20:53:53.000Z | 2022-03-12T00:23:06.000Z | backend/extensions.py | hpf0532/corona | 020f7766dbac808961cc8f7ebb921aac6825464f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Author: hpf
# Date: 2020/3/1 上午10:44
# File: extensions.py
# IDE: PyCharm
import redis
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_mail import Mail
from flask_avatars import Avatars
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from backend.settings import POOL
# Flask extension singletons, created unbound here so other modules can
# import them; presumably they are attached to the app later via
# init_app() in an application factory — confirm against the app module.
db = SQLAlchemy()
migrate = Migrate()
mail = Mail()
avatars = Avatars()
# Rate limiter keyed on the caller's remote address.
limiter = Limiter(key_func=get_remote_address)
# Redis connection (shared pool configured in backend.settings).
redis_conn = redis.Redis(connection_pool=POOL)
| 21.88 | 49 | 0.786106 |
import redis
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_mail import Mail
from flask_avatars import Avatars
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from backend.settings import POOL
# Flask extension singletons, created unbound here so other modules can
# import them; presumably they are attached to the app later via
# init_app() in an application factory — confirm against the app module.
db = SQLAlchemy()
migrate = Migrate()
mail = Mail()
avatars = Avatars()
# Rate limiter keyed on the caller's remote address.
limiter = Limiter(key_func=get_remote_address)
# Redis connection (shared pool configured in backend.settings).
redis_conn = redis.Redis(connection_pool=POOL)
| true | true |
f7f72b0250b2e7a7cd533ba1ac086659508049b0 | 595 | py | Python | src/Services/Event/UpcomingRaw.py | MetuMobile/server-side | fcbe59833c308b16e5d43c8aab1e78550b71040d | [
"MIT"
] | null | null | null | src/Services/Event/UpcomingRaw.py | MetuMobile/server-side | fcbe59833c308b16e5d43c8aab1e78550b71040d | [
"MIT"
] | 7 | 2017-06-07T13:19:27.000Z | 2017-06-07T14:22:15.000Z | src/Services/Event/UpcomingRaw.py | MetuMobile/server-side | fcbe59833c308b16e5d43c8aab1e78550b71040d | [
"MIT"
] | null | null | null | from flask.views import MethodView
from flask import jsonify
from Services.Event.MetuEventsDb import MetuEventsDb
class UpcomingRaw(MethodView):
def get(self):
return jsonify(RawUpcomingEvents=self.getRawUpcomingEvents())
def getRawUpcomingEvents(self):
db = MetuEventsDb()
events = db.fetchAllRaw()
db.connection.close()
return events
def _isLongerThan6Months(self, event):
firstDate = event['ilk_tarih']
lastDate = event['son_tarih']
delta = lastDate - firstDate
return True if delta.days>180 else False
| 27.045455 | 69 | 0.689076 | from flask.views import MethodView
from flask import jsonify
from Services.Event.MetuEventsDb import MetuEventsDb
class UpcomingRaw(MethodView):
def get(self):
return jsonify(RawUpcomingEvents=self.getRawUpcomingEvents())
def getRawUpcomingEvents(self):
db = MetuEventsDb()
events = db.fetchAllRaw()
db.connection.close()
return events
def _isLongerThan6Months(self, event):
firstDate = event['ilk_tarih']
lastDate = event['son_tarih']
delta = lastDate - firstDate
return True if delta.days>180 else False
| true | true |
f7f72b21fb947df53e998432e3045f57df11c0bc | 71,664 | bzl | Python | dotnet/private/deps/nuget.bzl | lb5tr/rules_dotnet | 43479c10ef07156a29a4265caa72db33279219db | [
"Apache-2.0"
] | null | null | null | dotnet/private/deps/nuget.bzl | lb5tr/rules_dotnet | 43479c10ef07156a29a4265caa72db33279219db | [
"Apache-2.0"
] | null | null | null | dotnet/private/deps/nuget.bzl | lb5tr/rules_dotnet | 43479c10ef07156a29a4265caa72db33279219db | [
"Apache-2.0"
] | null | null | null | load("@io_bazel_rules_dotnet//dotnet/private:rules/nuget.bzl", "nuget_package")
def dotnet_repositories_nuget():
### Generated by the tool
nuget_package(
name = "microsoft.netcore.platforms",
package = "microsoft.netcore.platforms",
version = "1.1.0",
sha256 = "15e338d24b5c39b4099389cc612841eb51ff13c07bb4829f97d39b27420e7023",
)
nuget_package(
name = "newtonsoft.json",
package = "newtonsoft.json",
version = "9.0.1",
sha256 = "998081ae052120917346e2cb57d488888147a2fcdf47c52ea9f83a7b4f049e55",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netcoreapp2.1": "lib/netstandard1.0/Newtonsoft.Json.dll",
},
net_lib = {
"net45": "lib/net45/Newtonsoft.Json.dll",
"net451": "lib/net45/Newtonsoft.Json.dll",
"net452": "lib/net45/Newtonsoft.Json.dll",
"net46": "lib/net45/Newtonsoft.Json.dll",
"net461": "lib/net45/Newtonsoft.Json.dll",
"net462": "lib/net45/Newtonsoft.Json.dll",
"net47": "lib/net45/Newtonsoft.Json.dll",
"net471": "lib/net45/Newtonsoft.Json.dll",
"net472": "lib/net45/Newtonsoft.Json.dll",
"netstandard1.0": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netstandard1.1": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netstandard1.2": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netstandard1.3": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netstandard1.4": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netstandard1.5": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netstandard1.6": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netstandard2.0": "lib/netstandard1.0/Newtonsoft.Json.dll",
},
mono_lib = "lib/net45/Newtonsoft.Json.dll",
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netcoreapp2.1": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
},
net_files = {
"net45": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net451": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net452": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net46": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net461": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net462": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net47": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net471": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net472": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard1.0": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard1.1": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard1.2": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard1.3": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard1.4": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard1.5": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard1.6": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard2.0": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
},
mono_files = [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
)
# Repository rule for the NuGet.Frameworks 4.9.2 package (sha256-pinned download).
# core_lib / net_lib / mono_lib map each target framework moniker (TFM) to the
# assembly to reference; core_files / net_files / mono_files list the artifacts
# (dll + xml docs) exposed for that TFM. Sibling nuget.* repos below depend on it.
nuget_package(
    name = "nuget.frameworks",
    package = "nuget.frameworks",
    version = "4.9.2",
    sha256 = "e461391653e748cfcb72f8dbf81ae0c7dba87ce3a66d8668a4be0245986ff6de",
    # .NET Core targets resolve to the netstandard1.6 build of the assembly.
    core_lib = {
        "netcoreapp2.0": "lib/netstandard1.6/NuGet.Frameworks.dll",
        "netcoreapp2.1": "lib/netstandard1.6/NuGet.Frameworks.dll",
    },
    # .NET Framework targets: net45-net452 use the net40 build, net46+ the net46 build.
    net_lib = {
        "net45": "lib/net40/NuGet.Frameworks.dll",
        "net451": "lib/net40/NuGet.Frameworks.dll",
        "net452": "lib/net40/NuGet.Frameworks.dll",
        "net46": "lib/net46/NuGet.Frameworks.dll",
        "net461": "lib/net46/NuGet.Frameworks.dll",
        "net462": "lib/net46/NuGet.Frameworks.dll",
        "net47": "lib/net46/NuGet.Frameworks.dll",
        "net471": "lib/net46/NuGet.Frameworks.dll",
        "net472": "lib/net46/NuGet.Frameworks.dll",
        "netstandard1.6": "lib/netstandard1.6/NuGet.Frameworks.dll",
        "netstandard2.0": "lib/netstandard1.6/NuGet.Frameworks.dll",
    },
    mono_lib = "lib/net46/NuGet.Frameworks.dll",
    core_files = {
        "netcoreapp2.0": [
            "lib/netstandard1.6/NuGet.Frameworks.dll",
            "lib/netstandard1.6/NuGet.Frameworks.xml",
        ],
        "netcoreapp2.1": [
            "lib/netstandard1.6/NuGet.Frameworks.dll",
            "lib/netstandard1.6/NuGet.Frameworks.xml",
        ],
    },
    net_files = {
        "net45": [
            "lib/net40/NuGet.Frameworks.dll",
            "lib/net40/NuGet.Frameworks.xml",
        ],
        "net451": [
            "lib/net40/NuGet.Frameworks.dll",
            "lib/net40/NuGet.Frameworks.xml",
        ],
        "net452": [
            "lib/net40/NuGet.Frameworks.dll",
            "lib/net40/NuGet.Frameworks.xml",
        ],
        "net46": [
            "lib/net46/NuGet.Frameworks.dll",
            "lib/net46/NuGet.Frameworks.xml",
        ],
        "net461": [
            "lib/net46/NuGet.Frameworks.dll",
            "lib/net46/NuGet.Frameworks.xml",
        ],
        "net462": [
            "lib/net46/NuGet.Frameworks.dll",
            "lib/net46/NuGet.Frameworks.xml",
        ],
        "net47": [
            "lib/net46/NuGet.Frameworks.dll",
            "lib/net46/NuGet.Frameworks.xml",
        ],
        "net471": [
            "lib/net46/NuGet.Frameworks.dll",
            "lib/net46/NuGet.Frameworks.xml",
        ],
        "net472": [
            "lib/net46/NuGet.Frameworks.dll",
            "lib/net46/NuGet.Frameworks.xml",
        ],
        "netstandard1.6": [
            "lib/netstandard1.6/NuGet.Frameworks.dll",
            "lib/netstandard1.6/NuGet.Frameworks.xml",
        ],
        "netstandard2.0": [
            "lib/netstandard1.6/NuGet.Frameworks.dll",
            "lib/netstandard1.6/NuGet.Frameworks.xml",
        ],
    },
    mono_files = [
        "lib/net46/NuGet.Frameworks.dll",
        "lib/net46/NuGet.Frameworks.xml",
    ],
)
# Repository rule for the NuGet.Common 4.9.2 package (sha256-pinned download).
# Depends on @nuget.frameworks; *_lib select the assembly per target framework
# moniker (TFM), *_files list the shipped artifacts (dll + xml docs).
nuget_package(
    name = "nuget.common",
    package = "nuget.common",
    version = "4.9.2",
    sha256 = "001e8aaae2c07f4914735895b5106ebedc372b2b70dd9fa583f4ee59e943bae8",
    core_lib = {
        "netcoreapp2.0": "lib/netstandard1.6/NuGet.Common.dll",
        "netcoreapp2.1": "lib/netstandard1.6/NuGet.Common.dll",
    },
    net_lib = {
        "net46": "lib/net46/NuGet.Common.dll",
        "net461": "lib/net46/NuGet.Common.dll",
        "net462": "lib/net46/NuGet.Common.dll",
        "net47": "lib/net46/NuGet.Common.dll",
        "net471": "lib/net46/NuGet.Common.dll",
        "net472": "lib/net46/NuGet.Common.dll",
        "netstandard1.6": "lib/netstandard1.6/NuGet.Common.dll",
        "netstandard2.0": "lib/netstandard1.6/NuGet.Common.dll",
    },
    mono_lib = "lib/net46/NuGet.Common.dll",
    # NOTE(review): core_deps is keyed by net*/netstandard TFMs although core_lib
    # and core_files use netcoreapp* keys, and its contents are identical to
    # net_deps — looks generator-copied; confirm how the nuget_package macro
    # consumes core_deps before relying on it.
    core_deps = {
        "net46": [
            "@nuget.frameworks//:net46_net",
        ],
        "net461": [
            "@nuget.frameworks//:net461_net",
        ],
        "net462": [
            "@nuget.frameworks//:net462_net",
        ],
        "net47": [
            "@nuget.frameworks//:net47_net",
        ],
        "net471": [
            "@nuget.frameworks//:net471_net",
        ],
        "net472": [
            "@nuget.frameworks//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.frameworks//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.frameworks//:netstandard2.0_net",
        ],
    },
    net_deps = {
        "net46": [
            "@nuget.frameworks//:net46_net",
        ],
        "net461": [
            "@nuget.frameworks//:net461_net",
        ],
        "net462": [
            "@nuget.frameworks//:net462_net",
        ],
        "net47": [
            "@nuget.frameworks//:net47_net",
        ],
        "net471": [
            "@nuget.frameworks//:net471_net",
        ],
        "net472": [
            "@nuget.frameworks//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.frameworks//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.frameworks//:netstandard2.0_net",
        ],
    },
    mono_deps = [
        "@nuget.frameworks//:mono",
    ],
    core_files = {
        "netcoreapp2.0": [
            "lib/netstandard1.6/NuGet.Common.dll",
            "lib/netstandard1.6/NuGet.Common.xml",
        ],
        "netcoreapp2.1": [
            "lib/netstandard1.6/NuGet.Common.dll",
            "lib/netstandard1.6/NuGet.Common.xml",
        ],
    },
    net_files = {
        "net46": [
            "lib/net46/NuGet.Common.dll",
            "lib/net46/NuGet.Common.xml",
        ],
        "net461": [
            "lib/net46/NuGet.Common.dll",
            "lib/net46/NuGet.Common.xml",
        ],
        "net462": [
            "lib/net46/NuGet.Common.dll",
            "lib/net46/NuGet.Common.xml",
        ],
        "net47": [
            "lib/net46/NuGet.Common.dll",
            "lib/net46/NuGet.Common.xml",
        ],
        "net471": [
            "lib/net46/NuGet.Common.dll",
            "lib/net46/NuGet.Common.xml",
        ],
        "net472": [
            "lib/net46/NuGet.Common.dll",
            "lib/net46/NuGet.Common.xml",
        ],
        "netstandard1.6": [
            "lib/netstandard1.6/NuGet.Common.dll",
            "lib/netstandard1.6/NuGet.Common.xml",
        ],
        "netstandard2.0": [
            "lib/netstandard1.6/NuGet.Common.dll",
            "lib/netstandard1.6/NuGet.Common.xml",
        ],
    },
    mono_files = [
        "lib/net46/NuGet.Common.dll",
        "lib/net46/NuGet.Common.xml",
    ],
)
# Repository rule for the NuGet.Configuration 4.9.2 package (sha256-pinned).
# Depends on @nuget.common; *_lib select the assembly per target framework
# moniker (TFM), *_files list the shipped artifacts (dll + xml docs).
nuget_package(
    name = "nuget.configuration",
    package = "nuget.configuration",
    version = "4.9.2",
    sha256 = "103fd9eab88ececa0190b041c3ae6eaf470929280a35c7f444ffd6ad6b0328b6",
    core_lib = {
        "netcoreapp2.0": "lib/netstandard1.6/NuGet.Configuration.dll",
        "netcoreapp2.1": "lib/netstandard1.6/NuGet.Configuration.dll",
    },
    net_lib = {
        "net46": "lib/net46/NuGet.Configuration.dll",
        "net461": "lib/net46/NuGet.Configuration.dll",
        "net462": "lib/net46/NuGet.Configuration.dll",
        "net47": "lib/net46/NuGet.Configuration.dll",
        "net471": "lib/net46/NuGet.Configuration.dll",
        "net472": "lib/net46/NuGet.Configuration.dll",
        "netstandard1.6": "lib/netstandard1.6/NuGet.Configuration.dll",
        "netstandard2.0": "lib/netstandard1.6/NuGet.Configuration.dll",
    },
    mono_lib = "lib/net46/NuGet.Configuration.dll",
    # NOTE(review): core_deps keys (net*/netstandard) don't match the netcoreapp*
    # keys of core_lib/core_files and duplicate net_deps — confirm macro semantics.
    core_deps = {
        "net46": [
            "@nuget.common//:net46_net",
        ],
        "net461": [
            "@nuget.common//:net461_net",
        ],
        "net462": [
            "@nuget.common//:net462_net",
        ],
        "net47": [
            "@nuget.common//:net47_net",
        ],
        "net471": [
            "@nuget.common//:net471_net",
        ],
        "net472": [
            "@nuget.common//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.common//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.common//:netstandard2.0_net",
        ],
    },
    net_deps = {
        "net46": [
            "@nuget.common//:net46_net",
        ],
        "net461": [
            "@nuget.common//:net461_net",
        ],
        "net462": [
            "@nuget.common//:net462_net",
        ],
        "net47": [
            "@nuget.common//:net47_net",
        ],
        "net471": [
            "@nuget.common//:net471_net",
        ],
        "net472": [
            "@nuget.common//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.common//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.common//:netstandard2.0_net",
        ],
    },
    mono_deps = [
        "@nuget.common//:mono",
    ],
    core_files = {
        "netcoreapp2.0": [
            "lib/netstandard1.6/NuGet.Configuration.dll",
            "lib/netstandard1.6/NuGet.Configuration.xml",
        ],
        "netcoreapp2.1": [
            "lib/netstandard1.6/NuGet.Configuration.dll",
            "lib/netstandard1.6/NuGet.Configuration.xml",
        ],
    },
    net_files = {
        "net46": [
            "lib/net46/NuGet.Configuration.dll",
            "lib/net46/NuGet.Configuration.xml",
        ],
        "net461": [
            "lib/net46/NuGet.Configuration.dll",
            "lib/net46/NuGet.Configuration.xml",
        ],
        "net462": [
            "lib/net46/NuGet.Configuration.dll",
            "lib/net46/NuGet.Configuration.xml",
        ],
        "net47": [
            "lib/net46/NuGet.Configuration.dll",
            "lib/net46/NuGet.Configuration.xml",
        ],
        "net471": [
            "lib/net46/NuGet.Configuration.dll",
            "lib/net46/NuGet.Configuration.xml",
        ],
        "net472": [
            "lib/net46/NuGet.Configuration.dll",
            "lib/net46/NuGet.Configuration.xml",
        ],
        "netstandard1.6": [
            "lib/netstandard1.6/NuGet.Configuration.dll",
            "lib/netstandard1.6/NuGet.Configuration.xml",
        ],
        "netstandard2.0": [
            "lib/netstandard1.6/NuGet.Configuration.dll",
            "lib/netstandard1.6/NuGet.Configuration.xml",
        ],
    },
    mono_files = [
        "lib/net46/NuGet.Configuration.dll",
        "lib/net46/NuGet.Configuration.xml",
    ],
)
# Repository rule for the NuGet.Versioning 4.9.2 package (sha256-pinned).
# No package dependencies; *_lib select the assembly per target framework
# moniker (TFM), *_files list the shipped artifacts (dll + xml docs).
nuget_package(
    name = "nuget.versioning",
    package = "nuget.versioning",
    version = "4.9.2",
    sha256 = "10bae2c865a86ba5a955e32b886f7626f1a0af1959ff9d9d9ff010d5049c7cc8",
    core_lib = {
        "netcoreapp2.0": "lib/netstandard1.6/NuGet.Versioning.dll",
        "netcoreapp2.1": "lib/netstandard1.6/NuGet.Versioning.dll",
    },
    net_lib = {
        "net46": "lib/net46/NuGet.Versioning.dll",
        "net461": "lib/net46/NuGet.Versioning.dll",
        "net462": "lib/net46/NuGet.Versioning.dll",
        "net47": "lib/net46/NuGet.Versioning.dll",
        "net471": "lib/net46/NuGet.Versioning.dll",
        "net472": "lib/net46/NuGet.Versioning.dll",
        "netstandard1.6": "lib/netstandard1.6/NuGet.Versioning.dll",
        "netstandard2.0": "lib/netstandard1.6/NuGet.Versioning.dll",
    },
    mono_lib = "lib/net46/NuGet.Versioning.dll",
    core_files = {
        "netcoreapp2.0": [
            "lib/netstandard1.6/NuGet.Versioning.dll",
            "lib/netstandard1.6/NuGet.Versioning.xml",
        ],
        "netcoreapp2.1": [
            "lib/netstandard1.6/NuGet.Versioning.dll",
            "lib/netstandard1.6/NuGet.Versioning.xml",
        ],
    },
    net_files = {
        "net46": [
            "lib/net46/NuGet.Versioning.dll",
            "lib/net46/NuGet.Versioning.xml",
        ],
        "net461": [
            "lib/net46/NuGet.Versioning.dll",
            "lib/net46/NuGet.Versioning.xml",
        ],
        "net462": [
            "lib/net46/NuGet.Versioning.dll",
            "lib/net46/NuGet.Versioning.xml",
        ],
        "net47": [
            "lib/net46/NuGet.Versioning.dll",
            "lib/net46/NuGet.Versioning.xml",
        ],
        "net471": [
            "lib/net46/NuGet.Versioning.dll",
            "lib/net46/NuGet.Versioning.xml",
        ],
        "net472": [
            "lib/net46/NuGet.Versioning.dll",
            "lib/net46/NuGet.Versioning.xml",
        ],
        "netstandard1.6": [
            "lib/netstandard1.6/NuGet.Versioning.dll",
            "lib/netstandard1.6/NuGet.Versioning.xml",
        ],
        "netstandard2.0": [
            "lib/netstandard1.6/NuGet.Versioning.dll",
            "lib/netstandard1.6/NuGet.Versioning.xml",
        ],
    },
    mono_files = [
        "lib/net46/NuGet.Versioning.dll",
        "lib/net46/NuGet.Versioning.xml",
    ],
)
# Repository rule for the NuGet.LibraryModel 4.9.2 package (sha256-pinned).
# Depends on @nuget.common and @nuget.versioning; *_lib select the assembly per
# target framework moniker (TFM), *_files list the shipped dll + xml docs.
nuget_package(
    name = "nuget.librarymodel",
    package = "nuget.librarymodel",
    version = "4.9.2",
    sha256 = "da08ab3101ab4adc527947abb9739ad665b37084b73a2165a184cf71be0a64e0",
    core_lib = {
        "netcoreapp2.0": "lib/netstandard1.6/NuGet.LibraryModel.dll",
        "netcoreapp2.1": "lib/netstandard1.6/NuGet.LibraryModel.dll",
    },
    net_lib = {
        "net46": "lib/net46/NuGet.LibraryModel.dll",
        "net461": "lib/net46/NuGet.LibraryModel.dll",
        "net462": "lib/net46/NuGet.LibraryModel.dll",
        "net47": "lib/net46/NuGet.LibraryModel.dll",
        "net471": "lib/net46/NuGet.LibraryModel.dll",
        "net472": "lib/net46/NuGet.LibraryModel.dll",
        "netstandard1.6": "lib/netstandard1.6/NuGet.LibraryModel.dll",
        "netstandard2.0": "lib/netstandard1.6/NuGet.LibraryModel.dll",
    },
    mono_lib = "lib/net46/NuGet.LibraryModel.dll",
    # NOTE(review): core_deps keys (net*/netstandard) don't match the netcoreapp*
    # keys of core_lib/core_files and duplicate net_deps — confirm macro semantics.
    core_deps = {
        "net46": [
            "@nuget.common//:net46_net",
            "@nuget.versioning//:net46_net",
        ],
        "net461": [
            "@nuget.common//:net461_net",
            "@nuget.versioning//:net461_net",
        ],
        "net462": [
            "@nuget.common//:net462_net",
            "@nuget.versioning//:net462_net",
        ],
        "net47": [
            "@nuget.common//:net47_net",
            "@nuget.versioning//:net47_net",
        ],
        "net471": [
            "@nuget.common//:net471_net",
            "@nuget.versioning//:net471_net",
        ],
        "net472": [
            "@nuget.common//:net472_net",
            "@nuget.versioning//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.common//:netstandard1.6_net",
            "@nuget.versioning//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.common//:netstandard2.0_net",
            "@nuget.versioning//:netstandard2.0_net",
        ],
    },
    net_deps = {
        "net46": [
            "@nuget.common//:net46_net",
            "@nuget.versioning//:net46_net",
        ],
        "net461": [
            "@nuget.common//:net461_net",
            "@nuget.versioning//:net461_net",
        ],
        "net462": [
            "@nuget.common//:net462_net",
            "@nuget.versioning//:net462_net",
        ],
        "net47": [
            "@nuget.common//:net47_net",
            "@nuget.versioning//:net47_net",
        ],
        "net471": [
            "@nuget.common//:net471_net",
            "@nuget.versioning//:net471_net",
        ],
        "net472": [
            "@nuget.common//:net472_net",
            "@nuget.versioning//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.common//:netstandard1.6_net",
            "@nuget.versioning//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.common//:netstandard2.0_net",
            "@nuget.versioning//:netstandard2.0_net",
        ],
    },
    mono_deps = [
        "@nuget.common//:mono",
        "@nuget.versioning//:mono",
    ],
    core_files = {
        "netcoreapp2.0": [
            "lib/netstandard1.6/NuGet.LibraryModel.dll",
            "lib/netstandard1.6/NuGet.LibraryModel.xml",
        ],
        "netcoreapp2.1": [
            "lib/netstandard1.6/NuGet.LibraryModel.dll",
            "lib/netstandard1.6/NuGet.LibraryModel.xml",
        ],
    },
    net_files = {
        "net46": [
            "lib/net46/NuGet.LibraryModel.dll",
            "lib/net46/NuGet.LibraryModel.xml",
        ],
        "net461": [
            "lib/net46/NuGet.LibraryModel.dll",
            "lib/net46/NuGet.LibraryModel.xml",
        ],
        "net462": [
            "lib/net46/NuGet.LibraryModel.dll",
            "lib/net46/NuGet.LibraryModel.xml",
        ],
        "net47": [
            "lib/net46/NuGet.LibraryModel.dll",
            "lib/net46/NuGet.LibraryModel.xml",
        ],
        "net471": [
            "lib/net46/NuGet.LibraryModel.dll",
            "lib/net46/NuGet.LibraryModel.xml",
        ],
        "net472": [
            "lib/net46/NuGet.LibraryModel.dll",
            "lib/net46/NuGet.LibraryModel.xml",
        ],
        "netstandard1.6": [
            "lib/netstandard1.6/NuGet.LibraryModel.dll",
            "lib/netstandard1.6/NuGet.LibraryModel.xml",
        ],
        "netstandard2.0": [
            "lib/netstandard1.6/NuGet.LibraryModel.dll",
            "lib/netstandard1.6/NuGet.LibraryModel.xml",
        ],
    },
    mono_files = [
        "lib/net46/NuGet.LibraryModel.dll",
        "lib/net46/NuGet.LibraryModel.xml",
    ],
)
# Repository rule for the NuGet.Packaging.Core 4.9.2 package (sha256-pinned).
# Depends on @nuget.common and @nuget.versioning; *_lib select the assembly per
# target framework moniker (TFM), *_files list the shipped dll + xml docs.
nuget_package(
    name = "nuget.packaging.core",
    package = "nuget.packaging.core",
    version = "4.9.2",
    sha256 = "7e40e1692af1cf7f7d18ea52b4f396831e0e02256a95b2831233cbe3b2f8b4f3",
    core_lib = {
        "netcoreapp2.0": "lib/netstandard1.6/NuGet.Packaging.Core.dll",
        "netcoreapp2.1": "lib/netstandard1.6/NuGet.Packaging.Core.dll",
    },
    net_lib = {
        "net46": "lib/net46/NuGet.Packaging.Core.dll",
        "net461": "lib/net46/NuGet.Packaging.Core.dll",
        "net462": "lib/net46/NuGet.Packaging.Core.dll",
        "net47": "lib/net46/NuGet.Packaging.Core.dll",
        "net471": "lib/net46/NuGet.Packaging.Core.dll",
        "net472": "lib/net46/NuGet.Packaging.Core.dll",
        "netstandard1.6": "lib/netstandard1.6/NuGet.Packaging.Core.dll",
        "netstandard2.0": "lib/netstandard1.6/NuGet.Packaging.Core.dll",
    },
    mono_lib = "lib/net46/NuGet.Packaging.Core.dll",
    # NOTE(review): core_deps keys (net*/netstandard) don't match the netcoreapp*
    # keys of core_lib/core_files and duplicate net_deps — confirm macro semantics.
    core_deps = {
        "net46": [
            "@nuget.common//:net46_net",
            "@nuget.versioning//:net46_net",
        ],
        "net461": [
            "@nuget.common//:net461_net",
            "@nuget.versioning//:net461_net",
        ],
        "net462": [
            "@nuget.common//:net462_net",
            "@nuget.versioning//:net462_net",
        ],
        "net47": [
            "@nuget.common//:net47_net",
            "@nuget.versioning//:net47_net",
        ],
        "net471": [
            "@nuget.common//:net471_net",
            "@nuget.versioning//:net471_net",
        ],
        "net472": [
            "@nuget.common//:net472_net",
            "@nuget.versioning//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.common//:netstandard1.6_net",
            "@nuget.versioning//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.common//:netstandard2.0_net",
            "@nuget.versioning//:netstandard2.0_net",
        ],
    },
    net_deps = {
        "net46": [
            "@nuget.common//:net46_net",
            "@nuget.versioning//:net46_net",
        ],
        "net461": [
            "@nuget.common//:net461_net",
            "@nuget.versioning//:net461_net",
        ],
        "net462": [
            "@nuget.common//:net462_net",
            "@nuget.versioning//:net462_net",
        ],
        "net47": [
            "@nuget.common//:net47_net",
            "@nuget.versioning//:net47_net",
        ],
        "net471": [
            "@nuget.common//:net471_net",
            "@nuget.versioning//:net471_net",
        ],
        "net472": [
            "@nuget.common//:net472_net",
            "@nuget.versioning//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.common//:netstandard1.6_net",
            "@nuget.versioning//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.common//:netstandard2.0_net",
            "@nuget.versioning//:netstandard2.0_net",
        ],
    },
    mono_deps = [
        "@nuget.common//:mono",
        "@nuget.versioning//:mono",
    ],
    core_files = {
        "netcoreapp2.0": [
            "lib/netstandard1.6/NuGet.Packaging.Core.dll",
            "lib/netstandard1.6/NuGet.Packaging.Core.xml",
        ],
        "netcoreapp2.1": [
            "lib/netstandard1.6/NuGet.Packaging.Core.dll",
            "lib/netstandard1.6/NuGet.Packaging.Core.xml",
        ],
    },
    net_files = {
        "net46": [
            "lib/net46/NuGet.Packaging.Core.dll",
            "lib/net46/NuGet.Packaging.Core.xml",
        ],
        "net461": [
            "lib/net46/NuGet.Packaging.Core.dll",
            "lib/net46/NuGet.Packaging.Core.xml",
        ],
        "net462": [
            "lib/net46/NuGet.Packaging.Core.dll",
            "lib/net46/NuGet.Packaging.Core.xml",
        ],
        "net47": [
            "lib/net46/NuGet.Packaging.Core.dll",
            "lib/net46/NuGet.Packaging.Core.xml",
        ],
        "net471": [
            "lib/net46/NuGet.Packaging.Core.dll",
            "lib/net46/NuGet.Packaging.Core.xml",
        ],
        "net472": [
            "lib/net46/NuGet.Packaging.Core.dll",
            "lib/net46/NuGet.Packaging.Core.xml",
        ],
        "netstandard1.6": [
            "lib/netstandard1.6/NuGet.Packaging.Core.dll",
            "lib/netstandard1.6/NuGet.Packaging.Core.xml",
        ],
        "netstandard2.0": [
            "lib/netstandard1.6/NuGet.Packaging.Core.dll",
            "lib/netstandard1.6/NuGet.Packaging.Core.xml",
        ],
    },
    mono_files = [
        "lib/net46/NuGet.Packaging.Core.dll",
        "lib/net46/NuGet.Packaging.Core.xml",
    ],
)
# Repository rule for the NuGet.Packaging 4.9.2 package (sha256-pinned).
# Depends on @nuget.configuration, @nuget.packaging.core and @newtonsoft.json;
# *_lib select the assembly per target framework moniker (TFM), *_files list
# the shipped dll + xml docs.
nuget_package(
    name = "nuget.packaging",
    package = "nuget.packaging",
    version = "4.9.2",
    sha256 = "a096e81f1a769fc0e6173cd2673e0be47974d42e6365ca513b0639e6be047ed1",
    core_lib = {
        "netcoreapp2.0": "lib/netstandard1.6/NuGet.Packaging.dll",
        "netcoreapp2.1": "lib/netstandard1.6/NuGet.Packaging.dll",
    },
    net_lib = {
        "net46": "lib/net46/NuGet.Packaging.dll",
        "net461": "lib/net46/NuGet.Packaging.dll",
        "net462": "lib/net46/NuGet.Packaging.dll",
        "net47": "lib/net46/NuGet.Packaging.dll",
        "net471": "lib/net46/NuGet.Packaging.dll",
        "net472": "lib/net46/NuGet.Packaging.dll",
        "netstandard1.6": "lib/netstandard1.6/NuGet.Packaging.dll",
        "netstandard2.0": "lib/netstandard1.6/NuGet.Packaging.dll",
    },
    mono_lib = "lib/net46/NuGet.Packaging.dll",
    # NOTE(review): core_deps keys (net*/netstandard) don't match the netcoreapp*
    # keys of core_lib/core_files and duplicate net_deps — confirm macro semantics.
    core_deps = {
        "net46": [
            "@nuget.configuration//:net46_net",
            "@nuget.packaging.core//:net46_net",
            "@newtonsoft.json//:net46_net",
        ],
        "net461": [
            "@nuget.configuration//:net461_net",
            "@nuget.packaging.core//:net461_net",
            "@newtonsoft.json//:net461_net",
        ],
        "net462": [
            "@nuget.configuration//:net462_net",
            "@nuget.packaging.core//:net462_net",
            "@newtonsoft.json//:net462_net",
        ],
        "net47": [
            "@nuget.configuration//:net47_net",
            "@nuget.packaging.core//:net47_net",
            "@newtonsoft.json//:net47_net",
        ],
        "net471": [
            "@nuget.configuration//:net471_net",
            "@nuget.packaging.core//:net471_net",
            "@newtonsoft.json//:net471_net",
        ],
        "net472": [
            "@nuget.configuration//:net472_net",
            "@nuget.packaging.core//:net472_net",
            "@newtonsoft.json//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.configuration//:netstandard1.6_net",
            "@nuget.packaging.core//:netstandard1.6_net",
            "@newtonsoft.json//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.configuration//:netstandard2.0_net",
            "@nuget.packaging.core//:netstandard2.0_net",
            "@newtonsoft.json//:netstandard2.0_net",
        ],
    },
    net_deps = {
        "net46": [
            "@nuget.configuration//:net46_net",
            "@nuget.packaging.core//:net46_net",
            "@newtonsoft.json//:net46_net",
        ],
        "net461": [
            "@nuget.configuration//:net461_net",
            "@nuget.packaging.core//:net461_net",
            "@newtonsoft.json//:net461_net",
        ],
        "net462": [
            "@nuget.configuration//:net462_net",
            "@nuget.packaging.core//:net462_net",
            "@newtonsoft.json//:net462_net",
        ],
        "net47": [
            "@nuget.configuration//:net47_net",
            "@nuget.packaging.core//:net47_net",
            "@newtonsoft.json//:net47_net",
        ],
        "net471": [
            "@nuget.configuration//:net471_net",
            "@nuget.packaging.core//:net471_net",
            "@newtonsoft.json//:net471_net",
        ],
        "net472": [
            "@nuget.configuration//:net472_net",
            "@nuget.packaging.core//:net472_net",
            "@newtonsoft.json//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.configuration//:netstandard1.6_net",
            "@nuget.packaging.core//:netstandard1.6_net",
            "@newtonsoft.json//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.configuration//:netstandard2.0_net",
            "@nuget.packaging.core//:netstandard2.0_net",
            "@newtonsoft.json//:netstandard2.0_net",
        ],
    },
    mono_deps = [
        "@nuget.configuration//:mono",
        "@nuget.packaging.core//:mono",
        "@newtonsoft.json//:mono",
    ],
    core_files = {
        "netcoreapp2.0": [
            "lib/netstandard1.6/NuGet.Packaging.dll",
            "lib/netstandard1.6/NuGet.Packaging.xml",
        ],
        "netcoreapp2.1": [
            "lib/netstandard1.6/NuGet.Packaging.dll",
            "lib/netstandard1.6/NuGet.Packaging.xml",
        ],
    },
    net_files = {
        "net46": [
            "lib/net46/NuGet.Packaging.dll",
            "lib/net46/NuGet.Packaging.xml",
        ],
        "net461": [
            "lib/net46/NuGet.Packaging.dll",
            "lib/net46/NuGet.Packaging.xml",
        ],
        "net462": [
            "lib/net46/NuGet.Packaging.dll",
            "lib/net46/NuGet.Packaging.xml",
        ],
        "net47": [
            "lib/net46/NuGet.Packaging.dll",
            "lib/net46/NuGet.Packaging.xml",
        ],
        "net471": [
            "lib/net46/NuGet.Packaging.dll",
            "lib/net46/NuGet.Packaging.xml",
        ],
        "net472": [
            "lib/net46/NuGet.Packaging.dll",
            "lib/net46/NuGet.Packaging.xml",
        ],
        "netstandard1.6": [
            "lib/netstandard1.6/NuGet.Packaging.dll",
            "lib/netstandard1.6/NuGet.Packaging.xml",
        ],
        "netstandard2.0": [
            "lib/netstandard1.6/NuGet.Packaging.dll",
            "lib/netstandard1.6/NuGet.Packaging.xml",
        ],
    },
    mono_files = [
        "lib/net46/NuGet.Packaging.dll",
        "lib/net46/NuGet.Packaging.xml",
    ],
)
# Repository rule for the NuGet.Protocol 4.9.2 package (sha256-pinned).
# Depends on @nuget.configuration and @nuget.packaging; *_lib select the
# assembly per target framework moniker (TFM), *_files list the shipped
# dll + xml docs.
nuget_package(
    name = "nuget.protocol",
    package = "nuget.protocol",
    version = "4.9.2",
    sha256 = "f413d9ad117c08e0805bd78d48473113062e0424cf389f8d23b835bb081937fe",
    core_lib = {
        "netcoreapp2.0": "lib/netstandard1.6/NuGet.Protocol.dll",
        "netcoreapp2.1": "lib/netstandard1.6/NuGet.Protocol.dll",
    },
    net_lib = {
        "net46": "lib/net46/NuGet.Protocol.dll",
        "net461": "lib/net46/NuGet.Protocol.dll",
        "net462": "lib/net46/NuGet.Protocol.dll",
        "net47": "lib/net46/NuGet.Protocol.dll",
        "net471": "lib/net46/NuGet.Protocol.dll",
        "net472": "lib/net46/NuGet.Protocol.dll",
        "netstandard1.6": "lib/netstandard1.6/NuGet.Protocol.dll",
        "netstandard2.0": "lib/netstandard1.6/NuGet.Protocol.dll",
    },
    mono_lib = "lib/net46/NuGet.Protocol.dll",
    # NOTE(review): core_deps keys (net*/netstandard) don't match the netcoreapp*
    # keys of core_lib/core_files and duplicate net_deps — confirm macro semantics.
    core_deps = {
        "net46": [
            "@nuget.configuration//:net46_net",
            "@nuget.packaging//:net46_net",
        ],
        "net461": [
            "@nuget.configuration//:net461_net",
            "@nuget.packaging//:net461_net",
        ],
        "net462": [
            "@nuget.configuration//:net462_net",
            "@nuget.packaging//:net462_net",
        ],
        "net47": [
            "@nuget.configuration//:net47_net",
            "@nuget.packaging//:net47_net",
        ],
        "net471": [
            "@nuget.configuration//:net471_net",
            "@nuget.packaging//:net471_net",
        ],
        "net472": [
            "@nuget.configuration//:net472_net",
            "@nuget.packaging//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.configuration//:netstandard1.6_net",
            "@nuget.packaging//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.configuration//:netstandard2.0_net",
            "@nuget.packaging//:netstandard2.0_net",
        ],
    },
    net_deps = {
        "net46": [
            "@nuget.configuration//:net46_net",
            "@nuget.packaging//:net46_net",
        ],
        "net461": [
            "@nuget.configuration//:net461_net",
            "@nuget.packaging//:net461_net",
        ],
        "net462": [
            "@nuget.configuration//:net462_net",
            "@nuget.packaging//:net462_net",
        ],
        "net47": [
            "@nuget.configuration//:net47_net",
            "@nuget.packaging//:net47_net",
        ],
        "net471": [
            "@nuget.configuration//:net471_net",
            "@nuget.packaging//:net471_net",
        ],
        "net472": [
            "@nuget.configuration//:net472_net",
            "@nuget.packaging//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.configuration//:netstandard1.6_net",
            "@nuget.packaging//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.configuration//:netstandard2.0_net",
            "@nuget.packaging//:netstandard2.0_net",
        ],
    },
    mono_deps = [
        "@nuget.configuration//:mono",
        "@nuget.packaging//:mono",
    ],
    core_files = {
        "netcoreapp2.0": [
            "lib/netstandard1.6/NuGet.Protocol.dll",
            "lib/netstandard1.6/NuGet.Protocol.xml",
        ],
        "netcoreapp2.1": [
            "lib/netstandard1.6/NuGet.Protocol.dll",
            "lib/netstandard1.6/NuGet.Protocol.xml",
        ],
    },
    net_files = {
        "net46": [
            "lib/net46/NuGet.Protocol.dll",
            "lib/net46/NuGet.Protocol.xml",
        ],
        "net461": [
            "lib/net46/NuGet.Protocol.dll",
            "lib/net46/NuGet.Protocol.xml",
        ],
        "net462": [
            "lib/net46/NuGet.Protocol.dll",
            "lib/net46/NuGet.Protocol.xml",
        ],
        "net47": [
            "lib/net46/NuGet.Protocol.dll",
            "lib/net46/NuGet.Protocol.xml",
        ],
        "net471": [
            "lib/net46/NuGet.Protocol.dll",
            "lib/net46/NuGet.Protocol.xml",
        ],
        "net472": [
            "lib/net46/NuGet.Protocol.dll",
            "lib/net46/NuGet.Protocol.xml",
        ],
        "netstandard1.6": [
            "lib/netstandard1.6/NuGet.Protocol.dll",
            "lib/netstandard1.6/NuGet.Protocol.xml",
        ],
        "netstandard2.0": [
            "lib/netstandard1.6/NuGet.Protocol.dll",
            "lib/netstandard1.6/NuGet.Protocol.xml",
        ],
    },
    mono_files = [
        "lib/net46/NuGet.Protocol.dll",
        "lib/net46/NuGet.Protocol.xml",
    ],
)
# Repository rule for the NuGet.Credentials 4.9.2 package (sha256-pinned).
# Depends on @nuget.protocol; *_lib select the assembly per target framework
# moniker (TFM). Unlike the sibling packages, *_files ship only the dll
# (no xml doc file is listed for this package).
nuget_package(
    name = "nuget.credentials",
    package = "nuget.credentials",
    version = "4.9.2",
    sha256 = "58a309f7bdd96871d15eb19952099b4d6bda7f3f9f8644969a55f7fe4b92ae33",
    core_lib = {
        "netcoreapp2.0": "lib/netstandard1.6/NuGet.Credentials.dll",
        "netcoreapp2.1": "lib/netstandard1.6/NuGet.Credentials.dll",
    },
    net_lib = {
        "net46": "lib/net46/NuGet.Credentials.dll",
        "net461": "lib/net46/NuGet.Credentials.dll",
        "net462": "lib/net46/NuGet.Credentials.dll",
        "net47": "lib/net46/NuGet.Credentials.dll",
        "net471": "lib/net46/NuGet.Credentials.dll",
        "net472": "lib/net46/NuGet.Credentials.dll",
        "netstandard1.6": "lib/netstandard1.6/NuGet.Credentials.dll",
        "netstandard2.0": "lib/netstandard1.6/NuGet.Credentials.dll",
    },
    mono_lib = "lib/net46/NuGet.Credentials.dll",
    # NOTE(review): core_deps keys (net*/netstandard) don't match the netcoreapp*
    # keys of core_lib/core_files and duplicate net_deps — confirm macro semantics.
    core_deps = {
        "net46": [
            "@nuget.protocol//:net46_net",
        ],
        "net461": [
            "@nuget.protocol//:net461_net",
        ],
        "net462": [
            "@nuget.protocol//:net462_net",
        ],
        "net47": [
            "@nuget.protocol//:net47_net",
        ],
        "net471": [
            "@nuget.protocol//:net471_net",
        ],
        "net472": [
            "@nuget.protocol//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.protocol//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.protocol//:netstandard2.0_net",
        ],
    },
    net_deps = {
        "net46": [
            "@nuget.protocol//:net46_net",
        ],
        "net461": [
            "@nuget.protocol//:net461_net",
        ],
        "net462": [
            "@nuget.protocol//:net462_net",
        ],
        "net47": [
            "@nuget.protocol//:net47_net",
        ],
        "net471": [
            "@nuget.protocol//:net471_net",
        ],
        "net472": [
            "@nuget.protocol//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.protocol//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.protocol//:netstandard2.0_net",
        ],
    },
    mono_deps = [
        "@nuget.protocol//:mono",
    ],
    core_files = {
        "netcoreapp2.0": [
            "lib/netstandard1.6/NuGet.Credentials.dll",
        ],
        "netcoreapp2.1": [
            "lib/netstandard1.6/NuGet.Credentials.dll",
        ],
    },
    net_files = {
        "net46": [
            "lib/net46/NuGet.Credentials.dll",
        ],
        "net461": [
            "lib/net46/NuGet.Credentials.dll",
        ],
        "net462": [
            "lib/net46/NuGet.Credentials.dll",
        ],
        "net47": [
            "lib/net46/NuGet.Credentials.dll",
        ],
        "net471": [
            "lib/net46/NuGet.Credentials.dll",
        ],
        "net472": [
            "lib/net46/NuGet.Credentials.dll",
        ],
        "netstandard1.6": [
            "lib/netstandard1.6/NuGet.Credentials.dll",
        ],
        "netstandard2.0": [
            "lib/netstandard1.6/NuGet.Credentials.dll",
        ],
    },
    mono_files = [
        "lib/net46/NuGet.Credentials.dll",
    ],
)
# Repository rule for the NuGet.DependencyResolver.Core 4.9.2 package
# (sha256-pinned). Depends on @nuget.librarymodel and @nuget.protocol;
# *_lib select the assembly per target framework moniker (TFM), *_files
# list the shipped dll + xml docs.
nuget_package(
    name = "nuget.dependencyresolver.core",
    package = "nuget.dependencyresolver.core",
    version = "4.9.2",
    sha256 = "42b917c6d65394cdc57175b8cea9035b3ecd8d8efc321ba0c6911ee5ebc42b7b",
    core_lib = {
        "netcoreapp2.0": "lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
        "netcoreapp2.1": "lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
    },
    net_lib = {
        "net46": "lib/net46/NuGet.DependencyResolver.Core.dll",
        "net461": "lib/net46/NuGet.DependencyResolver.Core.dll",
        "net462": "lib/net46/NuGet.DependencyResolver.Core.dll",
        "net47": "lib/net46/NuGet.DependencyResolver.Core.dll",
        "net471": "lib/net46/NuGet.DependencyResolver.Core.dll",
        "net472": "lib/net46/NuGet.DependencyResolver.Core.dll",
        "netstandard1.6": "lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
        "netstandard2.0": "lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
    },
    mono_lib = "lib/net46/NuGet.DependencyResolver.Core.dll",
    # NOTE(review): core_deps keys (net*/netstandard) don't match the netcoreapp*
    # keys of core_lib/core_files and duplicate net_deps — confirm macro semantics.
    core_deps = {
        "net46": [
            "@nuget.librarymodel//:net46_net",
            "@nuget.protocol//:net46_net",
        ],
        "net461": [
            "@nuget.librarymodel//:net461_net",
            "@nuget.protocol//:net461_net",
        ],
        "net462": [
            "@nuget.librarymodel//:net462_net",
            "@nuget.protocol//:net462_net",
        ],
        "net47": [
            "@nuget.librarymodel//:net47_net",
            "@nuget.protocol//:net47_net",
        ],
        "net471": [
            "@nuget.librarymodel//:net471_net",
            "@nuget.protocol//:net471_net",
        ],
        "net472": [
            "@nuget.librarymodel//:net472_net",
            "@nuget.protocol//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.librarymodel//:netstandard1.6_net",
            "@nuget.protocol//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.librarymodel//:netstandard2.0_net",
            "@nuget.protocol//:netstandard2.0_net",
        ],
    },
    net_deps = {
        "net46": [
            "@nuget.librarymodel//:net46_net",
            "@nuget.protocol//:net46_net",
        ],
        "net461": [
            "@nuget.librarymodel//:net461_net",
            "@nuget.protocol//:net461_net",
        ],
        "net462": [
            "@nuget.librarymodel//:net462_net",
            "@nuget.protocol//:net462_net",
        ],
        "net47": [
            "@nuget.librarymodel//:net47_net",
            "@nuget.protocol//:net47_net",
        ],
        "net471": [
            "@nuget.librarymodel//:net471_net",
            "@nuget.protocol//:net471_net",
        ],
        "net472": [
            "@nuget.librarymodel//:net472_net",
            "@nuget.protocol//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.librarymodel//:netstandard1.6_net",
            "@nuget.protocol//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.librarymodel//:netstandard2.0_net",
            "@nuget.protocol//:netstandard2.0_net",
        ],
    },
    mono_deps = [
        "@nuget.librarymodel//:mono",
        "@nuget.protocol//:mono",
    ],
    core_files = {
        "netcoreapp2.0": [
            "lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
            "lib/netstandard1.6/NuGet.DependencyResolver.Core.xml",
        ],
        "netcoreapp2.1": [
            "lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
            "lib/netstandard1.6/NuGet.DependencyResolver.Core.xml",
        ],
    },
    net_files = {
        "net46": [
            "lib/net46/NuGet.DependencyResolver.Core.dll",
            "lib/net46/NuGet.DependencyResolver.Core.xml",
        ],
        "net461": [
            "lib/net46/NuGet.DependencyResolver.Core.dll",
            "lib/net46/NuGet.DependencyResolver.Core.xml",
        ],
        "net462": [
            "lib/net46/NuGet.DependencyResolver.Core.dll",
            "lib/net46/NuGet.DependencyResolver.Core.xml",
        ],
        "net47": [
            "lib/net46/NuGet.DependencyResolver.Core.dll",
            "lib/net46/NuGet.DependencyResolver.Core.xml",
        ],
        "net471": [
            "lib/net46/NuGet.DependencyResolver.Core.dll",
            "lib/net46/NuGet.DependencyResolver.Core.xml",
        ],
        "net472": [
            "lib/net46/NuGet.DependencyResolver.Core.dll",
            "lib/net46/NuGet.DependencyResolver.Core.xml",
        ],
        "netstandard1.6": [
            "lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
            "lib/netstandard1.6/NuGet.DependencyResolver.Core.xml",
        ],
        "netstandard2.0": [
            "lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
            "lib/netstandard1.6/NuGet.DependencyResolver.Core.xml",
        ],
    },
    mono_files = [
        "lib/net46/NuGet.DependencyResolver.Core.dll",
        "lib/net46/NuGet.DependencyResolver.Core.xml",
    ],
)
# Repository rule for the NuGet.ProjectModel 4.9.2 package (sha256-pinned).
# Depends on @nuget.dependencyresolver.core; *_lib select the assembly per
# target framework moniker (TFM), *_files list the shipped dll + xml docs.
nuget_package(
    name = "nuget.projectmodel",
    package = "nuget.projectmodel",
    version = "4.9.2",
    sha256 = "b13a9db9d88b0b8d568c367dd837437793e8f57afb61beda4aa485457ddc23b4",
    core_lib = {
        "netcoreapp2.0": "lib/netstandard1.6/NuGet.ProjectModel.dll",
        "netcoreapp2.1": "lib/netstandard1.6/NuGet.ProjectModel.dll",
    },
    net_lib = {
        "net46": "lib/net46/NuGet.ProjectModel.dll",
        "net461": "lib/net46/NuGet.ProjectModel.dll",
        "net462": "lib/net46/NuGet.ProjectModel.dll",
        "net47": "lib/net46/NuGet.ProjectModel.dll",
        "net471": "lib/net46/NuGet.ProjectModel.dll",
        "net472": "lib/net46/NuGet.ProjectModel.dll",
        "netstandard1.6": "lib/netstandard1.6/NuGet.ProjectModel.dll",
        "netstandard2.0": "lib/netstandard1.6/NuGet.ProjectModel.dll",
    },
    mono_lib = "lib/net46/NuGet.ProjectModel.dll",
    # NOTE(review): core_deps keys (net*/netstandard) don't match the netcoreapp*
    # keys of core_lib/core_files and duplicate net_deps — confirm macro semantics.
    core_deps = {
        "net46": [
            "@nuget.dependencyresolver.core//:net46_net",
        ],
        "net461": [
            "@nuget.dependencyresolver.core//:net461_net",
        ],
        "net462": [
            "@nuget.dependencyresolver.core//:net462_net",
        ],
        "net47": [
            "@nuget.dependencyresolver.core//:net47_net",
        ],
        "net471": [
            "@nuget.dependencyresolver.core//:net471_net",
        ],
        "net472": [
            "@nuget.dependencyresolver.core//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.dependencyresolver.core//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.dependencyresolver.core//:netstandard2.0_net",
        ],
    },
    net_deps = {
        "net46": [
            "@nuget.dependencyresolver.core//:net46_net",
        ],
        "net461": [
            "@nuget.dependencyresolver.core//:net461_net",
        ],
        "net462": [
            "@nuget.dependencyresolver.core//:net462_net",
        ],
        "net47": [
            "@nuget.dependencyresolver.core//:net47_net",
        ],
        "net471": [
            "@nuget.dependencyresolver.core//:net471_net",
        ],
        "net472": [
            "@nuget.dependencyresolver.core//:net472_net",
        ],
        "netstandard1.6": [
            "@nuget.dependencyresolver.core//:netstandard1.6_net",
        ],
        "netstandard2.0": [
            "@nuget.dependencyresolver.core//:netstandard2.0_net",
        ],
    },
    mono_deps = [
        "@nuget.dependencyresolver.core//:mono",
    ],
    core_files = {
        "netcoreapp2.0": [
            "lib/netstandard1.6/NuGet.ProjectModel.dll",
            "lib/netstandard1.6/NuGet.ProjectModel.xml",
        ],
        "netcoreapp2.1": [
            "lib/netstandard1.6/NuGet.ProjectModel.dll",
            "lib/netstandard1.6/NuGet.ProjectModel.xml",
        ],
    },
    net_files = {
        "net46": [
            "lib/net46/NuGet.ProjectModel.dll",
            "lib/net46/NuGet.ProjectModel.xml",
        ],
        "net461": [
            "lib/net46/NuGet.ProjectModel.dll",
            "lib/net46/NuGet.ProjectModel.xml",
        ],
        "net462": [
            "lib/net46/NuGet.ProjectModel.dll",
            "lib/net46/NuGet.ProjectModel.xml",
        ],
        "net47": [
            "lib/net46/NuGet.ProjectModel.dll",
            "lib/net46/NuGet.ProjectModel.xml",
        ],
        "net471": [
            "lib/net46/NuGet.ProjectModel.dll",
            "lib/net46/NuGet.ProjectModel.xml",
        ],
        "net472": [
            "lib/net46/NuGet.ProjectModel.dll",
            "lib/net46/NuGet.ProjectModel.xml",
        ],
        "netstandard1.6": [
            "lib/netstandard1.6/NuGet.ProjectModel.dll",
            "lib/netstandard1.6/NuGet.ProjectModel.xml",
        ],
        "netstandard2.0": [
            "lib/netstandard1.6/NuGet.ProjectModel.dll",
            "lib/netstandard1.6/NuGet.ProjectModel.xml",
        ],
    },
    mono_files = [
        "lib/net46/NuGet.ProjectModel.dll",
        "lib/net46/NuGet.ProjectModel.xml",
    ],
)
nuget_package(
name = "nuget.commands",
package = "nuget.commands",
version = "4.9.2",
sha256 = "50566e04602b90f0032fea33f3972a3dbbddc5fe799357e5a18e6a2f8583b26d",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.Commands.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.Commands.dll",
},
net_lib = {
"net46": "lib/net46/NuGet.Commands.dll",
"net461": "lib/net46/NuGet.Commands.dll",
"net462": "lib/net46/NuGet.Commands.dll",
"net47": "lib/net46/NuGet.Commands.dll",
"net471": "lib/net46/NuGet.Commands.dll",
"net472": "lib/net46/NuGet.Commands.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.Commands.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.Commands.dll",
},
mono_lib = "lib/net46/NuGet.Commands.dll",
core_deps = {
"net46": [
"@nuget.credentials//:net46_net",
"@nuget.projectmodel//:net46_net",
],
"net461": [
"@nuget.credentials//:net461_net",
"@nuget.projectmodel//:net461_net",
],
"net462": [
"@nuget.credentials//:net462_net",
"@nuget.projectmodel//:net462_net",
],
"net47": [
"@nuget.credentials//:net47_net",
"@nuget.projectmodel//:net47_net",
],
"net471": [
"@nuget.credentials//:net471_net",
"@nuget.projectmodel//:net471_net",
],
"net472": [
"@nuget.credentials//:net472_net",
"@nuget.projectmodel//:net472_net",
],
"netstandard1.6": [
"@nuget.credentials//:netstandard1.6_net",
"@nuget.projectmodel//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.credentials//:netstandard2.0_net",
"@nuget.projectmodel//:netstandard2.0_net",
],
},
net_deps = {
"net46": [
"@nuget.credentials//:net46_net",
"@nuget.projectmodel//:net46_net",
],
"net461": [
"@nuget.credentials//:net461_net",
"@nuget.projectmodel//:net461_net",
],
"net462": [
"@nuget.credentials//:net462_net",
"@nuget.projectmodel//:net462_net",
],
"net47": [
"@nuget.credentials//:net47_net",
"@nuget.projectmodel//:net47_net",
],
"net471": [
"@nuget.credentials//:net471_net",
"@nuget.projectmodel//:net471_net",
],
"net472": [
"@nuget.credentials//:net472_net",
"@nuget.projectmodel//:net472_net",
],
"netstandard1.6": [
"@nuget.credentials//:netstandard1.6_net",
"@nuget.projectmodel//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.credentials//:netstandard2.0_net",
"@nuget.projectmodel//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.credentials//:mono",
"@nuget.projectmodel//:mono",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.Commands.dll",
"lib/netstandard1.6/NuGet.Commands.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.Commands.dll",
"lib/netstandard1.6/NuGet.Commands.xml",
],
},
net_files = {
"net46": [
"lib/net46/NuGet.Commands.dll",
"lib/net46/NuGet.Commands.xml",
],
"net461": [
"lib/net46/NuGet.Commands.dll",
"lib/net46/NuGet.Commands.xml",
],
"net462": [
"lib/net46/NuGet.Commands.dll",
"lib/net46/NuGet.Commands.xml",
],
"net47": [
"lib/net46/NuGet.Commands.dll",
"lib/net46/NuGet.Commands.xml",
],
"net471": [
"lib/net46/NuGet.Commands.dll",
"lib/net46/NuGet.Commands.xml",
],
"net472": [
"lib/net46/NuGet.Commands.dll",
"lib/net46/NuGet.Commands.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.Commands.dll",
"lib/netstandard1.6/NuGet.Commands.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.Commands.dll",
"lib/netstandard1.6/NuGet.Commands.xml",
],
},
mono_files = [
"lib/net46/NuGet.Commands.dll",
"lib/net46/NuGet.Commands.xml",
],
)
nuget_package(
name = "nuget.resolver",
package = "nuget.resolver",
version = "4.9.2",
sha256 = "2ab469a5aaac1d5150fcdda976dec8f555fcb8baf61f191bc871b9390f9a30c2",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.Resolver.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.Resolver.dll",
},
net_lib = {
"net46": "lib/net46/NuGet.Resolver.dll",
"net461": "lib/net46/NuGet.Resolver.dll",
"net462": "lib/net46/NuGet.Resolver.dll",
"net47": "lib/net46/NuGet.Resolver.dll",
"net471": "lib/net46/NuGet.Resolver.dll",
"net472": "lib/net46/NuGet.Resolver.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.Resolver.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.Resolver.dll",
},
mono_lib = "lib/net46/NuGet.Resolver.dll",
core_deps = {
"net46": [
"@nuget.protocol//:net46_net",
],
"net461": [
"@nuget.protocol//:net461_net",
],
"net462": [
"@nuget.protocol//:net462_net",
],
"net47": [
"@nuget.protocol//:net47_net",
],
"net471": [
"@nuget.protocol//:net471_net",
],
"net472": [
"@nuget.protocol//:net472_net",
],
"netstandard1.6": [
"@nuget.protocol//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.protocol//:netstandard2.0_net",
],
},
net_deps = {
"net46": [
"@nuget.protocol//:net46_net",
],
"net461": [
"@nuget.protocol//:net461_net",
],
"net462": [
"@nuget.protocol//:net462_net",
],
"net47": [
"@nuget.protocol//:net47_net",
],
"net471": [
"@nuget.protocol//:net471_net",
],
"net472": [
"@nuget.protocol//:net472_net",
],
"netstandard1.6": [
"@nuget.protocol//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.protocol//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.protocol//:mono",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.Resolver.dll",
"lib/netstandard1.6/NuGet.Resolver.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.Resolver.dll",
"lib/netstandard1.6/NuGet.Resolver.xml",
],
},
net_files = {
"net46": [
"lib/net46/NuGet.Resolver.dll",
"lib/net46/NuGet.Resolver.xml",
],
"net461": [
"lib/net46/NuGet.Resolver.dll",
"lib/net46/NuGet.Resolver.xml",
],
"net462": [
"lib/net46/NuGet.Resolver.dll",
"lib/net46/NuGet.Resolver.xml",
],
"net47": [
"lib/net46/NuGet.Resolver.dll",
"lib/net46/NuGet.Resolver.xml",
],
"net471": [
"lib/net46/NuGet.Resolver.dll",
"lib/net46/NuGet.Resolver.xml",
],
"net472": [
"lib/net46/NuGet.Resolver.dll",
"lib/net46/NuGet.Resolver.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.Resolver.dll",
"lib/netstandard1.6/NuGet.Resolver.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.Resolver.dll",
"lib/netstandard1.6/NuGet.Resolver.xml",
],
},
mono_files = [
"lib/net46/NuGet.Resolver.dll",
"lib/net46/NuGet.Resolver.xml",
],
)
nuget_package(
name = "nuget.packagemanagement.netstandard",
package = "nuget.packagemanagement.netstandard",
version = "4.9.2",
sha256 = "c51c50b2da22e2f8649bbf8a151a04fdd119e631141bf8b88e4ebb4335f5ff7e",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.PackageManagement.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.PackageManagement.dll",
},
net_lib = {
"net461": "lib/netstandard1.6/NuGet.PackageManagement.dll",
"net462": "lib/netstandard1.6/NuGet.PackageManagement.dll",
"net47": "lib/netstandard1.6/NuGet.PackageManagement.dll",
"net471": "lib/netstandard1.6/NuGet.PackageManagement.dll",
"net472": "lib/netstandard1.6/NuGet.PackageManagement.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.PackageManagement.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.PackageManagement.dll",
},
mono_lib = "lib/netstandard1.6/NuGet.PackageManagement.dll",
core_deps = {
"net461": [
"@nuget.commands//:net461_net",
"@nuget.resolver//:net461_net",
],
"net462": [
"@nuget.commands//:net462_net",
"@nuget.resolver//:net462_net",
],
"net47": [
"@nuget.commands//:net47_net",
"@nuget.resolver//:net47_net",
],
"net471": [
"@nuget.commands//:net471_net",
"@nuget.resolver//:net471_net",
],
"net472": [
"@nuget.commands//:net472_net",
"@nuget.resolver//:net472_net",
],
"netstandard1.6": [
"@nuget.commands//:netstandard1.6_net",
"@nuget.resolver//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.commands//:netstandard2.0_net",
"@nuget.resolver//:netstandard2.0_net",
],
},
net_deps = {
"net461": [
"@nuget.commands//:net461_net",
"@nuget.resolver//:net461_net",
],
"net462": [
"@nuget.commands//:net462_net",
"@nuget.resolver//:net462_net",
],
"net47": [
"@nuget.commands//:net47_net",
"@nuget.resolver//:net47_net",
],
"net471": [
"@nuget.commands//:net471_net",
"@nuget.resolver//:net471_net",
],
"net472": [
"@nuget.commands//:net472_net",
"@nuget.resolver//:net472_net",
],
"netstandard1.6": [
"@nuget.commands//:netstandard1.6_net",
"@nuget.resolver//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.commands//:netstandard2.0_net",
"@nuget.resolver//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.commands//:mono",
"@nuget.resolver//:mono",
"@io_bazel_rules_dotnet//dotnet/stdlib:netstandard.library.dll",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
},
net_files = {
"net461": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
"net462": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
"net47": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
"net471": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
"net472": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
},
mono_files = [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
)
nuget_package(
name = "commandlineparser",
package = "commandlineparser",
version = "2.6.0",
sha256 = "76bdbc991c9f416cc1e775b17ce5d76cd0f851dcafaf096cbc433698b77f4dd5",
core_lib = {
"netcoreapp2.0": "lib/netstandard2.0/CommandLine.dll",
"netcoreapp2.1": "lib/netstandard2.0/CommandLine.dll",
},
net_lib = {
"net45": "lib/net45/CommandLine.dll",
"net451": "lib/net45/CommandLine.dll",
"net452": "lib/net45/CommandLine.dll",
"net46": "lib/net45/CommandLine.dll",
"net461": "lib/net461/CommandLine.dll",
"net462": "lib/net461/CommandLine.dll",
"net47": "lib/net461/CommandLine.dll",
"net471": "lib/net461/CommandLine.dll",
"net472": "lib/net461/CommandLine.dll",
"netstandard2.0": "lib/netstandard2.0/CommandLine.dll",
},
mono_lib = "lib/net461/CommandLine.dll",
core_files = {
"netcoreapp2.0": [
"lib/netstandard2.0/CommandLine.dll",
"lib/netstandard2.0/CommandLine.xml",
],
"netcoreapp2.1": [
"lib/netstandard2.0/CommandLine.dll",
"lib/netstandard2.0/CommandLine.xml",
],
},
net_files = {
"net45": [
"lib/net45/CommandLine.dll",
"lib/net45/CommandLine.xml",
],
"net451": [
"lib/net45/CommandLine.dll",
"lib/net45/CommandLine.xml",
],
"net452": [
"lib/net45/CommandLine.dll",
"lib/net45/CommandLine.xml",
],
"net46": [
"lib/net45/CommandLine.dll",
"lib/net45/CommandLine.xml",
],
"net461": [
"lib/net461/CommandLine.dll",
"lib/net461/CommandLine.xml",
],
"net462": [
"lib/net461/CommandLine.dll",
"lib/net461/CommandLine.xml",
],
"net47": [
"lib/net461/CommandLine.dll",
"lib/net461/CommandLine.xml",
],
"net471": [
"lib/net461/CommandLine.dll",
"lib/net461/CommandLine.xml",
],
"net472": [
"lib/net461/CommandLine.dll",
"lib/net461/CommandLine.xml",
],
"netstandard2.0": [
"lib/netstandard2.0/CommandLine.dll",
"lib/netstandard2.0/CommandLine.xml",
],
},
mono_files = [
"lib/net461/CommandLine.dll",
"lib/net461/CommandLine.xml",
],
)
### End of generated by the tool
| 36.193939 | 85 | 0.471059 | load("@io_bazel_rules_dotnet//dotnet/private:rules/nuget.bzl", "nuget_package")
def dotnet_repositories_nuget():
etcore.platforms",
package = "microsoft.netcore.platforms",
version = "1.1.0",
sha256 = "15e338d24b5c39b4099389cc612841eb51ff13c07bb4829f97d39b27420e7023",
)
nuget_package(
name = "newtonsoft.json",
package = "newtonsoft.json",
version = "9.0.1",
sha256 = "998081ae052120917346e2cb57d488888147a2fcdf47c52ea9f83a7b4f049e55",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netcoreapp2.1": "lib/netstandard1.0/Newtonsoft.Json.dll",
},
net_lib = {
"net45": "lib/net45/Newtonsoft.Json.dll",
"net451": "lib/net45/Newtonsoft.Json.dll",
"net452": "lib/net45/Newtonsoft.Json.dll",
"net46": "lib/net45/Newtonsoft.Json.dll",
"net461": "lib/net45/Newtonsoft.Json.dll",
"net462": "lib/net45/Newtonsoft.Json.dll",
"net47": "lib/net45/Newtonsoft.Json.dll",
"net471": "lib/net45/Newtonsoft.Json.dll",
"net472": "lib/net45/Newtonsoft.Json.dll",
"netstandard1.0": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netstandard1.1": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netstandard1.2": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netstandard1.3": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netstandard1.4": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netstandard1.5": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netstandard1.6": "lib/netstandard1.0/Newtonsoft.Json.dll",
"netstandard2.0": "lib/netstandard1.0/Newtonsoft.Json.dll",
},
mono_lib = "lib/net45/Newtonsoft.Json.dll",
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netcoreapp2.1": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
},
net_files = {
"net45": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net451": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net452": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net46": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net461": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net462": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net47": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net471": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"net472": [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard1.0": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard1.1": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard1.2": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard1.3": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard1.4": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard1.5": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard1.6": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
"netstandard2.0": [
"lib/netstandard1.0/Newtonsoft.Json.dll",
"lib/netstandard1.0/Newtonsoft.Json.xml",
"tools/install.ps1",
],
},
mono_files = [
"lib/net45/Newtonsoft.Json.dll",
"lib/net45/Newtonsoft.Json.xml",
"tools/install.ps1",
],
)
nuget_package(
name = "nuget.frameworks",
package = "nuget.frameworks",
version = "4.9.2",
sha256 = "e461391653e748cfcb72f8dbf81ae0c7dba87ce3a66d8668a4be0245986ff6de",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.Frameworks.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.Frameworks.dll",
},
net_lib = {
"net45": "lib/net40/NuGet.Frameworks.dll",
"net451": "lib/net40/NuGet.Frameworks.dll",
"net452": "lib/net40/NuGet.Frameworks.dll",
"net46": "lib/net46/NuGet.Frameworks.dll",
"net461": "lib/net46/NuGet.Frameworks.dll",
"net462": "lib/net46/NuGet.Frameworks.dll",
"net47": "lib/net46/NuGet.Frameworks.dll",
"net471": "lib/net46/NuGet.Frameworks.dll",
"net472": "lib/net46/NuGet.Frameworks.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.Frameworks.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.Frameworks.dll",
},
mono_lib = "lib/net46/NuGet.Frameworks.dll",
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.Frameworks.dll",
"lib/netstandard1.6/NuGet.Frameworks.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.Frameworks.dll",
"lib/netstandard1.6/NuGet.Frameworks.xml",
],
},
net_files = {
"net45": [
"lib/net40/NuGet.Frameworks.dll",
"lib/net40/NuGet.Frameworks.xml",
],
"net451": [
"lib/net40/NuGet.Frameworks.dll",
"lib/net40/NuGet.Frameworks.xml",
],
"net452": [
"lib/net40/NuGet.Frameworks.dll",
"lib/net40/NuGet.Frameworks.xml",
],
"net46": [
"lib/net46/NuGet.Frameworks.dll",
"lib/net46/NuGet.Frameworks.xml",
],
"net461": [
"lib/net46/NuGet.Frameworks.dll",
"lib/net46/NuGet.Frameworks.xml",
],
"net462": [
"lib/net46/NuGet.Frameworks.dll",
"lib/net46/NuGet.Frameworks.xml",
],
"net47": [
"lib/net46/NuGet.Frameworks.dll",
"lib/net46/NuGet.Frameworks.xml",
],
"net471": [
"lib/net46/NuGet.Frameworks.dll",
"lib/net46/NuGet.Frameworks.xml",
],
"net472": [
"lib/net46/NuGet.Frameworks.dll",
"lib/net46/NuGet.Frameworks.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.Frameworks.dll",
"lib/netstandard1.6/NuGet.Frameworks.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.Frameworks.dll",
"lib/netstandard1.6/NuGet.Frameworks.xml",
],
},
mono_files = [
"lib/net46/NuGet.Frameworks.dll",
"lib/net46/NuGet.Frameworks.xml",
],
)
nuget_package(
name = "nuget.common",
package = "nuget.common",
version = "4.9.2",
sha256 = "001e8aaae2c07f4914735895b5106ebedc372b2b70dd9fa583f4ee59e943bae8",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.Common.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.Common.dll",
},
net_lib = {
"net46": "lib/net46/NuGet.Common.dll",
"net461": "lib/net46/NuGet.Common.dll",
"net462": "lib/net46/NuGet.Common.dll",
"net47": "lib/net46/NuGet.Common.dll",
"net471": "lib/net46/NuGet.Common.dll",
"net472": "lib/net46/NuGet.Common.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.Common.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.Common.dll",
},
mono_lib = "lib/net46/NuGet.Common.dll",
core_deps = {
"net46": [
"@nuget.frameworks//:net46_net",
],
"net461": [
"@nuget.frameworks//:net461_net",
],
"net462": [
"@nuget.frameworks//:net462_net",
],
"net47": [
"@nuget.frameworks//:net47_net",
],
"net471": [
"@nuget.frameworks//:net471_net",
],
"net472": [
"@nuget.frameworks//:net472_net",
],
"netstandard1.6": [
"@nuget.frameworks//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.frameworks//:netstandard2.0_net",
],
},
net_deps = {
"net46": [
"@nuget.frameworks//:net46_net",
],
"net461": [
"@nuget.frameworks//:net461_net",
],
"net462": [
"@nuget.frameworks//:net462_net",
],
"net47": [
"@nuget.frameworks//:net47_net",
],
"net471": [
"@nuget.frameworks//:net471_net",
],
"net472": [
"@nuget.frameworks//:net472_net",
],
"netstandard1.6": [
"@nuget.frameworks//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.frameworks//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.frameworks//:mono",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.Common.dll",
"lib/netstandard1.6/NuGet.Common.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.Common.dll",
"lib/netstandard1.6/NuGet.Common.xml",
],
},
net_files = {
"net46": [
"lib/net46/NuGet.Common.dll",
"lib/net46/NuGet.Common.xml",
],
"net461": [
"lib/net46/NuGet.Common.dll",
"lib/net46/NuGet.Common.xml",
],
"net462": [
"lib/net46/NuGet.Common.dll",
"lib/net46/NuGet.Common.xml",
],
"net47": [
"lib/net46/NuGet.Common.dll",
"lib/net46/NuGet.Common.xml",
],
"net471": [
"lib/net46/NuGet.Common.dll",
"lib/net46/NuGet.Common.xml",
],
"net472": [
"lib/net46/NuGet.Common.dll",
"lib/net46/NuGet.Common.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.Common.dll",
"lib/netstandard1.6/NuGet.Common.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.Common.dll",
"lib/netstandard1.6/NuGet.Common.xml",
],
},
mono_files = [
"lib/net46/NuGet.Common.dll",
"lib/net46/NuGet.Common.xml",
],
)
nuget_package(
name = "nuget.configuration",
package = "nuget.configuration",
version = "4.9.2",
sha256 = "103fd9eab88ececa0190b041c3ae6eaf470929280a35c7f444ffd6ad6b0328b6",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.Configuration.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.Configuration.dll",
},
net_lib = {
"net46": "lib/net46/NuGet.Configuration.dll",
"net461": "lib/net46/NuGet.Configuration.dll",
"net462": "lib/net46/NuGet.Configuration.dll",
"net47": "lib/net46/NuGet.Configuration.dll",
"net471": "lib/net46/NuGet.Configuration.dll",
"net472": "lib/net46/NuGet.Configuration.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.Configuration.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.Configuration.dll",
},
mono_lib = "lib/net46/NuGet.Configuration.dll",
core_deps = {
"net46": [
"@nuget.common//:net46_net",
],
"net461": [
"@nuget.common//:net461_net",
],
"net462": [
"@nuget.common//:net462_net",
],
"net47": [
"@nuget.common//:net47_net",
],
"net471": [
"@nuget.common//:net471_net",
],
"net472": [
"@nuget.common//:net472_net",
],
"netstandard1.6": [
"@nuget.common//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.common//:netstandard2.0_net",
],
},
net_deps = {
"net46": [
"@nuget.common//:net46_net",
],
"net461": [
"@nuget.common//:net461_net",
],
"net462": [
"@nuget.common//:net462_net",
],
"net47": [
"@nuget.common//:net47_net",
],
"net471": [
"@nuget.common//:net471_net",
],
"net472": [
"@nuget.common//:net472_net",
],
"netstandard1.6": [
"@nuget.common//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.common//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.common//:mono",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.Configuration.dll",
"lib/netstandard1.6/NuGet.Configuration.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.Configuration.dll",
"lib/netstandard1.6/NuGet.Configuration.xml",
],
},
net_files = {
"net46": [
"lib/net46/NuGet.Configuration.dll",
"lib/net46/NuGet.Configuration.xml",
],
"net461": [
"lib/net46/NuGet.Configuration.dll",
"lib/net46/NuGet.Configuration.xml",
],
"net462": [
"lib/net46/NuGet.Configuration.dll",
"lib/net46/NuGet.Configuration.xml",
],
"net47": [
"lib/net46/NuGet.Configuration.dll",
"lib/net46/NuGet.Configuration.xml",
],
"net471": [
"lib/net46/NuGet.Configuration.dll",
"lib/net46/NuGet.Configuration.xml",
],
"net472": [
"lib/net46/NuGet.Configuration.dll",
"lib/net46/NuGet.Configuration.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.Configuration.dll",
"lib/netstandard1.6/NuGet.Configuration.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.Configuration.dll",
"lib/netstandard1.6/NuGet.Configuration.xml",
],
},
mono_files = [
"lib/net46/NuGet.Configuration.dll",
"lib/net46/NuGet.Configuration.xml",
],
)
nuget_package(
name = "nuget.versioning",
package = "nuget.versioning",
version = "4.9.2",
sha256 = "10bae2c865a86ba5a955e32b886f7626f1a0af1959ff9d9d9ff010d5049c7cc8",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.Versioning.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.Versioning.dll",
},
net_lib = {
"net46": "lib/net46/NuGet.Versioning.dll",
"net461": "lib/net46/NuGet.Versioning.dll",
"net462": "lib/net46/NuGet.Versioning.dll",
"net47": "lib/net46/NuGet.Versioning.dll",
"net471": "lib/net46/NuGet.Versioning.dll",
"net472": "lib/net46/NuGet.Versioning.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.Versioning.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.Versioning.dll",
},
mono_lib = "lib/net46/NuGet.Versioning.dll",
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.Versioning.dll",
"lib/netstandard1.6/NuGet.Versioning.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.Versioning.dll",
"lib/netstandard1.6/NuGet.Versioning.xml",
],
},
net_files = {
"net46": [
"lib/net46/NuGet.Versioning.dll",
"lib/net46/NuGet.Versioning.xml",
],
"net461": [
"lib/net46/NuGet.Versioning.dll",
"lib/net46/NuGet.Versioning.xml",
],
"net462": [
"lib/net46/NuGet.Versioning.dll",
"lib/net46/NuGet.Versioning.xml",
],
"net47": [
"lib/net46/NuGet.Versioning.dll",
"lib/net46/NuGet.Versioning.xml",
],
"net471": [
"lib/net46/NuGet.Versioning.dll",
"lib/net46/NuGet.Versioning.xml",
],
"net472": [
"lib/net46/NuGet.Versioning.dll",
"lib/net46/NuGet.Versioning.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.Versioning.dll",
"lib/netstandard1.6/NuGet.Versioning.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.Versioning.dll",
"lib/netstandard1.6/NuGet.Versioning.xml",
],
},
mono_files = [
"lib/net46/NuGet.Versioning.dll",
"lib/net46/NuGet.Versioning.xml",
],
)
nuget_package(
name = "nuget.librarymodel",
package = "nuget.librarymodel",
version = "4.9.2",
sha256 = "da08ab3101ab4adc527947abb9739ad665b37084b73a2165a184cf71be0a64e0",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.LibraryModel.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.LibraryModel.dll",
},
net_lib = {
"net46": "lib/net46/NuGet.LibraryModel.dll",
"net461": "lib/net46/NuGet.LibraryModel.dll",
"net462": "lib/net46/NuGet.LibraryModel.dll",
"net47": "lib/net46/NuGet.LibraryModel.dll",
"net471": "lib/net46/NuGet.LibraryModel.dll",
"net472": "lib/net46/NuGet.LibraryModel.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.LibraryModel.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.LibraryModel.dll",
},
mono_lib = "lib/net46/NuGet.LibraryModel.dll",
core_deps = {
"net46": [
"@nuget.common//:net46_net",
"@nuget.versioning//:net46_net",
],
"net461": [
"@nuget.common//:net461_net",
"@nuget.versioning//:net461_net",
],
"net462": [
"@nuget.common//:net462_net",
"@nuget.versioning//:net462_net",
],
"net47": [
"@nuget.common//:net47_net",
"@nuget.versioning//:net47_net",
],
"net471": [
"@nuget.common//:net471_net",
"@nuget.versioning//:net471_net",
],
"net472": [
"@nuget.common//:net472_net",
"@nuget.versioning//:net472_net",
],
"netstandard1.6": [
"@nuget.common//:netstandard1.6_net",
"@nuget.versioning//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.common//:netstandard2.0_net",
"@nuget.versioning//:netstandard2.0_net",
],
},
net_deps = {
"net46": [
"@nuget.common//:net46_net",
"@nuget.versioning//:net46_net",
],
"net461": [
"@nuget.common//:net461_net",
"@nuget.versioning//:net461_net",
],
"net462": [
"@nuget.common//:net462_net",
"@nuget.versioning//:net462_net",
],
"net47": [
"@nuget.common//:net47_net",
"@nuget.versioning//:net47_net",
],
"net471": [
"@nuget.common//:net471_net",
"@nuget.versioning//:net471_net",
],
"net472": [
"@nuget.common//:net472_net",
"@nuget.versioning//:net472_net",
],
"netstandard1.6": [
"@nuget.common//:netstandard1.6_net",
"@nuget.versioning//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.common//:netstandard2.0_net",
"@nuget.versioning//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.common//:mono",
"@nuget.versioning//:mono",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.LibraryModel.dll",
"lib/netstandard1.6/NuGet.LibraryModel.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.LibraryModel.dll",
"lib/netstandard1.6/NuGet.LibraryModel.xml",
],
},
net_files = {
"net46": [
"lib/net46/NuGet.LibraryModel.dll",
"lib/net46/NuGet.LibraryModel.xml",
],
"net461": [
"lib/net46/NuGet.LibraryModel.dll",
"lib/net46/NuGet.LibraryModel.xml",
],
"net462": [
"lib/net46/NuGet.LibraryModel.dll",
"lib/net46/NuGet.LibraryModel.xml",
],
"net47": [
"lib/net46/NuGet.LibraryModel.dll",
"lib/net46/NuGet.LibraryModel.xml",
],
"net471": [
"lib/net46/NuGet.LibraryModel.dll",
"lib/net46/NuGet.LibraryModel.xml",
],
"net472": [
"lib/net46/NuGet.LibraryModel.dll",
"lib/net46/NuGet.LibraryModel.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.LibraryModel.dll",
"lib/netstandard1.6/NuGet.LibraryModel.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.LibraryModel.dll",
"lib/netstandard1.6/NuGet.LibraryModel.xml",
],
},
mono_files = [
"lib/net46/NuGet.LibraryModel.dll",
"lib/net46/NuGet.LibraryModel.xml",
],
)
nuget_package(
name = "nuget.packaging.core",
package = "nuget.packaging.core",
version = "4.9.2",
sha256 = "7e40e1692af1cf7f7d18ea52b4f396831e0e02256a95b2831233cbe3b2f8b4f3",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.Packaging.Core.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.Packaging.Core.dll",
},
net_lib = {
"net46": "lib/net46/NuGet.Packaging.Core.dll",
"net461": "lib/net46/NuGet.Packaging.Core.dll",
"net462": "lib/net46/NuGet.Packaging.Core.dll",
"net47": "lib/net46/NuGet.Packaging.Core.dll",
"net471": "lib/net46/NuGet.Packaging.Core.dll",
"net472": "lib/net46/NuGet.Packaging.Core.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.Packaging.Core.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.Packaging.Core.dll",
},
mono_lib = "lib/net46/NuGet.Packaging.Core.dll",
core_deps = {
"net46": [
"@nuget.common//:net46_net",
"@nuget.versioning//:net46_net",
],
"net461": [
"@nuget.common//:net461_net",
"@nuget.versioning//:net461_net",
],
"net462": [
"@nuget.common//:net462_net",
"@nuget.versioning//:net462_net",
],
"net47": [
"@nuget.common//:net47_net",
"@nuget.versioning//:net47_net",
],
"net471": [
"@nuget.common//:net471_net",
"@nuget.versioning//:net471_net",
],
"net472": [
"@nuget.common//:net472_net",
"@nuget.versioning//:net472_net",
],
"netstandard1.6": [
"@nuget.common//:netstandard1.6_net",
"@nuget.versioning//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.common//:netstandard2.0_net",
"@nuget.versioning//:netstandard2.0_net",
],
},
net_deps = {
"net46": [
"@nuget.common//:net46_net",
"@nuget.versioning//:net46_net",
],
"net461": [
"@nuget.common//:net461_net",
"@nuget.versioning//:net461_net",
],
"net462": [
"@nuget.common//:net462_net",
"@nuget.versioning//:net462_net",
],
"net47": [
"@nuget.common//:net47_net",
"@nuget.versioning//:net47_net",
],
"net471": [
"@nuget.common//:net471_net",
"@nuget.versioning//:net471_net",
],
"net472": [
"@nuget.common//:net472_net",
"@nuget.versioning//:net472_net",
],
"netstandard1.6": [
"@nuget.common//:netstandard1.6_net",
"@nuget.versioning//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.common//:netstandard2.0_net",
"@nuget.versioning//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.common//:mono",
"@nuget.versioning//:mono",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.Packaging.Core.dll",
"lib/netstandard1.6/NuGet.Packaging.Core.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.Packaging.Core.dll",
"lib/netstandard1.6/NuGet.Packaging.Core.xml",
],
},
net_files = {
"net46": [
"lib/net46/NuGet.Packaging.Core.dll",
"lib/net46/NuGet.Packaging.Core.xml",
],
"net461": [
"lib/net46/NuGet.Packaging.Core.dll",
"lib/net46/NuGet.Packaging.Core.xml",
],
"net462": [
"lib/net46/NuGet.Packaging.Core.dll",
"lib/net46/NuGet.Packaging.Core.xml",
],
"net47": [
"lib/net46/NuGet.Packaging.Core.dll",
"lib/net46/NuGet.Packaging.Core.xml",
],
"net471": [
"lib/net46/NuGet.Packaging.Core.dll",
"lib/net46/NuGet.Packaging.Core.xml",
],
"net472": [
"lib/net46/NuGet.Packaging.Core.dll",
"lib/net46/NuGet.Packaging.Core.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.Packaging.Core.dll",
"lib/netstandard1.6/NuGet.Packaging.Core.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.Packaging.Core.dll",
"lib/netstandard1.6/NuGet.Packaging.Core.xml",
],
},
mono_files = [
"lib/net46/NuGet.Packaging.Core.dll",
"lib/net46/NuGet.Packaging.Core.xml",
],
)
nuget_package(
name = "nuget.packaging",
package = "nuget.packaging",
version = "4.9.2",
sha256 = "a096e81f1a769fc0e6173cd2673e0be47974d42e6365ca513b0639e6be047ed1",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.Packaging.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.Packaging.dll",
},
net_lib = {
"net46": "lib/net46/NuGet.Packaging.dll",
"net461": "lib/net46/NuGet.Packaging.dll",
"net462": "lib/net46/NuGet.Packaging.dll",
"net47": "lib/net46/NuGet.Packaging.dll",
"net471": "lib/net46/NuGet.Packaging.dll",
"net472": "lib/net46/NuGet.Packaging.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.Packaging.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.Packaging.dll",
},
mono_lib = "lib/net46/NuGet.Packaging.dll",
core_deps = {
"net46": [
"@nuget.configuration//:net46_net",
"@nuget.packaging.core//:net46_net",
"@newtonsoft.json//:net46_net",
],
"net461": [
"@nuget.configuration//:net461_net",
"@nuget.packaging.core//:net461_net",
"@newtonsoft.json//:net461_net",
],
"net462": [
"@nuget.configuration//:net462_net",
"@nuget.packaging.core//:net462_net",
"@newtonsoft.json//:net462_net",
],
"net47": [
"@nuget.configuration//:net47_net",
"@nuget.packaging.core//:net47_net",
"@newtonsoft.json//:net47_net",
],
"net471": [
"@nuget.configuration//:net471_net",
"@nuget.packaging.core//:net471_net",
"@newtonsoft.json//:net471_net",
],
"net472": [
"@nuget.configuration//:net472_net",
"@nuget.packaging.core//:net472_net",
"@newtonsoft.json//:net472_net",
],
"netstandard1.6": [
"@nuget.configuration//:netstandard1.6_net",
"@nuget.packaging.core//:netstandard1.6_net",
"@newtonsoft.json//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.configuration//:netstandard2.0_net",
"@nuget.packaging.core//:netstandard2.0_net",
"@newtonsoft.json//:netstandard2.0_net",
],
},
net_deps = {
"net46": [
"@nuget.configuration//:net46_net",
"@nuget.packaging.core//:net46_net",
"@newtonsoft.json//:net46_net",
],
"net461": [
"@nuget.configuration//:net461_net",
"@nuget.packaging.core//:net461_net",
"@newtonsoft.json//:net461_net",
],
"net462": [
"@nuget.configuration//:net462_net",
"@nuget.packaging.core//:net462_net",
"@newtonsoft.json//:net462_net",
],
"net47": [
"@nuget.configuration//:net47_net",
"@nuget.packaging.core//:net47_net",
"@newtonsoft.json//:net47_net",
],
"net471": [
"@nuget.configuration//:net471_net",
"@nuget.packaging.core//:net471_net",
"@newtonsoft.json//:net471_net",
],
"net472": [
"@nuget.configuration//:net472_net",
"@nuget.packaging.core//:net472_net",
"@newtonsoft.json//:net472_net",
],
"netstandard1.6": [
"@nuget.configuration//:netstandard1.6_net",
"@nuget.packaging.core//:netstandard1.6_net",
"@newtonsoft.json//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.configuration//:netstandard2.0_net",
"@nuget.packaging.core//:netstandard2.0_net",
"@newtonsoft.json//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.configuration//:mono",
"@nuget.packaging.core//:mono",
"@newtonsoft.json//:mono",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.Packaging.dll",
"lib/netstandard1.6/NuGet.Packaging.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.Packaging.dll",
"lib/netstandard1.6/NuGet.Packaging.xml",
],
},
net_files = {
"net46": [
"lib/net46/NuGet.Packaging.dll",
"lib/net46/NuGet.Packaging.xml",
],
"net461": [
"lib/net46/NuGet.Packaging.dll",
"lib/net46/NuGet.Packaging.xml",
],
"net462": [
"lib/net46/NuGet.Packaging.dll",
"lib/net46/NuGet.Packaging.xml",
],
"net47": [
"lib/net46/NuGet.Packaging.dll",
"lib/net46/NuGet.Packaging.xml",
],
"net471": [
"lib/net46/NuGet.Packaging.dll",
"lib/net46/NuGet.Packaging.xml",
],
"net472": [
"lib/net46/NuGet.Packaging.dll",
"lib/net46/NuGet.Packaging.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.Packaging.dll",
"lib/netstandard1.6/NuGet.Packaging.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.Packaging.dll",
"lib/netstandard1.6/NuGet.Packaging.xml",
],
},
mono_files = [
"lib/net46/NuGet.Packaging.dll",
"lib/net46/NuGet.Packaging.xml",
],
)
nuget_package(
name = "nuget.protocol",
package = "nuget.protocol",
version = "4.9.2",
sha256 = "f413d9ad117c08e0805bd78d48473113062e0424cf389f8d23b835bb081937fe",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.Protocol.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.Protocol.dll",
},
net_lib = {
"net46": "lib/net46/NuGet.Protocol.dll",
"net461": "lib/net46/NuGet.Protocol.dll",
"net462": "lib/net46/NuGet.Protocol.dll",
"net47": "lib/net46/NuGet.Protocol.dll",
"net471": "lib/net46/NuGet.Protocol.dll",
"net472": "lib/net46/NuGet.Protocol.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.Protocol.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.Protocol.dll",
},
mono_lib = "lib/net46/NuGet.Protocol.dll",
core_deps = {
"net46": [
"@nuget.configuration//:net46_net",
"@nuget.packaging//:net46_net",
],
"net461": [
"@nuget.configuration//:net461_net",
"@nuget.packaging//:net461_net",
],
"net462": [
"@nuget.configuration//:net462_net",
"@nuget.packaging//:net462_net",
],
"net47": [
"@nuget.configuration//:net47_net",
"@nuget.packaging//:net47_net",
],
"net471": [
"@nuget.configuration//:net471_net",
"@nuget.packaging//:net471_net",
],
"net472": [
"@nuget.configuration//:net472_net",
"@nuget.packaging//:net472_net",
],
"netstandard1.6": [
"@nuget.configuration//:netstandard1.6_net",
"@nuget.packaging//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.configuration//:netstandard2.0_net",
"@nuget.packaging//:netstandard2.0_net",
],
},
net_deps = {
"net46": [
"@nuget.configuration//:net46_net",
"@nuget.packaging//:net46_net",
],
"net461": [
"@nuget.configuration//:net461_net",
"@nuget.packaging//:net461_net",
],
"net462": [
"@nuget.configuration//:net462_net",
"@nuget.packaging//:net462_net",
],
"net47": [
"@nuget.configuration//:net47_net",
"@nuget.packaging//:net47_net",
],
"net471": [
"@nuget.configuration//:net471_net",
"@nuget.packaging//:net471_net",
],
"net472": [
"@nuget.configuration//:net472_net",
"@nuget.packaging//:net472_net",
],
"netstandard1.6": [
"@nuget.configuration//:netstandard1.6_net",
"@nuget.packaging//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.configuration//:netstandard2.0_net",
"@nuget.packaging//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.configuration//:mono",
"@nuget.packaging//:mono",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.Protocol.dll",
"lib/netstandard1.6/NuGet.Protocol.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.Protocol.dll",
"lib/netstandard1.6/NuGet.Protocol.xml",
],
},
net_files = {
"net46": [
"lib/net46/NuGet.Protocol.dll",
"lib/net46/NuGet.Protocol.xml",
],
"net461": [
"lib/net46/NuGet.Protocol.dll",
"lib/net46/NuGet.Protocol.xml",
],
"net462": [
"lib/net46/NuGet.Protocol.dll",
"lib/net46/NuGet.Protocol.xml",
],
"net47": [
"lib/net46/NuGet.Protocol.dll",
"lib/net46/NuGet.Protocol.xml",
],
"net471": [
"lib/net46/NuGet.Protocol.dll",
"lib/net46/NuGet.Protocol.xml",
],
"net472": [
"lib/net46/NuGet.Protocol.dll",
"lib/net46/NuGet.Protocol.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.Protocol.dll",
"lib/netstandard1.6/NuGet.Protocol.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.Protocol.dll",
"lib/netstandard1.6/NuGet.Protocol.xml",
],
},
mono_files = [
"lib/net46/NuGet.Protocol.dll",
"lib/net46/NuGet.Protocol.xml",
],
)
nuget_package(
name = "nuget.credentials",
package = "nuget.credentials",
version = "4.9.2",
sha256 = "58a309f7bdd96871d15eb19952099b4d6bda7f3f9f8644969a55f7fe4b92ae33",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.Credentials.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.Credentials.dll",
},
net_lib = {
"net46": "lib/net46/NuGet.Credentials.dll",
"net461": "lib/net46/NuGet.Credentials.dll",
"net462": "lib/net46/NuGet.Credentials.dll",
"net47": "lib/net46/NuGet.Credentials.dll",
"net471": "lib/net46/NuGet.Credentials.dll",
"net472": "lib/net46/NuGet.Credentials.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.Credentials.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.Credentials.dll",
},
mono_lib = "lib/net46/NuGet.Credentials.dll",
core_deps = {
"net46": [
"@nuget.protocol//:net46_net",
],
"net461": [
"@nuget.protocol//:net461_net",
],
"net462": [
"@nuget.protocol//:net462_net",
],
"net47": [
"@nuget.protocol//:net47_net",
],
"net471": [
"@nuget.protocol//:net471_net",
],
"net472": [
"@nuget.protocol//:net472_net",
],
"netstandard1.6": [
"@nuget.protocol//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.protocol//:netstandard2.0_net",
],
},
net_deps = {
"net46": [
"@nuget.protocol//:net46_net",
],
"net461": [
"@nuget.protocol//:net461_net",
],
"net462": [
"@nuget.protocol//:net462_net",
],
"net47": [
"@nuget.protocol//:net47_net",
],
"net471": [
"@nuget.protocol//:net471_net",
],
"net472": [
"@nuget.protocol//:net472_net",
],
"netstandard1.6": [
"@nuget.protocol//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.protocol//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.protocol//:mono",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.Credentials.dll",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.Credentials.dll",
],
},
net_files = {
"net46": [
"lib/net46/NuGet.Credentials.dll",
],
"net461": [
"lib/net46/NuGet.Credentials.dll",
],
"net462": [
"lib/net46/NuGet.Credentials.dll",
],
"net47": [
"lib/net46/NuGet.Credentials.dll",
],
"net471": [
"lib/net46/NuGet.Credentials.dll",
],
"net472": [
"lib/net46/NuGet.Credentials.dll",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.Credentials.dll",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.Credentials.dll",
],
},
mono_files = [
"lib/net46/NuGet.Credentials.dll",
],
)
nuget_package(
name = "nuget.dependencyresolver.core",
package = "nuget.dependencyresolver.core",
version = "4.9.2",
sha256 = "42b917c6d65394cdc57175b8cea9035b3ecd8d8efc321ba0c6911ee5ebc42b7b",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
},
net_lib = {
"net46": "lib/net46/NuGet.DependencyResolver.Core.dll",
"net461": "lib/net46/NuGet.DependencyResolver.Core.dll",
"net462": "lib/net46/NuGet.DependencyResolver.Core.dll",
"net47": "lib/net46/NuGet.DependencyResolver.Core.dll",
"net471": "lib/net46/NuGet.DependencyResolver.Core.dll",
"net472": "lib/net46/NuGet.DependencyResolver.Core.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
},
mono_lib = "lib/net46/NuGet.DependencyResolver.Core.dll",
core_deps = {
"net46": [
"@nuget.librarymodel//:net46_net",
"@nuget.protocol//:net46_net",
],
"net461": [
"@nuget.librarymodel//:net461_net",
"@nuget.protocol//:net461_net",
],
"net462": [
"@nuget.librarymodel//:net462_net",
"@nuget.protocol//:net462_net",
],
"net47": [
"@nuget.librarymodel//:net47_net",
"@nuget.protocol//:net47_net",
],
"net471": [
"@nuget.librarymodel//:net471_net",
"@nuget.protocol//:net471_net",
],
"net472": [
"@nuget.librarymodel//:net472_net",
"@nuget.protocol//:net472_net",
],
"netstandard1.6": [
"@nuget.librarymodel//:netstandard1.6_net",
"@nuget.protocol//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.librarymodel//:netstandard2.0_net",
"@nuget.protocol//:netstandard2.0_net",
],
},
net_deps = {
"net46": [
"@nuget.librarymodel//:net46_net",
"@nuget.protocol//:net46_net",
],
"net461": [
"@nuget.librarymodel//:net461_net",
"@nuget.protocol//:net461_net",
],
"net462": [
"@nuget.librarymodel//:net462_net",
"@nuget.protocol//:net462_net",
],
"net47": [
"@nuget.librarymodel//:net47_net",
"@nuget.protocol//:net47_net",
],
"net471": [
"@nuget.librarymodel//:net471_net",
"@nuget.protocol//:net471_net",
],
"net472": [
"@nuget.librarymodel//:net472_net",
"@nuget.protocol//:net472_net",
],
"netstandard1.6": [
"@nuget.librarymodel//:netstandard1.6_net",
"@nuget.protocol//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.librarymodel//:netstandard2.0_net",
"@nuget.protocol//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.librarymodel//:mono",
"@nuget.protocol//:mono",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
"lib/netstandard1.6/NuGet.DependencyResolver.Core.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
"lib/netstandard1.6/NuGet.DependencyResolver.Core.xml",
],
},
net_files = {
"net46": [
"lib/net46/NuGet.DependencyResolver.Core.dll",
"lib/net46/NuGet.DependencyResolver.Core.xml",
],
"net461": [
"lib/net46/NuGet.DependencyResolver.Core.dll",
"lib/net46/NuGet.DependencyResolver.Core.xml",
],
"net462": [
"lib/net46/NuGet.DependencyResolver.Core.dll",
"lib/net46/NuGet.DependencyResolver.Core.xml",
],
"net47": [
"lib/net46/NuGet.DependencyResolver.Core.dll",
"lib/net46/NuGet.DependencyResolver.Core.xml",
],
"net471": [
"lib/net46/NuGet.DependencyResolver.Core.dll",
"lib/net46/NuGet.DependencyResolver.Core.xml",
],
"net472": [
"lib/net46/NuGet.DependencyResolver.Core.dll",
"lib/net46/NuGet.DependencyResolver.Core.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
"lib/netstandard1.6/NuGet.DependencyResolver.Core.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.DependencyResolver.Core.dll",
"lib/netstandard1.6/NuGet.DependencyResolver.Core.xml",
],
},
mono_files = [
"lib/net46/NuGet.DependencyResolver.Core.dll",
"lib/net46/NuGet.DependencyResolver.Core.xml",
],
)
nuget_package(
name = "nuget.projectmodel",
package = "nuget.projectmodel",
version = "4.9.2",
sha256 = "b13a9db9d88b0b8d568c367dd837437793e8f57afb61beda4aa485457ddc23b4",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.ProjectModel.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.ProjectModel.dll",
},
net_lib = {
"net46": "lib/net46/NuGet.ProjectModel.dll",
"net461": "lib/net46/NuGet.ProjectModel.dll",
"net462": "lib/net46/NuGet.ProjectModel.dll",
"net47": "lib/net46/NuGet.ProjectModel.dll",
"net471": "lib/net46/NuGet.ProjectModel.dll",
"net472": "lib/net46/NuGet.ProjectModel.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.ProjectModel.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.ProjectModel.dll",
},
mono_lib = "lib/net46/NuGet.ProjectModel.dll",
core_deps = {
"net46": [
"@nuget.dependencyresolver.core//:net46_net",
],
"net461": [
"@nuget.dependencyresolver.core//:net461_net",
],
"net462": [
"@nuget.dependencyresolver.core//:net462_net",
],
"net47": [
"@nuget.dependencyresolver.core//:net47_net",
],
"net471": [
"@nuget.dependencyresolver.core//:net471_net",
],
"net472": [
"@nuget.dependencyresolver.core//:net472_net",
],
"netstandard1.6": [
"@nuget.dependencyresolver.core//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.dependencyresolver.core//:netstandard2.0_net",
],
},
net_deps = {
"net46": [
"@nuget.dependencyresolver.core//:net46_net",
],
"net461": [
"@nuget.dependencyresolver.core//:net461_net",
],
"net462": [
"@nuget.dependencyresolver.core//:net462_net",
],
"net47": [
"@nuget.dependencyresolver.core//:net47_net",
],
"net471": [
"@nuget.dependencyresolver.core//:net471_net",
],
"net472": [
"@nuget.dependencyresolver.core//:net472_net",
],
"netstandard1.6": [
"@nuget.dependencyresolver.core//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.dependencyresolver.core//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.dependencyresolver.core//:mono",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.ProjectModel.dll",
"lib/netstandard1.6/NuGet.ProjectModel.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.ProjectModel.dll",
"lib/netstandard1.6/NuGet.ProjectModel.xml",
],
},
net_files = {
"net46": [
"lib/net46/NuGet.ProjectModel.dll",
"lib/net46/NuGet.ProjectModel.xml",
],
"net461": [
"lib/net46/NuGet.ProjectModel.dll",
"lib/net46/NuGet.ProjectModel.xml",
],
"net462": [
"lib/net46/NuGet.ProjectModel.dll",
"lib/net46/NuGet.ProjectModel.xml",
],
"net47": [
"lib/net46/NuGet.ProjectModel.dll",
"lib/net46/NuGet.ProjectModel.xml",
],
"net471": [
"lib/net46/NuGet.ProjectModel.dll",
"lib/net46/NuGet.ProjectModel.xml",
],
"net472": [
"lib/net46/NuGet.ProjectModel.dll",
"lib/net46/NuGet.ProjectModel.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.ProjectModel.dll",
"lib/netstandard1.6/NuGet.ProjectModel.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.ProjectModel.dll",
"lib/netstandard1.6/NuGet.ProjectModel.xml",
],
},
mono_files = [
"lib/net46/NuGet.ProjectModel.dll",
"lib/net46/NuGet.ProjectModel.xml",
],
)
nuget_package(
name = "nuget.commands",
package = "nuget.commands",
version = "4.9.2",
sha256 = "50566e04602b90f0032fea33f3972a3dbbddc5fe799357e5a18e6a2f8583b26d",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.Commands.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.Commands.dll",
},
net_lib = {
"net46": "lib/net46/NuGet.Commands.dll",
"net461": "lib/net46/NuGet.Commands.dll",
"net462": "lib/net46/NuGet.Commands.dll",
"net47": "lib/net46/NuGet.Commands.dll",
"net471": "lib/net46/NuGet.Commands.dll",
"net472": "lib/net46/NuGet.Commands.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.Commands.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.Commands.dll",
},
mono_lib = "lib/net46/NuGet.Commands.dll",
core_deps = {
"net46": [
"@nuget.credentials//:net46_net",
"@nuget.projectmodel//:net46_net",
],
"net461": [
"@nuget.credentials//:net461_net",
"@nuget.projectmodel//:net461_net",
],
"net462": [
"@nuget.credentials//:net462_net",
"@nuget.projectmodel//:net462_net",
],
"net47": [
"@nuget.credentials//:net47_net",
"@nuget.projectmodel//:net47_net",
],
"net471": [
"@nuget.credentials//:net471_net",
"@nuget.projectmodel//:net471_net",
],
"net472": [
"@nuget.credentials//:net472_net",
"@nuget.projectmodel//:net472_net",
],
"netstandard1.6": [
"@nuget.credentials//:netstandard1.6_net",
"@nuget.projectmodel//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.credentials//:netstandard2.0_net",
"@nuget.projectmodel//:netstandard2.0_net",
],
},
net_deps = {
"net46": [
"@nuget.credentials//:net46_net",
"@nuget.projectmodel//:net46_net",
],
"net461": [
"@nuget.credentials//:net461_net",
"@nuget.projectmodel//:net461_net",
],
"net462": [
"@nuget.credentials//:net462_net",
"@nuget.projectmodel//:net462_net",
],
"net47": [
"@nuget.credentials//:net47_net",
"@nuget.projectmodel//:net47_net",
],
"net471": [
"@nuget.credentials//:net471_net",
"@nuget.projectmodel//:net471_net",
],
"net472": [
"@nuget.credentials//:net472_net",
"@nuget.projectmodel//:net472_net",
],
"netstandard1.6": [
"@nuget.credentials//:netstandard1.6_net",
"@nuget.projectmodel//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.credentials//:netstandard2.0_net",
"@nuget.projectmodel//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.credentials//:mono",
"@nuget.projectmodel//:mono",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.Commands.dll",
"lib/netstandard1.6/NuGet.Commands.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.Commands.dll",
"lib/netstandard1.6/NuGet.Commands.xml",
],
},
net_files = {
"net46": [
"lib/net46/NuGet.Commands.dll",
"lib/net46/NuGet.Commands.xml",
],
"net461": [
"lib/net46/NuGet.Commands.dll",
"lib/net46/NuGet.Commands.xml",
],
"net462": [
"lib/net46/NuGet.Commands.dll",
"lib/net46/NuGet.Commands.xml",
],
"net47": [
"lib/net46/NuGet.Commands.dll",
"lib/net46/NuGet.Commands.xml",
],
"net471": [
"lib/net46/NuGet.Commands.dll",
"lib/net46/NuGet.Commands.xml",
],
"net472": [
"lib/net46/NuGet.Commands.dll",
"lib/net46/NuGet.Commands.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.Commands.dll",
"lib/netstandard1.6/NuGet.Commands.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.Commands.dll",
"lib/netstandard1.6/NuGet.Commands.xml",
],
},
mono_files = [
"lib/net46/NuGet.Commands.dll",
"lib/net46/NuGet.Commands.xml",
],
)
nuget_package(
name = "nuget.resolver",
package = "nuget.resolver",
version = "4.9.2",
sha256 = "2ab469a5aaac1d5150fcdda976dec8f555fcb8baf61f191bc871b9390f9a30c2",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.Resolver.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.Resolver.dll",
},
net_lib = {
"net46": "lib/net46/NuGet.Resolver.dll",
"net461": "lib/net46/NuGet.Resolver.dll",
"net462": "lib/net46/NuGet.Resolver.dll",
"net47": "lib/net46/NuGet.Resolver.dll",
"net471": "lib/net46/NuGet.Resolver.dll",
"net472": "lib/net46/NuGet.Resolver.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.Resolver.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.Resolver.dll",
},
mono_lib = "lib/net46/NuGet.Resolver.dll",
core_deps = {
"net46": [
"@nuget.protocol//:net46_net",
],
"net461": [
"@nuget.protocol//:net461_net",
],
"net462": [
"@nuget.protocol//:net462_net",
],
"net47": [
"@nuget.protocol//:net47_net",
],
"net471": [
"@nuget.protocol//:net471_net",
],
"net472": [
"@nuget.protocol//:net472_net",
],
"netstandard1.6": [
"@nuget.protocol//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.protocol//:netstandard2.0_net",
],
},
net_deps = {
"net46": [
"@nuget.protocol//:net46_net",
],
"net461": [
"@nuget.protocol//:net461_net",
],
"net462": [
"@nuget.protocol//:net462_net",
],
"net47": [
"@nuget.protocol//:net47_net",
],
"net471": [
"@nuget.protocol//:net471_net",
],
"net472": [
"@nuget.protocol//:net472_net",
],
"netstandard1.6": [
"@nuget.protocol//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.protocol//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.protocol//:mono",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.Resolver.dll",
"lib/netstandard1.6/NuGet.Resolver.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.Resolver.dll",
"lib/netstandard1.6/NuGet.Resolver.xml",
],
},
net_files = {
"net46": [
"lib/net46/NuGet.Resolver.dll",
"lib/net46/NuGet.Resolver.xml",
],
"net461": [
"lib/net46/NuGet.Resolver.dll",
"lib/net46/NuGet.Resolver.xml",
],
"net462": [
"lib/net46/NuGet.Resolver.dll",
"lib/net46/NuGet.Resolver.xml",
],
"net47": [
"lib/net46/NuGet.Resolver.dll",
"lib/net46/NuGet.Resolver.xml",
],
"net471": [
"lib/net46/NuGet.Resolver.dll",
"lib/net46/NuGet.Resolver.xml",
],
"net472": [
"lib/net46/NuGet.Resolver.dll",
"lib/net46/NuGet.Resolver.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.Resolver.dll",
"lib/netstandard1.6/NuGet.Resolver.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.Resolver.dll",
"lib/netstandard1.6/NuGet.Resolver.xml",
],
},
mono_files = [
"lib/net46/NuGet.Resolver.dll",
"lib/net46/NuGet.Resolver.xml",
],
)
nuget_package(
name = "nuget.packagemanagement.netstandard",
package = "nuget.packagemanagement.netstandard",
version = "4.9.2",
sha256 = "c51c50b2da22e2f8649bbf8a151a04fdd119e631141bf8b88e4ebb4335f5ff7e",
core_lib = {
"netcoreapp2.0": "lib/netstandard1.6/NuGet.PackageManagement.dll",
"netcoreapp2.1": "lib/netstandard1.6/NuGet.PackageManagement.dll",
},
net_lib = {
"net461": "lib/netstandard1.6/NuGet.PackageManagement.dll",
"net462": "lib/netstandard1.6/NuGet.PackageManagement.dll",
"net47": "lib/netstandard1.6/NuGet.PackageManagement.dll",
"net471": "lib/netstandard1.6/NuGet.PackageManagement.dll",
"net472": "lib/netstandard1.6/NuGet.PackageManagement.dll",
"netstandard1.6": "lib/netstandard1.6/NuGet.PackageManagement.dll",
"netstandard2.0": "lib/netstandard1.6/NuGet.PackageManagement.dll",
},
mono_lib = "lib/netstandard1.6/NuGet.PackageManagement.dll",
core_deps = {
"net461": [
"@nuget.commands//:net461_net",
"@nuget.resolver//:net461_net",
],
"net462": [
"@nuget.commands//:net462_net",
"@nuget.resolver//:net462_net",
],
"net47": [
"@nuget.commands//:net47_net",
"@nuget.resolver//:net47_net",
],
"net471": [
"@nuget.commands//:net471_net",
"@nuget.resolver//:net471_net",
],
"net472": [
"@nuget.commands//:net472_net",
"@nuget.resolver//:net472_net",
],
"netstandard1.6": [
"@nuget.commands//:netstandard1.6_net",
"@nuget.resolver//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.commands//:netstandard2.0_net",
"@nuget.resolver//:netstandard2.0_net",
],
},
net_deps = {
"net461": [
"@nuget.commands//:net461_net",
"@nuget.resolver//:net461_net",
],
"net462": [
"@nuget.commands//:net462_net",
"@nuget.resolver//:net462_net",
],
"net47": [
"@nuget.commands//:net47_net",
"@nuget.resolver//:net47_net",
],
"net471": [
"@nuget.commands//:net471_net",
"@nuget.resolver//:net471_net",
],
"net472": [
"@nuget.commands//:net472_net",
"@nuget.resolver//:net472_net",
],
"netstandard1.6": [
"@nuget.commands//:netstandard1.6_net",
"@nuget.resolver//:netstandard1.6_net",
],
"netstandard2.0": [
"@nuget.commands//:netstandard2.0_net",
"@nuget.resolver//:netstandard2.0_net",
],
},
mono_deps = [
"@nuget.commands//:mono",
"@nuget.resolver//:mono",
"@io_bazel_rules_dotnet//dotnet/stdlib:netstandard.library.dll",
],
core_files = {
"netcoreapp2.0": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
"netcoreapp2.1": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
},
net_files = {
"net461": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
"net462": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
"net47": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
"net471": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
"net472": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
"netstandard1.6": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
"netstandard2.0": [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
},
mono_files = [
"lib/netstandard1.6/NuGet.PackageManagement.dll",
"lib/netstandard1.6/NuGet.PackageManagement.xml",
],
)
nuget_package(
name = "commandlineparser",
package = "commandlineparser",
version = "2.6.0",
sha256 = "76bdbc991c9f416cc1e775b17ce5d76cd0f851dcafaf096cbc433698b77f4dd5",
core_lib = {
"netcoreapp2.0": "lib/netstandard2.0/CommandLine.dll",
"netcoreapp2.1": "lib/netstandard2.0/CommandLine.dll",
},
net_lib = {
"net45": "lib/net45/CommandLine.dll",
"net451": "lib/net45/CommandLine.dll",
"net452": "lib/net45/CommandLine.dll",
"net46": "lib/net45/CommandLine.dll",
"net461": "lib/net461/CommandLine.dll",
"net462": "lib/net461/CommandLine.dll",
"net47": "lib/net461/CommandLine.dll",
"net471": "lib/net461/CommandLine.dll",
"net472": "lib/net461/CommandLine.dll",
"netstandard2.0": "lib/netstandard2.0/CommandLine.dll",
},
mono_lib = "lib/net461/CommandLine.dll",
core_files = {
"netcoreapp2.0": [
"lib/netstandard2.0/CommandLine.dll",
"lib/netstandard2.0/CommandLine.xml",
],
"netcoreapp2.1": [
"lib/netstandard2.0/CommandLine.dll",
"lib/netstandard2.0/CommandLine.xml",
],
},
net_files = {
"net45": [
"lib/net45/CommandLine.dll",
"lib/net45/CommandLine.xml",
],
"net451": [
"lib/net45/CommandLine.dll",
"lib/net45/CommandLine.xml",
],
"net452": [
"lib/net45/CommandLine.dll",
"lib/net45/CommandLine.xml",
],
"net46": [
"lib/net45/CommandLine.dll",
"lib/net45/CommandLine.xml",
],
"net461": [
"lib/net461/CommandLine.dll",
"lib/net461/CommandLine.xml",
],
"net462": [
"lib/net461/CommandLine.dll",
"lib/net461/CommandLine.xml",
],
"net47": [
"lib/net461/CommandLine.dll",
"lib/net461/CommandLine.xml",
],
"net471": [
"lib/net461/CommandLine.dll",
"lib/net461/CommandLine.xml",
],
"net472": [
"lib/net461/CommandLine.dll",
"lib/net461/CommandLine.xml",
],
"netstandard2.0": [
"lib/netstandard2.0/CommandLine.dll",
"lib/netstandard2.0/CommandLine.xml",
],
},
mono_files = [
"lib/net461/CommandLine.dll",
"lib/net461/CommandLine.xml",
],
)
| true | true |
f7f72b6815007845f1e334348196a1e2f205da44 | 5,510 | py | Python | lib/kb_cufflinks/core/script_utils.py | mclark58/kb_cufflinks | c60fffba2da63c1bf5b15823115af0200ad47490 | [
"MIT"
] | 1 | 2020-01-13T21:19:27.000Z | 2020-01-13T21:19:27.000Z | lib/kb_cufflinks/core/script_utils.py | mclark58/kb_cufflinks | c60fffba2da63c1bf5b15823115af0200ad47490 | [
"MIT"
] | 5 | 2017-11-14T17:47:49.000Z | 2019-06-10T17:07:03.000Z | lib/kb_cufflinks/core/script_utils.py | mclark58/kb_cufflinks | c60fffba2da63c1bf5b15823115af0200ad47490 | [
"MIT"
] | 7 | 2017-06-20T13:54:44.000Z | 2019-10-10T20:30:44.000Z | import logging
import os
import subprocess
import traceback
from os import listdir
from os.path import isfile, join
from zipfile import ZipFile
'''
A utility python module containing a set of methods necessary for this kbase
module.
'''
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
def create_logger(log_dir, name):
"""Create a logger
args: name (str): name of logger
returns: logger (obj): logging.Logger instance
"""
logger = logging.getLogger(name)
fmt = logging.Formatter('%(asctime)s - %(process)d - %(name)s - '
' %(levelname)s -%(message)s')
hdl = logging.FileHandler(os.path.join(log_dir, name + '.log'))
hdl.setFormatter(fmt)
logger.addHandler(hdl)
return logger
def if_obj_exists(logger, ws_client, ws_id, o_type, obj_l):
obj_list = ws_client.list_objects({"workspaces": [ws_id], "type": o_type, 'showHidden': 1})
obj_names = [i[1] for i in obj_list]
existing_names = [i for i in obj_l if i in obj_names]
obj_ids = None
if len(existing_names) != 0:
e_queries = [{'name': j, 'workspace': ws_id} for j in existing_names]
e_infos = ws_client.get_object_info_new({"objects": e_queries})
obj_ids = [(str(k[1]), (str(k[6]) + '/' + str(k[0]) + '/' + str(k[4]))) for k in e_infos]
return obj_ids
def log(message, level=logging.INFO, logger=None):
if logger is None:
if level == logging.DEBUG:
print('\nDEBUG: ' + message + '\n')
elif level == logging.INFO:
print('\nINFO: ' + message + '\n')
elif level == logging.WARNING:
print('\nWARNING: ' + message + '\n')
elif level == logging.ERROR:
print('\nERROR: ' + message + '\n')
elif level == logging.CRITICAL:
print('\nCRITICAL: ' + message + '\n')
else:
logger.log(level, '\n' + message + '\n')
def zip_files(logger, src_path, output_fn):
"""
Compress all index files (not directory) into an output zip file on disk.
"""
files = [f for f in listdir(src_path) if isfile(join(src_path, f))]
with ZipFile(output_fn, 'w', allowZip64=True) as izip:
for f in files:
izip.write(join(src_path, f), f)
def unzip_files(logger, src_fn, dst_path):
"""
Extract all index files into an output zip file on disk.
"""
with ZipFile(src_fn, 'r') as ozip:
ozip.extractall(dst_path)
def whereis(program):
"""
returns path of program if it exists in your ``$PATH`` variable or `
`None`` otherwise
"""
for path in os.environ.get('PATH', '').split(':'):
if os.path.exists(os.path.join(path, program)) and not os.path.isdir(
os.path.join(path, program)):
return os.path.join(path, program)
return None
def runProgram(logger=None,
progName=None,
argStr=None,
script_dir=None,
working_dir=None):
"""
Convenience func to handle calling and monitoring output of external programs.
:param progName: name of system program command
:param argStr: string containing command line options for ``progName``
:returns: subprocess.communicate object
"""
# Ensure program is callable.
if script_dir is not None:
progPath = os.path.join(script_dir, progName)
else:
progPath = progName
progPath = whereis(progName)
if not progPath:
raise RuntimeError(
None,
'{0} command not found in your PATH environmental variable. {1}'.format(
progName,
os.environ.get(
'PATH',
'')))
# Construct shell command
cmdStr = "%s %s" % (progPath, argStr)
print("Executing : " + cmdStr)
if logger is not None:
logger.info("Executing : " + cmdStr)
# if working_dir is None:
logger.info("Executing: " + cmdStr + " on cwd")
else:
logger.info("Executing: " + cmdStr + " on " + working_dir)
# Set up process obj
process = subprocess.Popen(cmdStr,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=working_dir)
# Get results
result, stderr = process.communicate()
# print result
# print stderr
# keep this until your code is stable for easier debugging
if logger is not None and result is not None and len(result) > 0:
logger.info(result)
else:
print(result)
if logger is not None and stderr is not None and len(stderr) > 0:
logger.info(stderr)
else:
print(stderr)
# Check returncode for success/failure
if process.returncode != 0:
raise RuntimeError("Command execution failed {0}".format(
"".join(traceback.format_exc())))
# Return result
return {"result": result, "stderr": stderr}
def check_sys_stat(logger):
    """Log a snapshot of system health: memory and CPU usage.

    Disk-space reporting (``check_disk_space``) is intentionally left out.
    """
    for probe in (check_memory_usage, check_cpu_usage):
        probe(logger)
def check_disk_space(logger):
    """Log filesystem usage via ``df -h``."""
    runProgram(progName="df", argStr="-h", logger=logger)
def check_memory_usage(logger):
    """Log memory statistics via ``vmstat -s``."""
    runProgram(progName="vmstat", argStr="-s", logger=logger)
def check_cpu_usage(logger):
    """Log per-CPU statistics via ``mpstat -P ALL``."""
    runProgram(progName="mpstat", argStr="-P ALL", logger=logger)
| 30.10929 | 97 | 0.6 | import logging
import os
import subprocess
import traceback
from os import listdir
from os.path import isfile, join
from zipfile import ZipFile
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
def create_logger(log_dir, name):
    """Return a logger named *name* writing to ``<log_dir>/<name>.log``."""
    log_file = os.path.join(log_dir, name + '.log')
    handler = logging.FileHandler(log_file)
    handler.setFormatter(logging.Formatter('%(asctime)s - %(process)d - %(name)s - '
                                           ' %(levelname)s -%(message)s'))
    logger = logging.getLogger(name)
    logger.addHandler(handler)
    return logger
def if_obj_exists(logger, ws_client, ws_id, o_type, obj_l):
    """Check which names in *obj_l* already exist in workspace *ws_id*.

    Queries *ws_client* for objects of type *o_type* (hidden included) and
    returns a list of ``(name, "wsid/objid/version")`` tuples for the names
    that exist, or ``None`` when none of them do.
    """
    listing = ws_client.list_objects(
        {"workspaces": [ws_id], "type": o_type, 'showHidden': 1})
    known_names = [entry[1] for entry in listing]
    present = [name for name in obj_l if name in known_names]
    if not present:
        return None
    queries = [{'name': name, 'workspace': ws_id} for name in present]
    infos = ws_client.get_object_info_new({"objects": queries})
    return [(str(info[1]), str(info[6]) + '/' + str(info[0]) + '/' + str(info[4]))
            for info in infos]
def log(message, level=logging.INFO, logger=None):
    """Emit *message* at *level* through *logger*, or print it when no logger is given."""
    if logger is not None:
        logger.log(level, '\n' + message + '\n')
        return
    prefixes = {
        logging.DEBUG: 'DEBUG: ',
        logging.INFO: 'INFO: ',
        logging.WARNING: 'WARNING: ',
        logging.ERROR: 'ERROR: ',
        logging.CRITICAL: 'CRITICAL: ',
    }
    # Unknown levels are silently ignored, matching the original if/elif chain.
    if level in prefixes:
        print('\n' + prefixes[level] + message + '\n')
def zip_files(logger, src_path, output_fn):
    """Archive every regular file directly under *src_path* into *output_fn*.

    Subdirectories are skipped; entries are stored under their bare names.
    """
    regular_files = [entry for entry in listdir(src_path)
                     if isfile(join(src_path, entry))]
    with ZipFile(output_fn, 'w', allowZip64=True) as archive:
        for entry in regular_files:
            archive.write(join(src_path, entry), entry)
def unzip_files(logger, src_fn, dst_path):
    """Extract every member of the zip archive *src_fn* into *dst_path*."""
    with ZipFile(src_fn, 'r') as archive:
        archive.extractall(dst_path)
def whereis(program):
for path in os.environ.get('PATH', '').split(':'):
if os.path.exists(os.path.join(path, program)) and not os.path.isdir(
os.path.join(path, program)):
return os.path.join(path, program)
return None
def runProgram(logger=None,
progName=None,
argStr=None,
script_dir=None,
working_dir=None):
if script_dir is not None:
progPath = os.path.join(script_dir, progName)
else:
progPath = progName
progPath = whereis(progName)
if not progPath:
raise RuntimeError(
None,
'{0} command not found in your PATH environmental variable. {1}'.format(
progName,
os.environ.get(
'PATH',
'')))
cmdStr = "%s %s" % (progPath, argStr)
print("Executing : " + cmdStr)
if logger is not None:
logger.info("Executing : " + cmdStr)
logger.info("Executing: " + cmdStr + " on cwd")
else:
logger.info("Executing: " + cmdStr + " on " + working_dir)
process = subprocess.Popen(cmdStr,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=working_dir)
result, stderr = process.communicate()
if logger is not None and result is not None and len(result) > 0:
logger.info(result)
else:
print(result)
if logger is not None and stderr is not None and len(stderr) > 0:
logger.info(stderr)
else:
print(stderr)
if process.returncode != 0:
raise RuntimeError("Command execution failed {0}".format(
"".join(traceback.format_exc())))
return {"result": result, "stderr": stderr}
def check_sys_stat(logger):
check_memory_usage(logger)
check_cpu_usage(logger)
def check_disk_space(logger):
runProgram(logger=logger, progName="df", argStr="-h")
def check_memory_usage(logger):
runProgram(logger=logger, progName="vmstat", argStr="-s")
def check_cpu_usage(logger):
runProgram(logger=logger, progName="mpstat", argStr="-P ALL")
| true | true |
f7f72bebf424dab07c5da33aaf954ba76312dcbd | 11,313 | py | Python | tronapi/main.py | h1ght1me/tron-api-python | 7cdb23d0480fe86126aa5121e47a6029f3f1b0c7 | [
"MIT"
] | null | null | null | tronapi/main.py | h1ght1me/tron-api-python | 7cdb23d0480fe86126aa5121e47a6029f3f1b0c7 | [
"MIT"
] | null | null | null | tronapi/main.py | h1ght1me/tron-api-python | 7cdb23d0480fe86126aa5121e47a6029f3f1b0c7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# Copyright (c) iEXBase. All rights reserved.
# Licensed under the MIT License.
# See License.txt in the project root for license information.
# --------------------------------------------------------------------
"""
tronapi.main
===============
Connect to the Tron network.
:copyright: © 2019 by the iEXBase.
:license: MIT License
"""
from tronapi.common.datastructures import AttributeDict
from urllib.parse import urlencode
from eth_utils import (
apply_to_return_value,
to_hex,
keccak as tron_keccak,
)
from hexbytes import HexBytes
from trx_utils import (
to_sun,
from_sun,
is_integer,
add_0x_prefix,
remove_0x_prefix,
is_address
)
from tronapi.common.abi import map_abi_data
from tronapi.common.account import Address, PrivateKey, Account
from tronapi.common.normalizers import abi_resolver
from tronapi.common.encoding import (
to_bytes,
to_int,
to_text,
to_json,
hex_encode_abi_type
)
from tronapi.exceptions import (
InvalidTronError,
TronError
)
from tronapi.manager import TronManager
from tronapi import HttpProvider, constants
from tronapi.transactionbuilder import TransactionBuilder
from tronapi.trx import Trx
DEFAULT_MODULES = {
'trx': Trx
}
class Tron:
    # Providers
    HTTPProvider = HttpProvider

    _default_block = None
    _private_key = None
    _default_address = AttributeDict({})

    # Encoding and Decoding
    toBytes = staticmethod(to_bytes)
    toInt = staticmethod(to_int)
    toHex = staticmethod(to_hex)
    toText = staticmethod(to_text)
    toJSON = staticmethod(to_json)

    # Currency Utility
    toSun = staticmethod(to_sun)
    fromSun = staticmethod(from_sun)

    # Validate address
    isAddress = staticmethod(is_address)

    def __init__(self, **kwargs):
        """Connect to the Tron network.

        Args:
            kwargs (Any): We fill the most necessary parameters
            for working with blockchain Tron
        """

        # We check the obtained nodes, if the necessary parameters
        # are not specified, then we take the default
        kwargs.setdefault('full_node', constants.DEFAULT_NODES['full_node'])
        kwargs.setdefault('solidity_node', constants.DEFAULT_NODES['solidity_node'])
        kwargs.setdefault('event_server', constants.DEFAULT_NODES['event_server'])

        # The node manager allows you to automatically determine the node
        # on the router or manually refer to a specific node.
        # solidity_node, full_node or event_server
        self.manager = TronManager(self, dict(
            full_node=kwargs.get('full_node'),
            solidity_node=kwargs.get('solidity_node'),
            event_server=kwargs.get('event_server')
        ))

        # If the parameter of the private key is not empty,
        # then write to the variable
        if 'private_key' in kwargs:
            self.private_key = kwargs.get('private_key')

        # We check whether the default wallet address is set when
        # defining the class, and then written to the variable
        if 'default_address' in kwargs:
            self.default_address = kwargs.get('default_address')

        # If custom methods are not declared,
        # we take the default from the list
        modules = kwargs.setdefault('modules', DEFAULT_MODULES)
        for module_name, module_class in modules.items():
            module_class.attach(self, module_name)

        self.transaction_builder = TransactionBuilder(self)

    @property
    def default_block(self):
        """Get the default block used as a reference for calls."""
        return self._default_block

    @default_block.setter
    def default_block(self, block_id):
        """Sets the default block used as a reference for all future calls."""
        if block_id in ('latest', 'earliest', 0):
            self._default_block = block_id
            return

        if not is_integer(block_id) or not block_id:
            raise ValueError('Invalid block ID provided')

        self._default_block = abs(block_id)

    @property
    def providers(self):
        """List providers"""
        return self.manager.providers

    @property
    def private_key(self):
        """Get a private key"""
        return self._private_key

    @private_key.setter
    def private_key(self, value: str) -> None:
        """Set a private key used with the TronAPI instance,
        used for obtaining the address, signing transactions etc...

        Args:
            value (str): Private key
        """
        try:
            private_key = PrivateKey(value)
        except ValueError:
            raise TronError('Invalid private key provided')

        self._private_key = str(private_key).lower()

    @property
    def default_address(self) -> AttributeDict:
        """Get a TRON Address"""
        return self._default_address

    @default_address.setter
    def default_address(self, address: str) -> None:
        """Sets the address used with all Tron API.
        Will not sign any transactions.

        Args:
             address (str) Tron Address
        """

        if not self.isAddress(address):
            raise InvalidTronError('Invalid address provided')

        _hex = self.address.to_hex(address)
        _base58 = self.address.from_hex(address)

        # Bug fix: only derive the key's address when a private key is
        # actually set -- the old code derived from None and raised when
        # no key had been configured. Drop the stored key if it does not
        # correspond to the new default address.
        if self._private_key:
            _private_base58 = self.address.from_private_key(self._private_key).base58
            if _private_base58 != _base58:
                self._private_key = None

        self._default_address = AttributeDict({
            'hex': _hex,
            'base58': _base58
        })

    def get_event_result(self, **kwargs):
        """Will return all events matching the filters.

        Args:
            kwargs (any): List parameters
        """

        # Check the most necessary parameters
        since_timestamp = kwargs.setdefault('since_timestamp', 0)
        event_name = kwargs.setdefault('event_name', 'Notify')
        block_number = kwargs.setdefault('block_number', '')
        size = kwargs.setdefault('size', 20)
        page = kwargs.setdefault('page', 1)
        only_confirmed = kwargs.setdefault('only_confirmed', None)
        only_unconfirmed = kwargs.setdefault('only_unconfirmed', None)
        previous_last = kwargs.setdefault('previous_last_event_fingerprint', None)
        contract_address = kwargs.setdefault('contract_address', self.default_address.hex)

        if not self.isAddress(contract_address):
            raise InvalidTronError('Invalid contract address provided')

        if event_name and not contract_address:
            raise TronError('Usage of event name filtering requires a contract address')

        if block_number and event_name is None:
            raise TronError('Usage of block number filtering requires an event name')

        if not is_integer(page):
            raise ValueError('Invalid size provided')

        if not is_integer(since_timestamp):
            raise ValueError('Invalid sinceTimestamp provided')

        # If the size exceeds 200, displays an error
        if size > 200:
            raise ValueError('Defaulting to maximum accepted size: 200')

        # We collect all parameters in one array
        route_params = []
        if contract_address:
            route_params.append(contract_address)
        if event_name:
            route_params.append(event_name)
        if block_number:
            route_params.append(block_number)

        route = '/'.join(route_params)

        qs = {
            'since': since_timestamp,
            'page': page,
            'size': size
        }

        if only_confirmed is not None:
            qs.update({'onlyConfirmed': only_confirmed})

        if only_unconfirmed is not None and not only_confirmed:
            qs.update({'onlyUnconfirmed': only_unconfirmed})

        if previous_last is not None:
            qs.update({'previousLastEventFingerprint': previous_last})

        return self.manager.request("/event/contract/{0}?{1}"
                                    .format(route, urlencode(qs)), method='get')

    def get_event_transaction_id(self, tx_id):
        """Will return all events within a transactionID.

        Args:
            tx_id (str): TransactionID to query for events.
        """
        response = self.manager.request('/event/transaction/' + tx_id, method='get')
        return response

    @property
    def address(self) -> Address:
        """Helper object that allows you to convert
        between hex/base58 and private key representations of a TRON address.

        Note:
            If you wish to convert generic data to hexadecimal strings,
            please use the function tron.to_hex.
        """
        return Address()

    @property
    def create_account(self) -> PrivateKey:
        """Create account

        Warning: Please control risks when using this API.
        To ensure environmental security, please do not invoke APIs
        provided by other or invoke this very API on a public network.
        """
        return Account.create()

    @staticmethod
    def is_valid_provider(provider) -> bool:
        """Check connected provider

        Args:
            provider(HttpProvider): Provider
        """
        return isinstance(provider, HttpProvider)

    def solidity_sha3(self, abi_types, values):
        """
        Executes keccak256 exactly as Solidity does.
        Takes list of abi_types as inputs -- `[uint24, int8[], bool]`
        and list of corresponding values  -- `[20, [-1, 5, 0], True]`

        Args:
            abi_types (any): types abi
            values (any): values

        Examples:
            >>> tron = Tron()
            >>> sol = tron.solidity_sha3(['uint8[]'], [[1, 2, 3, 4, 5]])
            >>> assert sol.hex() == '0x5917e5a395fb9b454434de59651d36822a9e29c5ec57474df3e67937b969460c'
        """
        if len(abi_types) != len(values):
            raise ValueError(
                "Length mismatch between provided abi types and values.  Got "
                "{0} types and {1} values.".format(len(abi_types), len(values))
            )

        normalized_values = map_abi_data([abi_resolver()], abi_types, values)

        hex_string = add_0x_prefix(''.join(
            remove_0x_prefix(hex_encode_abi_type(abi_type, value))
            for abi_type, value
            in zip(abi_types, normalized_values)
        ))

        return self.keccak(hexstr=hex_string)

    @staticmethod
    @apply_to_return_value(HexBytes)
    def keccak(primitive=None, text=None, hexstr=None):
        """Keccak-256 hash of the given primitive/text/hexstr input."""
        if isinstance(primitive, (bytes, int, type(None))):
            input_bytes = to_bytes(primitive, hexstr=hexstr, text=text)
            return tron_keccak(input_bytes)

        raise TypeError(
            "You called keccak with first arg %r and keywords %r. You must call it with one of "
            "these approaches: keccak(text='txt'), keccak(hexstr='0x747874'), "
            "keccak(b'\\x74\\x78\\x74'), or keccak(0x747874)." % (
                primitive,
                {'text': text, 'hexstr': hexstr}
            )
        )

    def is_connected(self):
        """List of available providers"""
        return self.manager.is_connected()
| 31.77809 | 108 | 0.623442 |
from tronapi.common.datastructures import AttributeDict
from urllib.parse import urlencode
from eth_utils import (
apply_to_return_value,
to_hex,
keccak as tron_keccak,
)
from hexbytes import HexBytes
from trx_utils import (
to_sun,
from_sun,
is_integer,
add_0x_prefix,
remove_0x_prefix,
is_address
)
from tronapi.common.abi import map_abi_data
from tronapi.common.account import Address, PrivateKey, Account
from tronapi.common.normalizers import abi_resolver
from tronapi.common.encoding import (
to_bytes,
to_int,
to_text,
to_json,
hex_encode_abi_type
)
from tronapi.exceptions import (
InvalidTronError,
TronError
)
from tronapi.manager import TronManager
from tronapi import HttpProvider, constants
from tronapi.transactionbuilder import TransactionBuilder
from tronapi.trx import Trx
DEFAULT_MODULES = {
'trx': Trx
}
class Tron:
HTTPProvider = HttpProvider
_default_block = None
_private_key = None
_default_address = AttributeDict({})
toBytes = staticmethod(to_bytes)
toInt = staticmethod(to_int)
toHex = staticmethod(to_hex)
toText = staticmethod(to_text)
toJSON = staticmethod(to_json)
toSun = staticmethod(to_sun)
fromSun = staticmethod(from_sun)
isAddress = staticmethod(is_address)
def __init__(self, **kwargs):
kwargs.setdefault('full_node', constants.DEFAULT_NODES['full_node'])
kwargs.setdefault('solidity_node', constants.DEFAULT_NODES['solidity_node'])
kwargs.setdefault('event_server', constants.DEFAULT_NODES['event_server'])
self.manager = TronManager(self, dict(
full_node=kwargs.get('full_node'),
solidity_node=kwargs.get('solidity_node'),
event_server=kwargs.get('event_server')
))
if 'private_key' in kwargs:
self.private_key = kwargs.get('private_key')
if 'default_address' in kwargs:
self.default_address = kwargs.get('default_address')
modules = kwargs.setdefault('modules', DEFAULT_MODULES)
for module_name, module_class in modules.items():
module_class.attach(self, module_name)
self.transaction_builder = TransactionBuilder(self)
@property
def default_block(self):
return self._default_block
@default_block.setter
def default_block(self, block_id):
if block_id in ('latest', 'earliest', 0):
self._default_block = block_id
return
if not is_integer(block_id) or not block_id:
raise ValueError('Invalid block ID provided')
self._default_block = abs(block_id)
@property
def providers(self):
return self.manager.providers
@property
def private_key(self):
return self._private_key
@private_key.setter
def private_key(self, value: str) -> None:
try:
private_key = PrivateKey(value)
except ValueError:
raise TronError('Invalid private key provided')
self._private_key = str(private_key).lower()
@property
def default_address(self) -> AttributeDict:
return self._default_address
@default_address.setter
def default_address(self, address: str) -> None:
if not self.isAddress(address):
raise InvalidTronError('Invalid address provided')
_hex = self.address.to_hex(address)
_base58 = self.address.from_hex(address)
_private_base58 = self.address.from_private_key(self._private_key).base58
if self._private_key and _private_base58 != _base58:
self._private_key = None
self._default_address = AttributeDict({
'hex': _hex,
'base58': _base58
})
def get_event_result(self, **kwargs):
since_timestamp = kwargs.setdefault('since_timestamp', 0)
event_name = kwargs.setdefault('event_name', 'Notify')
block_number = kwargs.setdefault('block_number', '')
size = kwargs.setdefault('size', 20)
page = kwargs.setdefault('page', 1)
only_confirmed = kwargs.setdefault('only_confirmed', None)
only_unconfirmed = kwargs.setdefault('only_unconfirmed', None)
previous_last = kwargs.setdefault('previous_last_event_fingerprint', None)
contract_address = kwargs.setdefault('contract_address', self.default_address.hex)
if not self.isAddress(contract_address):
raise InvalidTronError('Invalid contract address provided')
if event_name and not contract_address:
raise TronError('Usage of event name filtering requires a contract address')
if block_number and event_name is None:
raise TronError('Usage of block number filtering requires an event name')
if not is_integer(page):
raise ValueError('Invalid size provided')
if not is_integer(since_timestamp):
raise ValueError('Invalid sinceTimestamp provided')
if size > 200:
raise ValueError('Defaulting to maximum accepted size: 200')
route_params = []
if contract_address:
route_params.append(contract_address)
if event_name:
route_params.append(event_name)
if block_number:
route_params.append(block_number)
route = '/'.join(route_params)
qs = {
'since': since_timestamp,
'page': page,
'size': size
}
if only_confirmed is not None:
qs.update({'onlyConfirmed': only_confirmed})
if only_unconfirmed is not None and not only_confirmed:
qs.update({'onlyUnconfirmed': only_unconfirmed})
if previous_last is not None:
qs.update({'previousLastEventFingerprint': previous_last})
return self.manager.request("/event/contract/{0}?{1}"
.format(route, urlencode(qs)), method='get')
def get_event_transaction_id(self, tx_id):
response = self.manager.request('/event/transaction/' + tx_id, method='get')
return response
@property
def address(self) -> Address:
return Address()
@property
def create_account(self) -> PrivateKey:
return Account.create()
@staticmethod
def is_valid_provider(provider) -> bool:
return isinstance(provider, HttpProvider)
def solidity_sha3(self, abi_types, values):
if len(abi_types) != len(values):
raise ValueError(
"Length mismatch between provided abi types and values. Got "
"{0} types and {1} values.".format(len(abi_types), len(values))
)
normalized_values = map_abi_data([abi_resolver()], abi_types, values)
hex_string = add_0x_prefix(''.join(
remove_0x_prefix(hex_encode_abi_type(abi_type, value))
for abi_type, value
in zip(abi_types, normalized_values)
))
return self.keccak(hexstr=hex_string)
@staticmethod
@apply_to_return_value(HexBytes)
def keccak(primitive=None, text=None, hexstr=None):
if isinstance(primitive, (bytes, int, type(None))):
input_bytes = to_bytes(primitive, hexstr=hexstr, text=text)
return tron_keccak(input_bytes)
raise TypeError(
"You called keccak with first arg %r and keywords %r. You must call it with one of "
"these approaches: keccak(text='txt'), keccak(hexstr='0x747874'), "
"keccak(b'\\x74\\x78\\x74'), or keccak(0x747874)." % (
primitive,
{'text': text, 'hexstr': hexstr}
)
)
def is_connected(self):
return self.manager.is_connected()
| true | true |
f7f72c248b7243f2a88b8f9e81a1db3360dec1e2 | 13,446 | py | Python | main.py | williamscott701/ETM-1 | 4a8f5b139df4567527b3ec99a27b310be6a65a6d | [
"MIT"
] | null | null | null | main.py | williamscott701/ETM-1 | 4a8f5b139df4567527b3ec99a27b310be6a65a6d | [
"MIT"
] | null | null | null | main.py | williamscott701/ETM-1 | 4a8f5b139df4567527b3ec99a27b310be6a65a6d | [
"MIT"
] | null | null | null | #/usr/bin/python
from __future__ import print_function
import argparse
import torch
import pickle
import numpy as np
import os
import math
import random
import sys
import matplotlib.pyplot as plt
import data
import scipy.io
from torch import nn, optim
from torch.nn import functional as F
from etm import ETM
from utils import nearest_neighbors, get_topic_coherence, get_topic_diversity
parser = argparse.ArgumentParser(description='The Embedded Topic Model')

### data and file related arguments
# NOTE(review): this section was corrupted in the file (bare "name =value"
# fragments); reconstructed as argparse options preserving the surviving
# names and default values.
parser.add_argument('--dataset', type=str, default='ah20k', help='name of corpus')
parser.add_argument('--data_path', type=str, default='data/ah20k', help='directory containing data')
parser.add_argument('--emb_path', type=str, default='data/ah20k_embeddings.txt', help='file containing word embeddings')
parser.add_argument('--save_path', type=str, default='./results', help='path to save results')
parser.add_argument('--batch_size', type=int, default=1000, help='input batch size for training')

### model-related arguments
parser.add_argument('--num_topics', type=int, default=50, help='number of topics')
parser.add_argument('--rho_size', type=int, default=300, help='dimension of rho')
parser.add_argument('--emb_size', type=int, default=300, help='dimension of embeddings')
parser.add_argument('--t_hidden_size', type=int, default=800, help='dimension of hidden space of q(theta)')
parser.add_argument('--theta_act', type=str, default='relu', help='activation for theta encoder')
parser.add_argument('--train_embeddings', type=int, default=0, help='whether to train rho (1) or keep it fixed (0)')

### optimization-related arguments
parser.add_argument('--lr', type=float, default=0.005, help='learning rate')
parser.add_argument('--lr_factor', type=float, default=4.0, help='divide learning rate by this when annealing')
parser.add_argument('--epochs', type=int, default=20, help='number of epochs to train')
parser.add_argument('--mode', type=str, default='train', help='train or eval model')
parser.add_argument('--optimizer', type=str, default='adam', help='choice of optimizer')
parser.add_argument('--seed', type=int, default=2019, help='random seed')
parser.add_argument('--enc_drop', type=float, default=0.0, help='dropout rate on encoder')
parser.add_argument('--clip', type=float, default=0.0, help='gradient clipping threshold (0 disables)')
parser.add_argument('--nonmono', type=int, default=10, help='number of bad hits allowed before annealing')
parser.add_argument('--wdecay', type=float, default=1.2e-6, help='L2 regularization weight')
parser.add_argument('--anneal_lr', type=int, default=0, help='whether to anneal the learning rate or not')
parser.add_argument('--bow_norm', type=int, default=1, help='normalize the bag-of-words or not')

### evaluation, visualization, and logging-related arguments
parser.add_argument('--num_words', type=int, default=10, help='number of words for topic visualization')
parser.add_argument('--log_interval', type=int, default=2, help='when to log training')
parser.add_argument('--visualize_every', type=int, default=10, help='when to visualize results')
parser.add_argument('--eval_batch_size', type=int, default=1000, help='input batch size for evaluation')
parser.add_argument('--load_from', type=str, default='', help='the name of the ckpt to eval from')
parser.add_argument('--tc', type=int, default=0, help='whether to compute topic coherence or not')
parser.add_argument('--td', type=int, default=0, help='whether to compute topic diversity or not')

args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print('\n')
# Seed all RNGs for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(args.seed)

## get data
# 1. vocabulary
vocab, train, valid, test = data.get_data(os.path.join(args.data_path))
vocab_size = len(vocab)
args.vocab_size = vocab_size

# 1. training data
train_tokens = train['tokens']
train_counts = train['counts']
args.num_docs_train = len(train_tokens)

# 2. dev set
valid_tokens = valid['tokens']
valid_counts = valid['counts']
args.num_docs_valid = len(valid_tokens)

# 3. test data (plus the two document-completion halves)
test_tokens = test['tokens']
test_counts = test['counts']
args.num_docs_test = len(test_tokens)
test_1_tokens = test['tokens_1']
test_1_counts = test['counts_1']
args.num_docs_test_1 = len(test_1_tokens)
test_2_tokens = test['tokens_2']
test_2_counts = test['counts_2']
args.num_docs_test_2 = len(test_2_tokens)

embeddings = None
if not args.train_embeddings:
    # Load pre-trained word embeddings, keeping only in-vocabulary words.
    emb_path = args.emb_path
    vectors = {}
    with open(emb_path, 'rb') as f:
        for l in f:
            line = l.decode().split()
            word = line[0]
            if word in vocab:
                # np.float was removed in NumPy 1.20+; the builtin float
                # is the documented replacement (same float64 dtype).
                vect = np.array(line[1:]).astype(float)
                vectors[word] = vect
    embeddings = np.zeros((vocab_size, args.emb_size))
    words_found = 0
    for i, word in enumerate(vocab):
        try:
            embeddings[i] = vectors[word]
            words_found += 1
        except KeyError:
            # Words without a pre-trained vector get a random embedding.
            embeddings[i] = np.random.normal(scale=0.6, size=(args.emb_size, ))
    embeddings = torch.from_numpy(embeddings).to(device)
    args.embeddings_dim = embeddings.size()

print('=*'*100)
print('Training an Embedded Topic Model on {} with the following settings: {}'.format(args.dataset.upper(), args))
print('=*'*100)

## define checkpoint
if not os.path.exists(args.save_path):
    os.makedirs(args.save_path)

if args.mode == 'eval':
    ckpt = args.load_from
else:
    ckpt = os.path.join(args.save_path,
        'etm_{}_K_{}_Htheta_{}_Optim_{}_Clip_{}_ThetaAct_{}_Lr_{}_Bsz_{}_RhoSize_{}_trainEmbeddings_{}'.format(
        args.dataset, args.num_topics, args.t_hidden_size, args.optimizer, args.clip, args.theta_act,
            args.lr, args.batch_size, args.rho_size, args.train_embeddings))

## define model and optimizer
model = ETM(args.num_topics, vocab_size, args.t_hidden_size, args.rho_size, args.emb_size,
                args.theta_act, embeddings, args.train_embeddings, args.enc_drop).to(device)

print('model: {}'.format(model))

if args.optimizer == 'adam':
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'adagrad':
    optimizer = optim.Adagrad(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'adadelta':
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'rmsprop':
    optimizer = optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'asgd':
    optimizer = optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
else:
    print('Defaulting to vanilla SGD')
    optimizer = optim.SGD(model.parameters(), lr=args.lr)
def train(epoch):
    """Run one full pass over the training set, updating model parameters.

    Relies on the module-level ``model``, ``optimizer``, ``args``, the
    training tensors and ``device``; logs running losses every
    ``args.log_interval`` batches and an epoch summary at the end.
    """
    model.train()
    acc_loss = 0
    acc_kl_theta_loss = 0
    cnt = 0
    # Shuffle document indices and split them into mini-batches.
    indices = torch.randperm(args.num_docs_train)
    indices = torch.split(indices, args.batch_size)
    for idx, ind in enumerate(indices):
        optimizer.zero_grad()
        model.zero_grad()
        data_batch = data.get_batch(train_tokens, train_counts, ind, args.vocab_size, device)
        sums = data_batch.sum(1).unsqueeze(1)
        if args.bow_norm:
            # Normalize each bag-of-words row by its total token count.
            normalized_data_batch = data_batch / sums
        else:
            normalized_data_batch = data_batch
        # ELBO = reconstruction loss + KL divergence of theta.
        recon_loss, kld_theta = model(data_batch, normalized_data_batch)
        total_loss = recon_loss + kld_theta
        total_loss.backward()
        if args.clip > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
        acc_loss += torch.sum(recon_loss).item()
        acc_kl_theta_loss += torch.sum(kld_theta).item()
        cnt += 1
        if idx % args.log_interval == 0 and idx > 0:
            # Running (per-batch) averages, not epoch totals.
            cur_loss = round(acc_loss / cnt, 2)
            cur_kl_theta = round(acc_kl_theta_loss / cnt, 2)
            cur_real_loss = round(cur_loss + cur_kl_theta, 2)
            print('Epoch: {} .. batch: {}/{} .. LR: {} .. KL_theta: {} .. Rec_loss: {} .. NELBO: {}'.format(
                epoch, idx, len(indices), optimizer.param_groups[0]['lr'], cur_kl_theta, cur_loss, cur_real_loss))
    cur_loss = round(acc_loss / cnt, 2)
    cur_kl_theta = round(acc_kl_theta_loss / cnt, 2)
    cur_real_loss = round(cur_loss + cur_kl_theta, 2)
    print('*'*100)
    print('Epoch----->{} .. LR: {} .. KL_theta: {} .. Rec_loss: {} .. NELBO: {}'.format(
            epoch, optimizer.param_groups[0]['lr'], cur_kl_theta, cur_loss, cur_real_loss))
    print('*'*100)
def visualize(m, show_emb=True):
    """Print the top words of every topic and, optionally, embedding neighbors.

    :param m: the ETM model (trained or not)
    :param show_emb: also print nearest neighbors of a few probe words
                     using the model's output embedding matrix ``rho``
    """
    if not os.path.exists('./results'):
        os.makedirs('./results')
    m.eval()
    # Probe words used for the nearest-neighbor display below.
    queries = ['cleaner', 'refrigerate', 'tupperware', 'curry', 'baby', 'weather', 'buffet',
               'ninja', 'fingernail']
    ## visualize topics using monte carlo
    with torch.no_grad():
        print('#'*100)
        print('Visualize topics...')
        topics_words = []
        gammas = m.get_beta()
        for k in range(args.num_topics):
            gamma = gammas[k]
            # Top num_words-1 vocabulary indices by topic weight, descending.
            top_words = list(gamma.cpu().numpy().argsort()[-args.num_words+1:][::-1])
            topic_words = [vocab[a] for a in top_words]
            topics_words.append(' '.join(topic_words))
            print('Topic {}: {}'.format(k, topic_words))
        if show_emb:
            ## visualize word embeddings by using V to get nearest neighbors
            print('#'*100)
            print('Visualize word embeddings by using output embedding matrix')
            try:
                embeddings = m.rho.weight  # Vocab_size x E (rho is a Linear layer)
            except:
                embeddings = m.rho  # Vocab_size x E (rho is a plain tensor)
            neighbors = []  # NOTE(review): unused accumulator, kept as-is
            for word in queries:
                print('word: {} .. neighbors: {}'.format(
                    word, nearest_neighbors(word, embeddings, vocab)))
            print('#'*100)
def evaluate(m, source, tc=False, td=False):
    """Compute perplexity on document completion.

    :param m: the ETM model to evaluate
    :param source: 'val' or anything else for 'test'; note the perplexity
        loop below always uses the test halves (test_1/test_2) regardless
    :param tc: also compute topic coherence over the training tokens
    :param td: also compute topic diversity
    :returns: document-completion perplexity (rounded to one decimal)
    """
    m.eval()
    with torch.no_grad():
        if source == 'val':
            indices = torch.split(torch.tensor(range(args.num_docs_valid)), args.eval_batch_size)
            tokens = valid_tokens
            counts = valid_counts
        else:
            indices = torch.split(torch.tensor(range(args.num_docs_test)), args.eval_batch_size)
            tokens = test_tokens
            counts = test_counts
        ## get \beta here
        beta = m.get_beta()
        ### do dc and tc here
        acc_loss = 0
        cnt = 0
        indices_1 = torch.split(torch.tensor(range(args.num_docs_test_1)), args.eval_batch_size)
        for idx, ind in enumerate(indices_1):
            ## get theta from first half of docs
            data_batch_1 = data.get_batch(test_1_tokens, test_1_counts, ind, args.vocab_size, device)
            sums_1 = data_batch_1.sum(1).unsqueeze(1)
            if args.bow_norm:
                normalized_data_batch_1 = data_batch_1 / sums_1
            else:
                normalized_data_batch_1 = data_batch_1
            theta, _ = m.get_theta(normalized_data_batch_1)
            ## get prediction loss using second half
            data_batch_2 = data.get_batch(test_2_tokens, test_2_counts, ind, args.vocab_size, device)
            sums_2 = data_batch_2.sum(1).unsqueeze(1)
            res = torch.mm(theta, beta)
            preds = torch.log(res)
            # Negative log-likelihood of the second half under theta*beta,
            # normalized per token.
            recon_loss = -(preds * data_batch_2).sum(1)
            loss = recon_loss / sums_2.squeeze()
            loss = loss.mean().item()
            acc_loss += loss
            cnt += 1
        cur_loss = acc_loss / cnt
        # Perplexity = exp of the average per-token NLL.
        ppl_dc = round(math.exp(cur_loss), 1)
        print('*'*100)
        print('{} Doc Completion PPL: {}'.format(source.upper(), ppl_dc))
        print('*'*100)
        if tc or td:
            beta = beta.data.cpu().numpy()
            if tc:
                print('Computing topic coherence...')
                get_topic_coherence(beta, train_tokens, vocab)
            if td:
                print('Computing topic diversity...')
                get_topic_diversity(beta, 25)
        return ppl_dc
# Driver: train with early checkpointing on validation perplexity, or
# evaluate a saved checkpoint (args.mode == 'eval').
if args.mode == 'train':
    ## train model on data
    best_epoch = 0
    best_val_ppl = 1e9
    all_val_ppls = []
    print('\n')
    print('Visualizing model quality before training...')
    visualize(model)
    print('\n')
    # NOTE(review): range(1, args.epochs) runs only args.epochs - 1 training
    # epochs -- confirm whether this off-by-one is intended.
    for epoch in range(1, args.epochs):
        train(epoch)
        val_ppl = evaluate(model, 'val')
        if val_ppl < best_val_ppl:
            # New best validation perplexity: checkpoint the whole model.
            with open(ckpt, 'wb') as f:
                torch.save(model, f)
            best_epoch = epoch
            best_val_ppl = val_ppl
        else:
            ## check whether to anneal lr
            lr = optimizer.param_groups[0]['lr']
            if args.anneal_lr and (len(all_val_ppls) > args.nonmono and val_ppl > min(all_val_ppls[:-args.nonmono]) and lr > 1e-5):
                optimizer.param_groups[0]['lr'] /= args.lr_factor
        if epoch % args.visualize_every == 0:
            visualize(model)
        all_val_ppls.append(val_ppl)
    # Reload the best checkpoint and report its validation perplexity.
    with open(ckpt, 'rb') as f:
        model = torch.load(f)
    model = model.to(device)
    val_ppl = evaluate(model, 'val')
else:
    with open(ckpt, 'rb') as f:
        model = torch.load(f)
    model = model.to(device)
    model.eval()

    with torch.no_grad():
        ## get document completion perplexities
        test_ppl = evaluate(model, 'test', tc=args.tc, td=args.td)

        ## get most used topics
        indices = torch.tensor(range(args.num_docs_train))
        indices = torch.split(indices, args.batch_size)
        thetaAvg = torch.zeros(1, args.num_topics).to(device)
        thetaWeightedAvg = torch.zeros(1, args.num_topics).to(device)
        cnt = 0
        for idx, ind in enumerate(indices):
            data_batch = data.get_batch(train_tokens, train_counts, ind, args.vocab_size, device)
            sums = data_batch.sum(1).unsqueeze(1)
            cnt += sums.sum(0).squeeze().cpu().numpy()
            if args.bow_norm:
                normalized_data_batch = data_batch / sums
            else:
                normalized_data_batch = data_batch
            theta, _ = model.get_theta(normalized_data_batch)
            thetaAvg += theta.sum(0).unsqueeze(0) / args.num_docs_train
            # Weight each document's theta by its token count.
            weighed_theta = sums * theta
            thetaWeightedAvg += weighed_theta.sum(0).unsqueeze(0)
            if idx % 100 == 0 and idx > 0:
                print('batch: {}/{}'.format(idx, len(indices)))
        thetaWeightedAvg = thetaWeightedAvg.squeeze().cpu().numpy() / cnt
        print('\nThe 10 most used topics are {}'.format(thetaWeightedAvg.argsort()[::-1][:10]))

        ## show topics
        beta = model.get_beta()
        topic_indices = list(np.random.choice(args.num_topics, 10)) # 10 random topics (currently unused)
        print('\n')
        for k in range(args.num_topics):#topic_indices:
            gamma = beta[k]
            top_words = list(gamma.cpu().numpy().argsort()[-args.num_words+1:][::-1])
            topic_words = [vocab[a] for a in top_words]
            print('Topic {}: {}'.format(k, topic_words))

        if args.train_embeddings:
            ## show etm embeddings
            try:
                rho_etm = model.rho.weight.cpu()
            except:
                rho_etm = model.rho.cpu()
            queries = ['cleaner', 'refrigerate', 'tupperware', 'curry', 'baby', 'weather', 'buffet',
                       'ninja', 'fingernail']
            print('\n')
            print('ETM embeddings...')
            for word in queries:
                print('word: {} .. etm neighbors: {}'.format(word, nearest_neighbors(word, rho_etm, vocab)))
            print('\n')
| 35.291339 | 131 | 0.615722 |
from __future__ import print_function
import argparse
import torch
import pickle
import numpy as np
import os
import math
import random
import sys
import matplotlib.pyplot as plt
import data
import scipy.io
from torch import nn, optim
from torch.nn import functional as F
from etm import ETM
from utils import nearest_neighbors, get_topic_coherence, get_topic_diversity
parser = argparse.ArgumentParser(description='The Embedded Topic Model')
eddings.txt'
save_path ='./results'
batch_size =1000
n_size =800
theta_act ='relu'
train_embeddings =0
='adam'
seed =2019
enc_drop lt=0.0
clip lt=0.0
nonmono =10
wdecay lt=1.2e-6
anneal_lr =0
bow_norm =1
se_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('\n')
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
rain, valid, test = data.get_data(os.path.join(args.data_path))
vocab_size = len(vocab)
args.vocab_size = vocab_size
train_tokens = train['tokens']
train_counts = train['counts']
args.num_docs_train = len(train_tokens)
valid_tokens = valid['tokens']
valid_counts = valid['counts']
args.num_docs_valid = len(valid_tokens)
test_tokens = test['tokens']
test_counts = test['counts']
args.num_docs_test = len(test_tokens)
test_1_tokens = test['tokens_1']
test_1_counts = test['counts_1']
args.num_docs_test_1 = len(test_1_tokens)
test_2_tokens = test['tokens_2']
test_2_counts = test['counts_2']
args.num_docs_test_2 = len(test_2_tokens)
embeddings = None
if not args.train_embeddings:
emb_path = args.emb_path
vect_path = os.path.join(args.data_path.split('/')[0], 'embeddings.pkl')
vectors = {}
with open(emb_path, 'rb') as f:
for l in f:
line = l.decode().split()
word = line[0]
if word in vocab:
vect = np.array(line[1:]).astype(np.float)
vectors[word] = vect
embeddings = np.zeros((vocab_size, args.emb_size))
words_found = 0
for i, word in enumerate(vocab):
try:
embeddings[i] = vectors[word]
words_found += 1
except KeyError:
embeddings[i] = np.random.normal(scale=0.6, size=(args.emb_size, ))
embeddings = torch.from_numpy(embeddings).to(device)
args.embeddings_dim = embeddings.size()
print('=*'*100)
print('Training an Embedded Topic Model on {} with the following settings: {}'.format(args.dataset.upper(), args))
print('=*'*100)
sts(args.save_path):
os.makedirs(args.save_path)
if args.mode == 'eval':
ckpt = args.load_from
else:
ckpt = os.path.join(args.save_path,
'etm_{}_K_{}_Htheta_{}_Optim_{}_Clip_{}_ThetaAct_{}_Lr_{}_Bsz_{}_RhoSize_{}_trainEmbeddings_{}'.format(
args.dataset, args.num_topics, args.t_hidden_size, args.optimizer, args.clip, args.theta_act,
args.lr, args.batch_size, args.rho_size, args.train_embeddings))
, vocab_size, args.t_hidden_size, args.rho_size, args.emb_size,
args.theta_act, embeddings, args.train_embeddings, args.enc_drop).to(device)
print('model: {}'.format(model))
if args.optimizer == 'adam':
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'adagrad':
optimizer = optim.Adagrad(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'adadelta':
optimizer = optim.Adadelta(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'rmsprop':
optimizer = optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'asgd':
optimizer = optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
else:
print('Defaulting to vanilla SGD')
optimizer = optim.SGD(model.parameters(), lr=args.lr)
def train(epoch):
model.train()
acc_loss = 0
acc_kl_theta_loss = 0
cnt = 0
indices = torch.randperm(args.num_docs_train)
indices = torch.split(indices, args.batch_size)
for idx, ind in enumerate(indices):
optimizer.zero_grad()
model.zero_grad()
data_batch = data.get_batch(train_tokens, train_counts, ind, args.vocab_size, device)
sums = data_batch.sum(1).unsqueeze(1)
if args.bow_norm:
normalized_data_batch = data_batch / sums
else:
normalized_data_batch = data_batch
recon_loss, kld_theta = model(data_batch, normalized_data_batch)
total_loss = recon_loss + kld_theta
total_loss.backward()
if args.clip > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
acc_loss += torch.sum(recon_loss).item()
acc_kl_theta_loss += torch.sum(kld_theta).item()
cnt += 1
if idx % args.log_interval == 0 and idx > 0:
cur_loss = round(acc_loss / cnt, 2)
cur_kl_theta = round(acc_kl_theta_loss / cnt, 2)
cur_real_loss = round(cur_loss + cur_kl_theta, 2)
print('Epoch: {} .. batch: {}/{} .. LR: {} .. KL_theta: {} .. Rec_loss: {} .. NELBO: {}'.format(
epoch, idx, len(indices), optimizer.param_groups[0]['lr'], cur_kl_theta, cur_loss, cur_real_loss))
cur_loss = round(acc_loss / cnt, 2)
cur_kl_theta = round(acc_kl_theta_loss / cnt, 2)
cur_real_loss = round(cur_loss + cur_kl_theta, 2)
print('*'*100)
print('Epoch----->{} .. LR: {} .. KL_theta: {} .. Rec_loss: {} .. NELBO: {}'.format(
epoch, optimizer.param_groups[0]['lr'], cur_kl_theta, cur_loss, cur_real_loss))
print('*'*100)
def visualize(m, show_emb=True):
if not os.path.exists('./results'):
os.makedirs('./results')
m.eval()
queries = ['cleaner', 'refrigerate', 'tupperware', 'curry', 'baby', 'weather', 'buffet',
'ninja', 'fingernail']
rint('#'*100)
print('Visualize topics...')
topics_words = []
gammas = m.get_beta()
for k in range(args.num_topics):
gamma = gammas[k]
top_words = list(gamma.cpu().numpy().argsort()[-args.num_words+1:][::-1])
topic_words = [vocab[a] for a in top_words]
topics_words.append(' '.join(topic_words))
print('Topic {}: {}'.format(k, topic_words))
if show_emb:
mbeddings by using output embedding matrix')
try:
embeddings = m.rho.weight
except:
embeddings = m.rho
neighbors = []
for word in queries:
print('word: {} .. neighbors: {}'.format(
word, nearest_neighbors(word, embeddings, vocab)))
print('#'*100)
def evaluate(m, source, tc=False, td=False):
"""Compute perplexity on document completion.
"""
m.eval()
with torch.no_grad():
if source == 'val':
indices = torch.split(torch.tensor(range(args.num_docs_valid)), args.eval_batch_size)
tokens = valid_tokens
counts = valid_counts
else:
indices = torch.split(torch.tensor(range(args.num_docs_test)), args.eval_batch_size)
tokens = test_tokens
counts = test_counts
m.get_beta()
indices_1 = torch.split(torch.tensor(range(args.num_docs_test_1)), args.eval_batch_size)
for idx, ind in enumerate(indices_1):
t_batch(test_1_tokens, test_1_counts, ind, args.vocab_size, device)
sums_1 = data_batch_1.sum(1).unsqueeze(1)
if args.bow_norm:
normalized_data_batch_1 = data_batch_1 / sums_1
else:
normalized_data_batch_1 = data_batch_1
theta, _ = m.get_theta(normalized_data_batch_1)
tch(test_2_tokens, test_2_counts, ind, args.vocab_size, device)
sums_2 = data_batch_2.sum(1).unsqueeze(1)
res = torch.mm(theta, beta)
preds = torch.log(res)
recon_loss = -(preds * data_batch_2).sum(1)
loss = recon_loss / sums_2.squeeze()
loss = loss.mean().item()
acc_loss += loss
cnt += 1
cur_loss = acc_loss / cnt
ppl_dc = round(math.exp(cur_loss), 1)
print('*'*100)
print('{} Doc Completion PPL: {}'.format(source.upper(), ppl_dc))
print('*'*100)
if tc or td:
beta = beta.data.cpu().numpy()
if tc:
print('Computing topic coherence...')
get_topic_coherence(beta, train_tokens, vocab)
if td:
print('Computing topic diversity...')
get_topic_diversity(beta, 25)
return ppl_dc
if args.mode == 'train':
best_val_ppl = 1e9
all_val_ppls = []
print('\n')
print('Visualizing model quality before training...')
visualize(model)
print('\n')
for epoch in range(1, args.epochs):
train(epoch)
val_ppl = evaluate(model, 'val')
if val_ppl < best_val_ppl:
with open(ckpt, 'wb') as f:
torch.save(model, f)
best_epoch = epoch
best_val_ppl = val_ppl
else:
param_groups[0]['lr']
if args.anneal_lr and (len(all_val_ppls) > args.nonmono and val_ppl > min(all_val_ppls[:-args.nonmono]) and lr > 1e-5):
optimizer.param_groups[0]['lr'] /= args.lr_factor
if epoch % args.visualize_every == 0:
visualize(model)
all_val_ppls.append(val_ppl)
with open(ckpt, 'rb') as f:
model = torch.load(f)
model = model.to(device)
val_ppl = evaluate(model, 'val')
else:
with open(ckpt, 'rb') as f:
model = torch.load(f)
model = model.to(device)
model.eval()
with torch.no_grad():
est', tc=args.tc, td=args.td)
ch.tensor(range(args.num_docs_train))
indices = torch.split(indices, args.batch_size)
thetaAvg = torch.zeros(1, args.num_topics).to(device)
thetaWeightedAvg = torch.zeros(1, args.num_topics).to(device)
cnt = 0
for idx, ind in enumerate(indices):
data_batch = data.get_batch(train_tokens, train_counts, ind, args.vocab_size, device)
sums = data_batch.sum(1).unsqueeze(1)
cnt += sums.sum(0).squeeze().cpu().numpy()
if args.bow_norm:
normalized_data_batch = data_batch / sums
else:
normalized_data_batch = data_batch
theta, _ = model.get_theta(normalized_data_batch)
thetaAvg += theta.sum(0).unsqueeze(0) / args.num_docs_train
weighed_theta = sums * theta
thetaWeightedAvg += weighed_theta.sum(0).unsqueeze(0)
if idx % 100 == 0 and idx > 0:
print('batch: {}/{}'.format(idx, len(indices)))
thetaWeightedAvg = thetaWeightedAvg.squeeze().cpu().numpy() / cnt
print('\nThe 10 most used topics are {}'.format(thetaWeightedAvg.argsort()[::-1][:10]))
= model.get_beta()
topic_indices = list(np.random.choice(args.num_topics, 10))
print('\n')
for k in range(args.num_topics):
gamma = beta[k]
top_words = list(gamma.cpu().numpy().argsort()[-args.num_words+1:][::-1])
topic_words = [vocab[a] for a in top_words]
print('Topic {}: {}'.format(k, topic_words))
if args.train_embeddings:
rho_etm = model.rho.weight.cpu()
except:
rho_etm = model.rho.cpu()
queries = ['cleaner', 'refrigerate', 'tupperware', 'curry', 'baby', 'weather', 'buffet',
'ninja', 'fingernail']
print('\n')
print('ETM embeddings...')
for word in queries:
print('word: {} .. etm neighbors: {}'.format(word, nearest_neighbors(word, rho_etm, vocab)))
print('\n')
| false | true |
f7f72d01bf1c5e7a126de3643ca43a450f7ce40c | 8,590 | py | Python | tests/test_scanner.py | eigenein/python-as3 | 323b58fd19359842332a1b045857f793cd124aa3 | [
"MIT"
] | 1 | 2019-05-27T11:25:18.000Z | 2019-05-27T11:25:18.000Z | tests/test_scanner.py | eigenein/python-as3 | 323b58fd19359842332a1b045857f793cd124aa3 | [
"MIT"
] | null | null | null | tests/test_scanner.py | eigenein/python-as3 | 323b58fd19359842332a1b045857f793cd124aa3 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import List
from pytest import mark, param
from as3.enums import TokenType
from as3.scanner import Token, scan
def make_test_params(value: str, type_: TokenType, xfail: bool = False):
    """Build a pytest param pairing *value* with its expected single Token.

    The expected token always sits at line 1, position 1.  When *xfail* is
    true the case is marked as a strict expected failure.
    """
    if xfail:
        marks = mark.xfail(strict=True)
    else:
        marks = ()
    expected_token = Token(type_=type_, value=value, line_number=1, position=1)
    return param(value, expected_token, marks=marks)
def test_empty():
    """Scanning an empty string must yield no tokens at all."""
    tokens = list(scan(''))
    assert tokens == []
@mark.parametrize('source, expected', [
    # Positive cases.
    make_test_params('just_some_identifier', TokenType.IDENTIFIER),
    make_test_params('package', TokenType.PACKAGE),
    make_test_params('{', TokenType.CURLY_BRACKET_OPEN),
    make_test_params('(', TokenType.PARENTHESIS_OPEN),
    make_test_params('[', TokenType.BRACKET_OPEN),
    make_test_params(';', TokenType.SEMICOLON),
    make_test_params(':', TokenType.COLON),
    make_test_params('+', TokenType.PLUS),
    make_test_params('-', TokenType.MINUS),
    make_test_params('<', TokenType.LESS),
    make_test_params('<<', TokenType.LEFT_SHIFT),
    make_test_params('+=', TokenType.ASSIGN_ADD),
    make_test_params('/', TokenType.DIVIDE),
    make_test_params('*', TokenType.MULTIPLY),
    make_test_params('!', TokenType.LOGICAL_NOT),
    make_test_params('!=', TokenType.NOT_EQUALS),
    make_test_params('>>', TokenType.RIGHT_SHIFT),
    make_test_params('==', TokenType.EQUALS),
    make_test_params('++', TokenType.INCREMENT),
    make_test_params('--', TokenType.DECREMENT),
    make_test_params('.', TokenType.DOT),
    make_test_params('/* ololo */', TokenType.COMMENT),
    make_test_params('// abc', TokenType.COMMENT),
    make_test_params('"string"', TokenType.STRING),
    make_test_params("'string'", TokenType.STRING),
    make_test_params(r'"string\n"', TokenType.STRING),
    make_test_params(r'"string\""', TokenType.STRING),
    make_test_params(r"'string\''", TokenType.STRING),
    make_test_params('<=', TokenType.LESS_OR_EQUAL),
    make_test_params('>', TokenType.GREATER),
    make_test_params('>=', TokenType.GREATER_OR_EQUAL),
    make_test_params('===', TokenType.STRICTLY_EQUAL),
    make_test_params('||', TokenType.LOGICAL_OR),
    make_test_params('&&', TokenType.LOGICAL_AND),
    make_test_params('new', TokenType.NEW),
    make_test_params('42', TokenType.NUMBER),
    make_test_params('0xABCDEF', TokenType.NUMBER),
    make_test_params('0777', TokenType.NUMBER),
    make_test_params('0.75', TokenType.NUMBER),
    make_test_params('1.', TokenType.NUMBER),
    make_test_params('.9', TokenType.NUMBER),
    make_test_params('1e-10', TokenType.NUMBER),
    make_test_params('?', TokenType.QUESTION_MARK),
    # AVM2 pseudo-instructions scan as plain identifiers.
    make_test_params('§§push', TokenType.IDENTIFIER),
    make_test_params('§§pop', TokenType.IDENTIFIER),
    make_test_params('/*\n * * */', TokenType.COMMENT),
    make_test_params('in', TokenType.IN),
    make_test_params('implements', TokenType.IMPLEMENTS),
    make_test_params('^', TokenType.BITWISE_XOR),
    make_test_params('%', TokenType.PERCENT),

    # Expected failures.
    make_test_params('>>>', TokenType.UNSIGNED_RIGHT_SHIFT, True),
])
def test_single_token(source: str, expected: Token):
    """Each source snippet must scan to exactly one token equal to *expected*."""
    tokens = list(scan(source))
    assert len(tokens) == 1
    assert tokens[0] == expected
@mark.parametrize('source, expected', [
    # Simple assignment statement: token positions are 1-based columns.
    (
        'a = 42;',
        [
            Token(type_=TokenType.IDENTIFIER, value='a', line_number=1, position=1),
            Token(type_=TokenType.ASSIGN, value='=', line_number=1, position=3),
            Token(type_=TokenType.NUMBER, value='42', line_number=1, position=5),
            Token(type_=TokenType.SEMICOLON, value=';', line_number=1, position=7),
        ],
    ),
    # A block comment is emitted as its own token and does not shift columns.
    (
        'a = /* what? */ 42',
        [
            Token(type_=TokenType.IDENTIFIER, value='a', line_number=1, position=1),
            Token(type_=TokenType.ASSIGN, value='=', line_number=1, position=3),
            Token(type_=TokenType.COMMENT, value='/* what? */', line_number=1, position=5),
            Token(type_=TokenType.NUMBER, value='42', line_number=1, position=17),
        ],
    ),
    # Leading whitespace/newline: line number advances, column resets to 1.
    (
        ' \n/**/',
        [
            Token(type_=TokenType.COMMENT, value='/**/', line_number=2, position=1),
        ],
    ),
])
def test_multiple_tokens(source: str, expected: List[Token]):
    """Scanning multi-token input must yield the exact expected token list."""
    assert list(scan(source)) == expected
def test_scanner_get_elemental_penetration():
    """End-to-end scan of a realistic AS3 function.

    Verifies token types, values, 1-based line numbers and 1-based column
    positions over a multi-line snippet (positions below depend on the exact
    indentation inside the triple-quoted literal — do not reformat it).
    """
    text = '''
        public static function getElementalPenetration(param1:Number, param2:Number) : int
        {
            if(param2 < 0)
            {
                param2 = 0;
            }
            return int(param1 / (Number(1 + param2 / 300000)));
        }
    '''
    assert list(scan(text)) == [
        Token(type_=TokenType.PUBLIC, value='public', line_number=2, position=9),
        Token(type_=TokenType.STATIC, value='static', line_number=2, position=16),
        Token(type_=TokenType.FUNCTION, value='function', line_number=2, position=23),
        Token(type_=TokenType.IDENTIFIER, value='getElementalPenetration', line_number=2, position=32),
        Token(type_=TokenType.PARENTHESIS_OPEN, value='(', line_number=2, position=55),
        Token(type_=TokenType.IDENTIFIER, value='param1', line_number=2, position=56),
        Token(type_=TokenType.COLON, value=':', line_number=2, position=62),
        Token(type_=TokenType.IDENTIFIER, value='Number', line_number=2, position=63),
        Token(type_=TokenType.COMMA, value=',', line_number=2, position=69),
        Token(type_=TokenType.IDENTIFIER, value='param2', line_number=2, position=71),
        Token(type_=TokenType.COLON, value=':', line_number=2, position=77),
        Token(type_=TokenType.IDENTIFIER, value='Number', line_number=2, position=78),
        Token(type_=TokenType.PARENTHESIS_CLOSE, value=')', line_number=2, position=84),
        Token(type_=TokenType.COLON, value=':', line_number=2, position=86),
        Token(type_=TokenType.IDENTIFIER, value='int', line_number=2, position=88),
        Token(type_=TokenType.CURLY_BRACKET_OPEN, value='{', line_number=3, position=9),
        Token(type_=TokenType.IF, value='if', line_number=4, position=13),
        Token(type_=TokenType.PARENTHESIS_OPEN, value='(', line_number=4, position=15),
        Token(type_=TokenType.IDENTIFIER, value='param2', line_number=4, position=16),
        Token(type_=TokenType.LESS, value='<', line_number=4, position=23),
        Token(type_=TokenType.NUMBER, value='0', line_number=4, position=25),
        Token(type_=TokenType.PARENTHESIS_CLOSE, value=')', line_number=4, position=26),
        Token(type_=TokenType.CURLY_BRACKET_OPEN, value='{', line_number=5, position=13),
        Token(type_=TokenType.IDENTIFIER, value='param2', line_number=6, position=17),
        Token(type_=TokenType.ASSIGN, value='=', line_number=6, position=24),
        Token(type_=TokenType.NUMBER, value='0', line_number=6, position=26),
        Token(type_=TokenType.SEMICOLON, value=';', line_number=6, position=27),
        Token(type_=TokenType.CURLY_BRACKET_CLOSE, value='}', line_number=7, position=13),
        Token(type_=TokenType.RETURN, value='return', line_number=8, position=13),
        Token(type_=TokenType.IDENTIFIER, value='int', line_number=8, position=20),
        Token(type_=TokenType.PARENTHESIS_OPEN, value='(', line_number=8, position=23),
        Token(type_=TokenType.IDENTIFIER, value='param1', line_number=8, position=24),
        Token(type_=TokenType.DIVIDE, value='/', line_number=8, position=31),
        Token(type_=TokenType.PARENTHESIS_OPEN, value='(', line_number=8, position=33),
        Token(type_=TokenType.IDENTIFIER, value='Number', line_number=8, position=34),
        Token(type_=TokenType.PARENTHESIS_OPEN, value='(', line_number=8, position=40),
        Token(type_=TokenType.NUMBER, value='1', line_number=8, position=41),
        Token(type_=TokenType.PLUS, value='+', line_number=8, position=43),
        Token(type_=TokenType.IDENTIFIER, value='param2', line_number=8, position=45),
        Token(type_=TokenType.DIVIDE, value='/', line_number=8, position=52),
        Token(type_=TokenType.NUMBER, value='300000', line_number=8, position=54),
        Token(type_=TokenType.PARENTHESIS_CLOSE, value=')', line_number=8, position=60),
        Token(type_=TokenType.PARENTHESIS_CLOSE, value=')', line_number=8, position=61),
        Token(type_=TokenType.PARENTHESIS_CLOSE, value=')', line_number=8, position=62),
        Token(type_=TokenType.SEMICOLON, value=';', line_number=8, position=63),
        Token(type_=TokenType.CURLY_BRACKET_CLOSE, value='}', line_number=9, position=9),
    ]
| 48.258427 | 103 | 0.674738 | from __future__ import annotations
from typing import List
from pytest import mark, param
from as3.enums import TokenType
from as3.scanner import Token, scan
def make_test_params(value: str, type_: TokenType, xfail: bool = False):
marks = mark.xfail(strict=True) if xfail else ()
return param(value, Token(type_=type_, value=value, line_number=1, position=1), marks=marks)
def test_empty():
assert list(scan('')) == []
@mark.parametrize('source, expected', [
make_test_params('just_some_identifier', TokenType.IDENTIFIER),
make_test_params('package', TokenType.PACKAGE),
make_test_params('{', TokenType.CURLY_BRACKET_OPEN),
make_test_params('(', TokenType.PARENTHESIS_OPEN),
make_test_params('[', TokenType.BRACKET_OPEN),
make_test_params(';', TokenType.SEMICOLON),
make_test_params(':', TokenType.COLON),
make_test_params('+', TokenType.PLUS),
make_test_params('-', TokenType.MINUS),
make_test_params('<', TokenType.LESS),
make_test_params('<<', TokenType.LEFT_SHIFT),
make_test_params('+=', TokenType.ASSIGN_ADD),
make_test_params('/', TokenType.DIVIDE),
make_test_params('*', TokenType.MULTIPLY),
make_test_params('!', TokenType.LOGICAL_NOT),
make_test_params('!=', TokenType.NOT_EQUALS),
make_test_params('>>', TokenType.RIGHT_SHIFT),
make_test_params('==', TokenType.EQUALS),
make_test_params('++', TokenType.INCREMENT),
make_test_params('--', TokenType.DECREMENT),
make_test_params('.', TokenType.DOT),
make_test_params('/* ololo */', TokenType.COMMENT),
make_test_params('// abc', TokenType.COMMENT),
make_test_params('"string"', TokenType.STRING),
make_test_params("'string'", TokenType.STRING),
make_test_params(r'"string\n"', TokenType.STRING),
make_test_params(r'"string\""', TokenType.STRING),
make_test_params(r"'string\''", TokenType.STRING),
make_test_params('<=', TokenType.LESS_OR_EQUAL),
make_test_params('>', TokenType.GREATER),
make_test_params('>=', TokenType.GREATER_OR_EQUAL),
make_test_params('===', TokenType.STRICTLY_EQUAL),
make_test_params('||', TokenType.LOGICAL_OR),
make_test_params('&&', TokenType.LOGICAL_AND),
make_test_params('new', TokenType.NEW),
make_test_params('42', TokenType.NUMBER),
make_test_params('0xABCDEF', TokenType.NUMBER),
make_test_params('0777', TokenType.NUMBER),
make_test_params('0.75', TokenType.NUMBER),
make_test_params('1.', TokenType.NUMBER),
make_test_params('.9', TokenType.NUMBER),
make_test_params('1e-10', TokenType.NUMBER),
make_test_params('?', TokenType.QUESTION_MARK),
make_test_params('§§push', TokenType.IDENTIFIER),
make_test_params('§§pop', TokenType.IDENTIFIER),
make_test_params('/*\n * * */', TokenType.COMMENT),
make_test_params('in', TokenType.IN),
make_test_params('implements', TokenType.IMPLEMENTS),
make_test_params('^', TokenType.BITWISE_XOR),
make_test_params('%', TokenType.PERCENT),
# Expected failures.
make_test_params('>>>', TokenType.UNSIGNED_RIGHT_SHIFT, True),
])
def test_single_token(source: str, expected: Token):
tokens = list(scan(source))
assert len(tokens) == 1
assert tokens[0] == expected
@mark.parametrize('source, expected', [
(
'a = 42;',
[
Token(type_=TokenType.IDENTIFIER, value='a', line_number=1, position=1),
Token(type_=TokenType.ASSIGN, value='=', line_number=1, position=3),
Token(type_=TokenType.NUMBER, value='42', line_number=1, position=5),
Token(type_=TokenType.SEMICOLON, value=';', line_number=1, position=7),
],
),
(
'a = /* what? */ 42',
[
Token(type_=TokenType.IDENTIFIER, value='a', line_number=1, position=1),
Token(type_=TokenType.ASSIGN, value='=', line_number=1, position=3),
Token(type_=TokenType.COMMENT, value='/* what? */', line_number=1, position=5),
Token(type_=TokenType.NUMBER, value='42', line_number=1, position=17),
],
),
(
' \n/**/',
[
Token(type_=TokenType.COMMENT, value='/**/', line_number=2, position=1),
],
),
])
def test_multiple_tokens(source: str, expected: List[Token]):
assert list(scan(source)) == expected
def test_scanner_get_elemental_penetration():
text = '''
public static function getElementalPenetration(param1:Number, param2:Number) : int
{
if(param2 < 0)
{
param2 = 0;
}
return int(param1 / (Number(1 + param2 / 300000)));
}
'''
assert list(scan(text)) == [
Token(type_=TokenType.PUBLIC, value='public', line_number=2, position=9),
Token(type_=TokenType.STATIC, value='static', line_number=2, position=16),
Token(type_=TokenType.FUNCTION, value='function', line_number=2, position=23),
Token(type_=TokenType.IDENTIFIER, value='getElementalPenetration', line_number=2, position=32),
Token(type_=TokenType.PARENTHESIS_OPEN, value='(', line_number=2, position=55),
Token(type_=TokenType.IDENTIFIER, value='param1', line_number=2, position=56),
Token(type_=TokenType.COLON, value=':', line_number=2, position=62),
Token(type_=TokenType.IDENTIFIER, value='Number', line_number=2, position=63),
Token(type_=TokenType.COMMA, value=',', line_number=2, position=69),
Token(type_=TokenType.IDENTIFIER, value='param2', line_number=2, position=71),
Token(type_=TokenType.COLON, value=':', line_number=2, position=77),
Token(type_=TokenType.IDENTIFIER, value='Number', line_number=2, position=78),
Token(type_=TokenType.PARENTHESIS_CLOSE, value=')', line_number=2, position=84),
Token(type_=TokenType.COLON, value=':', line_number=2, position=86),
Token(type_=TokenType.IDENTIFIER, value='int', line_number=2, position=88),
Token(type_=TokenType.CURLY_BRACKET_OPEN, value='{', line_number=3, position=9),
Token(type_=TokenType.IF, value='if', line_number=4, position=13),
Token(type_=TokenType.PARENTHESIS_OPEN, value='(', line_number=4, position=15),
Token(type_=TokenType.IDENTIFIER, value='param2', line_number=4, position=16),
Token(type_=TokenType.LESS, value='<', line_number=4, position=23),
Token(type_=TokenType.NUMBER, value='0', line_number=4, position=25),
Token(type_=TokenType.PARENTHESIS_CLOSE, value=')', line_number=4, position=26),
Token(type_=TokenType.CURLY_BRACKET_OPEN, value='{', line_number=5, position=13),
Token(type_=TokenType.IDENTIFIER, value='param2', line_number=6, position=17),
Token(type_=TokenType.ASSIGN, value='=', line_number=6, position=24),
Token(type_=TokenType.NUMBER, value='0', line_number=6, position=26),
Token(type_=TokenType.SEMICOLON, value=';', line_number=6, position=27),
Token(type_=TokenType.CURLY_BRACKET_CLOSE, value='}', line_number=7, position=13),
Token(type_=TokenType.RETURN, value='return', line_number=8, position=13),
Token(type_=TokenType.IDENTIFIER, value='int', line_number=8, position=20),
Token(type_=TokenType.PARENTHESIS_OPEN, value='(', line_number=8, position=23),
Token(type_=TokenType.IDENTIFIER, value='param1', line_number=8, position=24),
Token(type_=TokenType.DIVIDE, value='/', line_number=8, position=31),
Token(type_=TokenType.PARENTHESIS_OPEN, value='(', line_number=8, position=33),
Token(type_=TokenType.IDENTIFIER, value='Number', line_number=8, position=34),
Token(type_=TokenType.PARENTHESIS_OPEN, value='(', line_number=8, position=40),
Token(type_=TokenType.NUMBER, value='1', line_number=8, position=41),
Token(type_=TokenType.PLUS, value='+', line_number=8, position=43),
Token(type_=TokenType.IDENTIFIER, value='param2', line_number=8, position=45),
Token(type_=TokenType.DIVIDE, value='/', line_number=8, position=52),
Token(type_=TokenType.NUMBER, value='300000', line_number=8, position=54),
Token(type_=TokenType.PARENTHESIS_CLOSE, value=')', line_number=8, position=60),
Token(type_=TokenType.PARENTHESIS_CLOSE, value=')', line_number=8, position=61),
Token(type_=TokenType.PARENTHESIS_CLOSE, value=')', line_number=8, position=62),
Token(type_=TokenType.SEMICOLON, value=';', line_number=8, position=63),
Token(type_=TokenType.CURLY_BRACKET_CLOSE, value='}', line_number=9, position=9),
]
| true | true |
f7f72d4ff545d7ee1d873925fa199a288cf5a5ef | 9,521 | py | Python | bin/validator/transitfeed/stoptime.py | conveyal/otp-deployer | 4628a1cdcc68a0477ef11e77253afcb649b99075 | [
"MIT"
] | 1 | 2017-03-21T15:31:11.000Z | 2017-03-21T15:31:11.000Z | bin/validator/transitfeed/stoptime.py | conveyal/otp-deployer | 4628a1cdcc68a0477ef11e77253afcb649b99075 | [
"MIT"
] | null | null | null | bin/validator/transitfeed/stoptime.py | conveyal/otp-deployer | 4628a1cdcc68a0477ef11e77253afcb649b99075 | [
"MIT"
] | null | null | null | #!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import problems as problems_module
from stop import Stop
import util
class StopTime(object):
"""
Represents a single stop of a trip. StopTime contains most of the columns
from the stop_times.txt file. It does not contain trip_id, which is implied
by the Trip used to access it.
See the Google Transit Feed Specification for the semantic details.
stop: A Stop object
arrival_time: str in the form HH:MM:SS; readonly after __init__
departure_time: str in the form HH:MM:SS; readonly after __init__
arrival_secs: int number of seconds since midnight
departure_secs: int number of seconds since midnight
stop_headsign: str
pickup_type: int
drop_off_type: int
shape_dist_traveled: float
stop_id: str; readonly
stop_time: The only time given for this stop. If present, it is used
for both arrival and departure time.
stop_sequence: int
"""
_REQUIRED_FIELD_NAMES = ['trip_id', 'arrival_time', 'departure_time',
'stop_id', 'stop_sequence']
_OPTIONAL_FIELD_NAMES = ['stop_headsign', 'pickup_type',
'drop_off_type', 'shape_dist_traveled']
_FIELD_NAMES = _REQUIRED_FIELD_NAMES + _OPTIONAL_FIELD_NAMES
_DEPRECATED_FIELD_NAMES = []
_SQL_FIELD_NAMES = ['trip_id', 'arrival_secs', 'departure_secs',
'stop_id', 'stop_sequence', 'stop_headsign',
'pickup_type', 'drop_off_type', 'shape_dist_traveled']
_STOP_CLASS = Stop
__slots__ = ('arrival_secs', 'departure_secs', 'stop_headsign', 'stop',
'stop_headsign', 'pickup_type', 'drop_off_type',
'shape_dist_traveled', 'stop_sequence')
def __init__(self, problems, stop,
arrival_time=None, departure_time=None,
stop_headsign=None, pickup_type=None, drop_off_type=None,
shape_dist_traveled=None, arrival_secs=None,
departure_secs=None, stop_time=None, stop_sequence=None):
# Implementation note from Andre, July 22, 2010:
# The checks performed here should be in their own Validate* methods to
# keep consistency. Unfortunately the performance degradation is too great,
# so the validation was left in __init__.
# Performance is also the reason why we don't use the GtfsFactory, but
# have StopTime._STOP_CLASS instead. If a Stop class that does not inherit
# from transitfeed.Stop is used, the extension should also provide a
# StopTime class that updates _STOP_CLASS accordingly.
#
# For more details see the discussion at
# http://codereview.appspot.com/1713041
if stop_time != None:
arrival_time = departure_time = stop_time
if arrival_secs != None:
self.arrival_secs = arrival_secs
elif arrival_time in (None, ""):
self.arrival_secs = None # Untimed
arrival_time = None
else:
try:
self.arrival_secs = util.TimeToSecondsSinceMidnight(arrival_time)
except problems_module.Error:
problems.InvalidValue('arrival_time', arrival_time)
self.arrival_secs = None
if departure_secs != None:
self.departure_secs = departure_secs
elif departure_time in (None, ""):
self.departure_secs = None
departure_time = None
else:
try:
self.departure_secs = util.TimeToSecondsSinceMidnight(departure_time)
except problems_module.Error:
problems.InvalidValue('departure_time', departure_time)
self.departure_secs = None
if not isinstance(stop, self._STOP_CLASS):
# Not quite correct, but better than letting the problem propagate
problems.InvalidValue('stop', stop)
self.stop = stop
self.stop_headsign = stop_headsign
if pickup_type in (None, ""):
self.pickup_type = None
else:
try:
pickup_type = int(pickup_type)
except ValueError:
problems.InvalidValue('pickup_type', pickup_type)
else:
if pickup_type < 0 or pickup_type > 3:
problems.InvalidValue('pickup_type', pickup_type)
self.pickup_type = pickup_type
if drop_off_type in (None, ""):
self.drop_off_type = None
else:
try:
drop_off_type = int(drop_off_type)
except ValueError:
problems.InvalidValue('drop_off_type', drop_off_type)
else:
if drop_off_type < 0 or drop_off_type > 3:
problems.InvalidValue('drop_off_type', drop_off_type)
self.drop_off_type = drop_off_type
if (self.pickup_type == 1 and self.drop_off_type == 1 and
self.arrival_secs == None and self.departure_secs == None):
problems.OtherProblem('This stop time has a pickup_type and '
'drop_off_type of 1, indicating that riders '
'can\'t get on or off here. Since it doesn\'t '
'define a timepoint either, this entry serves no '
'purpose and should be excluded from the trip.',
type=problems_module.TYPE_WARNING)
if ((self.arrival_secs != None) and (self.departure_secs != None) and
(self.departure_secs < self.arrival_secs)):
problems.InvalidValue('departure_time', departure_time,
'The departure time at this stop (%s) is before '
'the arrival time (%s). This is often caused by '
'problems in the feed exporter\'s time conversion')
# If the caller passed a valid arrival time but didn't attempt to pass a
# departure time complain
if (self.arrival_secs != None and
self.departure_secs == None and departure_time == None):
# self.departure_secs might be None because departure_time was invalid,
# so we need to check both
problems.MissingValue('departure_time',
'arrival_time and departure_time should either '
'both be provided or both be left blank. '
'It\'s OK to set them both to the same value.')
# If the caller passed a valid departure time but didn't attempt to pass a
# arrival time complain
if (self.departure_secs != None and
self.arrival_secs == None and arrival_time == None):
problems.MissingValue('arrival_time',
'arrival_time and departure_time should either '
'both be provided or both be left blank. '
'It\'s OK to set them both to the same value.')
if shape_dist_traveled in (None, ""):
self.shape_dist_traveled = None
else:
try:
self.shape_dist_traveled = float(shape_dist_traveled)
except ValueError:
problems.InvalidValue('shape_dist_traveled', shape_dist_traveled)
if stop_sequence is not None:
self.stop_sequence = stop_sequence
def GetFieldValuesTuple(self, trip_id):
    """Build the GTFS output row for this stop time, ordered per _FIELD_NAMES.

    Arguments:
        trip_id: The trip_id of the trip to which this StopTime corresponds.
                 It must be provided, as it is not stored in StopTime.
    """
    # Falsy attribute values (None, '') are written as empty strings so the
    # emitted GTFS row has blank cells rather than the text "None".
    row = [trip_id if field == 'trip_id' else (getattr(self, field) or '')
           for field in self._FIELD_NAMES]
    return tuple(row)
def GetSqlValuesTuple(self, trip_id):
    """Build the SQLite row for this stop time, ordered per _SQL_FIELD_NAMES.

    Arguments:
        trip_id: The trip_id of the trip to which this StopTime corresponds.
                 It must be provided, as it is not stored in StopTime.
    """
    # None values are kept as-is so SQLite stores NULL (contrary to the empty
    # strings produced by GetFieldValuesTuple for file output).
    return tuple(trip_id if field == 'trip_id' else getattr(self, field)
                 for field in self._SQL_FIELD_NAMES)
def GetTimeSecs(self):
    """Return the first of arrival_secs and departure_secs that is not None.

    If both are None, return None (the stop is completely untimed).
    """
    for seconds in (self.arrival_secs, self.departure_secs):
        if seconds is not None:
            return seconds
    return None
def __getattr__(self, name):
    """Synthesize derived attributes that are not stored in __slots__.

    stop_id proxies the wrapped stop object; arrival_time/departure_time
    render the *_secs fields as time-of-day strings via
    util.FormatSecondsSinceMidnight (empty string when untimed); a missing
    shape_dist_traveled reads as ''.  Any other name raises AttributeError.
    """
    if name == 'stop_id':
        return self.stop.stop_id
    elif name == 'arrival_time':
        # Conditional expressions replace the old "x != None and f(x) or ''"
        # trick, which silently returns '' whenever f(x) is falsy.
        return (util.FormatSecondsSinceMidnight(self.arrival_secs)
                if self.arrival_secs is not None else '')
    elif name == 'departure_time':
        return (util.FormatSecondsSinceMidnight(self.departure_secs)
                if self.departure_secs is not None else '')
    elif name == 'shape_dist_traveled':
        return ''
    raise AttributeError(name)
| 40.34322 | 79 | 0.659174 |
import problems as problems_module
from stop import Stop
import util
class StopTime(object):
    """A single stop on a trip: a Stop object plus the times the vehicle is there.

    Times are stored internally as seconds since midnight
    (arrival_secs / departure_secs, int or None).  The GTFS "HH:MM:SS"
    strings, the stop_id and a blank shape_dist_traveled are synthesized on
    demand by __getattr__.
    """
    _REQUIRED_FIELD_NAMES = ['trip_id', 'arrival_time', 'departure_time',
                             'stop_id', 'stop_sequence']
    _OPTIONAL_FIELD_NAMES = ['stop_headsign', 'pickup_type',
                             'drop_off_type', 'shape_dist_traveled']
    _FIELD_NAMES = _REQUIRED_FIELD_NAMES + _OPTIONAL_FIELD_NAMES
    _DEPRECATED_FIELD_NAMES = []
    # Column order used when writing rows to the SQLite backing store.
    _SQL_FIELD_NAMES = ['trip_id', 'arrival_secs', 'departure_secs',
                        'stop_id', 'stop_sequence', 'stop_headsign',
                        'pickup_type', 'drop_off_type', 'shape_dist_traveled']
    # Overridable by extensions that subclass Stop; see the comment in __init__.
    _STOP_CLASS = Stop
    # Fix: 'stop_headsign' used to appear twice in __slots__; the duplicate
    # slot descriptor was redundant (the second simply shadowed the first)
    # and has been removed.
    __slots__ = ('arrival_secs', 'departure_secs', 'stop_headsign', 'stop',
                 'pickup_type', 'drop_off_type',
                 'shape_dist_traveled', 'stop_sequence')

    def __init__(self, problems, stop,
                 arrival_time=None, departure_time=None,
                 stop_headsign=None, pickup_type=None, drop_off_type=None,
                 shape_dist_traveled=None, arrival_secs=None,
                 departure_secs=None, stop_time=None, stop_sequence=None):
        """Validate raw field values and populate the instance.

        Invalid values are reported to `problems` and loading continues (the
        attribute is set to None or the offending value as appropriate).
        *_time arguments are "HH:MM:SS" strings; *_secs arguments are already
        parsed ints and take precedence; stop_time sets both times at once.
        """
        # have StopTime._STOP_CLASS instead. If a Stop class that does not inherit
        # from transitfeed.Stop is used, the extension should also provide a
        # StopTime class that updates _STOP_CLASS accordingly.
        #
        # For more details see the discussion at
        # http://codereview.appspot.com/1713041
        if stop_time != None:
            arrival_time = departure_time = stop_time
        if arrival_secs != None:
            self.arrival_secs = arrival_secs
        elif arrival_time in (None, ""):
            self.arrival_secs = None  # Untimed stop
            arrival_time = None
        else:
            try:
                self.arrival_secs = util.TimeToSecondsSinceMidnight(arrival_time)
            except problems_module.Error:
                problems.InvalidValue('arrival_time', arrival_time)
                self.arrival_secs = None
        if departure_secs != None:
            self.departure_secs = departure_secs
        elif departure_time in (None, ""):
            self.departure_secs = None
            departure_time = None
        else:
            try:
                self.departure_secs = util.TimeToSecondsSinceMidnight(
                    departure_time)
            except problems_module.Error:
                problems.InvalidValue('departure_time', departure_time)
                self.departure_secs = None
        if not isinstance(stop, self._STOP_CLASS):
            # Not quite correct, but better than letting the problem propagate
            problems.InvalidValue('stop', stop)
        self.stop = stop
        self.stop_headsign = stop_headsign
        # pickup_type / drop_off_type must be ints in [0, 3] per GTFS; out of
        # range or non-numeric values are reported but still assigned.
        if pickup_type in (None, ""):
            self.pickup_type = None
        else:
            try:
                pickup_type = int(pickup_type)
            except ValueError:
                problems.InvalidValue('pickup_type', pickup_type)
            else:
                if pickup_type < 0 or pickup_type > 3:
                    problems.InvalidValue('pickup_type', pickup_type)
            self.pickup_type = pickup_type
        if drop_off_type in (None, ""):
            self.drop_off_type = None
        else:
            try:
                drop_off_type = int(drop_off_type)
            except ValueError:
                problems.InvalidValue('drop_off_type', drop_off_type)
            else:
                if drop_off_type < 0 or drop_off_type > 3:
                    problems.InvalidValue('drop_off_type', drop_off_type)
            self.drop_off_type = drop_off_type
        if (self.pickup_type == 1 and self.drop_off_type == 1 and
                self.arrival_secs == None and self.departure_secs == None):
            problems.OtherProblem('This stop time has a pickup_type and '
                                  'drop_off_type of 1, indicating that riders '
                                  'can\'t get on or off here. Since it doesn\'t '
                                  'define a timepoint either, this entry serves no '
                                  'purpose and should be excluded from the trip.',
                                  type=problems_module.TYPE_WARNING)
        if ((self.arrival_secs != None) and (self.departure_secs != None) and
                (self.departure_secs < self.arrival_secs)):
            problems.InvalidValue('departure_time', departure_time,
                                  'The departure time at this stop (%s) is before '
                                  'the arrival time (%s). This is often caused by '
                                  'problems in the feed exporter\'s time conversion')
        # If the caller passed a valid arrival time but did not attempt to
        # pass a departure time, complain.
        if (self.arrival_secs != None and
                self.departure_secs == None and departure_time == None):
            # self.departure_secs might be None because departure_time was invalid,
            # so we need to check both
            problems.MissingValue('departure_time',
                                  'arrival_time and departure_time should either '
                                  'both be provided or both be left blank. '
                                  'It\'s OK to set them both to the same value.')
        # Symmetric check: valid departure time without any arrival time.
        if (self.departure_secs != None and
                self.arrival_secs == None and arrival_time == None):
            problems.MissingValue('arrival_time',
                                  'arrival_time and departure_time should either '
                                  'both be provided or both be left blank. '
                                  'It\'s OK to set them both to the same value.')
        if shape_dist_traveled in (None, ""):
            self.shape_dist_traveled = None
        else:
            try:
                self.shape_dist_traveled = float(shape_dist_traveled)
            except ValueError:
                problems.InvalidValue('shape_dist_traveled', shape_dist_traveled)
        if stop_sequence is not None:
            self.stop_sequence = stop_sequence

    def GetFieldValuesTuple(self, trip_id):
        """Return a GTFS output row ordered per _FIELD_NAMES.

        trip_id must be supplied by the caller; it is not stored here.
        """
        result = []
        for fn in self._FIELD_NAMES:
            if fn == 'trip_id':
                result.append(trip_id)
            else:
                # Falsy values are written as '' so the file row has blank cells.
                result.append(getattr(self, fn) or '')
        return tuple(result)

    def GetSqlValuesTuple(self, trip_id):
        """Return a SQLite row ordered per _SQL_FIELD_NAMES.

        None values are kept so they are stored as SQL NULL (contrary to the
        empty strings produced by GetFieldValuesTuple).
        """
        result = []
        for fn in self._SQL_FIELD_NAMES:
            if fn == 'trip_id':
                result.append(trip_id)
            else:
                result.append(getattr(self, fn))
        return tuple(result)

    def GetTimeSecs(self):
        """Return arrival_secs if set, else departure_secs, else None."""
        if self.arrival_secs != None:
            return self.arrival_secs
        elif self.departure_secs != None:
            return self.departure_secs
        else:
            return None

    def __getattr__(self, name):
        """Synthesize derived attributes not stored in __slots__."""
        if name == 'stop_id':
            return self.stop.stop_id
        elif name == 'arrival_time':
            return (util.FormatSecondsSinceMidnight(self.arrival_secs)
                    if self.arrival_secs is not None else '')
        elif name == 'departure_time':
            return (util.FormatSecondsSinceMidnight(self.departure_secs)
                    if self.departure_secs is not None else '')
        elif name == 'shape_dist_traveled':
            return ''
        raise AttributeError(name)
| true | true |
f7f72da001f7c3708cce704d8989f8e9c9550fe4 | 488 | py | Python | Dataset/Leetcode/test/13/17.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/test/13/17.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/test/13/17.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
def XXX(self, s: str) -> int:
single = {
'I':1,
'V':5,
'X':10,
'L':50,
'C':100,
'D':500,
'M':1000
}
double = ('IV', 'IX', 'XL', 'XC', 'CD', 'CM')
res = 0
for i, ch in enumerate(s):
if s[i:i+2] in double:
res = res - single[ch]
else:
res = res + single[ch]
return res
| 19.52 | 53 | 0.315574 | class Solution:
def XXX(self, s: str) -> int:
single = {
'I':1,
'V':5,
'X':10,
'L':50,
'C':100,
'D':500,
'M':1000
}
double = ('IV', 'IX', 'XL', 'XC', 'CD', 'CM')
res = 0
for i, ch in enumerate(s):
if s[i:i+2] in double:
res = res - single[ch]
else:
res = res + single[ch]
return res
| true | true |
f7f72e3807dcb369f998fa23f5bcc3552f568c12 | 40,618 | py | Python | python/paddle/tensor/search.py | Liyulingyue/Paddle | f3f0824df52b6051ac365268a3a8cd2682c6c8d5 | [
"Apache-2.0"
] | null | null | null | python/paddle/tensor/search.py | Liyulingyue/Paddle | f3f0824df52b6051ac365268a3a8cd2682c6c8d5 | [
"Apache-2.0"
] | null | null | null | python/paddle/tensor/search.py | Liyulingyue/Paddle | f3f0824df52b6051ac365268a3a8cd2682c6c8d5 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle
from ..framework import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
from ..fluid import layers
from ..framework import core, in_dygraph_mode, _non_static_mode
from ..fluid.framework import _in_legacy_dygraph
from paddle.common_ops_import import convert_np_dtype_to_dtype_
from paddle.common_ops_import import Variable
from paddle.common_ops_import import VarDesc
from paddle import _C_ops
from .logic import logical_not
# TODO: define searching & indexing functions of a tensor
# from ..fluid.layers import has_inf #DEFINE_ALIAS
# from ..fluid.layers import has_nan #DEFINE_ALIAS
__all__ = []
def argsort(x, axis=-1, descending=False, name=None):
    """
    This OP sorts the input along the given axis, and returns the corresponding index tensor for the sorted output values. The default sort algorithm is ascending, if you want the sort algorithm to be descending, you must set the :attr:`descending` as True.
    Args:
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is -1.
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: sorted indices(with the same shape as ``x``
        and with data type int64).
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([[[5,8,9,5],
                                   [0,0,1,7],
                                   [6,9,2,4]],
                                  [[5,2,4,2],
                                   [4,7,7,9],
                                   [1,7,0,6]]],
                                dtype='float32')
            out1 = paddle.argsort(x, axis=-1)
            out2 = paddle.argsort(x, axis=0)
            out3 = paddle.argsort(x, axis=1)
            print(out1)
            #[[[0 3 1 2]
            #  [0 1 2 3]
            #  [2 3 0 1]]
            # [[1 3 2 0]
            #  [0 1 2 3]
            #  [2 0 3 1]]]
            print(out2)
            #[[[0 1 1 1]
            #  [0 0 0 0]
            #  [1 1 1 0]]
            # [[1 0 0 0]
            #  [1 1 1 1]
            #  [0 0 0 1]]]
            print(out3)
            #[[[1 1 1 2]
            #  [0 0 2 0]
            #  [2 2 0 1]]
            # [[2 0 2 0]
            #  [1 1 0 2]
            #  [0 2 1 1]]]
    """
    # New dygraph ("final state") path: the kernel returns (sorted values,
    # indices); only the indices are returned to the caller.
    if in_dygraph_mode():
        _, ids = _C_ops.final_state_argsort(x, axis, descending)
        return ids

    # Legacy dygraph path, attr-pair calling convention.
    if _in_legacy_dygraph():
        _, ids = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
        return ids
    # Static-graph path: validate the input dtype, then append an `argsort`
    # op.  The sorted-values output `out` is created but discarded; only the
    # Indices output is returned.
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'argsort')

    helper = LayerHelper("argsort", **locals())
    out = helper.create_variable_for_type_inference(
        dtype=x.dtype, stop_gradient=True)
    ids = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': x},
        outputs={'Out': out,
                 'Indices': ids},
        attrs={'axis': axis,
               'descending': descending})
    return ids
def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
    """
    Computes the indices of the max elements of the input tensor's
    element along the provided axis.
    Args:
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the min value index.
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
        dtype(str|np.dtype, optional): Data type of the output tensor which can
                    be int32, int64. The default value is ``int64`` , and it will
                    return the int64 indices.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        Tensor, return the tensor of int32 if set :attr:`dtype` is int32, otherwise return the tensor of int64.
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([[5,8,9,5],
                                 [0,0,1,7],
                                 [6,9,2,4]])
            out1 = paddle.argmax(x)
            print(out1) # 2
            out2 = paddle.argmax(x, axis=0)
            print(out2)
            # [2, 2, 0, 1]
            out3 = paddle.argmax(x, axis=-1)
            print(out3)
            # [2, 3, 1]
            out4 = paddle.argmax(x, axis=0, keepdim=True)
            print(out4)
            # [[2, 2, 0, 1]]
    """
    if axis is not None and not isinstance(axis, int):
        raise TypeError(
            "The type of 'axis' must be int or None in argmax, but received %s."
            % (type(axis)))

    if dtype is None:
        raise ValueError(
            "the value of 'dtype' in argmax could not be None, but received None"
        )

    var_dtype = convert_np_dtype_to_dtype_(dtype)
    # axis=None means arg-reduce over the flattened tensor: the kernel gets
    # flatten=True with a dummy axis of 0.
    flatten = False
    if axis is None:
        flatten = True
        axis = 0

    if in_dygraph_mode():
        return _C_ops.final_state_argmax(x, axis, keepdim, flatten, var_dtype)
    if _in_legacy_dygraph():
        out = _C_ops.arg_max(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                             keepdim, 'flatten', flatten)
        return out

    # Static-graph path: validate dtypes and append an `arg_max` op.
    helper = LayerHelper("argmax", **locals())
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'paddle.argmax')
    # Fix: the op name in this dtype check used to say 'argmin' (copy-paste
    # from the sibling function), producing a misleading error message.
    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmax')
    attrs = {}
    out = helper.create_variable_for_type_inference(var_dtype)
    attrs['keepdims'] = keepdim
    attrs['axis'] = axis
    attrs['flatten'] = flatten
    attrs['dtype'] = var_dtype
    helper.append_op(
        type='arg_max', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs)
    out.stop_gradient = True
    return out
def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
    """
    Computing the indices of the min elements of the input tensor's
    element along the provided axis.
    Args:
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the min value index.
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
        dtype(str, optional): Data type of the output tensor which can
                    be int32, int64. The default value is 'int64', and it will
                    return the int64 indices.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
    Returns:
        Tensor, return the tensor of `int32` if set :attr:`dtype` is `int32`, otherwise return the tensor of `int64`.
    Examples:
        .. code-block:: python
            :name: code-example1
            import paddle
            x = paddle.to_tensor([[5,8,9,5],
                                 [0,0,1,7],
                                 [6,9,2,4]])
            out1 = paddle.argmin(x)
            print(out1) # 4
            out2 = paddle.argmin(x, axis=0)
            print(out2)
            # [1, 1, 1, 2]
            out3 = paddle.argmin(x, axis=-1)
            print(out3)
            # [0, 0, 2]
            out4 = paddle.argmin(x, axis=0, keepdim=True)
            print(out4)
            # [[1, 1, 1, 2]]
    """
    if axis is not None and not isinstance(axis, int):
        raise TypeError(
            "The type of 'axis' must be int or None in argmin, but received %s."
            % (type(axis)))

    if dtype is None:
        raise ValueError(
            "the value of 'dtype' in argmin could not be None, but received None"
        )

    var_dtype = convert_np_dtype_to_dtype_(dtype)
    # axis=None means arg-reduce over the flattened tensor: the kernel gets
    # flatten=True with a dummy axis of 0.
    flatten = False
    if axis is None:
        flatten = True
        axis = 0

    if in_dygraph_mode():
        return _C_ops.final_state_argmin(x, axis, keepdim, flatten, var_dtype)
    if _in_legacy_dygraph():
        out = _C_ops.arg_min(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                             keepdim, 'flatten', flatten)
        return out

    # Static-graph path: validate dtypes and append an `arg_min` op.
    helper = LayerHelper("argmin", **locals())
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'paddle.argmin')
    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
    out = helper.create_variable_for_type_inference(var_dtype)
    attrs = {}
    attrs['keepdims'] = keepdim
    attrs['axis'] = axis
    attrs['flatten'] = flatten
    attrs['dtype'] = var_dtype
    helper.append_op(
        type='arg_min', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs)
    out.stop_gradient = True
    return out
def index_select(x, index, axis=0, name=None):
    """
    Returns a new tensor which indexes the ``input`` tensor along dimension ``axis`` using
    the entries in ``index`` which is a Tensor. The returned tensor has the same number
    of dimensions as the original ``x`` tensor. The dim-th dimension has the same
    size as the length of ``index``; other dimensions have the same size as in the ``x`` tensor.
    Args:
        x (Tensor): The input Tensor to be operated. The data of ``x`` can be one of float32, float64, int32, int64.
        index (Tensor): The 1-D Tensor containing the indices to index. The data type of ``index`` must be int32 or int64.
        axis (int, optional): The dimension in which we index. Default: if None, the ``axis`` is 0.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: A Tensor with same data type as ``x``.
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
                                  [5.0, 6.0, 7.0, 8.0],
                                  [9.0, 10.0, 11.0, 12.0]])
            index = paddle.to_tensor([0, 1, 1], dtype='int32')
            out_z1 = paddle.index_select(x=x, index=index)
            #[[1. 2. 3. 4.]
            # [5. 6. 7. 8.]
            # [5. 6. 7. 8.]]
            out_z2 = paddle.index_select(x=x, index=index, axis=1)
            #[[ 1.  2.  2.]
            # [ 5.  6.  6.]
            # [ 9. 10. 10.]]
    """
    # Dynamic-graph fast paths: dispatch straight to the C++ op.
    if in_dygraph_mode():
        return _C_ops.final_state_index_select(x, index, axis)
    elif _in_legacy_dygraph():
        return _C_ops.index_select(x, index, 'dim', axis)

    # Static-graph path: validate dtypes and append an `index_select` op.
    helper = LayerHelper("index_select", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'paddle.tensor.search.index_select')
    check_variable_and_dtype(index, 'index', ['int32', 'int64'],
                             'paddle.tensor.search.index_select')

    result = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='index_select',
        inputs={'X': x, 'Index': index},
        outputs={'Out': result},
        attrs={'dim': axis})
    return result
def nonzero(x, as_tuple=False):
    """
    Return a tensor containing the indices of all non-zero elements of the `input`
    tensor. If as_tuple is True, return a tuple of 1-D tensors, one for each dimension
    in `input`, each containing the indices (in that dimension) of all non-zero elements
    of `input`. Given a n-Dimensional `input` tensor with shape [x_1, x_2, ..., x_n], If
    as_tuple is False, we can get a output tensor with shape [z, n], where `z` is the
    number of all non-zero elements in the `input` tensor. If as_tuple is True, we can get
    a 1-D tensor tuple of length `n`, and the shape of each 1-D tensor is [z, 1].
    Args:
        x (Tensor): The input tensor variable.
        as_tuple (bool): Return type, Tensor or tuple of Tensor.
    Returns:
        Tensor. The data type is int64.
    Examples:
        .. code-block:: python
            import paddle
            x1 = paddle.to_tensor([[1.0, 0.0, 0.0],
                                   [0.0, 2.0, 0.0],
                                   [0.0, 0.0, 3.0]])
            x2 = paddle.to_tensor([0.0, 1.0, 0.0, 3.0])
            out_z1 = paddle.nonzero(x1)
            print(out_z1)
            #[[0 0]
            # [1 1]
            # [2 2]]
            out_z1_tuple = paddle.nonzero(x1, as_tuple=True)
            for out in out_z1_tuple:
                print(out)
            #[[0]
            # [1]
            # [2]]
            #[[0]
            # [1]
            # [2]]
            out_z2 = paddle.nonzero(x2)
            print(out_z2)
            #[[1]
            # [3]]
            out_z2_tuple = paddle.nonzero(x2, as_tuple=True)
            for out in out_z2_tuple:
                print(out)
            #[[1]
            # [3]]
    """
    rank = len(x.shape)

    # Compute the [z, n] index tensor on whichever execution path is active.
    if in_dygraph_mode():
        outs = _C_ops.final_state_where_index(x)
    elif paddle.in_dynamic_mode():
        outs = _C_ops.where_index(x)
    else:
        helper = LayerHelper("where_index", **locals())
        outs = helper.create_variable_for_type_inference(
            dtype=core.VarDesc.VarType.INT64)
        helper.append_op(
            type='where_index',
            inputs={'Condition': x},
            outputs={'Out': [outs]})

    if not as_tuple:
        return outs
    if rank == 1:
        # A 1-D input yields a single [z, 1] tensor; wrap it as a 1-tuple.
        return (outs, )
    # Split the [z, n] tensor column-wise into n tensors of shape [z, 1].
    return tuple(
        paddle.slice(outs, axes=[1], starts=[dim], ends=[dim + 1])
        for dim in range(rank))
def sort(x, axis=-1, descending=False, name=None):
    """
    This OP sorts the input along the given axis, and returns the sorted output tensor. The default sort algorithm is ascending, if you want the sort algorithm to be descending, you must set the :attr:`descending` as True.
    Args:
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is -1.
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: sorted tensor(with the same shape and data type as ``x``).
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([[[5,8,9,5],
                                   [0,0,1,7],
                                   [6,9,2,4]],
                                  [[5,2,4,2],
                                   [4,7,7,9],
                                   [1,7,0,6]]],
                                 dtype='float32')
            out1 = paddle.sort(x=x, axis=-1)
            out2 = paddle.sort(x=x, axis=0)
            out3 = paddle.sort(x=x, axis=1)
            print(out1)
            #[[[5. 5. 8. 9.]
            #  [0. 0. 1. 7.]
            #  [2. 4. 6. 9.]]
            # [[2. 2. 4. 5.]
            #  [4. 7. 7. 9.]
            #  [0. 1. 6. 7.]]]
            print(out2)
            #[[[5. 2. 4. 2.]
            #  [0. 0. 1. 7.]
            #  [1. 7. 0. 4.]]
            # [[5. 8. 9. 5.]
            #  [4. 7. 7. 9.]
            #  [6. 9. 2. 6.]]]
            print(out3)
            #[[[0. 0. 1. 4.]
            #  [5. 8. 2. 5.]
            #  [6. 9. 9. 7.]]
            # [[1. 2. 0. 2.]
            #  [4. 7. 4. 6.]
            #  [5. 7. 7. 9.]]]
    """
    # sort is implemented on top of the argsort kernel, which returns
    # (sorted values, indices); only the values are returned to the caller.
    if in_dygraph_mode():
        outs, _ = _C_ops.final_state_argsort(x, axis, descending)
        return outs

    if _in_legacy_dygraph():
        outs, _ = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
        return outs
    # Static-graph path: append an `argsort` op; the Indices output is
    # created (stop_gradient=True) but discarded, only Out is returned.
    helper = LayerHelper("sort", **locals())
    out = helper.create_variable_for_type_inference(
        dtype=x.dtype, stop_gradient=False)
    ids = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': x},
        outputs={'Out': out,
                 'Indices': ids},
        attrs={'axis': axis,
               'descending': descending})
    return out
def mode(x, axis=-1, keepdim=False, name=None):
    """
    This OP is used to find values and indices of the modes at the optional axis.
    Args:
        x(Tensor): Tensor, an input N-D Tensor with type float32, float64, int32, int64.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is -1.
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
    Returns:
        tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.
    Examples:
        .. code-block:: python
            import paddle
            tensor = paddle.to_tensor([[[1,2,2],[2,3,3]],[[0,5,5],[9,9,0]]], dtype=paddle.float32)
            res = paddle.mode(tensor, 2)
            print(res)
            # (Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #   [[2., 3.],
            #    [5., 9.]]), Tensor(shape=[2, 2], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #   [[1, 1],
            #    [1, 0]]))
    """
    # Dynamic-graph fast paths.
    if in_dygraph_mode():
        return _C_ops.final_state_mode(x, axis, keepdim)
    if _in_legacy_dygraph():
        return _C_ops.mode(x, "axis", axis, "keepdim", keepdim)

    # Static-graph path: append a `mode` op producing values and indices.
    helper = LayerHelper("mode", **locals())
    values = helper.create_variable_for_type_inference(dtype=x.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")
    helper.append_op(
        type="mode",
        inputs={"X": [x]},
        outputs={"Out": [values],
                 "Indices": [indices]},
        attrs={"axis": axis,
               "keepdim": keepdim})
    indices.stop_gradient = True
    return values, indices
def where(condition, x=None, y=None, name=None):
    r"""
    Return a tensor of elements selected from either $x$ or $y$, depending on $condition$.
    **Note**:
        ``paddle.where(condition)`` is identical to ``paddle.nonzero(condition, as_tuple=True)``.
    .. math::
      out_i =
      \begin{cases}
      x_i, \quad  \text{if}  \ condition_i \  is \ True \\
      y_i, \quad  \text{if}  \ condition_i \  is \ False \\
      \end{cases}
    Args:
        condition(Tensor): The condition to choose x or y. When True(nonzero), yield x, otherwise yield y.
        x(Tensor or Scalar, optional): x is a Tensor or Scalar with data type float32, float64, int32, int64. Either both or neither of x and y should be given.
        y(Tensor or Scalar, optional): y is a Tensor or Scalar with data type float32, float64, int32, int64. Either both or neither of x and y should be given.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: A Tensor with the same data dype as x.
    Examples:
        .. code-block:: python
          import paddle
          x = paddle.to_tensor([0.9383, 0.1983, 3.2, 1.2])
          y = paddle.to_tensor([1.0, 1.0, 1.0, 1.0])
          out = paddle.where(x>1, x, y)
          print(out)
          #out: [1.0, 1.0, 3.2, 1.2]
          out = paddle.where(x>1)
          print(out)
          #out: (Tensor(shape=[2, 1], dtype=int64, place=CPUPlace, stop_gradient=True,
          #            [[2],
          #             [3]]),)
    """
    # Promote Python scalars to 1-element tensors of a matching dtype.
    if np.isscalar(x):
        x = paddle.full([1], x, np.array([x]).dtype.name)

    if np.isscalar(y):
        y = paddle.full([1], y, np.array([y]).dtype.name)

    # where(condition) with no branches degenerates to nonzero(..., as_tuple=True).
    if x is None and y is None:
        return nonzero(condition, as_tuple=True)

    if x is None or y is None:
        raise ValueError("either both or neither of x and y should be given")

    if not paddle.in_dynamic_mode():
        check_variable_and_dtype(condition, 'condition', ['bool'], 'where')
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'where')
        check_variable_and_dtype(
            y, 'y', ['float32', 'float64', 'int32', 'int64'], 'where')

    condition_shape = list(condition.shape)
    x_shape = list(x.shape)
    y_shape = list(y.shape)

    if x_shape == y_shape and condition_shape == x_shape:
        # All shapes already agree; no broadcasting needed.
        broadcast_condition = condition
        broadcast_x = x
        broadcast_y = y
    else:
        if core.is_compiled_with_xpu():
            # XPU branch: compute the select arithmetically as
            # x * cond + y * (1 - cond) instead of using the `where` kernel.
            cond_int = paddle.cast(condition, x.dtype)
            cond_not_int = paddle.cast(logical_not(condition), x.dtype)
            out1 = paddle.multiply(x, cond_int)
            out2 = paddle.multiply(y, cond_not_int)
            out = paddle.add(out1, out2)
            return out

        # Broadcast all three operands to a common shape by adding a zeros
        # tensor that carries the broadcast result shape; the condition is
        # round-tripped through x.dtype so it can take part in the adds.
        zeros_like_x = paddle.zeros_like(x)
        zeros_like_y = paddle.zeros_like(y)
        zeros_like_condition = paddle.zeros_like(condition)
        zeros_like_condition = paddle.cast(zeros_like_condition, x.dtype)
        cast_cond = paddle.cast(condition, x.dtype)

        broadcast_zeros = paddle.add(zeros_like_x, zeros_like_y)
        broadcast_zeros = paddle.add(broadcast_zeros, zeros_like_condition)
        broadcast_x = paddle.add(x, broadcast_zeros)
        broadcast_y = paddle.add(y, broadcast_zeros)
        broadcast_condition = paddle.add(cast_cond, broadcast_zeros)
        broadcast_condition = paddle.cast(broadcast_condition, 'bool')

    if in_dygraph_mode():
        return _C_ops.final_state_where(broadcast_condition, broadcast_x,
                                        broadcast_y)
    else:
        if _in_legacy_dygraph():
            return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y)
        else:
            # Static-graph path: append a `where` op on the broadcast tensors.
            helper = LayerHelper("where", **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)

            helper.append_op(
                type='where',
                inputs={
                    'Condition': broadcast_condition,
                    'X': broadcast_x,
                    'Y': broadcast_y
                },
                outputs={'Out': [out]})

            return out
def index_sample(x, index):
    """
    **IndexSample Layer**
    IndexSample OP returns the element of the specified location of X,
    and the location is specified by Index.
    .. code-block:: text
                Given:
                X = [[1, 2, 3, 4, 5],
                     [6, 7, 8, 9, 10]]
                Index = [[0, 1, 3],
                         [0, 2, 4]]
                Then:
                Out = [[1, 2, 4],
                       [6, 8, 10]]
    Args:
        x (Tensor): The source input tensor with 2-D shape. Supported data type is
            int32, int64, float32, float64.
        index (Tensor): The index input tensor with 2-D shape, first dimension should be same with X.
            Data type is int32 or int64.
    Returns:
        output (Tensor): The output is a tensor with the same shape as index.
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
                                  [5.0, 6.0, 7.0, 8.0],
                                  [9.0, 10.0, 11.0, 12.0]], dtype='float32')
            index = paddle.to_tensor([[0, 1, 2],
                                      [1, 2, 3],
                                      [0, 0, 0]], dtype='int32')
            target = paddle.to_tensor([[100, 200, 300, 400],
                                       [500, 600, 700, 800],
                                       [900, 1000, 1100, 1200]], dtype='int32')
            out_z1 = paddle.index_sample(x, index)
            print(out_z1)
            #[[1. 2. 3.]
            # [6. 7. 8.]
            # [9. 9. 9.]]
            # Use the index of the maximum value by topk op
            # get the value of the element of the corresponding index in other tensors
            top_value, top_index = paddle.topk(x, k=2)
            out_z2 = paddle.index_sample(target, top_index)
            print(top_value)
            #[[ 4.  3.]
            # [ 8.  7.]
            # [12. 11.]]
            print(top_index)
            #[[3 2]
            # [3 2]
            # [3 2]]
            print(out_z2)
            #[[ 400  300]
            # [ 800  700]
            # [1200 1100]]
    """
    # Dynamic-graph fast paths: dispatch straight to the C++ op.
    if in_dygraph_mode():
        return _C_ops.final_state_index_sample(x, index)
    if _in_legacy_dygraph():
        return _C_ops.index_sample(x, index)

    # Static-graph path: validate dtypes and append an `index_sample` op.
    helper = LayerHelper("index_sample", **locals())
    check_variable_and_dtype(x, 'x',
                             ['float32', 'float64', 'int32', 'int64'],
                             'paddle.tensor.search.index_sample')
    check_variable_and_dtype(index, 'index', ['int32', 'int64'],
                             'paddle.tensor.search.index_sample')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='index_sample',
        inputs={'X': x, 'Index': index},
        outputs={'Out': out})
    return out
def masked_select(x, mask, name=None):
    """
    Returns a new 1-D tensor which indexes the input tensor according to the ``mask``
    which is a tensor with data type of bool.
    Args:
        x (Tensor): The input Tensor, the data type can be int32, int64, float32, float64.
        mask (Tensor): The Tensor containing the binary mask to index with, it's data type is bool.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
    Returns: A 1-D Tensor which is the same data type  as ``x``.
    Examples:
        .. code-block:: python
            import paddle
            x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
                                  [5.0, 6.0, 7.0, 8.0],
                                  [9.0, 10.0, 11.0, 12.0]])
            mask = paddle.to_tensor([[True, False, False, False],
                                     [True, True, False, False],
                                     [True, False, False, False]])
            out = paddle.masked_select(x, mask)
            #[1.0 5.0 6.0 9.0]
    """
    # Dynamic-graph fast paths: dispatch straight to the C++ op.
    if in_dygraph_mode():
        return _C_ops.final_state_masked_select(x, mask)

    if _in_legacy_dygraph():
        return _C_ops.masked_select(x, mask)

    # Static-graph path: validate dtypes and append a `masked_select` op.
    helper = LayerHelper("masked_select", **locals())
    # Fix: the op label in this check used to read 'mask_select' (typo),
    # making error messages inconsistent with the actual API name.
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'paddle.tensor.search.masked_select')
    check_variable_and_dtype(mask, 'mask', ['bool'],
                             'paddle.tensor.search.masked_select')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='masked_select', inputs={'X': x,
                                      'Mask': mask}, outputs={'Y': out})
    return out
def topk(x, k, axis=None, largest=True, sorted=True, name=None):
    """
    Return values and indices of the k largest or smallest at the optional axis.
    If the input is a 1-D Tensor, finds the k largest or smallest values and indices.
    If the input is a Tensor with higher rank, this operator computes the top k values and indices along the :attr:`axis`.

    Args:
        x(Tensor): Tensor, an input N-D Tensor with type float32, float64, int32, int64.
        k(int, Tensor): The number of top elements to look for along the axis.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is -1.
        largest(bool, optional) : largest is a flag, if set to true,
            algorithm will sort by descending order, otherwise sort by
            ascending order. Default is True.
        sorted(bool, optional): controls whether to return the elements in sorted order, default value is True. In gpu device, it always return the sorted value.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.

    Examples:

        .. code-block:: python

            import paddle

            data_1 = paddle.to_tensor([1, 4, 5, 7])
            value_1, indices_1 = paddle.topk(data_1, k=1)
            print(value_1)   # [7]
            print(indices_1) # [3]

            data_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]])
            value_2, indices_2 = paddle.topk(data_2, k=1)
            print(value_2)   # [[7], [6]]
            print(indices_2) # [[3], [1]]
    """
    if in_dygraph_mode():
        # BUGFIX: compare against None with ``is`` (identity), not ``==``
        # (equality) — PEP 8, and ``==`` can be hijacked by an overloaded
        # __eq__ on Tensor-like ``axis`` arguments.
        if axis is None:
            axis = -1
        out, indices = _C_ops.final_state_top_k(x, k, axis, largest, sorted)
        return out, indices

    if _non_static_mode():
        if axis is None:
            out, indices = _C_ops.top_k_v2(x, 'k',
                                           int(k), 'largest', largest, 'sorted',
                                           sorted)
        else:
            out, indices = _C_ops.top_k_v2(x, 'k',
                                           int(k), 'axis', axis, 'largest',
                                           largest, 'sorted', sorted)
        return out, indices

    # Static-graph lowering: ``k`` may be a Variable (runtime input) or a
    # plain Python int (compile-time attribute).
    helper = LayerHelper("top_k_v2", **locals())
    inputs = {"X": [x]}
    attrs = {}
    if isinstance(k, Variable):
        inputs['K'] = [k]
    else:
        attrs = {'k': k}
    attrs['largest'] = largest
    attrs['sorted'] = sorted
    if axis is not None:
        attrs['axis'] = axis
    values = helper.create_variable_for_type_inference(dtype=x.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")
    helper.append_op(
        type="top_k_v2",
        inputs=inputs,
        outputs={"Out": [values],
                 "Indices": [indices]},
        attrs=attrs)
    indices.stop_gradient = True
    return values, indices
def searchsorted(sorted_sequence,
                 values,
                 out_int32=False,
                 right=False,
                 name=None):
    """
    Locate, in the innermost dimension of ``sorted_sequence``, the insertion
    index for each element of ``values``.

    Args:
        sorted_sequence(Tensor): N-D or 1-D tensor with type int32, int64,
            float32, float64, monotonically increasing in its innermost
            dimension.
        values(Tensor): N-D tensor with type int32, int64, float32, float64.
        out_int32(bool, optional): If True the output dtype is int32,
            otherwise int64. Default False.
        right(bool, optional): If True, find upper bounds instead of lower
            bounds. For nan/inf values the size of the innermost dimension
            is returned. Default False.
        name(str, optional): Normally not needed; see :ref:`api_guide_Name`.

    Returns:
        Tensor shaped like ``values`` holding the insertion indices.
    """
    # Eager final-state kernel.
    if in_dygraph_mode():
        return _C_ops.final_state_searchsorted(sorted_sequence, values,
                                               out_int32, right)
    # Legacy imperative kernel.
    if _in_legacy_dygraph():
        return _C_ops.searchsorted(sorted_sequence, values, "out_int32",
                                   out_int32, "right", right)
    # Static graph: validate dtypes, then append the op.
    check_variable_and_dtype(sorted_sequence, 'SortedSequence',
                             ['float32', 'float64', 'int32', 'int64'],
                             'paddle.searchsorted')
    check_variable_and_dtype(values, 'Values',
                             ['float32', 'float64', 'int32', 'int64'],
                             'paddle.searchsorted')
    layer_helper = LayerHelper('searchsorted', **locals())
    index_dtype = 'int32' if out_int32 else 'int64'
    result = layer_helper.create_variable_for_type_inference(dtype=index_dtype)
    layer_helper.append_op(
        type='searchsorted',
        inputs={'SortedSequence': sorted_sequence, "Values": values},
        outputs={'Out': result},
        attrs={"out_int32": out_int32, "right": right})
    return result
def kthvalue(x, k, axis=None, keepdim=False, name=None):
    """
    Find the k-th smallest values (and their indices) along ``axis``.

    Args:
        x(Tensor): N-D Tensor with type float32, float64, int32, int64.
        k(int): Which smallest element to select along the axis.
        axis(int, optional): Axis to reduce over, in [-R, R) where R is
            x.ndim; negative values work as axis + R. ``None`` is treated
            as -1 (the last axis). Default None.
        keepdim(bool, optional): Keep the reduced axis with size one in the
            output. Default False.
        name(str, optional): Normally not needed; see :ref:`api_guide_Name`.

    Returns:
        tuple(Tensor): the values (same dtype as ``x``) and int64 indices.
    """
    # Imperative modes dispatch straight to the C++ kernels; the legacy
    # kernel takes ``axis`` only when it was explicitly supplied.
    if _non_static_mode():
        if _in_legacy_dygraph():
            if axis is not None:
                return _C_ops.kthvalue(x, 'k', k, "axis", axis, "keepdim",
                                       keepdim)
            return _C_ops.kthvalue(x, 'k', k, "keepdim", keepdim)
        resolved_axis = axis if axis is not None else -1
        return _C_ops.final_state_kthvalue(x, k, resolved_axis, keepdim)

    # Static graph: only record ``axis`` as an attribute when given.
    layer_helper = LayerHelper("kthvalue", **locals())
    op_attrs = {'k': k}
    if axis is not None:
        op_attrs['axis'] = axis
    out_values = layer_helper.create_variable_for_type_inference(dtype=x.dtype)
    out_indices = layer_helper.create_variable_for_type_inference(dtype="int64")
    layer_helper.append_op(
        type="kthvalue",
        inputs={"X": [x]},
        outputs={"Out": [out_values],
                 "Indices": [out_indices]},
        attrs=op_attrs)
    out_indices.stop_gradient = True
    return out_values, out_indices
| 38.573599 | 257 | 0.551824 |
from __future__ import print_function
import numpy as np
import paddle
from ..framework import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
from ..fluid import layers
from ..framework import core, in_dygraph_mode, _non_static_mode
from ..fluid.framework import _in_legacy_dygraph
from paddle.common_ops_import import convert_np_dtype_to_dtype_
from paddle.common_ops_import import Variable
from paddle.common_ops_import import VarDesc
from paddle import _C_ops
from .logic import logical_not
def argsort(x, axis=-1, descending=False, name=None):
    """
    Return the int64 indices that would sort ``x`` along ``axis``.

    NOTE(review): the original definition line was truncated to
    ``ort(x, ...)`` by dataset extraction; reconstructed as ``def argsort``
    from the 'argsort' op appended below, which returns only the indices.

    Args:
        x(Tensor): Input with type float32, float64, int16, int32, int64
            or uint8.
        axis(int, optional): Axis to sort along. Default -1 (last axis).
        descending(bool, optional): Sort in descending order. Default False.
        name(str, optional): Normally not needed; see :ref:`api_guide_Name`.

    Returns:
        Tensor of int64 sorting indices, same shape as ``x``.
    """
    if in_dygraph_mode():
        _, ids = _C_ops.final_state_argsort(x, axis, descending)
        return ids

    if _in_legacy_dygraph():
        _, ids = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
        return ids
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'argsort')

    helper = LayerHelper("argsort", **locals())
    # The argsort op produces both sorted values and indices; only the
    # indices are returned here.
    out = helper.create_variable_for_type_inference(
        dtype=x.dtype, stop_gradient=True)
    ids = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': x},
        outputs={'Out': out,
                 'Indices': ids},
        attrs={'axis': axis,
               'descending': descending})
    return ids
def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
    """
    Return the indices of the maximum values of ``x`` along ``axis``
    (over the flattened input when ``axis`` is None).

    Args:
        x(Tensor): Input with type float32, float64, int16, int32, int64
            or uint8.
        axis(int, optional): Axis to reduce over; None flattens ``x`` first.
        keepdim(bool, optional): Keep the reduced axis with size one.
        dtype(str, optional): Output index dtype, 'int32' or 'int64'.
            Default 'int64'.
        name(str, optional): Normally not needed; see :ref:`api_guide_Name`.

    Returns:
        Tensor of indices with dtype ``dtype``.

    Raises:
        TypeError: If ``axis`` is neither int nor None.
        ValueError: If ``dtype`` is None.
    """
    if axis is not None and not isinstance(axis, int):
        raise TypeError(
            "The type of 'axis' must be int or None in argmax, but received %s."
            % (type(axis)))
    if dtype is None:
        raise ValueError(
            "the value of 'dtype' in argmax could not be None, but received None"
        )
    var_dtype = convert_np_dtype_to_dtype_(dtype)
    # axis=None means "argmax over the flattened tensor"; the kernel
    # expresses that as flatten=True with axis 0.
    flatten = False
    if axis is None:
        flatten = True
        axis = 0
    if in_dygraph_mode():
        return _C_ops.final_state_argmax(x, axis, keepdim, flatten, var_dtype)
    if _in_legacy_dygraph():
        out = _C_ops.arg_max(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                             keepdim, 'flatten', flatten)
        return out
    helper = LayerHelper("argmax", **locals())
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'paddle.argmax')
    # BUGFIX: this error message previously named the op 'argmin'
    # (copy-paste from the sibling function below).
    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmax')
    attrs = {}
    out = helper.create_variable_for_type_inference(var_dtype)
    attrs['keepdims'] = keepdim
    attrs['axis'] = axis
    attrs['flatten'] = flatten
    attrs['dtype'] = var_dtype
    helper.append_op(
        type='arg_max', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs)
    out.stop_gradient = True
    return out
def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
    """Return the indices of the minimum values of ``x`` along ``axis``.

    ``axis=None`` flattens ``x`` first; ``dtype`` selects the index dtype
    ('int32' or 'int64'); ``keepdim`` keeps the reduced axis with size one.
    Raises TypeError for a non-int axis and ValueError for dtype=None.
    """
    if axis is not None and not isinstance(axis, int):
        raise TypeError(
            "The type of 'axis' must be int or None in argmin, but received %s."
            % (type(axis)))
    if dtype is None:
        raise ValueError(
            "the value of 'dtype' in argmin could not be None, but received None"
        )
    var_dtype = convert_np_dtype_to_dtype_(dtype)
    # axis=None -> argmin over the flattened tensor (flatten=True, axis 0).
    flatten = False
    if axis is None:
        flatten = True
        axis = 0
    if in_dygraph_mode():
        return _C_ops.final_state_argmin(x, axis, keepdim, flatten, var_dtype)
    if _in_legacy_dygraph():
        out = _C_ops.arg_min(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                              keepdim, 'flatten', flatten)
        return out
    # Static-graph path: validate inputs, then append the arg_min op.
    helper = LayerHelper("argmin", **locals())
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'paddle.argmin')
    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
    out = helper.create_variable_for_type_inference(var_dtype)
    attrs = {}
    attrs['keepdims'] = keepdim
    attrs['axis'] = axis
    attrs['flatten'] = flatten
    attrs['dtype'] = var_dtype
    helper.append_op(
        type='arg_min', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs)
    out.stop_gradient = True
    return out
def index_select(x, index, axis=0, name=None):
    """Return a new Tensor gathered from ``x`` along ``axis`` using the
    integer Tensor ``index`` (mapped to the op's 'dim' attribute).
    """
    if in_dygraph_mode():
        return _C_ops.final_state_index_select(x, index, axis)
    if _in_legacy_dygraph():
        return _C_ops.index_select(x, index, 'dim', axis)
    # Static-graph path: validate inputs, then append the op.
    helper = LayerHelper("index_select", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'paddle.tensor.search.index_select')
    check_variable_and_dtype(index, 'index', ['int32', 'int64'],
                             'paddle.tensor.search.index_select')
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='index_select',
        inputs={'X': x,
                'Index': index},
        outputs={'Out': out},
        attrs={'dim': axis})
    return out
def nonzero(x, as_tuple=False):
    """Return the coordinates of the non-zero elements of ``x``.

    With ``as_tuple=False`` a single int64 coordinate Tensor is returned;
    with ``as_tuple=True`` it is split into one 1-column slice per input
    dimension.
    """
    list_out = []
    shape = x.shape
    rank = len(shape)
    if in_dygraph_mode():
        outs = _C_ops.final_state_where_index(x)
    elif paddle.in_dynamic_mode():
        outs = _C_ops.where_index(x)
    else:
        # Static-graph path: append the where_index op.
        helper = LayerHelper("where_index", **locals())
        outs = helper.create_variable_for_type_inference(
            dtype=core.VarDesc.VarType.INT64)
        helper.append_op(
            type='where_index',
            inputs={'Condition': x},
            outputs={'Out': [outs]})
    if not as_tuple:
        return outs
    elif rank == 1:
        return tuple([outs])
    else:
        # Split the coordinate matrix column-by-column, one slice per axis.
        for i in range(rank):
            list_out.append(
                paddle.slice(
                    outs, axes=[1], starts=[i], ends=[i + 1]))
        return tuple(list_out)
def sort(x, axis=-1, descending=False, name=None):
    """Return ``x`` sorted along ``axis`` (ascending unless ``descending``).

    Implemented on top of the argsort kernels; only the sorted values are
    returned, the index output is discarded.
    """
    if in_dygraph_mode():
        outs, _ = _C_ops.final_state_argsort(x, axis, descending)
        return outs
    if _in_legacy_dygraph():
        outs, _ = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
        return outs
    helper = LayerHelper("sort", **locals())
    # stop_gradient=False: gradients flow through the sorted values.
    out = helper.create_variable_for_type_inference(
        dtype=x.dtype, stop_gradient=False)
    ids = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': x},
        outputs={'Out': out,
                 'Indices': ids},
        attrs={'axis': axis,
               'descending': descending})
    return out
def mode(x, axis=-1, keepdim=False, name=None):
    """Return the mode values and their int64 indices along ``axis``;
    ``keepdim`` keeps the reduced axis with size one.
    """
    if in_dygraph_mode():
        return _C_ops.final_state_mode(x, axis, keepdim)
    if _in_legacy_dygraph():
        return _C_ops.mode(x, "axis", axis, "keepdim", keepdim)
    # Static-graph path: append the mode op with two outputs.
    helper = LayerHelper("mode", **locals())
    inputs = {"X": [x]}
    attrs = {}
    attrs['axis'] = axis
    attrs['keepdim'] = keepdim
    values = helper.create_variable_for_type_inference(dtype=x.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")
    helper.append_op(
        type="mode",
        inputs=inputs,
        outputs={"Out": [values],
                 "Indices": [indices]},
        attrs=attrs)
    indices.stop_gradient = True
    return values, indices
def where(condition, x=None, y=None, name=None):
    """Return a Tensor taking elements from ``x`` where ``condition`` is
    True and from ``y`` elsewhere. With only ``condition`` given it behaves
    like ``nonzero(condition, as_tuple=True)``.
    """
    # Scalars are promoted to 1-element Tensors of a matching numpy dtype.
    if np.isscalar(x):
        x = paddle.full([1], x, np.array([x]).dtype.name)
    if np.isscalar(y):
        y = paddle.full([1], y, np.array([y]).dtype.name)
    if x is None and y is None:
        return nonzero(condition, as_tuple=True)
    if x is None or y is None:
        raise ValueError("either both or neither of x and y should be given")
    if not paddle.in_dynamic_mode():
        check_variable_and_dtype(condition, 'condition', ['bool'], 'where')
        check_variable_and_dtype(
            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'where')
        check_variable_and_dtype(
            y, 'y', ['float32', 'float64', 'int32', 'int64'], 'where')
    condition_shape = list(condition.shape)
    x_shape = list(x.shape)
    y_shape = list(y.shape)
    if x_shape == y_shape and condition_shape == x_shape:
        # All shapes already agree: no broadcasting needed.
        broadcast_condition = condition
        broadcast_x = x
        broadcast_y = y
    else:
        if core.is_compiled_with_xpu():
            # XPU branch: emulate select via arithmetic masking instead of
            # the where kernel.
            cond_int = paddle.cast(condition, x.dtype)
            cond_not_int = paddle.cast(logical_not(condition), x.dtype)
            out1 = paddle.multiply(x, cond_int)
            out2 = paddle.multiply(y, cond_not_int)
            out = paddle.add(out1, out2)
            return out
        # Broadcast all three operands to a common shape by summing zero
        # tensors of each operand's shape.
        zeros_like_x = paddle.zeros_like(x)
        zeros_like_y = paddle.zeros_like(y)
        zeros_like_condition = paddle.zeros_like(condition)
        zeros_like_condition = paddle.cast(zeros_like_condition, x.dtype)
        cast_cond = paddle.cast(condition, x.dtype)
        broadcast_zeros = paddle.add(zeros_like_x, zeros_like_y)
        broadcast_zeros = paddle.add(broadcast_zeros, zeros_like_condition)
        broadcast_x = paddle.add(x, broadcast_zeros)
        broadcast_y = paddle.add(y, broadcast_zeros)
        broadcast_condition = paddle.add(cast_cond, broadcast_zeros)
        broadcast_condition = paddle.cast(broadcast_condition, 'bool')
    if in_dygraph_mode():
        return _C_ops.final_state_where(broadcast_condition, broadcast_x,
                                        broadcast_y)
    else:
        if _in_legacy_dygraph():
            return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y)
        else:
            # Static-graph path: append the where op.
            helper = LayerHelper("where", **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(
                type='where',
                inputs={
                    'Condition': broadcast_condition,
                    'X': broadcast_x,
                    'Y': broadcast_y
                },
                outputs={'Out': [out]})
            return out
def index_sample(x, index):
    """Sample entries of ``x`` according to ``index`` via the
    'index_sample' op (per-row gather — TODO confirm exact semantics
    against the op definition).
    """
    if in_dygraph_mode():
        return _C_ops.final_state_index_sample(x, index)
    else:
        if _in_legacy_dygraph():
            return _C_ops.index_sample(x, index)
        else:
            # Static-graph path: validate inputs, then append the op.
            helper = LayerHelper("index_sample", **locals())
            check_variable_and_dtype(x, 'x',
                                     ['float32', 'float64', 'int32', 'int64'],
                                     'paddle.tensor.search.index_sample')
            check_variable_and_dtype(index, 'index', ['int32', 'int64'],
                                     'paddle.tensor.search.index_sample')
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(
                type='index_sample',
                inputs={'X': x,
                        'Index': index},
                outputs={'Out': out})
            return out
def masked_select(x, mask, name=None):
    """Return a 1-D Tensor of the elements of ``x`` where boolean ``mask``
    is True.
    """
    if in_dygraph_mode():
        return _C_ops.final_state_masked_select(x, mask)
    if _in_legacy_dygraph():
        return _C_ops.masked_select(x, mask)
    # Static-graph path: validate inputs, then append the op.
    helper = LayerHelper("masked_select", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'paddle.tensor.search.mask_select')
    check_variable_and_dtype(mask, 'mask', ['bool'],
                             'paddle.tensor.search.masked_select')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='masked_select', inputs={'X': x,
                                      'Mask': mask}, outputs={'Y': out})
    return out
def topk(x, k, axis=None, largest=True, sorted=True, name=None):
    """Return the values and int64 indices of the ``k`` largest (or, when
    ``largest=False``, smallest) elements along ``axis`` (default: last
    axis); ``sorted`` controls ordered output.
    """
    if in_dygraph_mode():
        # BUGFIX: compare against None with ``is`` (identity), not ``==``
        # (equality) — PEP 8, and ``==`` can be hijacked by an overloaded
        # __eq__ on Tensor-like ``axis`` arguments.
        if axis is None:
            axis = -1
        out, indices = _C_ops.final_state_top_k(x, k, axis, largest, sorted)
        return out, indices
    if _non_static_mode():
        if axis is None:
            out, indices = _C_ops.top_k_v2(x, 'k',
                                           int(k), 'largest', largest, 'sorted',
                                           sorted)
        else:
            out, indices = _C_ops.top_k_v2(x, 'k',
                                           int(k), 'axis', axis, 'largest',
                                           largest, 'sorted', sorted)
        return out, indices
    # Static-graph lowering: ``k`` may be a Variable (runtime input) or a
    # plain Python int (compile-time attribute).
    helper = LayerHelper("top_k_v2", **locals())
    inputs = {"X": [x]}
    attrs = {}
    if isinstance(k, Variable):
        inputs['K'] = [k]
    else:
        attrs = {'k': k}
    attrs['largest'] = largest
    attrs['sorted'] = sorted
    if axis is not None:
        attrs['axis'] = axis
    values = helper.create_variable_for_type_inference(dtype=x.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")
    helper.append_op(
        type="top_k_v2",
        inputs=inputs,
        outputs={"Out": [values],
                 "Indices": [indices]},
        attrs=attrs)
    indices.stop_gradient = True
    return values, indices
def searchsorted(sorted_sequence,
                 values,
                 out_int32=False,
                 right=False,
                 name=None):
    """Return the insertion indices of ``values`` into the innermost
    (sorted) dimension of ``sorted_sequence``; ``right`` selects upper
    bounds and ``out_int32`` selects an int32 result dtype.
    """
    if in_dygraph_mode():
        return _C_ops.final_state_searchsorted(sorted_sequence, values,
                                               out_int32, right)
    if _in_legacy_dygraph():
        return _C_ops.searchsorted(sorted_sequence, values, "out_int32",
                                   out_int32, "right", right)
    # Static-graph path: validate dtypes, then append the op.
    check_variable_and_dtype(sorted_sequence, 'SortedSequence',
                             ['float32', 'float64', 'int32', 'int64'],
                             'paddle.searchsorted')
    check_variable_and_dtype(values, 'Values',
                             ['float32', 'float64', 'int32', 'int64'],
                             'paddle.searchsorted')
    helper = LayerHelper('searchsorted', **locals())
    out_type = 'int32' if out_int32 else 'int64'
    out = helper.create_variable_for_type_inference(dtype=out_type)
    helper.append_op(
        type='searchsorted',
        inputs={'SortedSequence': sorted_sequence,
                "Values": values},
        outputs={'Out': out},
        attrs={"out_int32": out_int32,
               "right": right})
    return out
def kthvalue(x, k, axis=None, keepdim=False, name=None):
    """Return the k-th smallest values and their int64 indices along
    ``axis`` (``None`` is treated as the last axis).
    """
    if _non_static_mode():
        if axis is not None:
            if _in_legacy_dygraph():
                return _C_ops.kthvalue(x, 'k', k, "axis", axis, "keepdim",
                                       keepdim)
            return _C_ops.final_state_kthvalue(x, k, axis, keepdim)
        else:
            if _in_legacy_dygraph():
                return _C_ops.kthvalue(x, 'k', k, "keepdim", keepdim)
            # axis=None -> reduce over the last dimension.
            return _C_ops.final_state_kthvalue(x, k, -1, keepdim)
    # Static-graph path: only record ``axis`` as an attribute when given.
    helper = LayerHelper("kthvalue", **locals())
    inputs = {"X": [x]}
    attrs = {'k': k}
    if axis is not None:
        attrs['axis'] = axis
    values = helper.create_variable_for_type_inference(dtype=x.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")
    helper.append_op(
        type="kthvalue",
        inputs=inputs,
        outputs={"Out": [values],
                 "Indices": [indices]},
        attrs=attrs)
    indices.stop_gradient = True
    return values, indices
| true | true |
f7f72efec349bd940b76f6e67fb9d4c1e6556ae2 | 8,808 | py | Python | Sketches/RJL/bittorrent/BitTorrent/BitTorrent/language.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/RJL/bittorrent/BitTorrent/BitTorrent/language.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/RJL/bittorrent/BitTorrent/BitTorrent/language.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z | # -*- coding: UTF-8 -*-
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# http://people.w3.org/rishida/names/languages.html
language_names = {
'af' :u'Afrikaans' , 'bg' :u'Български' ,
'da' :u'Dansk' , 'ca' :u'Català' ,
'cs' :u'Čeština' , 'de' :u'Deutsch' ,
'en' :u'English' , 'es' :u'Español' ,
'es_MX':u'Español de Mexico ' , 'fr' :u'Français' ,
'el' :u'Ελληνικά' , 'he' :u'עברית' ,
'hu' :u'Magyar' , 'it' :u'Italiano' ,
'is' :u'Íslenska' , 'ja' :u'日本語' ,
'ko' :u'한국어' ,'nl' :u'Nederlands' ,
'nb_NO':u'Norsk bokmål' , 'pl' :u'Polski' ,
'pt' :u'Português' , 'pt_BR':u'Português do Brasil' ,
'ro' :u'Română' , 'ru' :u'Русский' ,
'sk' :u'Slovenský' , 'sl' :u'Slovensko' ,
'sv' :u'Svenska' , 'tr' :u'Türkçe' ,
'vi' :u'Tiê?ng Viê?t' ,
'zh_CN':u'简体中文' , # Simplified
'zh_TW':u'繁體中文' , # Traditional
}
unfinished_language_names = {
'ar' :u'العربية' , 'bs' :u'Bosanski' ,
'eo' :u'Esperanto' , 'eu' :u'Euskara' ,
'et' :u'Eesti' , 'fi' :u'Suomi' ,
'fa' :u'فارسی' , 'ga' :u'Gaeilge' ,
'gl' :u'Galego' , 'hr' :u'Hrvatski' ,
'hy' :u'Հայերեն' , 'in' :u'Bahasa indonesia' ,
'ka' :u'ქართული ენა', 'lt' :u'Lietuvių' ,
'ms' :u'Bahasa melayu' , 'ml' :u'Malayalam' ,
'sq' :u'Shqipe' , 'th' :u'ภาษาไทย' ,
'tlh' :u'tlhIngan-Hol' , 'uk' :u'Українська' ,
'hi' :u'हिंदी' , 'cy' :u'Cymraeg' ,
'nn_NO':u'Norsk Nynorsk' , 'te' :u' తెలుగు' ,
}
#language_names.update(unfinished_language_names)
# NOTE: Python 2 ``dict.keys()`` returns a plain list, so the in-place
# sort below is valid; under Python 3 this would need ``sorted(...)``.
languages = language_names.keys()
languages.sort()
# windows codepage to locale mapping
locale_sucks = {
0x0436: "af", # Afrikaans
0x3801: "ar_ae", # Arabic - United Arab Emirates
0x3C01: "ar_bh", # Arabic - Bahrain
0x1401: "ar_dz", # Arabic - Algeria
0x0C01: "ar_eg", # Arabic - Egypt
0x0801: "ar_iq", # Arabic - Iraq
0x2C01: "ar_jo", # Arabic - Jordan
0x3401: "ar_kw", # Arabic - Kuwait
0x3001: "ar_lb", # Arabic - Lebanon
0x1001: "ar_ly", # Arabic - Libya
0x1801: "ar_ma", # Arabic - Morocco
0x2001: "ar_om", # Arabic - Oman
0x4001: "ar_qa", # Arabic - Qatar
0x0401: "ar_sa", # Arabic - Saudi Arabia
0x2801: "ar_sy", # Arabic - Syria
0x1C01: "ar_tn", # Arabic - Tunisia
0x2401: "ar_ye", # Arabic - Yemen
0x082C: "az_az", # Azeri - Cyrillic
0x0423: "be", # Belarusian
0x0402: "bg", # Bulgarian
0x0403: "ca", # Catalan
0x0405: "cs", # Czech
0x0406: "da", # Danish
0x0007: "de", # German
0x0C07: "de_at", # German - Austria
0x0807: "de_ch", # German - Switzerland
0x0407: "de_de", # German - Germany
0x1407: "de_li", # German - Liechtenstein
0x1007: "de_lu", # German - Luxembourg
0x0408: "el", # Greek
0x0C09: "en_au", # English - Australia
0x2809: "en_bz", # English - Belize
0x1009: "en_ca", # English - Canada
0x2409: "en_cb", # English - Carribbean
0x0809: "en_gb", # English - United Kingdom
0x1809: "en_ie", # English - Ireland
0x2009: "en_jm", # English - Jamaica
0x1409: "en_nz", # English - New Zealand
0x3409: "en_ph", # English - Phillippines
0x2C09: "en_tt", # English - Trinidad
0x0409: "en_us", # English - United States
0x1C09: "en_za", # English - South Africa
0x000A: "es", # Spanish (added)
0x2C0A: "es_ar", # Spanish - Argentina
0x400A: "es_bo", # Spanish - Bolivia
0x340A: "es_cl", # Spanish - Chile
0x240A: "es_co", # Spanish - Colombia
0x140A: "es_cr", # Spanish - Costa Rica
0x1C0A: "es_do", # Spanish - Dominican Republic
0x300A: "es_ec", # Spanish - Ecuador
0x040a: "es_es", # Spanish - Spain
0x100A: "es_gt", # Spanish - Guatemala
0x480A: "es_hn", # Spanish - Honduras
0x080A: "es_mx", # Spanish - Mexico
0x4C0A: "es_ni", # Spanish - Nicaragua
0x180A: "es_pa", # Spanish - Panama
0x280A: "es_pe", # Spanish - Peru
0x500A: "es_pr", # Spanish - Puerto Rico
0x3C0A: "es_py", # Spanish - Paraguay
0x440A: "es_sv", # Spanish - El Salvador
0x380A: "es_uy", # Spanish - Uruguay
0x200A: "es_ve", # Spanish - Venezuela
0x0425: "et", # Estonian
0x0009: "en", # English (added)
0x042D: "eu", # Basque
0x0429: "fa", # Farsi
0x040B: "fi", # Finnish
0x0438: "fo", # Faroese
0x000C: "fr", # French (added)
0x080C: "fr_be", # French - Belgium
0x0C0C: "fr_ca", # French - Canada
0x100C: "fr_ch", # French - Switzerland
0x040C: "fr_fr", # French - France
0x140C: "fr_lu", # French - Luxembourg
0x043C: "gd", # Gaelic - Scotland
0x083C: "gd_ie", # Gaelic - Ireland
0x040D: "he", # Hebrew
0x0439: "hi", # Hindi
0x041A: "hr", # Croatian
0x040E: "hu", # Hungarian
0x042B: "hy", # Armenian
0x0421: "id", # Indonesian
0x040F: "is", # Icelandic
0x0010: "it", # Italian (added)
0x0810: "it_ch", # Italian - Switzerland
0x0410: "it_it", # Italian - Italy
0x0411: "ja", # Japanese
0x0412: "ko", # Korean
0x0427: "lt", # Lithuanian
0x0426: "lv", # Latvian
0x042F: "mk", # FYRO Macedonian
0x044E: "mr", # Marathi
0x083E: "ms_bn", # Malay - Brunei
0x043E: "ms_my", # Malay - Malaysia
0x043A: "mt", # Maltese
0x0013: "nl", # Dutch (added)
0x0813: "nl_be", # Dutch - Belgium
0x0413: "nl_nl", # Dutch - The Netherlands
0x0814: "no_no", # Norwegian - Nynorsk
0x0414: "nb_no", # Norwegian - Bokmal (?)
0x0415: "pl", # Polish
0x0016: "pt", # Portuguese (added)
0x0416: "pt_br", # Portuguese - Brazil
0x0816: "pt_pt", # Portuguese - Portugal
0x0417: "rm", # Raeto-Romance
0x0418: "ro", # Romanian - Romania
0x0818: "ro_mo", # Romanian - Moldova
0x0419: "ru", # Russian
0x0819: "ru_mo", # Russian - Moldova
0x044F: "sa", # Sanskrit
0x042E: "sb", # Sorbian
0x041B: "sk", # Slovak
0x0424: "sl", # Slovenian
0x041C: "sq", # Albanian
0x081A: "sr_sp", # Serbian - Latin
0x001D: "sv", # Swedish (added)
0x081D: "sv_fi", # Swedish - Finland
0x041D: "sv_se", # Swedish - Sweden
0x0441: "sw", # Swahili
0x0430: "sx", # Sutu
0x0449: "ta", # Tamil
0x041E: "th", # Thai
0x0432: "tn", # Setsuana
0x041F: "tr", # Turkish
0x0431: "ts", # Tsonga
0X0444: "tt", # Tatar
0x0422: "uk", # Ukrainian
0x0420: "ur", # Urdu
0x0443: "uz_uz", # Uzbek - Latin
0x042A: "vi", # Vietnamese
0x0434: "xh", # Xhosa
0x043D: "yi", # Yiddish
0x0804: "zh_cn", # Chinese - China
0x0C04: "zh_hk", # Chinese - Hong Kong S.A.R.
0x1404: "zh_mo", # Chinese - Macau S.A.R
0x1004: "zh_sg", # Chinese - Singapore
0x0404: "zh_tw", # Chinese - Taiwan
0x0435: "zu", # Zulu
}
if __name__ == '__main__':
    # Sanity check: every supported UI language code must have a Windows
    # locale mapping in ``locale_sucks``; print the missing ones.
    # Uses the builtin ``set`` (Python 2.4+) instead of the long-deprecated
    # ``sets.Set`` module.
    internal = set(x.lower() for x in languages)
    windows = set(locale_sucks.values())
    if not windows.issuperset(internal):
        diff = sorted(internal.difference(windows))
        # Parenthesized form prints identically on Python 2 and 3.
        print(diff)
| 43.389163 | 77 | 0.500114 |
language_names = {
'af' :u'Afrikaans' , 'bg' :u'Български' ,
'da' :u'Dansk' , 'ca' :u'Català' ,
'cs' :u'Čeština' , 'de' :u'Deutsch' ,
'en' :u'English' , 'es' :u'Español' ,
'es_MX':u'Español de Mexico ' , 'fr' :u'Français' ,
'el' :u'Ελληνικά' , 'he' :u'עברית' ,
'hu' :u'Magyar' , 'it' :u'Italiano' ,
'is' :u'Íslenska' , 'ja' :u'日本語' ,
'ko' :u'한국어' ,'nl' :u'Nederlands' ,
'nb_NO':u'Norsk bokmål' , 'pl' :u'Polski' ,
'pt' :u'Português' , 'pt_BR':u'Português do Brasil' ,
'ro' :u'Română' , 'ru' :u'Русский' ,
'sk' :u'Slovenský' , 'sl' :u'Slovensko' ,
'sv' :u'Svenska' , 'tr' :u'Türkçe' ,
'vi' :u'Tiê?ng Viê?t' ,
'zh_CN':u'简体中文' ,
'zh_TW':u'繁體中文' ,
}
unfinished_language_names = {
'ar' :u'العربية' , 'bs' :u'Bosanski' ,
'eo' :u'Esperanto' , 'eu' :u'Euskara' ,
'et' :u'Eesti' , 'fi' :u'Suomi' ,
'fa' :u'فارسی' , 'ga' :u'Gaeilge' ,
'gl' :u'Galego' , 'hr' :u'Hrvatski' ,
'hy' :u'Հայերեն' , 'in' :u'Bahasa indonesia' ,
'ka' :u'ქართული ენა', 'lt' :u'Lietuvių' ,
'ms' :u'Bahasa melayu' , 'ml' :u'Malayalam' ,
'sq' :u'Shqipe' , 'th' :u'ภาษาไทย' ,
'tlh' :u'tlhIngan-Hol' , 'uk' :u'Українська' ,
'hi' :u'हिंदी' , 'cy' :u'Cymraeg' ,
'nn_NO':u'Norsk Nynorsk' , 'te' :u' తెలుగు' ,
}
languages = language_names.keys()
languages.sort()
locale_sucks = {
0x0436: "af",
0x3801: "ar_ae",
0x3C01: "ar_bh",
0x1401: "ar_dz",
0x0C01: "ar_eg",
0x0801: "ar_iq",
0x2C01: "ar_jo",
0x3401: "ar_kw",
0x3001: "ar_lb",
0x1001: "ar_ly",
0x1801: "ar_ma",
0x2001: "ar_om",
0x4001: "ar_qa",
0x0401: "ar_sa",
0x2801: "ar_sy",
0x1C01: "ar_tn",
0x2401: "ar_ye",
0x082C: "az_az",
0x0423: "be",
0x0402: "bg",
0x0403: "ca",
0x0405: "cs",
0x0406: "da",
0x0007: "de",
0x0C07: "de_at",
0x0807: "de_ch",
0x0407: "de_de",
0x1407: "de_li",
0x1007: "de_lu",
0x0408: "el",
0x0C09: "en_au",
0x2809: "en_bz",
0x1009: "en_ca",
0x2409: "en_cb",
0x0809: "en_gb",
0x1809: "en_ie",
0x2009: "en_jm",
0x1409: "en_nz",
0x3409: "en_ph",
0x2C09: "en_tt",
0x0409: "en_us",
0x1C09: "en_za",
0x000A: "es",
0x2C0A: "es_ar",
0x400A: "es_bo",
0x340A: "es_cl",
0x240A: "es_co",
0x140A: "es_cr",
0x1C0A: "es_do",
0x300A: "es_ec",
0x040a: "es_es",
0x100A: "es_gt",
0x480A: "es_hn",
0x080A: "es_mx",
0x4C0A: "es_ni",
0x180A: "es_pa",
0x280A: "es_pe",
0x500A: "es_pr",
0x3C0A: "es_py",
0x440A: "es_sv",
0x380A: "es_uy",
0x200A: "es_ve",
0x0425: "et",
0x0009: "en",
0x042D: "eu",
0x0429: "fa",
0x040B: "fi",
0x0438: "fo",
0x000C: "fr",
0x080C: "fr_be",
0x0C0C: "fr_ca",
0x100C: "fr_ch",
0x040C: "fr_fr",
0x140C: "fr_lu",
0x043C: "gd",
0x083C: "gd_ie",
0x040D: "he",
0x0439: "hi",
0x041A: "hr",
0x040E: "hu",
0x042B: "hy",
0x0421: "id",
0x040F: "is",
0x0010: "it",
0x0810: "it_ch",
0x0410: "it_it",
0x0411: "ja",
0x0412: "ko",
0x0427: "lt",
0x0426: "lv",
0x042F: "mk",
0x044E: "mr",
0x083E: "ms_bn",
0x043E: "ms_my",
0x043A: "mt",
0x0013: "nl",
0x0813: "nl_be",
0x0413: "nl_nl",
0x0814: "no_no",
0x0414: "nb_no",
0x0415: "pl",
0x0016: "pt",
0x0416: "pt_br",
0x0816: "pt_pt",
0x0417: "rm",
0x0418: "ro",
0x0818: "ro_mo",
0x0419: "ru",
0x0819: "ru_mo",
0x044F: "sa",
0x042E: "sb",
0x041B: "sk",
0x0424: "sl",
0x041C: "sq",
0x081A: "sr_sp",
0x001D: "sv",
0x081D: "sv_fi",
0x041D: "sv_se",
0x0441: "sw",
0x0430: "sx",
0x0449: "ta",
0x041E: "th",
0x0432: "tn",
0x041F: "tr",
0x0431: "ts",
0X0444: "tt",
0x0422: "uk",
0x0420: "ur",
0x0443: "uz_uz",
0x042A: "vi",
0x0434: "xh",
0x043D: "yi",
0x0804: "zh_cn",
0x0C04: "zh_hk",
0x1404: "zh_mo",
0x1004: "zh_sg",
0x0404: "zh_tw",
0x0435: "zu",
}
if __name__ == '__main__':
from sets import Set
internal = Set([x.lower() for x in languages])
windows = Set(locale_sucks.values())
if not windows.issuperset(internal):
diff = list(internal.difference(windows))
diff.sort()
print diff
| false | true |
f7f72ff289edd2c3014caa3c0048999f749246e0 | 154 | py | Python | Afvaldienst/test_afvalstoffendienstkalender.py | xirixiz/python-afvalwijzer-afvalstoffendienst | ef76b07033848a6f7092e941c6c4a3ec214f2842 | [
"MIT"
] | 1 | 2019-10-28T12:26:14.000Z | 2019-10-28T12:26:14.000Z | Afvaldienst/test_afvalstoffendienstkalender.py | xirixiz/afvaldienst | ef76b07033848a6f7092e941c6c4a3ec214f2842 | [
"MIT"
] | 3 | 2020-09-11T08:38:50.000Z | 2020-09-23T07:08:44.000Z | Afvaldienst/test_afvalstoffendienstkalender.py | xirixiz/python-afvalwijzer-afvalstoffendienst | ef76b07033848a6f7092e941c6c4a3ec214f2842 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from Afvaldienst import Afvaldienst
# Smoke-test instantiation with placeholder credentials.
# NOTE(review): argument order appears to be (provider, api_token,
# postal_code, house_number, suffix, flag) — confirm against the
# Afvaldienst constructor signature before relying on this.
trash = Afvaldienst('afvalstoffendienstkalender', 'api_token', '5061DR', '120', '', 'false')
| 25.666667 | 92 | 0.733766 |
from Afvaldienst import Afvaldienst
trash = Afvaldienst('afvalstoffendienstkalender', 'api_token', '5061DR', '120', '', 'false')
| true | true |
f7f730bf8842e3bd333770a584ed2aaa1cb4eafa | 5,134 | py | Python | ucsmsdk/mometa/bios/BiosVfInterleaveConfiguration.py | thinkitdata/ucsmsdk | da6599e1dbc1207a30eabe548a7e5791af5f476b | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/bios/BiosVfInterleaveConfiguration.py | thinkitdata/ucsmsdk | da6599e1dbc1207a30eabe548a7e5791af5f476b | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/bios/BiosVfInterleaveConfiguration.py | thinkitdata/ucsmsdk | da6599e1dbc1207a30eabe548a7e5791af5f476b | [
"Apache-2.0"
] | null | null | null | """This module contains the general information for BiosVfInterleaveConfiguration ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class BiosVfInterleaveConfigurationConsts:
    """Allowed string values for the BiosVfInterleaveConfiguration
    properties (channel, memory and rank interleaving BIOS tokens)."""
    SUPPORTED_BY_DEFAULT_NO = "no"
    SUPPORTED_BY_DEFAULT_YES = "yes"
    # Values accepted by vp_channel_interleaving.
    VP_CHANNEL_INTERLEAVING_1_WAY = "1-way"
    VP_CHANNEL_INTERLEAVING_2_WAY = "2-way"
    VP_CHANNEL_INTERLEAVING_3_WAY = "3-way"
    VP_CHANNEL_INTERLEAVING_4_WAY = "4-way"
    VP_CHANNEL_INTERLEAVING_AUTO = "auto"
    VP_CHANNEL_INTERLEAVING_PLATFORM_DEFAULT = "platform-default"
    VP_CHANNEL_INTERLEAVING_PLATFORM_RECOMMENDED = "platform-recommended"
    # Values accepted by vp_memory_interleaving.
    VP_MEMORY_INTERLEAVING_2_WAY_NODE_INTERLEAVE = "2-way-node-interleave"
    VP_MEMORY_INTERLEAVING_4_WAY_NODE_INTERLEAVE = "4-way-node-interleave"
    VP_MEMORY_INTERLEAVING_8_WAY_INTERLEAVING_INTER_SOCKET = "8-way-interleaving-inter-socket"
    VP_MEMORY_INTERLEAVING_NUMA_1_WAY_NODE_INTERLEAVE = "numa---1-way-node-interleave"
    VP_MEMORY_INTERLEAVING_PLATFORM_DEFAULT = "platform-default"
    VP_MEMORY_INTERLEAVING_PLATFORM_RECOMMENDED = "platform-recommended"
    # Values accepted by vp_rank_interleaving.
    VP_RANK_INTERLEAVING_1_WAY = "1-way"
    VP_RANK_INTERLEAVING_2_WAY = "2-way"
    VP_RANK_INTERLEAVING_4_WAY = "4-way"
    VP_RANK_INTERLEAVING_8_WAY = "8-way"
    VP_RANK_INTERLEAVING_AUTO = "auto"
    VP_RANK_INTERLEAVING_PLATFORM_DEFAULT = "platform-default"
    VP_RANK_INTERLEAVING_PLATFORM_RECOMMENDED = "platform-recommended"
class BiosVfInterleaveConfiguration(ManagedObject):
    """Managed object for the BIOS 'Interleave-Configuration' token group
    (channel, memory and rank interleaving settings of a BIOS policy or
    profile).  The property metadata below drives serialization to and
    from the UCSM XML API."""

    # Shortcut to the allowed property values defined above.
    consts = BiosVfInterleaveConfigurationConsts()
    # This MO has no naming properties; its RN is the fixed string in mo_meta.
    naming_props = set([])

    # Class-level metadata: Python/XML class names, fixed RN, first supported
    # version, access mask, required privileges, possible parent classes,
    # child classes and supported verbs.
    mo_meta = MoMeta("BiosVfInterleaveConfiguration", "biosVfInterleaveConfiguration", "Interleave-Configuration", VersionMeta.Version222c, "InputOutput", 0xff, [], ["admin", "ls-compute", "ls-config", "ls-server", "ls-server-policy", "pn-policy"], [u'biosSettings', u'biosVProfile'], [], ["Get", "Set"])

    # Per-property metadata keyed by the Python-side attribute name
    # (XML name, type, version, access, mask bit, length/range, regex,
    # allowed values).
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version222c, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []), 
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []), 
        "prop_acl": MoPropertyMeta("prop_acl", "propAcl", "ulong", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), 
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []), 
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []), 
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []), 
        "supported_by_default": MoPropertyMeta("supported_by_default", "supportedByDefault", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["no", "yes"], []), 
        "vp_channel_interleaving": MoPropertyMeta("vp_channel_interleaving", "vpChannelInterleaving", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["1-way", "2-way", "3-way", "4-way", "auto", "platform-default", "platform-recommended"], []), 
        "vp_memory_interleaving": MoPropertyMeta("vp_memory_interleaving", "vpMemoryInterleaving", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["2-way-node-interleave", "4-way-node-interleave", "8-way-interleaving-inter-socket", "numa---1-way-node-interleave", "platform-default", "platform-recommended"], []), 
        "vp_rank_interleaving": MoPropertyMeta("vp_rank_interleaving", "vpRankInterleaving", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["1-way", "2-way", "4-way", "8-way", "auto", "platform-default", "platform-recommended"], []), 
    }

    # Maps XML attribute names back to the Python-side attribute names.
    prop_map = {
        "childAction": "child_action", 
        "dn": "dn", 
        "propAcl": "prop_acl", 
        "rn": "rn", 
        "sacl": "sacl", 
        "status": "status", 
        "supportedByDefault": "supported_by_default", 
        "vpChannelInterleaving": "vp_channel_interleaving", 
        "vpMemoryInterleaving": "vp_memory_interleaving", 
        "vpRankInterleaving": "vp_rank_interleaving", 
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create the MO under *parent_mo_or_dn*; extra keyword arguments
        set the properties declared in ``prop_meta`` above."""
        # Presumably a bitmask of locally modified properties (the per-
        # property mask bits come from prop_meta) — standard ManagedObject
        # bookkeeping; confirm against the ucsmo base class.
        self._dirty_mask = 0
        self.child_action = None
        self.prop_acl = None
        self.sacl = None
        self.status = None
        self.supported_by_default = None
        self.vp_channel_interleaving = None
        self.vp_memory_interleaving = None
        self.vp_rank_interleaving = None

        ManagedObject.__init__(self, "BiosVfInterleaveConfiguration", parent_mo_or_dn, **kwargs)
| 64.987342 | 353 | 0.714842 |
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class BiosVfInterleaveConfigurationConsts:
SUPPORTED_BY_DEFAULT_NO = "no"
SUPPORTED_BY_DEFAULT_YES = "yes"
VP_CHANNEL_INTERLEAVING_1_WAY = "1-way"
VP_CHANNEL_INTERLEAVING_2_WAY = "2-way"
VP_CHANNEL_INTERLEAVING_3_WAY = "3-way"
VP_CHANNEL_INTERLEAVING_4_WAY = "4-way"
VP_CHANNEL_INTERLEAVING_AUTO = "auto"
VP_CHANNEL_INTERLEAVING_PLATFORM_DEFAULT = "platform-default"
VP_CHANNEL_INTERLEAVING_PLATFORM_RECOMMENDED = "platform-recommended"
VP_MEMORY_INTERLEAVING_2_WAY_NODE_INTERLEAVE = "2-way-node-interleave"
VP_MEMORY_INTERLEAVING_4_WAY_NODE_INTERLEAVE = "4-way-node-interleave"
VP_MEMORY_INTERLEAVING_8_WAY_INTERLEAVING_INTER_SOCKET = "8-way-interleaving-inter-socket"
VP_MEMORY_INTERLEAVING_NUMA_1_WAY_NODE_INTERLEAVE = "numa---1-way-node-interleave"
VP_MEMORY_INTERLEAVING_PLATFORM_DEFAULT = "platform-default"
VP_MEMORY_INTERLEAVING_PLATFORM_RECOMMENDED = "platform-recommended"
VP_RANK_INTERLEAVING_1_WAY = "1-way"
VP_RANK_INTERLEAVING_2_WAY = "2-way"
VP_RANK_INTERLEAVING_4_WAY = "4-way"
VP_RANK_INTERLEAVING_8_WAY = "8-way"
VP_RANK_INTERLEAVING_AUTO = "auto"
VP_RANK_INTERLEAVING_PLATFORM_DEFAULT = "platform-default"
VP_RANK_INTERLEAVING_PLATFORM_RECOMMENDED = "platform-recommended"
class BiosVfInterleaveConfiguration(ManagedObject):
consts = BiosVfInterleaveConfigurationConsts()
naming_props = set([])
mo_meta = MoMeta("BiosVfInterleaveConfiguration", "biosVfInterleaveConfiguration", "Interleave-Configuration", VersionMeta.Version222c, "InputOutput", 0xff, [], ["admin", "ls-compute", "ls-config", "ls-server", "ls-server-policy", "pn-policy"], [u'biosSettings', u'biosVProfile'], [], ["Get", "Set"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version222c, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"prop_acl": MoPropertyMeta("prop_acl", "propAcl", "ulong", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"supported_by_default": MoPropertyMeta("supported_by_default", "supportedByDefault", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["no", "yes"], []),
"vp_channel_interleaving": MoPropertyMeta("vp_channel_interleaving", "vpChannelInterleaving", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["1-way", "2-way", "3-way", "4-way", "auto", "platform-default", "platform-recommended"], []),
"vp_memory_interleaving": MoPropertyMeta("vp_memory_interleaving", "vpMemoryInterleaving", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["2-way-node-interleave", "4-way-node-interleave", "8-way-interleaving-inter-socket", "numa---1-way-node-interleave", "platform-default", "platform-recommended"], []),
"vp_rank_interleaving": MoPropertyMeta("vp_rank_interleaving", "vpRankInterleaving", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["1-way", "2-way", "4-way", "8-way", "auto", "platform-default", "platform-recommended"], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"propAcl": "prop_acl",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"supportedByDefault": "supported_by_default",
"vpChannelInterleaving": "vp_channel_interleaving",
"vpMemoryInterleaving": "vp_memory_interleaving",
"vpRankInterleaving": "vp_rank_interleaving",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.prop_acl = None
self.sacl = None
self.status = None
self.supported_by_default = None
self.vp_channel_interleaving = None
self.vp_memory_interleaving = None
self.vp_rank_interleaving = None
ManagedObject.__init__(self, "BiosVfInterleaveConfiguration", parent_mo_or_dn, **kwargs)
| true | true |
f7f73118b3173a14028afff5936a580bfccc2d1b | 3,435 | py | Python | VGG, ResNet, Inception (200821).py | Adrian123K/tensor | f5cb9bbb8b98d3ca52356454b1c6adf2851ed507 | [
"MIT"
] | null | null | null | VGG, ResNet, Inception (200821).py | Adrian123K/tensor | f5cb9bbb8b98d3ca52356454b1c6adf2851ed507 | [
"MIT"
] | null | null | null | VGG, ResNet, Inception (200821).py | Adrian123K/tensor | f5cb9bbb8b98d3ca52356454b1c6adf2851ed507 | [
"MIT"
] | null | null | null | # # 설명:
# from tensorflow.keras.applications import *
#
# mobilenet = MobileNet(weights = None, input_shape = None, include_top = True)
# resnet50 = ResNet50(weights = None, input_shape = None, include_top = True)
# xception = Xception(weights = None, input_shape = None, include_top = True)
#
# # 예제:
# from tensorflow.keras.models import Sequential
# from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, BatchNormalization, Activation
# from tensorflow.keras.optimizers import Adam
#
#
#
# # imagenet을 학습한 모델을 불러옵니다.
# vgg16 = VGG16(weights = 'imagenet', input_shape = (32, 32, 3), include_top = False)
# vgg16.summary()
# resnet_50 = ResNet50(weights = 'imagenet', input_shape = (224, 224, 3), include_top = False)
# resnet_50.summary()
# -------------------------------------------------------------------------------------------------------------------------------
# 1. Prepare the data
from tensorflow.keras.datasets import cifar10
import numpy as np
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Mean and standard deviation are computed per channel.
x_mean = np.mean(x_train, axis = (0, 1, 2))
x_std = np.std(x_train, axis = (0, 1, 2))
x_train = (x_train - x_mean) / x_std
x_test = (x_test - x_mean) / x_std
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.3, random_state = 777)
print('data ready~')
# 2. Augment the data
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(horizontal_flip = True,
                                   zoom_range = 0.2,
                                   width_shift_range = 0.1,
                                   height_shift_range = 0.1,
                                   rotation_range = 30,
                                   fill_mode = 'nearest')
# Validation data is only rescaled/passed through — no augmentation.
val_datagen = ImageDataGenerator()
batch_size = 32
train_generator = train_datagen.flow(x_train, y_train,
                                     batch_size = batch_size)
val_generator = val_datagen.flow(x_val, y_val,
                                 batch_size = batch_size)
# 3. Use a pre-trained model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, BatchNormalization, Activation
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import VGG16
# Load a VGG16 base trained on ImageNet, without its classifier head.
vgg16 = VGG16(weights = 'imagenet', input_shape = (32, 32, 3), include_top = False)
vgg16.summary()
# 4. Unfreeze part of the model
# Only the last four layers are left trainable (fine-tuning).
for layer in vgg16.layers[:-4]:
    layer.trainable = False
# 5. Build and compile the model
model = Sequential()
# Use the VGG16 base as the feature extractor.
model.add(vgg16)
# Define the classifier head ourselves.
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(10, activation = 'softmax'))
# model.summary()  # uncomment to inspect the model architecture
model.compile(optimizer = Adam(1e-4),
              loss = 'sparse_categorical_crossentropy',
              metrics = ['acc'])
def get_step(train_len, batch_size):
    """Return the number of batches needed to cover ``train_len`` samples,
    counting a final partial batch as one extra step (ceiling division)."""
    full_batches, remainder = divmod(train_len, batch_size)
    return full_batches + 1 if remainder else full_batches
# 6. Train with the augmented generator; get_step() sizes the per-epoch
# step counts so every training/validation sample is seen once per epoch.
history = model.fit(train_generator,
                    epochs = 100,
                    steps_per_epoch = get_step(len(x_train), batch_size),
                    validation_data = val_generator,
                    validation_steps = get_step(len(x_val), batch_size))
------------------------------------------------------------------------------------------------------------
from tensorflow.keras.datasets import cifar10
import numpy as np
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_mean = np.mean(x_train, axis = (0, 1, 2))
x_std = np.std(x_train, axis = (0, 1, 2))
x_train = (x_train - x_mean) / x_std
x_test = (x_test - x_mean) / x_std
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.3, random_state = 777)
print('data ready~')
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(horizontal_flip = True,
zoom_range = 0.2,
width_shift_range = 0.1,
height_shift_range = 0.1,
rotation_range = 30,
fill_mode = 'nearest')
val_datagen = ImageDataGenerator()
batch_size = 32
train_generator = train_datagen.flow(x_train, y_train,
batch_size = batch_size)
val_generator = val_datagen.flow(x_val, y_val,
batch_size = batch_size)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, BatchNormalization, Activation
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import VGG16
vgg16 = VGG16(weights = 'imagenet', input_shape = (32, 32, 3), include_top = False)
vgg16.summary()
for layer in vgg16.layers[:-4]:
layer.trainable = False
model = Sequential()
model.add(vgg16)
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(10, activation = 'softmax'))
optimizer = Adam(1e-4),
loss = 'sparse_categorical_crossentropy',
metrics = ['acc'])
def get_step(train_len, batch_size):
if(train_len % batch_size > 0):
return train_len // batch_size + 1
else:
return train_len // batch_size
history = model.fit(train_generator,
epochs = 100,
steps_per_epoch = get_step(len(x_train), batch_size),
validation_data = val_generator,
validation_steps = get_step(len(x_val), batch_size)) | false | true |
f7f7313b4d3c137275828a6b64b0f44fc1e7f7d0 | 7,106 | py | Python | bin/gComposite.py | liguowang/epage | 2ce60ddbcd23f06dc4d635681e8e52b66ba519f9 | [
"MIT"
] | null | null | null | bin/gComposite.py | liguowang/epage | 2ce60ddbcd23f06dc4d635681e8e52b66ba519f9 | [
"MIT"
] | null | null | null | bin/gComposite.py | liguowang/epage | 2ce60ddbcd23f06dc4d635681e8e52b66ba519f9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 30 10:04:05 2019
@author: m102324
Description
-----------
This program Calculates the Composite Expression Scores:
* Gene Set Variation Analysis (GSVA). (Hänzelmann et al, 2013)
* Single Sample GSEA (ssGSEA). (Barbie et al, 2009)
* zscore (Lee et al, 2008)
* plage (Tomfohr et al, 2005)
"""
import sys
#import numpy as np
#import pandas as pd
from time import strftime
import pandas as pd
from optparse import OptionParser
from pacmodule import iList,iMatrix,iPas,gsva,cpca
# Module metadata; __version__ is also used by optparse's --version string.
__author__ = "Liguo Wang"
__copyright__ = "Copyleft"
__credits__ = []
__license__ = "MIT"
__version__="1.0.0"
__maintainer__ = "Liguo Wang"
__email__ = "wang.liguo@mayo.edu"
__status__ = "Development"
def main():
	"""Command-line entry point.

	Parses the options, loads the gene set(s) (GMT) and the expression
	matrix, computes the first two principal components plus four
	composite expression scores (GSVA, ssGSEA, z-score, PLAGE) via R,
	and merges all score tables with the sample groups into
	'<output>_combined.tsv'.
	"""
	usage="%prog [options]" + "\n"
	parser = OptionParser(usage,version="%prog " + __version__)
	parser.add_option("-e","--expr_matrix",action="store",type="string",dest="expr_file",help="Tab-separated data matrix file containing gene expression values. The 1st row containing sample/patient IDs and the 1st column containing gene symbols(must be unique). File can be compressed (.gz, .Z, .z, .bz, .bz2, bzip2).")
	parser.add_option("-g","--gene",action="store",type="string",dest="gene_file",help="GMT file. The GMT file format is a tab delimited file format that describes gene sets (Each gene set is described by a name, a description, and the genes in the gene set). In the GMT format, each row represents a gene set. The first column is get set name (must be unique). The second column is brief description (can be 'na').")
	parser.add_option("-k","--group",action="store",type="string",dest="group_file",help="Group file (in CSV format). First column is sample ID, second column is group ID")
	parser.add_option("-s","--sample",action="store",type='string', dest="sample_file",default=None, help="Sample list file containing sample IDs. Each row can be a single sample ID, a comma-separated sample IDs or a space-separated sample IDs. Sample IDs must match exactly to those in the data matrix file. If omitted, calculated activity scores for *all* the samples. File can be compressed (.gz, .Z, .z, .bz, .bz2, bzip2). default=%default (All samples will be used)")
	parser.add_option("-l","--log",action="store_true",default=False,dest="log2",help="If True, will do log2(x+1) transformation for gene expression values. Must set to 'True' if expression values are RNA-seq count. default=%default")
	parser.add_option("-p","--processor",action="store", type='int',default=0,dest="n_thread",help="Number of processors to use when doing the calculations in parallel. default=%default (use all available processors)")
	parser.add_option("-o","--output",action="store",type='string', dest="out_file",help="The prefix of the output file.")
	(options,args)=parser.parse_args()

	# All four of these options are mandatory; show the help and quit if
	# any one is missing.
	if not (options.expr_file):
		print ("-e/--expr_matrix: gene expression file must be specified.", file=sys.stderr)
		parser.print_help()
		sys.exit()
	if not (options.gene_file):
		print ("-g/--gene GMT file must be specified.", file=sys.stderr)
		parser.print_help()
		sys.exit()
	if not (options.out_file):
		print ("-o/--output: output prefix must be specified.", file=sys.stderr)
		parser.print_help()
		sys.exit()
	if not (options.group_file):
		print ("-k/--group: group must be specified.", file=sys.stderr)
		parser.print_help()
		sys.exit()

	# Read gene set(s) from the GMT file and pool all member genes.
	print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Read gene list from GMT file \"%s\" ..." % options.gene_file)
	gene_sets = iList.get_list(options.gene_file)
	all_genes = []
	print ("\tTotal %d gene sets loaded." % len(gene_sets), file=sys.stderr)
	for k in gene_sets:
		print ("\tGene set '%s': Total genes = %d, Unique genes = %d" % (k, len(gene_sets[k]), len(set(gene_sets[k]))), file=sys.stderr)
		for g in gene_sets[k]:
			print ("\t" + g)
		all_genes += gene_sets[k]
	all_genes = list(set(all_genes))	# de-duplicate genes pooled across sets

	# Optional sample subset; an empty list means "use all samples".
	sample_list = []
	if (options.sample_file):
		print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Read sample list from \"%s\" ..." % options.sample_file)
		sample_list = iList.get_list(options.sample_file)
		print ("\tTotal %d samples loaded." % len(sample_list))
		iList.print_list(sample_list)
	else:
		print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Calculate activity score for **all samples** in \"%s\"" % options.expr_file)

	# Subset the expression matrix to the pooled genes / chosen samples;
	# returns the genes that were not found in the matrix.
	print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Read gene expression matrix from \"%s\" ..." % options.expr_file)
	genes_not_found = iMatrix.read_matrix(infile = options.expr_file, g_list = all_genes, s_list = sample_list, outfile = options.out_file + '.mat.tsv', zfile = None,log = options.log2)

	# Run PCA on the subset matrix.
	print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Calculate the first two principal components (saved to '%s') ..." % ((options.out_file + '_pca.csv')))
	cpca.run_PCA(options.out_file + '.mat.tsv', options.out_file)

	# If some genes were absent from the matrix, rebuild the GMT file
	# without them; otherwise use the original GMT file.  The GSVA call
	# itself is identical either way, so it is issued once below.
	if len(genes_not_found) > 0:
		print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Rebuild GMT file as \"%s\" ..." % (options.out_file + '.New.gmt'))
		iList.rebuild_gmt(oldfile = options.gene_file, newfile = options.out_file + '.New.gmt', genes = genes_not_found)
		gmt_file = options.out_file + '.New.gmt'
	else:
		gmt_file = options.gene_file
	print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Calculate GSVA (saved to '%s'), ssGSEA (saved to '%s'), Z-SCORE (saved to '%s') and PLAGE (saved to '%s') ..." % ((options.out_file + '_gsva.csv'), (options.out_file + '_ssgsea.csv'), (options.out_file + '_zscore.csv'), (options.out_file + '_plage.csv')))
	gsva.run_gsva(routfile = options.out_file + '.R', gmtfile = gmt_file, expr_file = options.out_file + '.mat.tsv', outfile = options.out_file, n_proc = options.n_thread)

	# Merge the group labels with all score tables (inner join on sample ID)
	# and write the combined table.
	df_group = pd.read_csv(options.group_file,index_col = 0)
	df_gsva = pd.read_csv(options.out_file + '_gsva.csv',index_col = 0)
	df_ssgsea = pd.read_csv(options.out_file + '_ssgsea.csv',index_col = 0)
	df_zscore = pd.read_csv(options.out_file + '_zscore.csv',index_col = 0)
	df_plage = pd.read_csv(options.out_file + '_plage.csv',index_col = 0)
	df_pca = pd.read_csv(options.out_file + '_pca.csv',index_col = 0)
	data_frames = pd.concat([df_group, df_gsva, df_ssgsea,df_pca, df_zscore, df_plage],axis=1, join='inner')
	data_frames.to_csv(options.out_file + '_combined.tsv', index=True, sep="\t")
# Script entry point.
if __name__=='__main__':
	main()
| 55.085271 | 469 | 0.687729 |
import sys
from time import strftime
import pandas as pd
from optparse import OptionParser
from pacmodule import iList,iMatrix,iPas,gsva,cpca
__author__ = "Liguo Wang"
__copyright__ = "Copyleft"
__credits__ = []
__license__ = "MIT"
__version__="1.0.0"
__maintainer__ = "Liguo Wang"
__email__ = "wang.liguo@mayo.edu"
__status__ = "Development"
def main():
usage="%prog [options]" + "\n"
parser = OptionParser(usage,version="%prog " + __version__)
parser.add_option("-e","--expr_matrix",action="store",type="string",dest="expr_file",help="Tab-separated data matrix file containing gene expression values. The 1st row containing sample/patient IDs and the 1st column containing gene symbols(mut be unique). File can be compressed (.gz, .Z, .z, .bz, .bz2, bzip2).")
parser.add_option("-g","--gene",action="store",type="string",dest="gene_file",help="GMT file. The GMT file format is a tab delimited file format that describes gene sets (Each gene set is described by a name, a description, and the genes in the gene set). In the GMT format, each row represents a gene set. The first column is get set name (must be unique). The second column is brief description (can be 'na').")
parser.add_option("-k","--group",action="store",type="string",dest="group_file",help="Group file (in CSV format). First column is sample ID, second column is group ID")
parser.add_option("-s","--sample",action="store",type='string', dest="sample_file",default=None, help="Sample list file containing sample IDs. Each row can be a single sample ID, a comma-separated sample IDs or a space-separated sample IDs. Sample IDs must match exactly to those in the data matrix file. If omitted, calculated activity scores for *all* the samples. File can be compressed (.gz, .Z, .z, .bz, .bz2, bzip2). default=%default (All samples will be used)")
parser.add_option("-l","--log",action="store_true",default=False,dest="log2",help="If True, will do log2(x+1) transformation for gene experssion values. Must set to 'True' if expressin values are RNA-seq count. default=%default")
parser.add_option("-p","--processor",action="store", type='int',default=0,dest="n_thread",help="Number of processors to use when doing the calculations in parallel. default=%default (use all available processors)")
parser.add_option("-o","--output",action="store",type='string', dest="out_file",help="The prefix of the output file.")
(options,args)=parser.parse_args()
if not (options.expr_file):
print ("-e/--expr_matrix: gene expression file must be specified.", file=sys.stderr)
parser.print_help()
sys.exit()
if not (options.gene_file):
print ("-g/--gene GMT file must be specified.", file=sys.stderr)
parser.print_help()
sys.exit()
if not (options.out_file):
print ("-o/--output: output prefix must be specified.", file=sys.stderr)
parser.print_help()
sys.exit()
if not (options.group_file):
print ("-k/--group: group must be specified.", file=sys.stderr)
parser.print_help()
sys.exit()
print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Read gene list from GMT file \"%s\" ..." % options.gene_file)
gene_sets = iList.get_list(options.gene_file)
all_genes = []
print ("\tTotal %d gene sets loaded." % len(gene_sets), file=sys.stderr)
for k in gene_sets:
print ("\tGene set '%s': Total genes = %d, Unique genes = %d" % (k, len(gene_sets[k]), len(set(gene_sets[k]))), file=sys.stderr)
for g in gene_sets[k]:
print ("\t" + g)
all_genes += gene_sets[k]
all_genes = list(set(all_genes))
sample_list = []
if (options.sample_file):
print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Read sample list from \"%s\" ..." % options.sample_file)
sample_list = iList.get_list(options.sample_file)
print ("\tTotal %d samples loaded." % len(sample_list))
iList.print_list(sample_list)
else:
print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Calculate activity score for **all samples** in \"%s\"" % options.expr_file)
print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Read gene expression matrix from \"%s\" ..." % options.expr_file)
genes_not_found = iMatrix.read_matrix(infile = options.expr_file, g_list = all_genes, s_list = sample_list, outfile = options.out_file + '.mat.tsv', zfile = None,log = options.log2)
print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Calculate the first two principal components (saved to '%s') ..." % ((options.out_file + '_pca.csv')))
cpca.run_PCA(options.out_file + '.mat.tsv', options.out_file)
if len(genes_not_found) > 0:
print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Rebuild GMT file as \"%s\" ..." % (options.out_file + '.New.gmt'))
iList.rebuild_gmt(oldfile = options.gene_file, newfile = options.out_file + '.New.gmt', genes = genes_not_found)
print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Calculate GSVA (saved to '%s'), ssGSEA (saved to '%s'), Z-SCORE (saved to '%s') and PLAGE (saved to '%s') ..." % ((options.out_file + '_gsva.csv'), (options.out_file + '_ssgsea.csv'), (options.out_file + '_zscore.csv'), (options.out_file + '_plage.csv')))
gsva.run_gsva(routfile = options.out_file + '.R', gmtfile = options.out_file + '.New.gmt', expr_file = options.out_file + '.mat.tsv', outfile = options.out_file, n_proc = options.n_thread)
else:
print ("@ " + strftime("%Y-%m-%d %H:%M:%S : ") + "Calculate GSVA (saved to '%s'), ssGSEA (saved to '%s'), Z-SCORE (saved to '%s') and PLAGE (saved to '%s') ..." % ((options.out_file + '_gsva.csv'), (options.out_file + '_ssgsea.csv'), (options.out_file + '_zscore.csv'), (options.out_file + '_plage.csv')))
gsva.run_gsva(routfile = options.out_file + '.R', gmtfile = options.gene_file, expr_file = options.out_file + '.mat.tsv', outfile = options.out_file, n_proc = options.n_thread)
df_group = pd.read_csv(options.group_file,index_col = 0)
df_gsva = pd.read_csv(options.out_file + '_gsva.csv',index_col = 0)
df_ssgsea = pd.read_csv(options.out_file + '_ssgsea.csv',index_col = 0)
df_zscore = pd.read_csv(options.out_file + '_zscore.csv',index_col = 0)
df_plage = pd.read_csv(options.out_file + '_plage.csv',index_col = 0)
df_pca = pd.read_csv(options.out_file + '_pca.csv',index_col = 0)
data_frames = pd.concat([df_group, df_gsva, df_ssgsea,df_pca, df_zscore, df_plage],axis=1, join='inner')
data_frames.to_csv(options.out_file + '_combined.tsv', index=True, sep="\t")
if __name__=='__main__':
main()
| true | true |
f7f7328cc96e45fe0471559aa23e6a9c17ca25f3 | 2,107 | py | Python | Step1.py | Byunggun/Project6-DL-Project-of-Classification | adddd17e93d46231cfa5b43c8cc2422d7fb92152 | [
"MIT"
] | null | null | null | Step1.py | Byunggun/Project6-DL-Project-of-Classification | adddd17e93d46231cfa5b43c8cc2422d7fb92152 | [
"MIT"
] | null | null | null | Step1.py | Byunggun/Project6-DL-Project-of-Classification | adddd17e93d46231cfa5b43c8cc2422d7fb92152 | [
"MIT"
] | null | null | null | #Q)불러오기 후 각각의 파일에 저장해 수많은 파일 생성?
#<자료 수집>
#네이버 뉴스, 2018.2.9.-2.25., 키워드: 스포츠 도핑
#cf.https://search.naver.com/search.naver?where=news&query=%EC%8A%A4%ED%8F%AC%EC%B8%A0%20%EB%8F%84%ED%95%91&sm=tab_opt&sort=0&photo=0&field=0&reporter_article=&pd=3&ds=2018.02.09&de=2018.02.25&docid=&nso=so%3Ar%2Cp%3Afrom20180209to20180225%2Ca%3Aall&mynews=0&mson=0&refresh_start=0&related=0
####
from bs4 import BeautifulSoup
import urllib.request as req
# 1. 스포츠 도핑 기사자료 스크랩
from bs4 import BeautifulSoup
import urllib.request as req
# Output path for the scraped article text (doping1.txt).  A raw string is
# used so the Windows backslashes cannot be misread as escape sequences.
doping1_OUTPUT_FILE_NAME=r'D:\Python\MyGitHub\Project6-DL-Project-of-Classification\Doping Data/doping1.txt'
# URL of the news article to scrape.  The '&section_code' query parameter had
# been corrupted by HTML-entity decoding ('&sect' -> '§'); restored here.
doping1_URL="http://www.sportalkorea.com/news/view.php?gisa_uniq=2018022508475516&section_code=20&cp=se&gomb=1"
# doping2_URL="http://www.seoul.co.kr/news/newsView.php?id=20180225500002&wlog_tag3=naver"
# 2. 텍스트 파일로 저장(doping1.txt)
# 1) Crawling helper.
def get_text(SSS):
    """Download the article at URL *SSS* and return its body text.

    The article body lives in ``<div id="CmAdContent">`` elements; the text
    nodes of every matching div are joined into one plain string.

    Parameters
    ----------
    SSS : str
        URL of the news article to scrape (e.g. "https://...").

    Returns
    -------
    str
        The concatenated article text (also echoed to stdout).
    """
    sourceFromURL=req.urlopen(SSS)
    # 'lxml' behaves like 'html.parser' but is faster.
    soup=BeautifulSoup(sourceFromURL,'lxml', from_encoding='utf-8')
    # Join the raw text nodes instead of str()-casting the result list,
    # which previously wrote a Python list repr (e.g. "['a', 'b']") into
    # the output file instead of the actual article text.
    parts = []
    for item in soup.find_all('div', id='CmAdContent'):
        parts.extend(item.find_all(text=True))  # text=True: text nodes only
    text = " ".join(parts)
    print(text)
    return text
# 2) Main routine.
# The program starts and ends in main().
def main():
    # Scrape the article text and save it to doping1.txt.
    open_output_file=open(doping1_OUTPUT_FILE_NAME, "w")
    # get_text(doping1_URL)
    res=get_text(doping1_URL)  # fetch the article text from the URL
    open_output_file.write(res)  # url -> text file
    # res2=get_text(doping2_URL)  # second article (disabled)
    # open_output_file.write(res2)  # url -> text
    open_output_file.close()
# Function-call section: the guard below invokes main() defined above.
# When this module is imported from elsewhere, __name__ holds the module
# name instead of '__main__', so main() is not invoked automatically.
# The line below is a plain Python statement, not a function definition.
if __name__=='__main__':
    main() # call the main routine
| 35.116667 | 291 | 0.707167 |
bs4 import BeautifulSoup
import urllib.request as req
from bs4 import BeautifulSoup
import urllib.request as req
doping1_OUTPUT_FILE_NAME='D:\Python\MyGitHub\Project6-DL-Project-of-Classification\Doping Data/doping1.txt'
doping1_URL="http://www.sportalkorea.com/news/view.php?gisa_uniq=2018022508475516§ion_code=20&cp=se&gomb=1"
sourceFromURL=req.urlopen(SSS)
soup=BeautifulSoup(sourceFromURL,'lxml', from_encoding='utf-8')
find_all('div', id='CmAdContent'):
r(item.find_all(text=True))
print(text)
return text
ain():
open_output_file=open(doping1_OUTPUT_FILE_NAME, "w")
res=get_text(doping1_URL)
open_output_file.write(res)
()
| true | true |
f7f732bfd7ffad3e52cb8142ee180a25289c7b6e | 41 | py | Python | tests/__init__.py | ShipKore/libshipkore | e009b614952c32f9c23669ccf6e8ae7a9a58b1c6 | [
"MIT"
] | 6 | 2021-02-06T05:50:33.000Z | 2022-02-11T15:21:09.000Z | tests/__init__.py | ShipKore/libshipkore | e009b614952c32f9c23669ccf6e8ae7a9a58b1c6 | [
"MIT"
] | 1 | 2021-02-11T15:58:33.000Z | 2021-02-11T16:01:18.000Z | tests/__init__.py | ShipKore/libshipkore | e009b614952c32f9c23669ccf6e8ae7a9a58b1c6 | [
"MIT"
] | null | null | null | """Unit test package for libshipkore."""
| 20.5 | 40 | 0.707317 | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.