repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
oauthlib/oauthlib | oauthlib/openid/connect/core/grant_types/hybrid.py | HybridGrant.openid_authorization_validator | def openid_authorization_validator(self, request):
"""Additional validation when following the Authorization Code flow.
"""
request_info = super(HybridGrant, self).openid_authorization_validator(request)
if not request_info: # returns immediately if OAuth2.0
return request_info
# REQUIRED if the Response Type of the request is `code
# id_token` or `code id_token token` and OPTIONAL when the
# Response Type of the request is `code token`. It is a string
# value used to associate a Client session with an ID Token,
# and to mitigate replay attacks. The value is passed through
# unmodified from the Authentication Request to the ID
# Token. Sufficient entropy MUST be present in the `nonce`
# values used to prevent attackers from guessing values. For
# implementation notes, see Section 15.5.2.
if request.response_type in ["code id_token", "code id_token token"]:
if not request.nonce:
raise InvalidRequestError(
request=request,
description='Request is missing mandatory nonce parameter.'
)
return request_info | python | def openid_authorization_validator(self, request):
"""Additional validation when following the Authorization Code flow.
"""
request_info = super(HybridGrant, self).openid_authorization_validator(request)
if not request_info: # returns immediately if OAuth2.0
return request_info
# REQUIRED if the Response Type of the request is `code
# id_token` or `code id_token token` and OPTIONAL when the
# Response Type of the request is `code token`. It is a string
# value used to associate a Client session with an ID Token,
# and to mitigate replay attacks. The value is passed through
# unmodified from the Authentication Request to the ID
# Token. Sufficient entropy MUST be present in the `nonce`
# values used to prevent attackers from guessing values. For
# implementation notes, see Section 15.5.2.
if request.response_type in ["code id_token", "code id_token token"]:
if not request.nonce:
raise InvalidRequestError(
request=request,
description='Request is missing mandatory nonce parameter.'
)
return request_info | [
"def",
"openid_authorization_validator",
"(",
"self",
",",
"request",
")",
":",
"request_info",
"=",
"super",
"(",
"HybridGrant",
",",
"self",
")",
".",
"openid_authorization_validator",
"(",
"request",
")",
"if",
"not",
"request_info",
":",
"# returns immediately i... | Additional validation when following the Authorization Code flow. | [
"Additional",
"validation",
"when",
"following",
"the",
"Authorization",
"Code",
"flow",
"."
] | 30321dd3c0ca784d3508a1970cf90d9f76835c79 | https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/openid/connect/core/grant_types/hybrid.py#L39-L61 | train | 224,700 |
oauthlib/oauthlib | oauthlib/oauth1/rfc5849/utils.py | filter_oauth_params | def filter_oauth_params(params):
"""Removes all non oauth parameters from a dict or a list of params."""
is_oauth = lambda kv: kv[0].startswith("oauth_")
if isinstance(params, dict):
return list(filter(is_oauth, list(params.items())))
else:
return list(filter(is_oauth, params)) | python | def filter_oauth_params(params):
"""Removes all non oauth parameters from a dict or a list of params."""
is_oauth = lambda kv: kv[0].startswith("oauth_")
if isinstance(params, dict):
return list(filter(is_oauth, list(params.items())))
else:
return list(filter(is_oauth, params)) | [
"def",
"filter_oauth_params",
"(",
"params",
")",
":",
"is_oauth",
"=",
"lambda",
"kv",
":",
"kv",
"[",
"0",
"]",
".",
"startswith",
"(",
"\"oauth_\"",
")",
"if",
"isinstance",
"(",
"params",
",",
"dict",
")",
":",
"return",
"list",
"(",
"filter",
"(",... | Removes all non oauth parameters from a dict or a list of params. | [
"Removes",
"all",
"non",
"oauth",
"parameters",
"from",
"a",
"dict",
"or",
"a",
"list",
"of",
"params",
"."
] | 30321dd3c0ca784d3508a1970cf90d9f76835c79 | https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth1/rfc5849/utils.py#L38-L44 | train | 224,701 |
oauthlib/oauthlib | oauthlib/oauth1/rfc5849/utils.py | parse_authorization_header | def parse_authorization_header(authorization_header):
"""Parse an OAuth authorization header into a list of 2-tuples"""
auth_scheme = 'OAuth '.lower()
if authorization_header[:len(auth_scheme)].lower().startswith(auth_scheme):
items = parse_http_list(authorization_header[len(auth_scheme):])
try:
return list(parse_keqv_list(items).items())
except (IndexError, ValueError):
pass
raise ValueError('Malformed authorization header') | python | def parse_authorization_header(authorization_header):
"""Parse an OAuth authorization header into a list of 2-tuples"""
auth_scheme = 'OAuth '.lower()
if authorization_header[:len(auth_scheme)].lower().startswith(auth_scheme):
items = parse_http_list(authorization_header[len(auth_scheme):])
try:
return list(parse_keqv_list(items).items())
except (IndexError, ValueError):
pass
raise ValueError('Malformed authorization header') | [
"def",
"parse_authorization_header",
"(",
"authorization_header",
")",
":",
"auth_scheme",
"=",
"'OAuth '",
".",
"lower",
"(",
")",
"if",
"authorization_header",
"[",
":",
"len",
"(",
"auth_scheme",
")",
"]",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"... | Parse an OAuth authorization header into a list of 2-tuples | [
"Parse",
"an",
"OAuth",
"authorization",
"header",
"into",
"a",
"list",
"of",
"2",
"-",
"tuples"
] | 30321dd3c0ca784d3508a1970cf90d9f76835c79 | https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth1/rfc5849/utils.py#L81-L90 | train | 224,702 |
oauthlib/oauthlib | oauthlib/openid/connect/core/grant_types/implicit.py | ImplicitGrant.openid_authorization_validator | def openid_authorization_validator(self, request):
"""Additional validation when following the implicit flow.
"""
request_info = super(ImplicitGrant, self).openid_authorization_validator(request)
if not request_info: # returns immediately if OAuth2.0
return request_info
# REQUIRED. String value used to associate a Client session with an ID
# Token, and to mitigate replay attacks. The value is passed through
# unmodified from the Authentication Request to the ID Token.
# Sufficient entropy MUST be present in the nonce values used to
# prevent attackers from guessing values. For implementation notes, see
# Section 15.5.2.
if not request.nonce:
raise InvalidRequestError(
request=request,
description='Request is missing mandatory nonce parameter.'
)
return request_info | python | def openid_authorization_validator(self, request):
"""Additional validation when following the implicit flow.
"""
request_info = super(ImplicitGrant, self).openid_authorization_validator(request)
if not request_info: # returns immediately if OAuth2.0
return request_info
# REQUIRED. String value used to associate a Client session with an ID
# Token, and to mitigate replay attacks. The value is passed through
# unmodified from the Authentication Request to the ID Token.
# Sufficient entropy MUST be present in the nonce values used to
# prevent attackers from guessing values. For implementation notes, see
# Section 15.5.2.
if not request.nonce:
raise InvalidRequestError(
request=request,
description='Request is missing mandatory nonce parameter.'
)
return request_info | [
"def",
"openid_authorization_validator",
"(",
"self",
",",
"request",
")",
":",
"request_info",
"=",
"super",
"(",
"ImplicitGrant",
",",
"self",
")",
".",
"openid_authorization_validator",
"(",
"request",
")",
"if",
"not",
"request_info",
":",
"# returns immediately... | Additional validation when following the implicit flow. | [
"Additional",
"validation",
"when",
"following",
"the",
"implicit",
"flow",
"."
] | 30321dd3c0ca784d3508a1970cf90d9f76835c79 | https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/openid/connect/core/grant_types/implicit.py#L34-L52 | train | 224,703 |
oauthlib/oauthlib | oauthlib/oauth1/rfc5849/endpoints/access_token.py | AccessTokenEndpoint.create_access_token | def create_access_token(self, request, credentials):
"""Create and save a new access token.
Similar to OAuth 2, indication of granted scopes will be included as a
space separated list in ``oauth_authorized_realms``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The token as an urlencoded string.
"""
request.realms = self.request_validator.get_realms(
request.resource_owner_key, request)
token = {
'oauth_token': self.token_generator(),
'oauth_token_secret': self.token_generator(),
# Backport the authorized scopes indication used in OAuth2
'oauth_authorized_realms': ' '.join(request.realms)
}
token.update(credentials)
self.request_validator.save_access_token(token, request)
return urlencode(token.items()) | python | def create_access_token(self, request, credentials):
"""Create and save a new access token.
Similar to OAuth 2, indication of granted scopes will be included as a
space separated list in ``oauth_authorized_realms``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The token as an urlencoded string.
"""
request.realms = self.request_validator.get_realms(
request.resource_owner_key, request)
token = {
'oauth_token': self.token_generator(),
'oauth_token_secret': self.token_generator(),
# Backport the authorized scopes indication used in OAuth2
'oauth_authorized_realms': ' '.join(request.realms)
}
token.update(credentials)
self.request_validator.save_access_token(token, request)
return urlencode(token.items()) | [
"def",
"create_access_token",
"(",
"self",
",",
"request",
",",
"credentials",
")",
":",
"request",
".",
"realms",
"=",
"self",
".",
"request_validator",
".",
"get_realms",
"(",
"request",
".",
"resource_owner_key",
",",
"request",
")",
"token",
"=",
"{",
"'... | Create and save a new access token.
Similar to OAuth 2, indication of granted scopes will be included as a
space separated list in ``oauth_authorized_realms``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The token as an urlencoded string. | [
"Create",
"and",
"save",
"a",
"new",
"access",
"token",
"."
] | 30321dd3c0ca784d3508a1970cf90d9f76835c79 | https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth1/rfc5849/endpoints/access_token.py#L34-L54 | train | 224,704 |
oauthlib/oauthlib | oauthlib/oauth1/rfc5849/endpoints/access_token.py | AccessTokenEndpoint.create_access_token_response | def create_access_token_response(self, uri, http_method='GET', body=None,
headers=None, credentials=None):
"""Create an access token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param credentials: A list of extra credentials to include in the token.
:returns: A tuple of 3 elements.
1. A dict of headers to set on the response.
2. The response body as a string.
3. The response status code as an integer.
An example of a valid request::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import AccessTokenEndpoint
>>> endpoint = AccessTokenEndpoint(your_validator)
>>> h, b, s = endpoint.create_access_token_response(
... 'https://your.provider/access_token?foo=bar',
... headers={
... 'Authorization': 'OAuth oauth_token=234lsdkf....'
... },
... credentials={
... 'my_specific': 'argument',
... })
>>> h
{'Content-Type': 'application/x-www-form-urlencoded'}
>>> b
'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_authorized_realms=movies+pics&my_specific=argument'
>>> s
200
An response to invalid request would have a different body and status::
>>> b
'error=invalid_request&description=missing+resource+owner+key'
>>> s
400
The same goes for an an unauthorized request:
>>> b
''
>>> s
401
"""
resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
try:
request = self._create_request(uri, http_method, body, headers)
valid, processed_request = self.validate_access_token_request(
request)
if valid:
token = self.create_access_token(request, credentials or {})
self.request_validator.invalidate_request_token(
request.client_key,
request.resource_owner_key,
request)
return resp_headers, token, 200
else:
return {}, None, 401
except errors.OAuth1Error as e:
return resp_headers, e.urlencoded, e.status_code | python | def create_access_token_response(self, uri, http_method='GET', body=None,
headers=None, credentials=None):
"""Create an access token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param credentials: A list of extra credentials to include in the token.
:returns: A tuple of 3 elements.
1. A dict of headers to set on the response.
2. The response body as a string.
3. The response status code as an integer.
An example of a valid request::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import AccessTokenEndpoint
>>> endpoint = AccessTokenEndpoint(your_validator)
>>> h, b, s = endpoint.create_access_token_response(
... 'https://your.provider/access_token?foo=bar',
... headers={
... 'Authorization': 'OAuth oauth_token=234lsdkf....'
... },
... credentials={
... 'my_specific': 'argument',
... })
>>> h
{'Content-Type': 'application/x-www-form-urlencoded'}
>>> b
'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_authorized_realms=movies+pics&my_specific=argument'
>>> s
200
An response to invalid request would have a different body and status::
>>> b
'error=invalid_request&description=missing+resource+owner+key'
>>> s
400
The same goes for an an unauthorized request:
>>> b
''
>>> s
401
"""
resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
try:
request = self._create_request(uri, http_method, body, headers)
valid, processed_request = self.validate_access_token_request(
request)
if valid:
token = self.create_access_token(request, credentials or {})
self.request_validator.invalidate_request_token(
request.client_key,
request.resource_owner_key,
request)
return resp_headers, token, 200
else:
return {}, None, 401
except errors.OAuth1Error as e:
return resp_headers, e.urlencoded, e.status_code | [
"def",
"create_access_token_response",
"(",
"self",
",",
"uri",
",",
"http_method",
"=",
"'GET'",
",",
"body",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"resp_headers",
"=",
"{",
"'Content-Type'",
":",
"'application... | Create an access token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param credentials: A list of extra credentials to include in the token.
:returns: A tuple of 3 elements.
1. A dict of headers to set on the response.
2. The response body as a string.
3. The response status code as an integer.
An example of a valid request::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import AccessTokenEndpoint
>>> endpoint = AccessTokenEndpoint(your_validator)
>>> h, b, s = endpoint.create_access_token_response(
... 'https://your.provider/access_token?foo=bar',
... headers={
... 'Authorization': 'OAuth oauth_token=234lsdkf....'
... },
... credentials={
... 'my_specific': 'argument',
... })
>>> h
{'Content-Type': 'application/x-www-form-urlencoded'}
>>> b
'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_authorized_realms=movies+pics&my_specific=argument'
>>> s
200
An response to invalid request would have a different body and status::
>>> b
'error=invalid_request&description=missing+resource+owner+key'
>>> s
400
The same goes for an an unauthorized request:
>>> b
''
>>> s
401 | [
"Create",
"an",
"access",
"token",
"response",
"with",
"a",
"new",
"request",
"token",
"if",
"valid",
"."
] | 30321dd3c0ca784d3508a1970cf90d9f76835c79 | https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth1/rfc5849/endpoints/access_token.py#L56-L119 | train | 224,705 |
oauthlib/oauthlib | oauthlib/oauth1/rfc5849/endpoints/access_token.py | AccessTokenEndpoint.validate_access_token_request | def validate_access_token_request(self, request):
"""Validate an access token request.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:raises: OAuth1Error if the request is invalid.
:returns: A tuple of 2 elements.
1. The validation result (True or False).
2. The request object.
"""
self._check_transport_security(request)
self._check_mandatory_parameters(request)
if not request.resource_owner_key:
raise errors.InvalidRequestError(
description='Missing resource owner.')
if not self.request_validator.check_request_token(
request.resource_owner_key):
raise errors.InvalidRequestError(
description='Invalid resource owner key format.')
if not request.verifier:
raise errors.InvalidRequestError(
description='Missing verifier.')
if not self.request_validator.check_verifier(request.verifier):
raise errors.InvalidRequestError(
description='Invalid verifier format.')
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request,
request_token=request.resource_owner_key):
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid or expired token.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy token is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable resource owner enumeration
valid_resource_owner = self.request_validator.validate_request_token(
request.client_key, request.resource_owner_key, request)
if not valid_resource_owner:
request.resource_owner_key = self.request_validator.dummy_request_token
# The server MUST verify (Section 3.2) the validity of the request,
# ensure that the resource owner has authorized the provisioning of
# token credentials to the client, and ensure that the temporary
# credentials have not expired or been used before. The server MUST
# also verify the verification code received from the client.
# .. _`Section 3.2`: https://tools.ietf.org/html/rfc5849#section-3.2
#
# Note that early exit would enable resource owner authorization
# verifier enumertion.
valid_verifier = self.request_validator.validate_verifier(
request.client_key,
request.resource_owner_key,
request.verifier,
request)
valid_signature = self._check_signature(request, is_token_request=True)
# log the results to the validator_log
# this lets us handle internal reporting and analysis
request.validator_log['client'] = valid_client
request.validator_log['resource_owner'] = valid_resource_owner
request.validator_log['verifier'] = valid_verifier
request.validator_log['signature'] = valid_signature
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_resource_owner, valid_verifier,
valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client:, %s", valid_client)
log.info("Valid token:, %s", valid_resource_owner)
log.info("Valid verifier:, %s", valid_verifier)
log.info("Valid signature:, %s", valid_signature)
return v, request | python | def validate_access_token_request(self, request):
"""Validate an access token request.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:raises: OAuth1Error if the request is invalid.
:returns: A tuple of 2 elements.
1. The validation result (True or False).
2. The request object.
"""
self._check_transport_security(request)
self._check_mandatory_parameters(request)
if not request.resource_owner_key:
raise errors.InvalidRequestError(
description='Missing resource owner.')
if not self.request_validator.check_request_token(
request.resource_owner_key):
raise errors.InvalidRequestError(
description='Invalid resource owner key format.')
if not request.verifier:
raise errors.InvalidRequestError(
description='Missing verifier.')
if not self.request_validator.check_verifier(request.verifier):
raise errors.InvalidRequestError(
description='Invalid verifier format.')
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request,
request_token=request.resource_owner_key):
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid or expired token.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy token is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable resource owner enumeration
valid_resource_owner = self.request_validator.validate_request_token(
request.client_key, request.resource_owner_key, request)
if not valid_resource_owner:
request.resource_owner_key = self.request_validator.dummy_request_token
# The server MUST verify (Section 3.2) the validity of the request,
# ensure that the resource owner has authorized the provisioning of
# token credentials to the client, and ensure that the temporary
# credentials have not expired or been used before. The server MUST
# also verify the verification code received from the client.
# .. _`Section 3.2`: https://tools.ietf.org/html/rfc5849#section-3.2
#
# Note that early exit would enable resource owner authorization
# verifier enumertion.
valid_verifier = self.request_validator.validate_verifier(
request.client_key,
request.resource_owner_key,
request.verifier,
request)
valid_signature = self._check_signature(request, is_token_request=True)
# log the results to the validator_log
# this lets us handle internal reporting and analysis
request.validator_log['client'] = valid_client
request.validator_log['resource_owner'] = valid_resource_owner
request.validator_log['verifier'] = valid_verifier
request.validator_log['signature'] = valid_signature
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_resource_owner, valid_verifier,
valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client:, %s", valid_client)
log.info("Valid token:, %s", valid_resource_owner)
log.info("Valid verifier:, %s", valid_verifier)
log.info("Valid signature:, %s", valid_signature)
return v, request | [
"def",
"validate_access_token_request",
"(",
"self",
",",
"request",
")",
":",
"self",
".",
"_check_transport_security",
"(",
"request",
")",
"self",
".",
"_check_mandatory_parameters",
"(",
"request",
")",
"if",
"not",
"request",
".",
"resource_owner_key",
":",
"... | Validate an access token request.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:raises: OAuth1Error if the request is invalid.
:returns: A tuple of 2 elements.
1. The validation result (True or False).
2. The request object. | [
"Validate",
"an",
"access",
"token",
"request",
"."
] | 30321dd3c0ca784d3508a1970cf90d9f76835c79 | https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth1/rfc5849/endpoints/access_token.py#L121-L217 | train | 224,706 |
oauthlib/oauthlib | oauthlib/oauth2/rfc6749/endpoints/resource.py | ResourceEndpoint.verify_request | def verify_request(self, uri, http_method='GET', body=None, headers=None,
scopes=None):
"""Validate client, code etc, return body + headers"""
request = Request(uri, http_method, body, headers)
request.token_type = self.find_token_type(request)
request.scopes = scopes
token_type_handler = self.tokens.get(request.token_type,
self.default_token_type_handler)
log.debug('Dispatching token_type %s request to %r.',
request.token_type, token_type_handler)
return token_type_handler.validate_request(request), request | python | def verify_request(self, uri, http_method='GET', body=None, headers=None,
scopes=None):
"""Validate client, code etc, return body + headers"""
request = Request(uri, http_method, body, headers)
request.token_type = self.find_token_type(request)
request.scopes = scopes
token_type_handler = self.tokens.get(request.token_type,
self.default_token_type_handler)
log.debug('Dispatching token_type %s request to %r.',
request.token_type, token_type_handler)
return token_type_handler.validate_request(request), request | [
"def",
"verify_request",
"(",
"self",
",",
"uri",
",",
"http_method",
"=",
"'GET'",
",",
"body",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"scopes",
"=",
"None",
")",
":",
"request",
"=",
"Request",
"(",
"uri",
",",
"http_method",
",",
"body",
"... | Validate client, code etc, return body + headers | [
"Validate",
"client",
"code",
"etc",
"return",
"body",
"+",
"headers"
] | 30321dd3c0ca784d3508a1970cf90d9f76835c79 | https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth2/rfc6749/endpoints/resource.py#L65-L75 | train | 224,707 |
oauthlib/oauthlib | oauthlib/oauth2/rfc6749/endpoints/resource.py | ResourceEndpoint.find_token_type | def find_token_type(self, request):
"""Token type identification.
RFC 6749 does not provide a method for easily differentiating between
different token types during protected resource access. We estimate
the most likely token type (if any) by asking each known token type
to give an estimation based on the request.
"""
estimates = sorted(((t.estimate_type(request), n)
for n, t in self.tokens.items()), reverse=True)
return estimates[0][1] if len(estimates) else None | python | def find_token_type(self, request):
"""Token type identification.
RFC 6749 does not provide a method for easily differentiating between
different token types during protected resource access. We estimate
the most likely token type (if any) by asking each known token type
to give an estimation based on the request.
"""
estimates = sorted(((t.estimate_type(request), n)
for n, t in self.tokens.items()), reverse=True)
return estimates[0][1] if len(estimates) else None | [
"def",
"find_token_type",
"(",
"self",
",",
"request",
")",
":",
"estimates",
"=",
"sorted",
"(",
"(",
"(",
"t",
".",
"estimate_type",
"(",
"request",
")",
",",
"n",
")",
"for",
"n",
",",
"t",
"in",
"self",
".",
"tokens",
".",
"items",
"(",
")",
... | Token type identification.
RFC 6749 does not provide a method for easily differentiating between
different token types during protected resource access. We estimate
the most likely token type (if any) by asking each known token type
to give an estimation based on the request. | [
"Token",
"type",
"identification",
"."
] | 30321dd3c0ca784d3508a1970cf90d9f76835c79 | https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth2/rfc6749/endpoints/resource.py#L77-L87 | train | 224,708 |
beelit94/python-terraform | python_terraform/tfstate.py | Tfstate.load_file | def load_file(file_path):
"""
Read the tfstate file and load its contents, parses then as JSON and put the result into the object
"""
log.debug('read data from {0}'.format(file_path))
if os.path.exists(file_path):
with open(file_path) as f:
json_data = json.load(f)
tf_state = Tfstate(json_data)
tf_state.tfstate_file = file_path
return tf_state
log.debug('{0} is not exist'.format(file_path))
return Tfstate() | python | def load_file(file_path):
"""
Read the tfstate file and load its contents, parses then as JSON and put the result into the object
"""
log.debug('read data from {0}'.format(file_path))
if os.path.exists(file_path):
with open(file_path) as f:
json_data = json.load(f)
tf_state = Tfstate(json_data)
tf_state.tfstate_file = file_path
return tf_state
log.debug('{0} is not exist'.format(file_path))
return Tfstate() | [
"def",
"load_file",
"(",
"file_path",
")",
":",
"log",
".",
"debug",
"(",
"'read data from {0}'",
".",
"format",
"(",
"file_path",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"with",
"open",
"(",
"file_path",
")",
"as"... | Read the tfstate file and load its contents, parses then as JSON and put the result into the object | [
"Read",
"the",
"tfstate",
"file",
"and",
"load",
"its",
"contents",
"parses",
"then",
"as",
"JSON",
"and",
"put",
"the",
"result",
"into",
"the",
"object"
] | 99950cb03c37abadb0d7e136452e43f4f17dd4e1 | https://github.com/beelit94/python-terraform/blob/99950cb03c37abadb0d7e136452e43f4f17dd4e1/python_terraform/tfstate.py#L19-L34 | train | 224,709 |
beelit94/python-terraform | python_terraform/__init__.py | Terraform.generate_cmd_string | def generate_cmd_string(self, cmd, *args, **kwargs):
"""
for any generate_cmd_string doesn't written as public method of terraform
examples:
1. call import command,
ref to https://www.terraform.io/docs/commands/import.html
--> generate_cmd_string call:
terraform import -input=true aws_instance.foo i-abcd1234
--> python call:
tf.generate_cmd_string('import', 'aws_instance.foo', 'i-abcd1234', input=True)
2. call apply command,
--> generate_cmd_string call:
terraform apply -var='a=b' -var='c=d' -no-color the_folder
--> python call:
tf.generate_cmd_string('apply', the_folder, no_color=IsFlagged, var={'a':'b', 'c':'d'})
:param cmd: command and sub-command of terraform, seperated with space
refer to https://www.terraform.io/docs/commands/index.html
:param args: arguments of a command
:param kwargs: same as kwags in method 'cmd'
:return: string of valid terraform command
"""
cmds = cmd.split()
cmds = [self.terraform_bin_path] + cmds
for option, value in kwargs.items():
if '_' in option:
option = option.replace('_', '-')
if type(value) is list:
for sub_v in value:
cmds += ['-{k}={v}'.format(k=option, v=sub_v)]
continue
if type(value) is dict:
if 'backend-config' in option:
for bk, bv in value.items():
cmds += ['-backend-config={k}={v}'.format(k=bk, v=bv)]
continue
# since map type sent in string won't work, create temp var file for
# variables, and clean it up later
else:
filename = self.temp_var_files.create(value)
cmds += ['-var-file={0}'.format(filename)]
continue
# simple flag,
if value is IsFlagged:
cmds += ['-{k}'.format(k=option)]
continue
if value is None or value is IsNotFlagged:
continue
if type(value) is bool:
value = 'true' if value else 'false'
cmds += ['-{k}={v}'.format(k=option, v=value)]
cmds += args
return cmds | python | def generate_cmd_string(self, cmd, *args, **kwargs):
"""
for any generate_cmd_string doesn't written as public method of terraform
examples:
1. call import command,
ref to https://www.terraform.io/docs/commands/import.html
--> generate_cmd_string call:
terraform import -input=true aws_instance.foo i-abcd1234
--> python call:
tf.generate_cmd_string('import', 'aws_instance.foo', 'i-abcd1234', input=True)
2. call apply command,
--> generate_cmd_string call:
terraform apply -var='a=b' -var='c=d' -no-color the_folder
--> python call:
tf.generate_cmd_string('apply', the_folder, no_color=IsFlagged, var={'a':'b', 'c':'d'})
:param cmd: command and sub-command of terraform, seperated with space
refer to https://www.terraform.io/docs/commands/index.html
:param args: arguments of a command
:param kwargs: same as kwags in method 'cmd'
:return: string of valid terraform command
"""
cmds = cmd.split()
cmds = [self.terraform_bin_path] + cmds
for option, value in kwargs.items():
if '_' in option:
option = option.replace('_', '-')
if type(value) is list:
for sub_v in value:
cmds += ['-{k}={v}'.format(k=option, v=sub_v)]
continue
if type(value) is dict:
if 'backend-config' in option:
for bk, bv in value.items():
cmds += ['-backend-config={k}={v}'.format(k=bk, v=bv)]
continue
# since map type sent in string won't work, create temp var file for
# variables, and clean it up later
else:
filename = self.temp_var_files.create(value)
cmds += ['-var-file={0}'.format(filename)]
continue
# simple flag,
if value is IsFlagged:
cmds += ['-{k}'.format(k=option)]
continue
if value is None or value is IsNotFlagged:
continue
if type(value) is bool:
value = 'true' if value else 'false'
cmds += ['-{k}={v}'.format(k=option, v=value)]
cmds += args
return cmds | [
"def",
"generate_cmd_string",
"(",
"self",
",",
"cmd",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cmds",
"=",
"cmd",
".",
"split",
"(",
")",
"cmds",
"=",
"[",
"self",
".",
"terraform_bin_path",
"]",
"+",
"cmds",
"for",
"option",
",",
"va... | for any generate_cmd_string doesn't written as public method of terraform
examples:
1. call import command,
ref to https://www.terraform.io/docs/commands/import.html
--> generate_cmd_string call:
terraform import -input=true aws_instance.foo i-abcd1234
--> python call:
tf.generate_cmd_string('import', 'aws_instance.foo', 'i-abcd1234', input=True)
2. call apply command,
--> generate_cmd_string call:
terraform apply -var='a=b' -var='c=d' -no-color the_folder
--> python call:
tf.generate_cmd_string('apply', the_folder, no_color=IsFlagged, var={'a':'b', 'c':'d'})
:param cmd: command and sub-command of terraform, seperated with space
refer to https://www.terraform.io/docs/commands/index.html
:param args: arguments of a command
:param kwargs: same as kwags in method 'cmd'
:return: string of valid terraform command | [
"for",
"any",
"generate_cmd_string",
"doesn",
"t",
"written",
"as",
"public",
"method",
"of",
"terraform"
] | 99950cb03c37abadb0d7e136452e43f4f17dd4e1 | https://github.com/beelit94/python-terraform/blob/99950cb03c37abadb0d7e136452e43f4f17dd4e1/python_terraform/__init__.py#L181-L244 | train | 224,710 |
onelogin/python-saml | src/onelogin/saml2/response.py | OneLogin_Saml2_Response.get_nameid_data | def get_nameid_data(self):
"""
Gets the NameID Data provided by the SAML Response from the IdP
:returns: Name ID Data (Value, Format, NameQualifier, SPNameQualifier)
:rtype: dict
"""
nameid = None
nameid_data = {}
encrypted_id_data_nodes = self.__query_assertion('/saml:Subject/saml:EncryptedID/xenc:EncryptedData')
if encrypted_id_data_nodes:
encrypted_data = encrypted_id_data_nodes[0]
key = self.__settings.get_sp_key()
nameid = OneLogin_Saml2_Utils.decrypt_element(encrypted_data, key)
else:
nameid_nodes = self.__query_assertion('/saml:Subject/saml:NameID')
if nameid_nodes:
nameid = nameid_nodes[0]
is_strict = self.__settings.is_strict()
want_nameid = self.__settings.get_security_data().get('wantNameId', True)
if nameid is None:
if is_strict and want_nameid:
raise OneLogin_Saml2_ValidationError(
'NameID not found in the assertion of the Response',
OneLogin_Saml2_ValidationError.NO_NAMEID
)
else:
if is_strict and want_nameid and not OneLogin_Saml2_Utils.element_text(nameid):
raise OneLogin_Saml2_ValidationError(
'An empty NameID value found',
OneLogin_Saml2_ValidationError.EMPTY_NAMEID
)
nameid_data = {'Value': OneLogin_Saml2_Utils.element_text(nameid)}
for attr in ['Format', 'SPNameQualifier', 'NameQualifier']:
value = nameid.get(attr, None)
if value:
if is_strict and attr == 'SPNameQualifier':
sp_data = self.__settings.get_sp_data()
sp_entity_id = sp_data.get('entityId', '')
if sp_entity_id != value:
raise OneLogin_Saml2_ValidationError(
'The SPNameQualifier value mistmatch the SP entityID value.',
OneLogin_Saml2_ValidationError.SP_NAME_QUALIFIER_NAME_MISMATCH
)
nameid_data[attr] = value
return nameid_data | python | def get_nameid_data(self):
"""
Gets the NameID Data provided by the SAML Response from the IdP
:returns: Name ID Data (Value, Format, NameQualifier, SPNameQualifier)
:rtype: dict
"""
nameid = None
nameid_data = {}
encrypted_id_data_nodes = self.__query_assertion('/saml:Subject/saml:EncryptedID/xenc:EncryptedData')
if encrypted_id_data_nodes:
encrypted_data = encrypted_id_data_nodes[0]
key = self.__settings.get_sp_key()
nameid = OneLogin_Saml2_Utils.decrypt_element(encrypted_data, key)
else:
nameid_nodes = self.__query_assertion('/saml:Subject/saml:NameID')
if nameid_nodes:
nameid = nameid_nodes[0]
is_strict = self.__settings.is_strict()
want_nameid = self.__settings.get_security_data().get('wantNameId', True)
if nameid is None:
if is_strict and want_nameid:
raise OneLogin_Saml2_ValidationError(
'NameID not found in the assertion of the Response',
OneLogin_Saml2_ValidationError.NO_NAMEID
)
else:
if is_strict and want_nameid and not OneLogin_Saml2_Utils.element_text(nameid):
raise OneLogin_Saml2_ValidationError(
'An empty NameID value found',
OneLogin_Saml2_ValidationError.EMPTY_NAMEID
)
nameid_data = {'Value': OneLogin_Saml2_Utils.element_text(nameid)}
for attr in ['Format', 'SPNameQualifier', 'NameQualifier']:
value = nameid.get(attr, None)
if value:
if is_strict and attr == 'SPNameQualifier':
sp_data = self.__settings.get_sp_data()
sp_entity_id = sp_data.get('entityId', '')
if sp_entity_id != value:
raise OneLogin_Saml2_ValidationError(
'The SPNameQualifier value mistmatch the SP entityID value.',
OneLogin_Saml2_ValidationError.SP_NAME_QUALIFIER_NAME_MISMATCH
)
nameid_data[attr] = value
return nameid_data | [
"def",
"get_nameid_data",
"(",
"self",
")",
":",
"nameid",
"=",
"None",
"nameid_data",
"=",
"{",
"}",
"encrypted_id_data_nodes",
"=",
"self",
".",
"__query_assertion",
"(",
"'/saml:Subject/saml:EncryptedID/xenc:EncryptedData'",
")",
"if",
"encrypted_id_data_nodes",
":",... | Gets the NameID Data provided by the SAML Response from the IdP
:returns: Name ID Data (Value, Format, NameQualifier, SPNameQualifier)
:rtype: dict | [
"Gets",
"the",
"NameID",
"Data",
"provided",
"by",
"the",
"SAML",
"Response",
"from",
"the",
"IdP"
] | 9fe7a72da5b4caa1529c1640b52d2649447ce49b | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/response.py#L438-L487 | train | 224,711 |
onelogin/python-saml | src/onelogin/saml2/response.py | OneLogin_Saml2_Response.validate_signed_elements | def validate_signed_elements(self, signed_elements):
"""
Verifies that the document has the expected signed nodes.
:param signed_elements: The signed elements to be checked
:type signed_elements: list
:param raise_exceptions: Whether to return false on failure or raise an exception
:type raise_exceptions: Boolean
"""
if len(signed_elements) > 2:
return False
response_tag = '{%s}Response' % OneLogin_Saml2_Constants.NS_SAMLP
assertion_tag = '{%s}Assertion' % OneLogin_Saml2_Constants.NS_SAML
if (response_tag in signed_elements and signed_elements.count(response_tag) > 1) or \
(assertion_tag in signed_elements and signed_elements.count(assertion_tag) > 1) or \
(response_tag not in signed_elements and assertion_tag not in signed_elements):
return False
# Check that the signed elements found here, are the ones that will be verified
# by OneLogin_Saml2_Utils.validate_sign
if response_tag in signed_elements:
expected_signature_nodes = OneLogin_Saml2_Utils.query(self.document, OneLogin_Saml2_Utils.RESPONSE_SIGNATURE_XPATH)
if len(expected_signature_nodes) != 1:
raise OneLogin_Saml2_ValidationError(
'Unexpected number of Response signatures found. SAML Response rejected.',
OneLogin_Saml2_ValidationError.WRONG_NUMBER_OF_SIGNATURES_IN_RESPONSE
)
if assertion_tag in signed_elements:
expected_signature_nodes = self.__query(OneLogin_Saml2_Utils.ASSERTION_SIGNATURE_XPATH)
if len(expected_signature_nodes) != 1:
raise OneLogin_Saml2_ValidationError(
'Unexpected number of Assertion signatures found. SAML Response rejected.',
OneLogin_Saml2_ValidationError.WRONG_NUMBER_OF_SIGNATURES_IN_ASSERTION
)
return True | python | def validate_signed_elements(self, signed_elements):
"""
Verifies that the document has the expected signed nodes.
:param signed_elements: The signed elements to be checked
:type signed_elements: list
:param raise_exceptions: Whether to return false on failure or raise an exception
:type raise_exceptions: Boolean
"""
if len(signed_elements) > 2:
return False
response_tag = '{%s}Response' % OneLogin_Saml2_Constants.NS_SAMLP
assertion_tag = '{%s}Assertion' % OneLogin_Saml2_Constants.NS_SAML
if (response_tag in signed_elements and signed_elements.count(response_tag) > 1) or \
(assertion_tag in signed_elements and signed_elements.count(assertion_tag) > 1) or \
(response_tag not in signed_elements and assertion_tag not in signed_elements):
return False
# Check that the signed elements found here, are the ones that will be verified
# by OneLogin_Saml2_Utils.validate_sign
if response_tag in signed_elements:
expected_signature_nodes = OneLogin_Saml2_Utils.query(self.document, OneLogin_Saml2_Utils.RESPONSE_SIGNATURE_XPATH)
if len(expected_signature_nodes) != 1:
raise OneLogin_Saml2_ValidationError(
'Unexpected number of Response signatures found. SAML Response rejected.',
OneLogin_Saml2_ValidationError.WRONG_NUMBER_OF_SIGNATURES_IN_RESPONSE
)
if assertion_tag in signed_elements:
expected_signature_nodes = self.__query(OneLogin_Saml2_Utils.ASSERTION_SIGNATURE_XPATH)
if len(expected_signature_nodes) != 1:
raise OneLogin_Saml2_ValidationError(
'Unexpected number of Assertion signatures found. SAML Response rejected.',
OneLogin_Saml2_ValidationError.WRONG_NUMBER_OF_SIGNATURES_IN_ASSERTION
)
return True | [
"def",
"validate_signed_elements",
"(",
"self",
",",
"signed_elements",
")",
":",
"if",
"len",
"(",
"signed_elements",
")",
">",
"2",
":",
"return",
"False",
"response_tag",
"=",
"'{%s}Response'",
"%",
"OneLogin_Saml2_Constants",
".",
"NS_SAMLP",
"assertion_tag",
... | Verifies that the document has the expected signed nodes.
:param signed_elements: The signed elements to be checked
:type signed_elements: list
:param raise_exceptions: Whether to return false on failure or raise an exception
:type raise_exceptions: Boolean | [
"Verifies",
"that",
"the",
"document",
"has",
"the",
"expected",
"signed",
"nodes",
"."
] | 9fe7a72da5b4caa1529c1640b52d2649447ce49b | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/response.py#L677-L716 | train | 224,712 |
onelogin/python-saml | src/onelogin/saml2/response.py | OneLogin_Saml2_Response.__decrypt_assertion | def __decrypt_assertion(self, dom):
"""
Decrypts the Assertion
:raises: Exception if no private key available
:param dom: Encrypted Assertion
:type dom: Element
:returns: Decrypted Assertion
:rtype: Element
"""
key = self.__settings.get_sp_key()
debug = self.__settings.is_debug_active()
if not key:
raise OneLogin_Saml2_Error(
'No private key available to decrypt the assertion, check settings',
OneLogin_Saml2_Error.PRIVATE_KEY_NOT_FOUND
)
encrypted_assertion_nodes = OneLogin_Saml2_Utils.query(dom, '/samlp:Response/saml:EncryptedAssertion')
if encrypted_assertion_nodes:
encrypted_data_nodes = OneLogin_Saml2_Utils.query(encrypted_assertion_nodes[0], '//saml:EncryptedAssertion/xenc:EncryptedData')
if encrypted_data_nodes:
keyinfo = OneLogin_Saml2_Utils.query(encrypted_assertion_nodes[0], '//saml:EncryptedAssertion/xenc:EncryptedData/ds:KeyInfo')
if not keyinfo:
raise OneLogin_Saml2_ValidationError(
'No KeyInfo present, invalid Assertion',
OneLogin_Saml2_ValidationError.KEYINFO_NOT_FOUND_IN_ENCRYPTED_DATA
)
keyinfo = keyinfo[0]
children = keyinfo.getchildren()
if not children:
raise OneLogin_Saml2_ValidationError(
'KeyInfo has no children nodes, invalid Assertion',
OneLogin_Saml2_ValidationError.CHILDREN_NODE_NOT_FOUND_IN_KEYINFO
)
for child in children:
if 'RetrievalMethod' in child.tag:
if child.attrib['Type'] != 'http://www.w3.org/2001/04/xmlenc#EncryptedKey':
raise OneLogin_Saml2_ValidationError(
'Unsupported Retrieval Method found',
OneLogin_Saml2_ValidationError.UNSUPPORTED_RETRIEVAL_METHOD
)
uri = child.attrib['URI']
if not uri.startswith('#'):
break
uri = uri.split('#')[1]
encrypted_key = OneLogin_Saml2_Utils.query(encrypted_assertion_nodes[0], './xenc:EncryptedKey[@Id=$tagid]', None, uri)
if encrypted_key:
keyinfo.append(encrypted_key[0])
encrypted_data = encrypted_data_nodes[0]
decrypted = OneLogin_Saml2_Utils.decrypt_element(encrypted_data, key, debug=debug, inplace=True)
dom.replace(encrypted_assertion_nodes[0], decrypted)
return dom | python | def __decrypt_assertion(self, dom):
"""
Decrypts the Assertion
:raises: Exception if no private key available
:param dom: Encrypted Assertion
:type dom: Element
:returns: Decrypted Assertion
:rtype: Element
"""
key = self.__settings.get_sp_key()
debug = self.__settings.is_debug_active()
if not key:
raise OneLogin_Saml2_Error(
'No private key available to decrypt the assertion, check settings',
OneLogin_Saml2_Error.PRIVATE_KEY_NOT_FOUND
)
encrypted_assertion_nodes = OneLogin_Saml2_Utils.query(dom, '/samlp:Response/saml:EncryptedAssertion')
if encrypted_assertion_nodes:
encrypted_data_nodes = OneLogin_Saml2_Utils.query(encrypted_assertion_nodes[0], '//saml:EncryptedAssertion/xenc:EncryptedData')
if encrypted_data_nodes:
keyinfo = OneLogin_Saml2_Utils.query(encrypted_assertion_nodes[0], '//saml:EncryptedAssertion/xenc:EncryptedData/ds:KeyInfo')
if not keyinfo:
raise OneLogin_Saml2_ValidationError(
'No KeyInfo present, invalid Assertion',
OneLogin_Saml2_ValidationError.KEYINFO_NOT_FOUND_IN_ENCRYPTED_DATA
)
keyinfo = keyinfo[0]
children = keyinfo.getchildren()
if not children:
raise OneLogin_Saml2_ValidationError(
'KeyInfo has no children nodes, invalid Assertion',
OneLogin_Saml2_ValidationError.CHILDREN_NODE_NOT_FOUND_IN_KEYINFO
)
for child in children:
if 'RetrievalMethod' in child.tag:
if child.attrib['Type'] != 'http://www.w3.org/2001/04/xmlenc#EncryptedKey':
raise OneLogin_Saml2_ValidationError(
'Unsupported Retrieval Method found',
OneLogin_Saml2_ValidationError.UNSUPPORTED_RETRIEVAL_METHOD
)
uri = child.attrib['URI']
if not uri.startswith('#'):
break
uri = uri.split('#')[1]
encrypted_key = OneLogin_Saml2_Utils.query(encrypted_assertion_nodes[0], './xenc:EncryptedKey[@Id=$tagid]', None, uri)
if encrypted_key:
keyinfo.append(encrypted_key[0])
encrypted_data = encrypted_data_nodes[0]
decrypted = OneLogin_Saml2_Utils.decrypt_element(encrypted_data, key, debug=debug, inplace=True)
dom.replace(encrypted_assertion_nodes[0], decrypted)
return dom | [
"def",
"__decrypt_assertion",
"(",
"self",
",",
"dom",
")",
":",
"key",
"=",
"self",
".",
"__settings",
".",
"get_sp_key",
"(",
")",
"debug",
"=",
"self",
".",
"__settings",
".",
"is_debug_active",
"(",
")",
"if",
"not",
"key",
":",
"raise",
"OneLogin_Sa... | Decrypts the Assertion
:raises: Exception if no private key available
:param dom: Encrypted Assertion
:type dom: Element
:returns: Decrypted Assertion
:rtype: Element | [
"Decrypts",
"the",
"Assertion"
] | 9fe7a72da5b4caa1529c1640b52d2649447ce49b | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/response.py#L799-L856 | train | 224,713 |
onelogin/python-saml | src/onelogin/saml2/idp_metadata_parser.py | OneLogin_Saml2_IdPMetadataParser.get_metadata | def get_metadata(url, validate_cert=True):
"""
Gets the metadata XML from the provided URL
:param url: Url where the XML of the Identity Provider Metadata is published.
:type url: string
:param validate_cert: If the url uses https schema, that flag enables or not the verification of the associated certificate.
:type validate_cert: bool
:returns: metadata XML
:rtype: string
"""
valid = False
if validate_cert:
response = urllib2.urlopen(url)
else:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
response = urllib2.urlopen(url, context=ctx)
xml = response.read()
if xml:
try:
dom = fromstring(xml, forbid_dtd=True)
idp_descriptor_nodes = OneLogin_Saml2_Utils.query(dom, '//md:IDPSSODescriptor')
if idp_descriptor_nodes:
valid = True
except Exception:
pass
if not valid:
raise Exception('Not valid IdP XML found from URL: %s' % (url))
return xml | python | def get_metadata(url, validate_cert=True):
"""
Gets the metadata XML from the provided URL
:param url: Url where the XML of the Identity Provider Metadata is published.
:type url: string
:param validate_cert: If the url uses https schema, that flag enables or not the verification of the associated certificate.
:type validate_cert: bool
:returns: metadata XML
:rtype: string
"""
valid = False
if validate_cert:
response = urllib2.urlopen(url)
else:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
response = urllib2.urlopen(url, context=ctx)
xml = response.read()
if xml:
try:
dom = fromstring(xml, forbid_dtd=True)
idp_descriptor_nodes = OneLogin_Saml2_Utils.query(dom, '//md:IDPSSODescriptor')
if idp_descriptor_nodes:
valid = True
except Exception:
pass
if not valid:
raise Exception('Not valid IdP XML found from URL: %s' % (url))
return xml | [
"def",
"get_metadata",
"(",
"url",
",",
"validate_cert",
"=",
"True",
")",
":",
"valid",
"=",
"False",
"if",
"validate_cert",
":",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"url",
")",
"else",
":",
"ctx",
"=",
"ssl",
".",
"create_default_context",
... | Gets the metadata XML from the provided URL
:param url: Url where the XML of the Identity Provider Metadata is published.
:type url: string
:param validate_cert: If the url uses https schema, that flag enables or not the verification of the associated certificate.
:type validate_cert: bool
:returns: metadata XML
:rtype: string | [
"Gets",
"the",
"metadata",
"XML",
"from",
"the",
"provided",
"URL"
] | 9fe7a72da5b4caa1529c1640b52d2649447ce49b | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/idp_metadata_parser.py#L28-L63 | train | 224,714 |
onelogin/python-saml | src/onelogin/saml2/utils.py | print_xmlsec_errors | def print_xmlsec_errors(filename, line, func, error_object, error_subject, reason, msg):
"""
Auxiliary method. It overrides the default xmlsec debug message.
"""
info = []
if error_object != "unknown":
info.append("obj=" + error_object)
if error_subject != "unknown":
info.append("subject=" + error_subject)
if msg.strip():
info.append("msg=" + msg)
if reason != 1:
info.append("errno=%d" % reason)
if info:
print("%s:%d(%s)" % (filename, line, func), " ".join(info)) | python | def print_xmlsec_errors(filename, line, func, error_object, error_subject, reason, msg):
"""
Auxiliary method. It overrides the default xmlsec debug message.
"""
info = []
if error_object != "unknown":
info.append("obj=" + error_object)
if error_subject != "unknown":
info.append("subject=" + error_subject)
if msg.strip():
info.append("msg=" + msg)
if reason != 1:
info.append("errno=%d" % reason)
if info:
print("%s:%d(%s)" % (filename, line, func), " ".join(info)) | [
"def",
"print_xmlsec_errors",
"(",
"filename",
",",
"line",
",",
"func",
",",
"error_object",
",",
"error_subject",
",",
"reason",
",",
"msg",
")",
":",
"info",
"=",
"[",
"]",
"if",
"error_object",
"!=",
"\"unknown\"",
":",
"info",
".",
"append",
"(",
"\... | Auxiliary method. It overrides the default xmlsec debug message. | [
"Auxiliary",
"method",
".",
"It",
"overrides",
"the",
"default",
"xmlsec",
"debug",
"message",
"."
] | 9fe7a72da5b4caa1529c1640b52d2649447ce49b | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/utils.py#L63-L78 | train | 224,715 |
onelogin/python-saml | src/onelogin/saml2/utils.py | OneLogin_Saml2_Utils.get_self_host | def get_self_host(request_data):
"""
Returns the current host.
:param request_data: The request as a dict
:type: dict
:return: The current host
:rtype: string
"""
if 'http_host' in request_data:
current_host = request_data['http_host']
elif 'server_name' in request_data:
current_host = request_data['server_name']
else:
raise Exception('No hostname defined')
if ':' in current_host:
current_host_data = current_host.split(':')
possible_port = current_host_data[-1]
try:
possible_port = float(possible_port)
current_host = current_host_data[0]
except ValueError:
current_host = ':'.join(current_host_data)
return current_host | python | def get_self_host(request_data):
"""
Returns the current host.
:param request_data: The request as a dict
:type: dict
:return: The current host
:rtype: string
"""
if 'http_host' in request_data:
current_host = request_data['http_host']
elif 'server_name' in request_data:
current_host = request_data['server_name']
else:
raise Exception('No hostname defined')
if ':' in current_host:
current_host_data = current_host.split(':')
possible_port = current_host_data[-1]
try:
possible_port = float(possible_port)
current_host = current_host_data[0]
except ValueError:
current_host = ':'.join(current_host_data)
return current_host | [
"def",
"get_self_host",
"(",
"request_data",
")",
":",
"if",
"'http_host'",
"in",
"request_data",
":",
"current_host",
"=",
"request_data",
"[",
"'http_host'",
"]",
"elif",
"'server_name'",
"in",
"request_data",
":",
"current_host",
"=",
"request_data",
"[",
"'ser... | Returns the current host.
:param request_data: The request as a dict
:type: dict
:return: The current host
:rtype: string | [
"Returns",
"the",
"current",
"host",
"."
] | 9fe7a72da5b4caa1529c1640b52d2649447ce49b | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/utils.py#L315-L341 | train | 224,716 |
onelogin/python-saml | src/onelogin/saml2/utils.py | OneLogin_Saml2_Utils.parse_duration | def parse_duration(duration, timestamp=None):
"""
Interprets a ISO8601 duration value relative to a given timestamp.
:param duration: The duration, as a string.
:type: string
:param timestamp: The unix timestamp we should apply the duration to.
Optional, default to the current time.
:type: string
:return: The new timestamp, after the duration is applied.
:rtype: int
"""
assert isinstance(duration, basestring)
assert timestamp is None or isinstance(timestamp, int)
timedelta = duration_parser(duration)
if timestamp is None:
data = datetime.utcnow() + timedelta
else:
data = datetime.utcfromtimestamp(timestamp) + timedelta
return calendar.timegm(data.utctimetuple()) | python | def parse_duration(duration, timestamp=None):
"""
Interprets a ISO8601 duration value relative to a given timestamp.
:param duration: The duration, as a string.
:type: string
:param timestamp: The unix timestamp we should apply the duration to.
Optional, default to the current time.
:type: string
:return: The new timestamp, after the duration is applied.
:rtype: int
"""
assert isinstance(duration, basestring)
assert timestamp is None or isinstance(timestamp, int)
timedelta = duration_parser(duration)
if timestamp is None:
data = datetime.utcnow() + timedelta
else:
data = datetime.utcfromtimestamp(timestamp) + timedelta
return calendar.timegm(data.utctimetuple()) | [
"def",
"parse_duration",
"(",
"duration",
",",
"timestamp",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"duration",
",",
"basestring",
")",
"assert",
"timestamp",
"is",
"None",
"or",
"isinstance",
"(",
"timestamp",
",",
"int",
")",
"timedelta",
"=",
... | Interprets a ISO8601 duration value relative to a given timestamp.
:param duration: The duration, as a string.
:type: string
:param timestamp: The unix timestamp we should apply the duration to.
Optional, default to the current time.
:type: string
:return: The new timestamp, after the duration is applied.
:rtype: int | [
"Interprets",
"a",
"ISO8601",
"duration",
"value",
"relative",
"to",
"a",
"given",
"timestamp",
"."
] | 9fe7a72da5b4caa1529c1640b52d2649447ce49b | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/utils.py#L477-L499 | train | 224,717 |
onelogin/python-saml | src/onelogin/saml2/utils.py | OneLogin_Saml2_Utils.get_status | def get_status(dom):
"""
Gets Status from a Response.
:param dom: The Response as XML
:type: Document
:returns: The Status, an array with the code and a message.
:rtype: dict
"""
status = {}
status_entry = OneLogin_Saml2_Utils.query(dom, '/samlp:Response/samlp:Status')
if len(status_entry) != 1:
raise OneLogin_Saml2_ValidationError(
'Missing Status on response',
OneLogin_Saml2_ValidationError.MISSING_STATUS
)
code_entry = OneLogin_Saml2_Utils.query(dom, '/samlp:Response/samlp:Status/samlp:StatusCode', status_entry[0])
if len(code_entry) != 1:
raise OneLogin_Saml2_ValidationError(
'Missing Status Code on response',
OneLogin_Saml2_ValidationError.MISSING_STATUS_CODE
)
code = code_entry[0].values()[0]
status['code'] = code
status['msg'] = ''
message_entry = OneLogin_Saml2_Utils.query(dom, '/samlp:Response/samlp:Status/samlp:StatusMessage', status_entry[0])
if len(message_entry) == 0:
subcode_entry = OneLogin_Saml2_Utils.query(dom, '/samlp:Response/samlp:Status/samlp:StatusCode/samlp:StatusCode', status_entry[0])
if len(subcode_entry) == 1:
status['msg'] = subcode_entry[0].values()[0]
elif len(message_entry) == 1:
status['msg'] = OneLogin_Saml2_Utils.element_text(message_entry[0])
return status | python | def get_status(dom):
"""
Gets Status from a Response.
:param dom: The Response as XML
:type: Document
:returns: The Status, an array with the code and a message.
:rtype: dict
"""
status = {}
status_entry = OneLogin_Saml2_Utils.query(dom, '/samlp:Response/samlp:Status')
if len(status_entry) != 1:
raise OneLogin_Saml2_ValidationError(
'Missing Status on response',
OneLogin_Saml2_ValidationError.MISSING_STATUS
)
code_entry = OneLogin_Saml2_Utils.query(dom, '/samlp:Response/samlp:Status/samlp:StatusCode', status_entry[0])
if len(code_entry) != 1:
raise OneLogin_Saml2_ValidationError(
'Missing Status Code on response',
OneLogin_Saml2_ValidationError.MISSING_STATUS_CODE
)
code = code_entry[0].values()[0]
status['code'] = code
status['msg'] = ''
message_entry = OneLogin_Saml2_Utils.query(dom, '/samlp:Response/samlp:Status/samlp:StatusMessage', status_entry[0])
if len(message_entry) == 0:
subcode_entry = OneLogin_Saml2_Utils.query(dom, '/samlp:Response/samlp:Status/samlp:StatusCode/samlp:StatusCode', status_entry[0])
if len(subcode_entry) == 1:
status['msg'] = subcode_entry[0].values()[0]
elif len(message_entry) == 1:
status['msg'] = OneLogin_Saml2_Utils.element_text(message_entry[0])
return status | [
"def",
"get_status",
"(",
"dom",
")",
":",
"status",
"=",
"{",
"}",
"status_entry",
"=",
"OneLogin_Saml2_Utils",
".",
"query",
"(",
"dom",
",",
"'/samlp:Response/samlp:Status'",
")",
"if",
"len",
"(",
"status_entry",
")",
"!=",
"1",
":",
"raise",
"OneLogin_S... | Gets Status from a Response.
:param dom: The Response as XML
:type: Document
:returns: The Status, an array with the code and a message.
:rtype: dict | [
"Gets",
"Status",
"from",
"a",
"Response",
"."
] | 9fe7a72da5b4caa1529c1640b52d2649447ce49b | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/utils.py#L734-L771 | train | 224,718 |
onelogin/python-saml | src/onelogin/saml2/utils.py | OneLogin_Saml2_Utils.write_temp_file | def write_temp_file(content):
"""
Writes some content into a temporary file and returns it.
:param content: The file content
:type: string
:returns: The temporary file
:rtype: file-like object
"""
f_temp = NamedTemporaryFile(delete=True)
f_temp.file.write(content)
f_temp.file.flush()
return f_temp | python | def write_temp_file(content):
"""
Writes some content into a temporary file and returns it.
:param content: The file content
:type: string
:returns: The temporary file
:rtype: file-like object
"""
f_temp = NamedTemporaryFile(delete=True)
f_temp.file.write(content)
f_temp.file.flush()
return f_temp | [
"def",
"write_temp_file",
"(",
"content",
")",
":",
"f_temp",
"=",
"NamedTemporaryFile",
"(",
"delete",
"=",
"True",
")",
"f_temp",
".",
"file",
".",
"write",
"(",
"content",
")",
"f_temp",
".",
"file",
".",
"flush",
"(",
")",
"return",
"f_temp"
] | Writes some content into a temporary file and returns it.
:param content: The file content
:type: string
:returns: The temporary file
:rtype: file-like object | [
"Writes",
"some",
"content",
"into",
"a",
"temporary",
"file",
"and",
"returns",
"it",
"."
] | 9fe7a72da5b4caa1529c1640b52d2649447ce49b | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/utils.py#L814-L827 | train | 224,719 |
onelogin/python-saml | src/onelogin/saml2/settings.py | OneLogin_Saml2_Settings.__add_default_values | def __add_default_values(self):
"""
Add default values if the settings info is not complete
"""
self.__sp.setdefault('assertionConsumerService', {})
self.__sp['assertionConsumerService'].setdefault('binding', OneLogin_Saml2_Constants.BINDING_HTTP_POST)
self.__sp.setdefault('attributeConsumingService', {})
self.__sp.setdefault('singleLogoutService', {})
self.__sp['singleLogoutService'].setdefault('binding', OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT)
# Related to nameID
self.__sp.setdefault('NameIDFormat', OneLogin_Saml2_Constants.NAMEID_UNSPECIFIED)
self.__security.setdefault('nameIdEncrypted', False)
# Metadata format
self.__security.setdefault('metadataValidUntil', None) # None means use default
self.__security.setdefault('metadataCacheDuration', None) # None means use default
# Sign provided
self.__security.setdefault('authnRequestsSigned', False)
self.__security.setdefault('logoutRequestSigned', False)
self.__security.setdefault('logoutResponseSigned', False)
self.__security.setdefault('signMetadata', False)
# Sign expected
self.__security.setdefault('wantMessagesSigned', False)
self.__security.setdefault('wantAssertionsSigned', False)
# NameID element expected
self.__security.setdefault('wantNameId', True)
# SAML responses with a InResponseTo attribute not rejected when requestId not passed
self.__security.setdefault('rejectUnsolicitedResponsesWithInResponseTo', False)
# Encrypt expected
self.__security.setdefault('wantAssertionsEncrypted', False)
self.__security.setdefault('wantNameIdEncrypted', False)
# Signature Algorithm
self.__security.setdefault('signatureAlgorithm', OneLogin_Saml2_Constants.RSA_SHA1)
# Digest Algorithm
self.__security.setdefault('digestAlgorithm', OneLogin_Saml2_Constants.SHA1)
# AttributeStatement required by default
self.__security.setdefault('wantAttributeStatement', True)
self.__idp.setdefault('x509cert', '')
self.__idp.setdefault('certFingerprint', '')
self.__idp.setdefault('certFingerprintAlgorithm', 'sha1')
self.__sp.setdefault('x509cert', '')
self.__sp.setdefault('privateKey', '')
self.__security.setdefault('requestedAuthnContext', True)
self.__security.setdefault('failOnAuthnContextMismatch', False) | python | def __add_default_values(self):
"""
Add default values if the settings info is not complete
"""
self.__sp.setdefault('assertionConsumerService', {})
self.__sp['assertionConsumerService'].setdefault('binding', OneLogin_Saml2_Constants.BINDING_HTTP_POST)
self.__sp.setdefault('attributeConsumingService', {})
self.__sp.setdefault('singleLogoutService', {})
self.__sp['singleLogoutService'].setdefault('binding', OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT)
# Related to nameID
self.__sp.setdefault('NameIDFormat', OneLogin_Saml2_Constants.NAMEID_UNSPECIFIED)
self.__security.setdefault('nameIdEncrypted', False)
# Metadata format
self.__security.setdefault('metadataValidUntil', None) # None means use default
self.__security.setdefault('metadataCacheDuration', None) # None means use default
# Sign provided
self.__security.setdefault('authnRequestsSigned', False)
self.__security.setdefault('logoutRequestSigned', False)
self.__security.setdefault('logoutResponseSigned', False)
self.__security.setdefault('signMetadata', False)
# Sign expected
self.__security.setdefault('wantMessagesSigned', False)
self.__security.setdefault('wantAssertionsSigned', False)
# NameID element expected
self.__security.setdefault('wantNameId', True)
# SAML responses with a InResponseTo attribute not rejected when requestId not passed
self.__security.setdefault('rejectUnsolicitedResponsesWithInResponseTo', False)
# Encrypt expected
self.__security.setdefault('wantAssertionsEncrypted', False)
self.__security.setdefault('wantNameIdEncrypted', False)
# Signature Algorithm
self.__security.setdefault('signatureAlgorithm', OneLogin_Saml2_Constants.RSA_SHA1)
# Digest Algorithm
self.__security.setdefault('digestAlgorithm', OneLogin_Saml2_Constants.SHA1)
# AttributeStatement required by default
self.__security.setdefault('wantAttributeStatement', True)
self.__idp.setdefault('x509cert', '')
self.__idp.setdefault('certFingerprint', '')
self.__idp.setdefault('certFingerprintAlgorithm', 'sha1')
self.__sp.setdefault('x509cert', '')
self.__sp.setdefault('privateKey', '')
self.__security.setdefault('requestedAuthnContext', True)
self.__security.setdefault('failOnAuthnContextMismatch', False) | [
"def",
"__add_default_values",
"(",
"self",
")",
":",
"self",
".",
"__sp",
".",
"setdefault",
"(",
"'assertionConsumerService'",
",",
"{",
"}",
")",
"self",
".",
"__sp",
"[",
"'assertionConsumerService'",
"]",
".",
"setdefault",
"(",
"'binding'",
",",
"OneLogi... | Add default values if the settings info is not complete | [
"Add",
"default",
"values",
"if",
"the",
"settings",
"info",
"is",
"not",
"complete"
] | 9fe7a72da5b4caa1529c1640b52d2649447ce49b | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/settings.py#L250-L307 | train | 224,720 |
onelogin/python-saml | src/onelogin/saml2/auth.py | OneLogin_Saml2_Auth.login | def login(self, return_to=None, force_authn=False, is_passive=False, set_nameid_policy=True, name_id_value_req=None):
"""
Initiates the SSO process.
:param return_to: Optional argument. The target URL the user should be redirected to after login.
:type return_to: string
:param force_authn: Optional argument. When true the AuthNRequest will set the ForceAuthn='true'.
:type force_authn: bool
:param is_passive: Optional argument. When true the AuthNRequest will set the Ispassive='true'.
:type is_passive: bool
:param set_nameid_policy: Optional argument. When true the AuthNRequest will set a nameIdPolicy element.
:type set_nameid_policy: bool
:param name_id_value_req: Optional argument. Indicates to the IdP the subject that should be authenticated
:type name_id_value_req: string
:returns: Redirection URL
:rtype: string
"""
authn_request = OneLogin_Saml2_Authn_Request(self.__settings, force_authn, is_passive, set_nameid_policy, name_id_value_req)
self.__last_request = authn_request.get_xml()
self.__last_request_id = authn_request.get_id()
saml_request = authn_request.get_request()
parameters = {'SAMLRequest': saml_request}
if return_to is not None:
parameters['RelayState'] = return_to
else:
parameters['RelayState'] = OneLogin_Saml2_Utils.get_self_url_no_query(self.__request_data)
security = self.__settings.get_security_data()
if security.get('authnRequestsSigned', False):
parameters['SigAlg'] = security['signatureAlgorithm']
parameters['Signature'] = self.build_request_signature(saml_request, parameters['RelayState'], security['signatureAlgorithm'])
return self.redirect_to(self.get_sso_url(), parameters) | python | def login(self, return_to=None, force_authn=False, is_passive=False, set_nameid_policy=True, name_id_value_req=None):
"""
Initiates the SSO process.
:param return_to: Optional argument. The target URL the user should be redirected to after login.
:type return_to: string
:param force_authn: Optional argument. When true the AuthNRequest will set the ForceAuthn='true'.
:type force_authn: bool
:param is_passive: Optional argument. When true the AuthNRequest will set the Ispassive='true'.
:type is_passive: bool
:param set_nameid_policy: Optional argument. When true the AuthNRequest will set a nameIdPolicy element.
:type set_nameid_policy: bool
:param name_id_value_req: Optional argument. Indicates to the IdP the subject that should be authenticated
:type name_id_value_req: string
:returns: Redirection URL
:rtype: string
"""
authn_request = OneLogin_Saml2_Authn_Request(self.__settings, force_authn, is_passive, set_nameid_policy, name_id_value_req)
self.__last_request = authn_request.get_xml()
self.__last_request_id = authn_request.get_id()
saml_request = authn_request.get_request()
parameters = {'SAMLRequest': saml_request}
if return_to is not None:
parameters['RelayState'] = return_to
else:
parameters['RelayState'] = OneLogin_Saml2_Utils.get_self_url_no_query(self.__request_data)
security = self.__settings.get_security_data()
if security.get('authnRequestsSigned', False):
parameters['SigAlg'] = security['signatureAlgorithm']
parameters['Signature'] = self.build_request_signature(saml_request, parameters['RelayState'], security['signatureAlgorithm'])
return self.redirect_to(self.get_sso_url(), parameters) | [
"def",
"login",
"(",
"self",
",",
"return_to",
"=",
"None",
",",
"force_authn",
"=",
"False",
",",
"is_passive",
"=",
"False",
",",
"set_nameid_policy",
"=",
"True",
",",
"name_id_value_req",
"=",
"None",
")",
":",
"authn_request",
"=",
"OneLogin_Saml2_Authn_R... | Initiates the SSO process.
:param return_to: Optional argument. The target URL the user should be redirected to after login.
:type return_to: string
:param force_authn: Optional argument. When true the AuthNRequest will set the ForceAuthn='true'.
:type force_authn: bool
:param is_passive: Optional argument. When true the AuthNRequest will set the Ispassive='true'.
:type is_passive: bool
:param set_nameid_policy: Optional argument. When true the AuthNRequest will set a nameIdPolicy element.
:type set_nameid_policy: bool
:param name_id_value_req: Optional argument. Indicates to the IdP the subject that should be authenticated
:type name_id_value_req: string
:returns: Redirection URL
:rtype: string | [
"Initiates",
"the",
"SSO",
"process",
"."
] | 9fe7a72da5b4caa1529c1640b52d2649447ce49b | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/auth.py#L326-L363 | train | 224,721 |
onelogin/python-saml | src/onelogin/saml2/auth.py | OneLogin_Saml2_Auth.logout | def logout(self, return_to=None, name_id=None, session_index=None, nq=None, name_id_format=None):
"""
Initiates the SLO process.
:param return_to: Optional argument. The target URL the user should be redirected to after logout.
:type return_to: string
:param name_id: The NameID that will be set in the LogoutRequest.
:type name_id: string
:param session_index: SessionIndex that identifies the session of the user.
:type session_index: string
:param nq: IDP Name Qualifier
:type: string
:param name_id_format: The NameID Format that will be set in the LogoutRequest.
:type: string
:returns: Redirection url
"""
slo_url = self.get_slo_url()
if slo_url is None:
raise OneLogin_Saml2_Error(
'The IdP does not support Single Log Out',
OneLogin_Saml2_Error.SAML_SINGLE_LOGOUT_NOT_SUPPORTED
)
if name_id is None and self.__nameid is not None:
name_id = self.__nameid
if name_id_format is None and self.__nameid_format is not None:
name_id_format = self.__nameid_format
logout_request = OneLogin_Saml2_Logout_Request(
self.__settings,
name_id=name_id,
session_index=session_index,
nq=nq,
name_id_format=name_id_format
)
self.__last_request = logout_request.get_xml()
self.__last_request_id = logout_request.id
saml_request = logout_request.get_request()
parameters = {'SAMLRequest': logout_request.get_request()}
if return_to is not None:
parameters['RelayState'] = return_to
else:
parameters['RelayState'] = OneLogin_Saml2_Utils.get_self_url_no_query(self.__request_data)
security = self.__settings.get_security_data()
if security.get('logoutRequestSigned', False):
parameters['SigAlg'] = security['signatureAlgorithm']
parameters['Signature'] = self.build_request_signature(saml_request, parameters['RelayState'], security['signatureAlgorithm'])
return self.redirect_to(slo_url, parameters) | python | def logout(self, return_to=None, name_id=None, session_index=None, nq=None, name_id_format=None):
"""
Initiates the SLO process.
:param return_to: Optional argument. The target URL the user should be redirected to after logout.
:type return_to: string
:param name_id: The NameID that will be set in the LogoutRequest.
:type name_id: string
:param session_index: SessionIndex that identifies the session of the user.
:type session_index: string
:param nq: IDP Name Qualifier
:type: string
:param name_id_format: The NameID Format that will be set in the LogoutRequest.
:type: string
:returns: Redirection url
"""
slo_url = self.get_slo_url()
if slo_url is None:
raise OneLogin_Saml2_Error(
'The IdP does not support Single Log Out',
OneLogin_Saml2_Error.SAML_SINGLE_LOGOUT_NOT_SUPPORTED
)
if name_id is None and self.__nameid is not None:
name_id = self.__nameid
if name_id_format is None and self.__nameid_format is not None:
name_id_format = self.__nameid_format
logout_request = OneLogin_Saml2_Logout_Request(
self.__settings,
name_id=name_id,
session_index=session_index,
nq=nq,
name_id_format=name_id_format
)
self.__last_request = logout_request.get_xml()
self.__last_request_id = logout_request.id
saml_request = logout_request.get_request()
parameters = {'SAMLRequest': logout_request.get_request()}
if return_to is not None:
parameters['RelayState'] = return_to
else:
parameters['RelayState'] = OneLogin_Saml2_Utils.get_self_url_no_query(self.__request_data)
security = self.__settings.get_security_data()
if security.get('logoutRequestSigned', False):
parameters['SigAlg'] = security['signatureAlgorithm']
parameters['Signature'] = self.build_request_signature(saml_request, parameters['RelayState'], security['signatureAlgorithm'])
return self.redirect_to(slo_url, parameters) | [
"def",
"logout",
"(",
"self",
",",
"return_to",
"=",
"None",
",",
"name_id",
"=",
"None",
",",
"session_index",
"=",
"None",
",",
"nq",
"=",
"None",
",",
"name_id_format",
"=",
"None",
")",
":",
"slo_url",
"=",
"self",
".",
"get_slo_url",
"(",
")",
"... | Initiates the SLO process.
:param return_to: Optional argument. The target URL the user should be redirected to after logout.
:type return_to: string
:param name_id: The NameID that will be set in the LogoutRequest.
:type name_id: string
:param session_index: SessionIndex that identifies the session of the user.
:type session_index: string
:param nq: IDP Name Qualifier
:type: string
:param name_id_format: The NameID Format that will be set in the LogoutRequest.
:type: string
:returns: Redirection url | [
"Initiates",
"the",
"SLO",
"process",
"."
] | 9fe7a72da5b4caa1529c1640b52d2649447ce49b | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/auth.py#L365-L419 | train | 224,722 |
onelogin/python-saml | src/onelogin/saml2/auth.py | OneLogin_Saml2_Auth.get_slo_url | def get_slo_url(self):
"""
Gets the SLO URL.
:returns: An URL, the SLO endpoint of the IdP
:rtype: string
"""
url = None
idp_data = self.__settings.get_idp_data()
if 'singleLogoutService' in idp_data.keys() and 'url' in idp_data['singleLogoutService']:
url = idp_data['singleLogoutService']['url']
return url | python | def get_slo_url(self):
"""
Gets the SLO URL.
:returns: An URL, the SLO endpoint of the IdP
:rtype: string
"""
url = None
idp_data = self.__settings.get_idp_data()
if 'singleLogoutService' in idp_data.keys() and 'url' in idp_data['singleLogoutService']:
url = idp_data['singleLogoutService']['url']
return url | [
"def",
"get_slo_url",
"(",
"self",
")",
":",
"url",
"=",
"None",
"idp_data",
"=",
"self",
".",
"__settings",
".",
"get_idp_data",
"(",
")",
"if",
"'singleLogoutService'",
"in",
"idp_data",
".",
"keys",
"(",
")",
"and",
"'url'",
"in",
"idp_data",
"[",
"'s... | Gets the SLO URL.
:returns: An URL, the SLO endpoint of the IdP
:rtype: string | [
"Gets",
"the",
"SLO",
"URL",
"."
] | 9fe7a72da5b4caa1529c1640b52d2649447ce49b | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/auth.py#L431-L442 | train | 224,723 |
sryza/spark-timeseries | python/sparkts/utils.py | add_pyspark_path | def add_pyspark_path():
"""Add PySpark to the library path based on the value of SPARK_HOME. """
try:
spark_home = os.environ['SPARK_HOME']
sys.path.append(os.path.join(spark_home, 'python'))
py4j_src_zip = glob(os.path.join(spark_home, 'python',
'lib', 'py4j-*-src.zip'))
if len(py4j_src_zip) == 0:
raise ValueError('py4j source archive not found in %s'
% os.path.join(spark_home, 'python', 'lib'))
else:
py4j_src_zip = sorted(py4j_src_zip)[::-1]
sys.path.append(py4j_src_zip[0])
except KeyError:
logging.error("""SPARK_HOME was not set. please set it. e.g.
SPARK_HOME='/home/...' ./bin/pyspark [program]""")
exit(-1)
except ValueError as e:
logging.error(str(e))
exit(-1) | python | def add_pyspark_path():
"""Add PySpark to the library path based on the value of SPARK_HOME. """
try:
spark_home = os.environ['SPARK_HOME']
sys.path.append(os.path.join(spark_home, 'python'))
py4j_src_zip = glob(os.path.join(spark_home, 'python',
'lib', 'py4j-*-src.zip'))
if len(py4j_src_zip) == 0:
raise ValueError('py4j source archive not found in %s'
% os.path.join(spark_home, 'python', 'lib'))
else:
py4j_src_zip = sorted(py4j_src_zip)[::-1]
sys.path.append(py4j_src_zip[0])
except KeyError:
logging.error("""SPARK_HOME was not set. please set it. e.g.
SPARK_HOME='/home/...' ./bin/pyspark [program]""")
exit(-1)
except ValueError as e:
logging.error(str(e))
exit(-1) | [
"def",
"add_pyspark_path",
"(",
")",
":",
"try",
":",
"spark_home",
"=",
"os",
".",
"environ",
"[",
"'SPARK_HOME'",
"]",
"sys",
".",
"path",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"spark_home",
",",
"'python'",
")",
")",
"py4j_src_zip... | Add PySpark to the library path based on the value of SPARK_HOME. | [
"Add",
"PySpark",
"to",
"the",
"library",
"path",
"based",
"on",
"the",
"value",
"of",
"SPARK_HOME",
"."
] | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/utils.py#L9-L30 | train | 224,724 |
sryza/spark-timeseries | python/sparkts/utils.py | datetime_to_nanos | def datetime_to_nanos(dt):
"""
Accepts a string, Pandas Timestamp, or long, and returns nanos since the epoch.
"""
if isinstance(dt, pd.Timestamp):
return dt.value
elif isinstance(dt, str):
return pd.Timestamp(dt).value
elif isinstance(dt, long):
return dt
elif isinstance(dt, datetime):
return long(dt.strftime("%s%f")) * 1000
raise ValueError | python | def datetime_to_nanos(dt):
"""
Accepts a string, Pandas Timestamp, or long, and returns nanos since the epoch.
"""
if isinstance(dt, pd.Timestamp):
return dt.value
elif isinstance(dt, str):
return pd.Timestamp(dt).value
elif isinstance(dt, long):
return dt
elif isinstance(dt, datetime):
return long(dt.strftime("%s%f")) * 1000
raise ValueError | [
"def",
"datetime_to_nanos",
"(",
"dt",
")",
":",
"if",
"isinstance",
"(",
"dt",
",",
"pd",
".",
"Timestamp",
")",
":",
"return",
"dt",
".",
"value",
"elif",
"isinstance",
"(",
"dt",
",",
"str",
")",
":",
"return",
"pd",
".",
"Timestamp",
"(",
"dt",
... | Accepts a string, Pandas Timestamp, or long, and returns nanos since the epoch. | [
"Accepts",
"a",
"string",
"Pandas",
"Timestamp",
"or",
"long",
"and",
"returns",
"nanos",
"since",
"the",
"epoch",
"."
] | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/utils.py#L37-L50 | train | 224,725 |
sryza/spark-timeseries | python/sparkts/datetimeindex.py | uniform | def uniform(start, end=None, periods=None, freq=None, sc=None):
"""
Instantiates a uniform DateTimeIndex.
Either end or periods must be specified.
Parameters
----------
start : string, long (nanos from epoch), or Pandas Timestamp
end : string, long (nanos from epoch), or Pandas Timestamp
periods : int
freq : a frequency object
sc : SparkContext
"""
dtmodule = sc._jvm.com.cloudera.sparkts.__getattr__('DateTimeIndex$').__getattr__('MODULE$')
if freq is None:
raise ValueError("Missing frequency")
elif end is None and periods == None:
raise ValueError("Need an end date or number of periods")
elif end is not None:
return DateTimeIndex(dtmodule.uniformFromInterval( \
datetime_to_nanos(start), datetime_to_nanos(end), freq._jfreq))
else:
return DateTimeIndex(dtmodule.uniform( \
datetime_to_nanos(start), periods, freq._jfreq)) | python | def uniform(start, end=None, periods=None, freq=None, sc=None):
"""
Instantiates a uniform DateTimeIndex.
Either end or periods must be specified.
Parameters
----------
start : string, long (nanos from epoch), or Pandas Timestamp
end : string, long (nanos from epoch), or Pandas Timestamp
periods : int
freq : a frequency object
sc : SparkContext
"""
dtmodule = sc._jvm.com.cloudera.sparkts.__getattr__('DateTimeIndex$').__getattr__('MODULE$')
if freq is None:
raise ValueError("Missing frequency")
elif end is None and periods == None:
raise ValueError("Need an end date or number of periods")
elif end is not None:
return DateTimeIndex(dtmodule.uniformFromInterval( \
datetime_to_nanos(start), datetime_to_nanos(end), freq._jfreq))
else:
return DateTimeIndex(dtmodule.uniform( \
datetime_to_nanos(start), periods, freq._jfreq)) | [
"def",
"uniform",
"(",
"start",
",",
"end",
"=",
"None",
",",
"periods",
"=",
"None",
",",
"freq",
"=",
"None",
",",
"sc",
"=",
"None",
")",
":",
"dtmodule",
"=",
"sc",
".",
"_jvm",
".",
"com",
".",
"cloudera",
".",
"sparkts",
".",
"__getattr__",
... | Instantiates a uniform DateTimeIndex.
Either end or periods must be specified.
Parameters
----------
start : string, long (nanos from epoch), or Pandas Timestamp
end : string, long (nanos from epoch), or Pandas Timestamp
periods : int
freq : a frequency object
sc : SparkContext | [
"Instantiates",
"a",
"uniform",
"DateTimeIndex",
"."
] | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/datetimeindex.py#L129-L153 | train | 224,726 |
sryza/spark-timeseries | python/sparkts/datetimeindex.py | DateTimeIndex._zdt_to_nanos | def _zdt_to_nanos(self, zdt):
"""Extracts nanoseconds from a ZonedDateTime"""
instant = zdt.toInstant()
return instant.getNano() + instant.getEpochSecond() * 1000000000 | python | def _zdt_to_nanos(self, zdt):
"""Extracts nanoseconds from a ZonedDateTime"""
instant = zdt.toInstant()
return instant.getNano() + instant.getEpochSecond() * 1000000000 | [
"def",
"_zdt_to_nanos",
"(",
"self",
",",
"zdt",
")",
":",
"instant",
"=",
"zdt",
".",
"toInstant",
"(",
")",
"return",
"instant",
".",
"getNano",
"(",
")",
"+",
"instant",
".",
"getEpochSecond",
"(",
")",
"*",
"1000000000"
] | Extracts nanoseconds from a ZonedDateTime | [
"Extracts",
"nanoseconds",
"from",
"a",
"ZonedDateTime"
] | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/datetimeindex.py#L24-L27 | train | 224,727 |
sryza/spark-timeseries | python/sparkts/datetimeindex.py | DateTimeIndex.datetime_at_loc | def datetime_at_loc(self, loc):
"""Returns the timestamp at the given integer location as a Pandas Timestamp."""
return pd.Timestamp(self._zdt_to_nanos(self._jdt_index.dateTimeAtLoc(loc))) | python | def datetime_at_loc(self, loc):
"""Returns the timestamp at the given integer location as a Pandas Timestamp."""
return pd.Timestamp(self._zdt_to_nanos(self._jdt_index.dateTimeAtLoc(loc))) | [
"def",
"datetime_at_loc",
"(",
"self",
",",
"loc",
")",
":",
"return",
"pd",
".",
"Timestamp",
"(",
"self",
".",
"_zdt_to_nanos",
"(",
"self",
".",
"_jdt_index",
".",
"dateTimeAtLoc",
"(",
"loc",
")",
")",
")"
] | Returns the timestamp at the given integer location as a Pandas Timestamp. | [
"Returns",
"the",
"timestamp",
"at",
"the",
"given",
"integer",
"location",
"as",
"a",
"Pandas",
"Timestamp",
"."
] | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/datetimeindex.py#L37-L39 | train | 224,728 |
sryza/spark-timeseries | python/sparkts/datetimeindex.py | DateTimeIndex.islice | def islice(self, start, end):
"""
Returns a new DateTimeIndex, containing a subslice of the timestamps in this index,
as specified by the given integer start and end locations.
Parameters
----------
start : int
The location of the start of the range, inclusive.
end : int
The location of the end of the range, exclusive.
"""
jdt_index = self._jdt_index.islice(start, end)
return DateTimeIndex(jdt_index=jdt_index) | python | def islice(self, start, end):
"""
Returns a new DateTimeIndex, containing a subslice of the timestamps in this index,
as specified by the given integer start and end locations.
Parameters
----------
start : int
The location of the start of the range, inclusive.
end : int
The location of the end of the range, exclusive.
"""
jdt_index = self._jdt_index.islice(start, end)
return DateTimeIndex(jdt_index=jdt_index) | [
"def",
"islice",
"(",
"self",
",",
"start",
",",
"end",
")",
":",
"jdt_index",
"=",
"self",
".",
"_jdt_index",
".",
"islice",
"(",
"start",
",",
"end",
")",
"return",
"DateTimeIndex",
"(",
"jdt_index",
"=",
"jdt_index",
")"
] | Returns a new DateTimeIndex, containing a subslice of the timestamps in this index,
as specified by the given integer start and end locations.
Parameters
----------
start : int
The location of the start of the range, inclusive.
end : int
The location of the end of the range, exclusive. | [
"Returns",
"a",
"new",
"DateTimeIndex",
"containing",
"a",
"subslice",
"of",
"the",
"timestamps",
"in",
"this",
"index",
"as",
"specified",
"by",
"the",
"given",
"integer",
"start",
"and",
"end",
"locations",
"."
] | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/datetimeindex.py#L51-L64 | train | 224,729 |
sryza/spark-timeseries | python/sparkts/models/AutoregressionX.py | fit_model | def fit_model(y, x, yMaxLag, xMaxLag, includesOriginalX=True, noIntercept=False, sc=None):
"""
Fit an autoregressive model with additional exogenous variables. The model predicts a value
at time t of a dependent variable, Y, as a function of previous values of Y, and a combination
of previous values of exogenous regressors X_i, and current values of exogenous regressors X_i.
This is a generalization of an AR model, which is simply an ARX with no exogenous regressors.
The fitting procedure here is the same, using least squares. Note that all lags up to the
maxlag are included. In the case of the dependent variable the max lag is 'yMaxLag', while
for the exogenous variables the max lag is 'xMaxLag', with which each column in the original
matrix provided is lagged accordingly.
Parameters
----------
y:
the dependent variable, time series as a Numpy array
x:
a matrix of exogenous variables as a Numpy array
yMaxLag:
the maximum lag order for the dependent variable
xMaxLag:
the maximum lag order for exogenous variables
includesOriginalX:
a boolean flag indicating if the non-lagged exogenous variables should
be included. Default is true
noIntercept:
a boolean flag indicating if the intercept should be dropped. Default is
false
Returns an ARXModel, which is an autoregressive model with exogenous variables.
"""
assert sc != None, "Missing SparkContext"
jvm = sc._jvm
jmodel = jvm.com.cloudera.sparkts.models.AutoregressionX.fitModel(_nparray2breezevector(sc, y.toArray()), _nparray2breezematrix(sc, x.toArray()), yMaxLag, xMaxLag, includesOriginalX, noIntercept)
return ARXModel(jmodel=jmodel, sc=sc) | python | def fit_model(y, x, yMaxLag, xMaxLag, includesOriginalX=True, noIntercept=False, sc=None):
"""
Fit an autoregressive model with additional exogenous variables. The model predicts a value
at time t of a dependent variable, Y, as a function of previous values of Y, and a combination
of previous values of exogenous regressors X_i, and current values of exogenous regressors X_i.
This is a generalization of an AR model, which is simply an ARX with no exogenous regressors.
The fitting procedure here is the same, using least squares. Note that all lags up to the
maxlag are included. In the case of the dependent variable the max lag is 'yMaxLag', while
for the exogenous variables the max lag is 'xMaxLag', with which each column in the original
matrix provided is lagged accordingly.
Parameters
----------
y:
the dependent variable, time series as a Numpy array
x:
a matrix of exogenous variables as a Numpy array
yMaxLag:
the maximum lag order for the dependent variable
xMaxLag:
the maximum lag order for exogenous variables
includesOriginalX:
a boolean flag indicating if the non-lagged exogenous variables should
be included. Default is true
noIntercept:
a boolean flag indicating if the intercept should be dropped. Default is
false
Returns an ARXModel, which is an autoregressive model with exogenous variables.
"""
assert sc != None, "Missing SparkContext"
jvm = sc._jvm
jmodel = jvm.com.cloudera.sparkts.models.AutoregressionX.fitModel(_nparray2breezevector(sc, y.toArray()), _nparray2breezematrix(sc, x.toArray()), yMaxLag, xMaxLag, includesOriginalX, noIntercept)
return ARXModel(jmodel=jmodel, sc=sc) | [
"def",
"fit_model",
"(",
"y",
",",
"x",
",",
"yMaxLag",
",",
"xMaxLag",
",",
"includesOriginalX",
"=",
"True",
",",
"noIntercept",
"=",
"False",
",",
"sc",
"=",
"None",
")",
":",
"assert",
"sc",
"!=",
"None",
",",
"\"Missing SparkContext\"",
"jvm",
"=",
... | Fit an autoregressive model with additional exogenous variables. The model predicts a value
at time t of a dependent variable, Y, as a function of previous values of Y, and a combination
of previous values of exogenous regressors X_i, and current values of exogenous regressors X_i.
This is a generalization of an AR model, which is simply an ARX with no exogenous regressors.
The fitting procedure here is the same, using least squares. Note that all lags up to the
maxlag are included. In the case of the dependent variable the max lag is 'yMaxLag', while
for the exogenous variables the max lag is 'xMaxLag', with which each column in the original
matrix provided is lagged accordingly.
Parameters
----------
y:
the dependent variable, time series as a Numpy array
x:
a matrix of exogenous variables as a Numpy array
yMaxLag:
the maximum lag order for the dependent variable
xMaxLag:
the maximum lag order for exogenous variables
includesOriginalX:
a boolean flag indicating if the non-lagged exogenous variables should
be included. Default is true
noIntercept:
a boolean flag indicating if the intercept should be dropped. Default is
false
Returns an ARXModel, which is an autoregressive model with exogenous variables. | [
"Fit",
"an",
"autoregressive",
"model",
"with",
"additional",
"exogenous",
"variables",
".",
"The",
"model",
"predicts",
"a",
"value",
"at",
"time",
"t",
"of",
"a",
"dependent",
"variable",
"Y",
"as",
"a",
"function",
"of",
"previous",
"values",
"of",
"Y",
... | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/models/AutoregressionX.py#L11-L45 | train | 224,730 |
sryza/spark-timeseries | python/sparkts/timeseriesrdd.py | time_series_rdd_from_pandas_series_rdd | def time_series_rdd_from_pandas_series_rdd(series_rdd):
"""
Instantiates a TimeSeriesRDD from an RDD of Pandas Series objects.
The series in the RDD are all expected to have the same DatetimeIndex.
Parameters
----------
series_rdd : RDD of (string, pandas.Series) tuples
sc : SparkContext
"""
first = series_rdd.first()
dt_index = irregular(first[1].index, series_rdd.ctx)
return TimeSeriesRDD(dt_index, series_rdd.mapValues(lambda x: x.values)) | python | def time_series_rdd_from_pandas_series_rdd(series_rdd):
"""
Instantiates a TimeSeriesRDD from an RDD of Pandas Series objects.
The series in the RDD are all expected to have the same DatetimeIndex.
Parameters
----------
series_rdd : RDD of (string, pandas.Series) tuples
sc : SparkContext
"""
first = series_rdd.first()
dt_index = irregular(first[1].index, series_rdd.ctx)
return TimeSeriesRDD(dt_index, series_rdd.mapValues(lambda x: x.values)) | [
"def",
"time_series_rdd_from_pandas_series_rdd",
"(",
"series_rdd",
")",
":",
"first",
"=",
"series_rdd",
".",
"first",
"(",
")",
"dt_index",
"=",
"irregular",
"(",
"first",
"[",
"1",
"]",
".",
"index",
",",
"series_rdd",
".",
"ctx",
")",
"return",
"TimeSeri... | Instantiates a TimeSeriesRDD from an RDD of Pandas Series objects.
The series in the RDD are all expected to have the same DatetimeIndex.
Parameters
----------
series_rdd : RDD of (string, pandas.Series) tuples
sc : SparkContext | [
"Instantiates",
"a",
"TimeSeriesRDD",
"from",
"an",
"RDD",
"of",
"Pandas",
"Series",
"objects",
"."
] | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/timeseriesrdd.py#L201-L214 | train | 224,731 |
sryza/spark-timeseries | python/sparkts/timeseriesrdd.py | time_series_rdd_from_observations | def time_series_rdd_from_observations(dt_index, df, ts_col, key_col, val_col):
"""
Instantiates a TimeSeriesRDD from a DataFrame of observations.
An observation is a row containing a timestamp, a string key, and float value.
Parameters
----------
dt_index : DateTimeIndex
The index of the RDD to create. Observations not contained in this index will be ignored.
df : DataFrame
ts_col : string
The name of the column in the DataFrame containing the timestamps.
key_col : string
The name of the column in the DataFrame containing the keys.
val_col : string
The name of the column in the DataFrame containing the values.
"""
jvm = df._sc._jvm
jtsrdd = jvm.com.cloudera.sparkts.api.java.JavaTimeSeriesRDDFactory.timeSeriesRDDFromObservations( \
dt_index._jdt_index, df._jdf, ts_col, key_col, val_col)
return TimeSeriesRDD(None, None, jtsrdd, df._sc) | python | def time_series_rdd_from_observations(dt_index, df, ts_col, key_col, val_col):
"""
Instantiates a TimeSeriesRDD from a DataFrame of observations.
An observation is a row containing a timestamp, a string key, and float value.
Parameters
----------
dt_index : DateTimeIndex
The index of the RDD to create. Observations not contained in this index will be ignored.
df : DataFrame
ts_col : string
The name of the column in the DataFrame containing the timestamps.
key_col : string
The name of the column in the DataFrame containing the keys.
val_col : string
The name of the column in the DataFrame containing the values.
"""
jvm = df._sc._jvm
jtsrdd = jvm.com.cloudera.sparkts.api.java.JavaTimeSeriesRDDFactory.timeSeriesRDDFromObservations( \
dt_index._jdt_index, df._jdf, ts_col, key_col, val_col)
return TimeSeriesRDD(None, None, jtsrdd, df._sc) | [
"def",
"time_series_rdd_from_observations",
"(",
"dt_index",
",",
"df",
",",
"ts_col",
",",
"key_col",
",",
"val_col",
")",
":",
"jvm",
"=",
"df",
".",
"_sc",
".",
"_jvm",
"jtsrdd",
"=",
"jvm",
".",
"com",
".",
"cloudera",
".",
"sparkts",
".",
"api",
"... | Instantiates a TimeSeriesRDD from a DataFrame of observations.
An observation is a row containing a timestamp, a string key, and float value.
Parameters
----------
dt_index : DateTimeIndex
The index of the RDD to create. Observations not contained in this index will be ignored.
df : DataFrame
ts_col : string
The name of the column in the DataFrame containing the timestamps.
key_col : string
The name of the column in the DataFrame containing the keys.
val_col : string
The name of the column in the DataFrame containing the values. | [
"Instantiates",
"a",
"TimeSeriesRDD",
"from",
"a",
"DataFrame",
"of",
"observations",
"."
] | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/timeseriesrdd.py#L216-L237 | train | 224,732 |
def map_series(self, fn, dt_index=None):
    """Return a TimeSeriesRDD with `fn` applied to every series in this RDD.

    Either the series produced by the given function must conform to this
    TimeSeriesRDD's index, or a new DateTimeIndex that they conform to must
    be supplied.

    Args:
      fn: A function that maps arrays of floats to arrays of floats.
      dt_index: Optional DateTimeIndex for the produced TimeSeriesRDD;
        defaults to this RDD's own index.

    Returns:
      A new TimeSeriesRDD over `dt_index` (or this RDD's index).
    """
    # PEP 8 / E711: compare to None with identity, not equality.
    if dt_index is None:
        dt_index = self.index()
    return TimeSeriesRDD(dt_index, self.map(fn))
"def",
"map_series",
"(",
"self",
",",
"fn",
",",
"dt_index",
"=",
"None",
")",
":",
"if",
"dt_index",
"==",
"None",
":",
"dt_index",
"=",
"self",
".",
"index",
"(",
")",
"return",
"TimeSeriesRDD",
"(",
"dt_index",
",",
"self",
".",
"map",
"(",
"fn",... | Returns a TimeSeriesRDD, with a transformation applied to all the series in this RDD.
Either the series produced by the given function should conform to this TimeSeriesRDD's
index, or a new DateTimeIndex should be given that they conform to.
Parameters
----------
fn : function
A function that maps arrays of floats to arrays of floats.
dt_index : DateTimeIndex
A DateTimeIndex for the produced TimeseriesRDD. | [
"Returns",
"a",
"TimeSeriesRDD",
"with",
"a",
"transformation",
"applied",
"to",
"all",
"the",
"series",
"in",
"this",
"RDD",
"."
] | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/timeseriesrdd.py#L77-L93 | train | 224,733 |
def to_instants(self):
    """Return an RDD of instants, each a horizontal slice of this TimeSeriesRDD.

    This essentially transposes the TimeSeriesRDD, producing an RDD of
    (datetime, numpy array) tuples, where each array contains all the
    observations that occurred at that time.
    """
    to_bytes = self.ctx._jvm.com.cloudera.sparkts.InstantToBytes()
    serialized = self._jtsrdd.toInstants(-1).map(to_bytes)
    return RDD(serialized, self.ctx, _InstantDeserializer())
"def",
"to_instants",
"(",
"self",
")",
":",
"jrdd",
"=",
"self",
".",
"_jtsrdd",
".",
"toInstants",
"(",
"-",
"1",
")",
".",
"map",
"(",
"self",
".",
"ctx",
".",
"_jvm",
".",
"com",
".",
"cloudera",
".",
"sparkts",
".",
"InstantToBytes",
"(",
")",... | Returns an RDD of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing an RDD of tuples of datetime and
a numpy array containing all the observations that occurred at that time. | [
"Returns",
"an",
"RDD",
"of",
"instants",
"each",
"a",
"horizontal",
"slice",
"of",
"this",
"TimeSeriesRDD",
"at",
"a",
"time",
"."
] | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/timeseriesrdd.py#L95-L104 | train | 224,734 |
def to_instants_dataframe(self, sql_ctx):
    """Return a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD.

    This essentially transposes the TimeSeriesRDD, producing a DataFrame in
    which each column corresponds to a key from one of the rows of the
    TimeSeriesRDD.
    """
    jdf = self._jtsrdd.toInstantsDataFrame(sql_ctx._ssql_ctx, -1)
    return DataFrame(jdf, sql_ctx)
"def",
"to_instants_dataframe",
"(",
"self",
",",
"sql_ctx",
")",
":",
"ssql_ctx",
"=",
"sql_ctx",
".",
"_ssql_ctx",
"jdf",
"=",
"self",
".",
"_jtsrdd",
".",
"toInstantsDataFrame",
"(",
"ssql_ctx",
",",
"-",
"1",
")",
"return",
"DataFrame",
"(",
"jdf",
","... | Returns a DataFrame of instants, each a horizontal slice of this TimeSeriesRDD at a time.
This essentially transposes the TimeSeriesRDD, producing a DataFrame where each column
is a key from one of the rows in the TimeSeriesRDD.
"Returns",
"a",
"DataFrame",
"of",
"instants",
"each",
"a",
"horizontal",
"slice",
"of",
"this",
"TimeSeriesRDD",
"at",
"a",
"time",
"."
] | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/timeseriesrdd.py#L106-L115 | train | 224,735 |
def to_observations_dataframe(self, sql_ctx, ts_col='timestamp', key_col='key', val_col='value'):
    """Return a DataFrame of observations: one (timestamp, key, value) row each.

    Args:
      sql_ctx: SQLContext used to construct the DataFrame.
      ts_col: Name for the timestamp column.
      key_col: Name for the key column.
      val_col: Name for the value column.
    """
    jdf = self._jtsrdd.toObservationsDataFrame(
        sql_ctx._ssql_ctx, ts_col, key_col, val_col)
    return DataFrame(jdf, sql_ctx)
"def",
"to_observations_dataframe",
"(",
"self",
",",
"sql_ctx",
",",
"ts_col",
"=",
"'timestamp'",
",",
"key_col",
"=",
"'key'",
",",
"val_col",
"=",
"'value'",
")",
":",
"ssql_ctx",
"=",
"sql_ctx",
".",
"_ssql_ctx",
"jdf",
"=",
"self",
".",
"_jtsrdd",
".... | Returns a DataFrame of observations, each containing a timestamp, a key, and a value.
Parameters
----------
sql_ctx : SQLContext
ts_col : string
The name for the timestamp column.
key_col : string
The name for the key column.
val_col : string
The name for the value column. | [
"Returns",
"a",
"DataFrame",
"of",
"observations",
"each",
"containing",
"a",
"timestamp",
"a",
"key",
"and",
"a",
"value",
"."
] | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/timeseriesrdd.py#L123-L139 | train | 224,736 |
def to_pandas_series_rdd(self):
    """Return an RDD of Pandas Series objects indexed with Pandas DatetimeIndexes."""
    pandas_index = self.index().to_pandas_index()

    def _to_series(entry):
        # entry is a (key, values) pair; wrap the values in a Series that
        # shares this RDD's converted index.
        return (entry[0], pd.Series(entry[1], pandas_index))

    return self.map(_to_series)
"def",
"to_pandas_series_rdd",
"(",
"self",
")",
":",
"pd_index",
"=",
"self",
".",
"index",
"(",
")",
".",
"to_pandas_index",
"(",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"(",
"x",
"[",
"0",
"]",
",",
"pd",
".",
"Series",
"(",
... | Returns an RDD of Pandas Series objects indexed with Pandas DatetimeIndexes | [
"Returns",
"an",
"RDD",
"of",
"Pandas",
"Series",
"objects",
"indexed",
"with",
"Pandas",
"DatetimeIndexes"
] | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/timeseriesrdd.py#L141-L146 | train | 224,737 |
def to_pandas_dataframe(self):
    """Pull the contents of the RDD to the driver as a Pandas DataFrame.

    Each (key, series) record in the RDD becomes a column, and the DataFrame
    is indexed with a DatetimeIndex generated from this RDD's index.
    """
    from collections import OrderedDict  # local: only needed here

    pandas_index = self.index().to_pandas_index()
    # pd.DataFrame.from_items was deprecated in pandas 0.23 and removed in
    # 1.0, so the original call crashes on modern pandas. Building from an
    # OrderedDict of (key, values) pairs preserves column order the same way.
    frame = pd.DataFrame(OrderedDict(self.collect()))
    return frame.set_index(pandas_index)
"def",
"to_pandas_dataframe",
"(",
"self",
")",
":",
"pd_index",
"=",
"self",
".",
"index",
"(",
")",
".",
"to_pandas_index",
"(",
")",
"return",
"pd",
".",
"DataFrame",
".",
"from_items",
"(",
"self",
".",
"collect",
"(",
")",
")",
".",
"set_index",
"... | Pulls the contents of the RDD to the driver and places them in a Pandas DataFrame.
Each record in the RDD becomes and column, and the DataFrame is indexed with a
DatetimeIndex generated from this RDD's index. | [
"Pulls",
"the",
"contents",
"of",
"the",
"RDD",
"to",
"the",
"driver",
"and",
"places",
"them",
"in",
"a",
"Pandas",
"DataFrame",
".",
"Each",
"record",
"in",
"the",
"RDD",
"becomes",
"and",
"column",
"and",
"the",
"DataFrame",
"is",
"indexed",
"with",
"... | 280aa887dc08ab114411245268f230fdabb76eec | https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/timeseriesrdd.py#L148-L156 | train | 224,738 |
google/textfsm | textfsm/texttable.py | Row._SetHeader | def _SetHeader(self, values):
"""Set the row's header from a list."""
if self._values and len(values) != len(self._values):
raise ValueError('Header values not equal to existing data width.')
if not self._values:
for _ in range(len(values)):
self._values.append(None)
self._keys = list(values)
self._BuildIndex() | python | def _SetHeader(self, values):
"""Set the row's header from a list."""
if self._values and len(values) != len(self._values):
raise ValueError('Header values not equal to existing data width.')
if not self._values:
for _ in range(len(values)):
self._values.append(None)
self._keys = list(values)
self._BuildIndex() | [
"def",
"_SetHeader",
"(",
"self",
",",
"values",
")",
":",
"if",
"self",
".",
"_values",
"and",
"len",
"(",
"values",
")",
"!=",
"len",
"(",
"self",
".",
"_values",
")",
":",
"raise",
"ValueError",
"(",
"'Header values not equal to existing data width.'",
")... | Set the row's header from a list. | [
"Set",
"the",
"row",
"s",
"header",
"from",
"a",
"list",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/texttable.py#L190-L198 | train | 224,739 |
def _SetValues(self, values):
    """Set values from a supplied Row, dictionary or list.

    Args:
      values: A Row with an identical header, a dict indexed by column name,
        or a list/tuple of the same length as this row.

    Raises:
      TypeError: Argument is not a Row, list or dict; or the list length or
        dictionary keys don't match this row.
    """

    def _ToStr(value):
        """Convert an individual entry (or list of entries) to string."""
        if isinstance(value, (list, tuple)):
            return [str(val) for val in value]
        return str(value)

    # A row with an identical header can be copied directly.
    if isinstance(values, Row):
        if self._keys != values.header:
            raise TypeError('Attempt to append row with mismatched header.')
        self._values = copy.deepcopy(values.values)
    elif isinstance(values, dict):
        for key in self._keys:
            if key not in values:
                raise TypeError('Dictionary key mismatch with row.')
        for key in self._keys:
            self[key] = _ToStr(values[key])
    elif isinstance(values, (list, tuple)):
        if len(values) != len(self._values):
            raise TypeError('Supplied list length != row length')
        for index, value in enumerate(values):
            self._values[index] = _ToStr(value)
    else:
        # Bug fix: the original passed type(values) as a second TypeError
        # argument instead of interpolating it into the message.
        raise TypeError('Supplied argument must be Row, dict or list, not %s'
                        % type(values))
"def",
"_SetValues",
"(",
"self",
",",
"values",
")",
":",
"def",
"_ToStr",
"(",
"value",
")",
":",
"\"\"\"Convert individul list entries to string.\"\"\"",
"if",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"result",
"=",
"[",
... | Set values from supplied dictionary or list.
Args:
values: A Row, dict indexed by column name, or list.
Raises:
TypeError: Argument is not a list or dict, or list is not equal row
length or dictionary keys don't match. | [
"Set",
"values",
"from",
"supplied",
"dictionary",
"or",
"list",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/texttable.py#L222-L264 | train | 224,740 |
def Filter(self, function=None):
    """Construct a TextTable from the rows for which the function returns True.

    Args:
      function: A function applied to each row which returns a bool. If
        function is None, all rows with empty column values are removed.

    Returns:
      A new TextTable().

    Raises:
      TableError: When an invalid row entry is Append()'d.
    """

    def _Flatten(value):
        """Join a (possibly nested) list of strings into a single string."""
        if isinstance(value, str):
            return value
        return ''.join(_Flatten(item) for item in value)

    if function is None:
        # Default predicate: keep rows with at least one non-empty value.
        # (Named defs instead of assigned lambdas, per PEP 8 / E731.)
        def function(row):
            return bool(_Flatten(row.values))

    new_table = self.__class__()
    # pylint: disable=protected-access
    new_table._table = [self.header]
    for row in self:
        # NOTE: only an exact boolean True keeps the row, matching the
        # documented contract that `function` returns a bool.
        if function(row) is True:
            new_table.Append(row)
    return new_table
"def",
"Filter",
"(",
"self",
",",
"function",
"=",
"None",
")",
":",
"flat",
"=",
"lambda",
"x",
":",
"x",
"if",
"isinstance",
"(",
"x",
",",
"str",
")",
"else",
"''",
".",
"join",
"(",
"[",
"flat",
"(",
"y",
")",
"for",
"y",
"in",
"x",
"]",... | Construct Textable from the rows of which the function returns true.
Args:
function: A function applied to each row which returns a bool. If
function is None, all rows with empty column values are
removed.
Returns:
A new TextTable()
Raises:
TableError: When an invalid row entry is Append()'d | [
"Construct",
"Textable",
"from",
"the",
"rows",
"of",
"which",
"the",
"function",
"returns",
"true",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/texttable.py#L378-L402 | train | 224,741 |
google/textfsm | textfsm/texttable.py | TextTable._GetTable | def _GetTable(self):
"""Returns table, with column headers and separators.
Returns:
The whole table including headers as a string. Each row is
joined by a newline and each entry by self.separator.
"""
result = []
# Avoid the global lookup cost on each iteration.
lstr = str
for row in self._table:
result.append(
'%s\n' %
self.separator.join(lstr(v) for v in row))
return ''.join(result) | python | def _GetTable(self):
"""Returns table, with column headers and separators.
Returns:
The whole table including headers as a string. Each row is
joined by a newline and each entry by self.separator.
"""
result = []
# Avoid the global lookup cost on each iteration.
lstr = str
for row in self._table:
result.append(
'%s\n' %
self.separator.join(lstr(v) for v in row))
return ''.join(result) | [
"def",
"_GetTable",
"(",
"self",
")",
":",
"result",
"=",
"[",
"]",
"# Avoid the global lookup cost on each iteration.",
"lstr",
"=",
"str",
"for",
"row",
"in",
"self",
".",
"_table",
":",
"result",
".",
"append",
"(",
"'%s\\n'",
"%",
"self",
".",
"separator... | Returns table, with column headers and separators.
Returns:
The whole table including headers as a string. Each row is
joined by a newline and each entry by self.separator. | [
"Returns",
"table",
"with",
"column",
"headers",
"and",
"separators",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/texttable.py#L595-L610 | train | 224,742 |
def _SetTable(self, table):
    """Set this table's content from another TextTable (deep copied)."""
    if not isinstance(table, TextTable):
        raise TypeError('Not an instance of TextTable.')
    self.Reset()
    # pylint: disable=W0212
    self._table = copy.deepcopy(table._table)
    # Re-point each copied row's parent table back at ourselves.
    for row in self:
        row.table = self
"def",
"_SetTable",
"(",
"self",
",",
"table",
")",
":",
"if",
"not",
"isinstance",
"(",
"table",
",",
"TextTable",
")",
":",
"raise",
"TypeError",
"(",
"'Not an instance of TextTable.'",
")",
"self",
".",
"Reset",
"(",
")",
"self",
".",
"_table",
"=",
"... | Sets table, with column headers and separators. | [
"Sets",
"table",
"with",
"column",
"headers",
"and",
"separators",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/texttable.py#L612-L620 | train | 224,743 |
def _TextJustify(self, text, col_size):
    """Format text within a column, padded with white space.

    A single space is prefixed, and spaces are appended so that the length
    of each resultant string equals col_size. If the text exceeds the
    available column width it is split into words and returned as a list of
    strings, each containing one or more words padded to the column size.

    Args:
      text: String of text to format.
      col_size: Integer size of the column to pad the text to.

    Returns:
      List of strings, each col_size in length.

    Raises:
      TableError: If col_size is too small to fit the words in the text.
    """
    # Multi-paragraph text: justify each paragraph independently.
    if '\n' in text:
        justified = []
        for paragraph in text.split('\n'):
            justified.extend(self._TextJustify(paragraph, col_size))
        return justified

    wrapper = textwrap.TextWrapper(
        width=col_size - 2, break_long_words=False, expand_tabs=False)
    try:
        wrapped_lines = wrapper.wrap(text)
    except ValueError:
        raise TableError('Field too small (minimum width: 3)')

    if not wrapped_lines:
        return [' ' * col_size]

    padded = []
    for line in wrapped_lines:
        visible_len = len(terminal.StripAnsiText(line))
        # Invisible ANSI escape characters widen the raw string without
        # consuming column space; compensate when padding.
        ansi_extra = len(line) - visible_len
        # +2 accounts for the white space on either side.
        if visible_len + 2 > col_size:
            raise TableError('String contains words that do not fit in column.')
        padded.append(' %-*s' % (col_size - 1 + ansi_extra, line))
    return padded
"def",
"_TextJustify",
"(",
"self",
",",
"text",
",",
"col_size",
")",
":",
"result",
"=",
"[",
"]",
"if",
"'\\n'",
"in",
"text",
":",
"for",
"paragraph",
"in",
"text",
".",
"split",
"(",
"'\\n'",
")",
":",
"result",
".",
"extend",
"(",
"self",
"."... | Formats text within column with white space padding.
A single space is prefixed, and a number of spaces are added as a
suffix such that the length of the resultant string equals the col_size.
If the length of the text exceeds the column width available then it
is split into words and returned as a list of string, each string
contains one or more words padded to the column size.
Args:
text: String of text to format.
col_size: integer size of column to pad out the text to.
Returns:
List of strings col_size in length.
Raises:
TableError: If col_size is too small to fit the words in the text. | [
"Formats",
"text",
"within",
"column",
"with",
"white",
"space",
"padding",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/texttable.py#L639-L684 | train | 224,744 |
def index(self, name=None):  # pylint: disable=C6409
    """Return the index number of the supplied column name.

    Args:
      name: String column name to look up.

    Returns:
      Index of the specified header entry.

    Raises:
      TableError: If the name is not found in the header.
    """
    try:
        position = self.header.index(name)
    except ValueError:
        raise TableError('Unknown index name %s.' % name)
    return position
"def",
"index",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"# pylint: disable=C6409",
"try",
":",
"return",
"self",
".",
"header",
".",
"index",
"(",
"name",
")",
"except",
"ValueError",
":",
"raise",
"TableError",
"(",
"'Unknown index name %s.'",
"%",... | Returns index number of supplied column name.
Args:
name: string of column name.
Raises:
TableError: If name not found.
Returns:
Index of the specified header entry. | [
"Returns",
"index",
"number",
"of",
"supplied",
"column",
"name",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/texttable.py#L1074-L1089 | train | 224,745 |
def _ParseCmdItem(self, cmd_input, template_file=None):
    """Create a TextTable from command output parsed with a template.

    Args:
      cmd_input: String, device response to parse.
      template_file: File object, the TextFSM template to parse with.

    Returns:
      TextTable containing the parsed command output.

    Raises:
      CliTableError: A template was not found for the given command.
    """
    # Build the FSM machine from the template.
    fsm = textfsm.TextFSM(template_file)
    if not self._keys:
        self._keys = set(fsm.GetValuesByAttrib('Key'))

    # Run the raw data through the FSM and collect each record into a table.
    table = texttable.TextTable()
    table.header = fsm.header
    for record in fsm.ParseText(cmd_input):
        table.Append(record)
    return table
"def",
"_ParseCmdItem",
"(",
"self",
",",
"cmd_input",
",",
"template_file",
"=",
"None",
")",
":",
"# Build FSM machine from the template.",
"fsm",
"=",
"textfsm",
".",
"TextFSM",
"(",
"template_file",
")",
"if",
"not",
"self",
".",
"_keys",
":",
"self",
".",... | Creates Texttable with output of command.
Args:
cmd_input: String, Device response.
template_file: File object, template to parse with.
Returns:
TextTable containing command output.
Raises:
CliTableError: A template was not found for the given command. | [
"Creates",
"Texttable",
"with",
"output",
"of",
"command",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/clitable.py#L288-L313 | train | 224,746 |
google/textfsm | textfsm/clitable.py | CliTable._Completion | def _Completion(self, match):
# pylint: disable=C6114
r"""Replaces double square brackets with variable length completion.
Completion cannot be mixed with regexp matching or '\' characters
i.e. '[[(\n)]] would become (\(n)?)?.'
Args:
match: A regex Match() object.
Returns:
String of the format '(a(b(c(d)?)?)?)?'.
"""
# Strip the outer '[[' & ']]' and replace with ()? regexp pattern.
word = str(match.group())[2:-2]
return '(' + ('(').join(word) + ')?' * len(word) | python | def _Completion(self, match):
# pylint: disable=C6114
r"""Replaces double square brackets with variable length completion.
Completion cannot be mixed with regexp matching or '\' characters
i.e. '[[(\n)]] would become (\(n)?)?.'
Args:
match: A regex Match() object.
Returns:
String of the format '(a(b(c(d)?)?)?)?'.
"""
# Strip the outer '[[' & ']]' and replace with ()? regexp pattern.
word = str(match.group())[2:-2]
return '(' + ('(').join(word) + ')?' * len(word) | [
"def",
"_Completion",
"(",
"self",
",",
"match",
")",
":",
"# pylint: disable=C6114",
"# Strip the outer '[[' & ']]' and replace with ()? regexp pattern.",
"word",
"=",
"str",
"(",
"match",
".",
"group",
"(",
")",
")",
"[",
"2",
":",
"-",
"2",
"]",
"return",
"'(... | r"""Replaces double square brackets with variable length completion.
Completion cannot be mixed with regexp matching or '\' characters
i.e. '[[(\n)]] would become (\(n)?)?.'
Args:
match: A regex Match() object.
Returns:
String of the format '(a(b(c(d)?)?)?)?'. | [
"r",
"Replaces",
"double",
"square",
"brackets",
"with",
"variable",
"length",
"completion",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/clitable.py#L329-L344 | train | 224,747 |
google/textfsm | textfsm/parser.py | main | def main(argv=None):
"""Validate text parsed with FSM or validate an FSM via command line."""
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(argv[1:], 'h', ['help'])
except getopt.error as msg:
raise Usage(msg)
for opt, _ in opts:
if opt in ('-h', '--help'):
print(__doc__)
print(help_msg)
return 0
if not args or len(args) > 4:
raise Usage('Invalid arguments.')
# If we have an argument, parse content of file and display as a template.
# Template displayed will match input template, minus any comment lines.
with open(args[0], 'r') as template:
fsm = TextFSM(template)
print('FSM Template:\n%s\n' % fsm)
if len(args) > 1:
# Second argument is file with example cli input.
# Prints parsed tabular result.
with open(args[1], 'r') as f:
cli_input = f.read()
table = fsm.ParseText(cli_input)
print('FSM Table:')
result = str(fsm.header) + '\n'
for line in table:
result += str(line) + '\n'
print(result, end='')
if len(args) > 2:
# Compare tabular result with data in third file argument.
# Exit value indicates if processed data matched expected result.
with open(args[2], 'r') as f:
ref_table = f.read()
if ref_table != result:
print('Data mis-match!')
return 1
else:
print('Data match!') | python | def main(argv=None):
"""Validate text parsed with FSM or validate an FSM via command line."""
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(argv[1:], 'h', ['help'])
except getopt.error as msg:
raise Usage(msg)
for opt, _ in opts:
if opt in ('-h', '--help'):
print(__doc__)
print(help_msg)
return 0
if not args or len(args) > 4:
raise Usage('Invalid arguments.')
# If we have an argument, parse content of file and display as a template.
# Template displayed will match input template, minus any comment lines.
with open(args[0], 'r') as template:
fsm = TextFSM(template)
print('FSM Template:\n%s\n' % fsm)
if len(args) > 1:
# Second argument is file with example cli input.
# Prints parsed tabular result.
with open(args[1], 'r') as f:
cli_input = f.read()
table = fsm.ParseText(cli_input)
print('FSM Table:')
result = str(fsm.header) + '\n'
for line in table:
result += str(line) + '\n'
print(result, end='')
if len(args) > 2:
# Compare tabular result with data in third file argument.
# Exit value indicates if processed data matched expected result.
with open(args[2], 'r') as f:
ref_table = f.read()
if ref_table != result:
print('Data mis-match!')
return 1
else:
print('Data match!') | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"if",
"argv",
"is",
"None",
":",
"argv",
"=",
"sys",
".",
"argv",
"try",
":",
"opts",
",",
"args",
"=",
"getopt",
".",
"getopt",
"(",
"argv",
"[",
"1",
":",
"]",
",",
"'h'",
",",
"[",
"'help'... | Validate text parsed with FSM or validate an FSM via command line. | [
"Validate",
"text",
"parsed",
"with",
"FSM",
"or",
"validate",
"an",
"FSM",
"via",
"command",
"line",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L1044-L1093 | train | 224,748 |
google/textfsm | textfsm/parser.py | TextFSMOptions.ValidOptions | def ValidOptions(cls):
"""Returns a list of valid option names."""
valid_options = []
for obj_name in dir(cls):
obj = getattr(cls, obj_name)
if inspect.isclass(obj) and issubclass(obj, cls.OptionBase):
valid_options.append(obj_name)
return valid_options | python | def ValidOptions(cls):
"""Returns a list of valid option names."""
valid_options = []
for obj_name in dir(cls):
obj = getattr(cls, obj_name)
if inspect.isclass(obj) and issubclass(obj, cls.OptionBase):
valid_options.append(obj_name)
return valid_options | [
"def",
"ValidOptions",
"(",
"cls",
")",
":",
"valid_options",
"=",
"[",
"]",
"for",
"obj_name",
"in",
"dir",
"(",
"cls",
")",
":",
"obj",
"=",
"getattr",
"(",
"cls",
",",
"obj_name",
")",
"if",
"inspect",
".",
"isclass",
"(",
"obj",
")",
"and",
"is... | Returns a list of valid option names. | [
"Returns",
"a",
"list",
"of",
"valid",
"option",
"names",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L114-L121 | train | 224,749 |
google/textfsm | textfsm/parser.py | TextFSMValue.Header | def Header(self):
"""Fetch the header name of this Value."""
# Call OnGetValue on options.
_ = [option.OnGetValue() for option in self.options]
return self.name | python | def Header(self):
"""Fetch the header name of this Value."""
# Call OnGetValue on options.
_ = [option.OnGetValue() for option in self.options]
return self.name | [
"def",
"Header",
"(",
"self",
")",
":",
"# Call OnGetValue on options.",
"_",
"=",
"[",
"option",
".",
"OnGetValue",
"(",
")",
"for",
"option",
"in",
"self",
".",
"options",
"]",
"return",
"self",
".",
"name"
] | Fetch the header name of this Value. | [
"Fetch",
"the",
"header",
"name",
"of",
"this",
"Value",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L262-L266 | train | 224,750 |
google/textfsm | textfsm/parser.py | TextFSMValue._AddOption | def _AddOption(self, name):
"""Add an option to this Value.
Args:
name: (str), the name of the Option to add.
Raises:
TextFSMTemplateError: If option is already present or
the option does not exist.
"""
# Check for duplicate option declaration
if name in [option.name for option in self.options]:
raise TextFSMTemplateError('Duplicate option "%s"' % name)
# Create the option object
try:
option = self._options_cls.GetOption(name)(self)
except AttributeError:
raise TextFSMTemplateError('Unknown option "%s"' % name)
self.options.append(option) | python | def _AddOption(self, name):
"""Add an option to this Value.
Args:
name: (str), the name of the Option to add.
Raises:
TextFSMTemplateError: If option is already present or
the option does not exist.
"""
# Check for duplicate option declaration
if name in [option.name for option in self.options]:
raise TextFSMTemplateError('Duplicate option "%s"' % name)
# Create the option object
try:
option = self._options_cls.GetOption(name)(self)
except AttributeError:
raise TextFSMTemplateError('Unknown option "%s"' % name)
self.options.append(option) | [
"def",
"_AddOption",
"(",
"self",
",",
"name",
")",
":",
"# Check for duplicate option declaration",
"if",
"name",
"in",
"[",
"option",
".",
"name",
"for",
"option",
"in",
"self",
".",
"options",
"]",
":",
"raise",
"TextFSMTemplateError",
"(",
"'Duplicate option... | Add an option to this Value.
Args:
name: (str), the name of the Option to add.
Raises:
TextFSMTemplateError: If option is already present or
the option does not exist. | [
"Add",
"an",
"option",
"to",
"this",
"Value",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L321-L342 | train | 224,751 |
google/textfsm | textfsm/parser.py | TextFSM.Reset | def Reset(self):
"""Preserves FSM but resets starting state and current record."""
# Current state is Start state.
self._cur_state = self.states['Start']
self._cur_state_name = 'Start'
# Clear table of results and current record.
self._result = []
self._ClearAllRecord() | python | def Reset(self):
"""Preserves FSM but resets starting state and current record."""
# Current state is Start state.
self._cur_state = self.states['Start']
self._cur_state_name = 'Start'
# Clear table of results and current record.
self._result = []
self._ClearAllRecord() | [
"def",
"Reset",
"(",
"self",
")",
":",
"# Current state is Start state.",
"self",
".",
"_cur_state",
"=",
"self",
".",
"states",
"[",
"'Start'",
"]",
"self",
".",
"_cur_state_name",
"=",
"'Start'",
"# Clear table of results and current record.",
"self",
".",
"_resul... | Preserves FSM but resets starting state and current record. | [
"Preserves",
"FSM",
"but",
"resets",
"starting",
"state",
"and",
"current",
"record",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L596-L605 | train | 224,752 |
google/textfsm | textfsm/parser.py | TextFSM._GetHeader | def _GetHeader(self):
"""Returns header."""
header = []
for value in self.values:
try:
header.append(value.Header())
except SkipValue:
continue
return header | python | def _GetHeader(self):
"""Returns header."""
header = []
for value in self.values:
try:
header.append(value.Header())
except SkipValue:
continue
return header | [
"def",
"_GetHeader",
"(",
"self",
")",
":",
"header",
"=",
"[",
"]",
"for",
"value",
"in",
"self",
".",
"values",
":",
"try",
":",
"header",
".",
"append",
"(",
"value",
".",
"Header",
"(",
")",
")",
"except",
"SkipValue",
":",
"continue",
"return",
... | Returns header. | [
"Returns",
"header",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L612-L620 | train | 224,753 |
google/textfsm | textfsm/parser.py | TextFSM._GetValue | def _GetValue(self, name):
"""Returns the TextFSMValue object natching the requested name."""
for value in self.values:
if value.name == name:
return value | python | def _GetValue(self, name):
"""Returns the TextFSMValue object natching the requested name."""
for value in self.values:
if value.name == name:
return value | [
"def",
"_GetValue",
"(",
"self",
",",
"name",
")",
":",
"for",
"value",
"in",
"self",
".",
"values",
":",
"if",
"value",
".",
"name",
"==",
"name",
":",
"return",
"value"
] | Returns the TextFSMValue object natching the requested name. | [
"Returns",
"the",
"TextFSMValue",
"object",
"natching",
"the",
"requested",
"name",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L622-L626 | train | 224,754 |
google/textfsm | textfsm/parser.py | TextFSM._AppendRecord | def _AppendRecord(self):
"""Adds current record to result if well formed."""
# If no Values then don't output.
if not self.values:
return
cur_record = []
for value in self.values:
try:
value.OnSaveRecord()
except SkipRecord:
self._ClearRecord()
return
except SkipValue:
continue
# Build current record into a list.
cur_record.append(value.value)
# If no Values in template or whole record is empty then don't output.
if len(cur_record) == (cur_record.count(None) + cur_record.count([])):
return
# Replace any 'None' entries with null string ''.
while None in cur_record:
cur_record[cur_record.index(None)] = ''
self._result.append(cur_record)
self._ClearRecord() | python | def _AppendRecord(self):
"""Adds current record to result if well formed."""
# If no Values then don't output.
if not self.values:
return
cur_record = []
for value in self.values:
try:
value.OnSaveRecord()
except SkipRecord:
self._ClearRecord()
return
except SkipValue:
continue
# Build current record into a list.
cur_record.append(value.value)
# If no Values in template or whole record is empty then don't output.
if len(cur_record) == (cur_record.count(None) + cur_record.count([])):
return
# Replace any 'None' entries with null string ''.
while None in cur_record:
cur_record[cur_record.index(None)] = ''
self._result.append(cur_record)
self._ClearRecord() | [
"def",
"_AppendRecord",
"(",
"self",
")",
":",
"# If no Values then don't output.",
"if",
"not",
"self",
".",
"values",
":",
"return",
"cur_record",
"=",
"[",
"]",
"for",
"value",
"in",
"self",
".",
"values",
":",
"try",
":",
"value",
".",
"OnSaveRecord",
... | Adds current record to result if well formed. | [
"Adds",
"current",
"record",
"to",
"result",
"if",
"well",
"formed",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L628-L657 | train | 224,755 |
google/textfsm | textfsm/parser.py | TextFSM._Parse | def _Parse(self, template):
"""Parses template file for FSM structure.
Args:
template: Valid template file.
Raises:
TextFSMTemplateError: If template file syntax is invalid.
"""
if not template:
raise TextFSMTemplateError('Null template.')
# Parse header with Variables.
self._ParseFSMVariables(template)
# Parse States.
while self._ParseFSMState(template):
pass
# Validate destination states.
self._ValidateFSM() | python | def _Parse(self, template):
"""Parses template file for FSM structure.
Args:
template: Valid template file.
Raises:
TextFSMTemplateError: If template file syntax is invalid.
"""
if not template:
raise TextFSMTemplateError('Null template.')
# Parse header with Variables.
self._ParseFSMVariables(template)
# Parse States.
while self._ParseFSMState(template):
pass
# Validate destination states.
self._ValidateFSM() | [
"def",
"_Parse",
"(",
"self",
",",
"template",
")",
":",
"if",
"not",
"template",
":",
"raise",
"TextFSMTemplateError",
"(",
"'Null template.'",
")",
"# Parse header with Variables.",
"self",
".",
"_ParseFSMVariables",
"(",
"template",
")",
"# Parse States.",
"while... | Parses template file for FSM structure.
Args:
template: Valid template file.
Raises:
TextFSMTemplateError: If template file syntax is invalid. | [
"Parses",
"template",
"file",
"for",
"FSM",
"structure",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L659-L680 | train | 224,756 |
google/textfsm | textfsm/parser.py | TextFSM._ParseFSMVariables | def _ParseFSMVariables(self, template):
"""Extracts Variables from start of template file.
Values are expected as a contiguous block at the head of the file.
These will be line separated from the State definitions that follow.
Args:
template: Valid template file, with Value definitions at the top.
Raises:
TextFSMTemplateError: If syntax or semantic errors are found.
"""
self.values = []
for line in template:
self._line_num += 1
line = line.rstrip()
# Blank line signifies end of Value definitions.
if not line:
return
# Skip commented lines.
if self.comment_regex.match(line):
continue
if line.startswith('Value '):
try:
value = TextFSMValue(
fsm=self, max_name_len=self.MAX_NAME_LEN,
options_class=self._options_cls)
value.Parse(line)
except TextFSMTemplateError as error:
raise TextFSMTemplateError('%s Line %s.' % (error, self._line_num))
if value.name in self.header:
raise TextFSMTemplateError(
"Duplicate declarations for Value '%s'. Line: %s."
% (value.name, self._line_num))
try:
self._ValidateOptions(value)
except TextFSMTemplateError as error:
raise TextFSMTemplateError('%s Line %s.' % (error, self._line_num))
self.values.append(value)
self.value_map[value.name] = value.template
# The line has text but without the 'Value ' prefix.
elif not self.values:
raise TextFSMTemplateError('No Value definitions found.')
else:
raise TextFSMTemplateError(
'Expected blank line after last Value entry. Line: %s.'
% (self._line_num)) | python | def _ParseFSMVariables(self, template):
"""Extracts Variables from start of template file.
Values are expected as a contiguous block at the head of the file.
These will be line separated from the State definitions that follow.
Args:
template: Valid template file, with Value definitions at the top.
Raises:
TextFSMTemplateError: If syntax or semantic errors are found.
"""
self.values = []
for line in template:
self._line_num += 1
line = line.rstrip()
# Blank line signifies end of Value definitions.
if not line:
return
# Skip commented lines.
if self.comment_regex.match(line):
continue
if line.startswith('Value '):
try:
value = TextFSMValue(
fsm=self, max_name_len=self.MAX_NAME_LEN,
options_class=self._options_cls)
value.Parse(line)
except TextFSMTemplateError as error:
raise TextFSMTemplateError('%s Line %s.' % (error, self._line_num))
if value.name in self.header:
raise TextFSMTemplateError(
"Duplicate declarations for Value '%s'. Line: %s."
% (value.name, self._line_num))
try:
self._ValidateOptions(value)
except TextFSMTemplateError as error:
raise TextFSMTemplateError('%s Line %s.' % (error, self._line_num))
self.values.append(value)
self.value_map[value.name] = value.template
# The line has text but without the 'Value ' prefix.
elif not self.values:
raise TextFSMTemplateError('No Value definitions found.')
else:
raise TextFSMTemplateError(
'Expected blank line after last Value entry. Line: %s.'
% (self._line_num)) | [
"def",
"_ParseFSMVariables",
"(",
"self",
",",
"template",
")",
":",
"self",
".",
"values",
"=",
"[",
"]",
"for",
"line",
"in",
"template",
":",
"self",
".",
"_line_num",
"+=",
"1",
"line",
"=",
"line",
".",
"rstrip",
"(",
")",
"# Blank line signifies en... | Extracts Variables from start of template file.
Values are expected as a contiguous block at the head of the file.
These will be line separated from the State definitions that follow.
Args:
template: Valid template file, with Value definitions at the top.
Raises:
TextFSMTemplateError: If syntax or semantic errors are found. | [
"Extracts",
"Variables",
"from",
"start",
"of",
"template",
"file",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L682-L736 | train | 224,757 |
google/textfsm | textfsm/parser.py | TextFSM._ParseFSMState | def _ParseFSMState(self, template):
"""Extracts State and associated Rules from body of template file.
After the Value definitions the remainder of the template is
state definitions. The routine is expected to be called iteratively
until no more states remain - indicated by returning None.
The routine checks that the state names are a well formed string, do
not clash with reserved names and are unique.
Args:
template: Valid template file after Value definitions
have already been read.
Returns:
Name of the state parsed from file. None otherwise.
Raises:
TextFSMTemplateError: If any state definitions are invalid.
"""
if not template:
return
state_name = ''
# Strip off extra white space lines (including comments).
for line in template:
self._line_num += 1
line = line.rstrip()
# First line is state definition
if line and not self.comment_regex.match(line):
# Ensure statename has valid syntax and is not a reserved word.
if (not self.state_name_re.match(line) or
len(line) > self.MAX_NAME_LEN or
line in TextFSMRule.LINE_OP or
line in TextFSMRule.RECORD_OP):
raise TextFSMTemplateError("Invalid state name: '%s'. Line: %s"
% (line, self._line_num))
state_name = line
if state_name in self.states:
raise TextFSMTemplateError("Duplicate state name: '%s'. Line: %s"
% (line, self._line_num))
self.states[state_name] = []
self.state_list.append(state_name)
break
# Parse each rule in the state.
for line in template:
self._line_num += 1
line = line.rstrip()
# Finish rules processing on blank line.
if not line:
break
if self.comment_regex.match(line):
continue
# A rule within a state, starts with whitespace
if not (line.startswith(' ^') or line.startswith('\t^')):
raise TextFSMTemplateError(
"Missing white space or carat ('^') before rule. Line: %s" %
self._line_num)
self.states[state_name].append(
TextFSMRule(line, self._line_num, self.value_map))
return state_name | python | def _ParseFSMState(self, template):
"""Extracts State and associated Rules from body of template file.
After the Value definitions the remainder of the template is
state definitions. The routine is expected to be called iteratively
until no more states remain - indicated by returning None.
The routine checks that the state names are a well formed string, do
not clash with reserved names and are unique.
Args:
template: Valid template file after Value definitions
have already been read.
Returns:
Name of the state parsed from file. None otherwise.
Raises:
TextFSMTemplateError: If any state definitions are invalid.
"""
if not template:
return
state_name = ''
# Strip off extra white space lines (including comments).
for line in template:
self._line_num += 1
line = line.rstrip()
# First line is state definition
if line and not self.comment_regex.match(line):
# Ensure statename has valid syntax and is not a reserved word.
if (not self.state_name_re.match(line) or
len(line) > self.MAX_NAME_LEN or
line in TextFSMRule.LINE_OP or
line in TextFSMRule.RECORD_OP):
raise TextFSMTemplateError("Invalid state name: '%s'. Line: %s"
% (line, self._line_num))
state_name = line
if state_name in self.states:
raise TextFSMTemplateError("Duplicate state name: '%s'. Line: %s"
% (line, self._line_num))
self.states[state_name] = []
self.state_list.append(state_name)
break
# Parse each rule in the state.
for line in template:
self._line_num += 1
line = line.rstrip()
# Finish rules processing on blank line.
if not line:
break
if self.comment_regex.match(line):
continue
# A rule within a state, starts with whitespace
if not (line.startswith(' ^') or line.startswith('\t^')):
raise TextFSMTemplateError(
"Missing white space or carat ('^') before rule. Line: %s" %
self._line_num)
self.states[state_name].append(
TextFSMRule(line, self._line_num, self.value_map))
return state_name | [
"def",
"_ParseFSMState",
"(",
"self",
",",
"template",
")",
":",
"if",
"not",
"template",
":",
"return",
"state_name",
"=",
"''",
"# Strip off extra white space lines (including comments).",
"for",
"line",
"in",
"template",
":",
"self",
".",
"_line_num",
"+=",
"1"... | Extracts State and associated Rules from body of template file.
After the Value definitions the remainder of the template is
state definitions. The routine is expected to be called iteratively
until no more states remain - indicated by returning None.
The routine checks that the state names are a well formed string, do
not clash with reserved names and are unique.
Args:
template: Valid template file after Value definitions
have already been read.
Returns:
Name of the state parsed from file. None otherwise.
Raises:
TextFSMTemplateError: If any state definitions are invalid. | [
"Extracts",
"State",
"and",
"associated",
"Rules",
"from",
"body",
"of",
"template",
"file",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L743-L812 | train | 224,758 |
google/textfsm | textfsm/parser.py | TextFSM._ValidateFSM | def _ValidateFSM(self):
"""Checks state names and destinations for validity.
Each destination state must exist, be a valid name and
not be a reserved name.
There must be a 'Start' state and if 'EOF' or 'End' states are specified,
they must be empty.
Returns:
True if FSM is valid.
Raises:
TextFSMTemplateError: If any state definitions are invalid.
"""
# Must have 'Start' state.
if 'Start' not in self.states:
raise TextFSMTemplateError("Missing state 'Start'.")
# 'End/EOF' state (if specified) must be empty.
if self.states.get('End'):
raise TextFSMTemplateError("Non-Empty 'End' state.")
if self.states.get('EOF'):
raise TextFSMTemplateError("Non-Empty 'EOF' state.")
# Remove 'End' state.
if 'End' in self.states:
del self.states['End']
self.state_list.remove('End')
# Ensure jump states are all valid.
for state in self.states:
for rule in self.states[state]:
if rule.line_op == 'Error':
continue
if not rule.new_state or rule.new_state in ('End', 'EOF'):
continue
if rule.new_state not in self.states:
raise TextFSMTemplateError(
"State '%s' not found, referenced in state '%s'" %
(rule.new_state, state))
return True | python | def _ValidateFSM(self):
"""Checks state names and destinations for validity.
Each destination state must exist, be a valid name and
not be a reserved name.
There must be a 'Start' state and if 'EOF' or 'End' states are specified,
they must be empty.
Returns:
True if FSM is valid.
Raises:
TextFSMTemplateError: If any state definitions are invalid.
"""
# Must have 'Start' state.
if 'Start' not in self.states:
raise TextFSMTemplateError("Missing state 'Start'.")
# 'End/EOF' state (if specified) must be empty.
if self.states.get('End'):
raise TextFSMTemplateError("Non-Empty 'End' state.")
if self.states.get('EOF'):
raise TextFSMTemplateError("Non-Empty 'EOF' state.")
# Remove 'End' state.
if 'End' in self.states:
del self.states['End']
self.state_list.remove('End')
# Ensure jump states are all valid.
for state in self.states:
for rule in self.states[state]:
if rule.line_op == 'Error':
continue
if not rule.new_state or rule.new_state in ('End', 'EOF'):
continue
if rule.new_state not in self.states:
raise TextFSMTemplateError(
"State '%s' not found, referenced in state '%s'" %
(rule.new_state, state))
return True | [
"def",
"_ValidateFSM",
"(",
"self",
")",
":",
"# Must have 'Start' state.",
"if",
"'Start'",
"not",
"in",
"self",
".",
"states",
":",
"raise",
"TextFSMTemplateError",
"(",
"\"Missing state 'Start'.\"",
")",
"# 'End/EOF' state (if specified) must be empty.",
"if",
"self",
... | Checks state names and destinations for validity.
Each destination state must exist, be a valid name and
not be a reserved name.
There must be a 'Start' state and if 'EOF' or 'End' states are specified,
they must be empty.
Returns:
True if FSM is valid.
Raises:
TextFSMTemplateError: If any state definitions are invalid. | [
"Checks",
"state",
"names",
"and",
"destinations",
"for",
"validity",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L814-L859 | train | 224,759 |
google/textfsm | textfsm/parser.py | TextFSM.ParseText | def ParseText(self, text, eof=True):
"""Passes CLI output through FSM and returns list of tuples.
First tuple is the header, every subsequent tuple is a row.
Args:
text: (str), Text to parse with embedded newlines.
eof: (boolean), Set to False if we are parsing only part of the file.
Suppresses triggering EOF state.
Raises:
TextFSMError: An error occurred within the FSM.
Returns:
List of Lists.
"""
lines = []
if text:
lines = text.splitlines()
for line in lines:
self._CheckLine(line)
if self._cur_state_name in ('End', 'EOF'):
break
if self._cur_state_name != 'End' and 'EOF' not in self.states and eof:
# Implicit EOF performs Next.Record operation.
# Suppressed if Null EOF state is instantiated.
self._AppendRecord()
return self._result | python | def ParseText(self, text, eof=True):
"""Passes CLI output through FSM and returns list of tuples.
First tuple is the header, every subsequent tuple is a row.
Args:
text: (str), Text to parse with embedded newlines.
eof: (boolean), Set to False if we are parsing only part of the file.
Suppresses triggering EOF state.
Raises:
TextFSMError: An error occurred within the FSM.
Returns:
List of Lists.
"""
lines = []
if text:
lines = text.splitlines()
for line in lines:
self._CheckLine(line)
if self._cur_state_name in ('End', 'EOF'):
break
if self._cur_state_name != 'End' and 'EOF' not in self.states and eof:
# Implicit EOF performs Next.Record operation.
# Suppressed if Null EOF state is instantiated.
self._AppendRecord()
return self._result | [
"def",
"ParseText",
"(",
"self",
",",
"text",
",",
"eof",
"=",
"True",
")",
":",
"lines",
"=",
"[",
"]",
"if",
"text",
":",
"lines",
"=",
"text",
".",
"splitlines",
"(",
")",
"for",
"line",
"in",
"lines",
":",
"self",
".",
"_CheckLine",
"(",
"lin... | Passes CLI output through FSM and returns list of tuples.
First tuple is the header, every subsequent tuple is a row.
Args:
text: (str), Text to parse with embedded newlines.
eof: (boolean), Set to False if we are parsing only part of the file.
Suppresses triggering EOF state.
Raises:
TextFSMError: An error occurred within the FSM.
Returns:
List of Lists. | [
"Passes",
"CLI",
"output",
"through",
"FSM",
"and",
"returns",
"list",
"of",
"tuples",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L861-L892 | train | 224,760 |
google/textfsm | textfsm/parser.py | TextFSM.ParseTextToDicts | def ParseTextToDicts(self, *args, **kwargs):
"""Calls ParseText and turns the result into list of dicts.
List items are dicts of rows, dict key is column header and value is column
value.
Args:
text: (str), Text to parse with embedded newlines.
eof: (boolean), Set to False if we are parsing only part of the file.
Suppresses triggering EOF state.
Raises:
TextFSMError: An error occurred within the FSM.
Returns:
List of dicts.
"""
result_lists = self.ParseText(*args, **kwargs)
result_dicts = []
for row in result_lists:
result_dicts.append(dict(zip(self.header, row)))
return result_dicts | python | def ParseTextToDicts(self, *args, **kwargs):
"""Calls ParseText and turns the result into list of dicts.
List items are dicts of rows, dict key is column header and value is column
value.
Args:
text: (str), Text to parse with embedded newlines.
eof: (boolean), Set to False if we are parsing only part of the file.
Suppresses triggering EOF state.
Raises:
TextFSMError: An error occurred within the FSM.
Returns:
List of dicts.
"""
result_lists = self.ParseText(*args, **kwargs)
result_dicts = []
for row in result_lists:
result_dicts.append(dict(zip(self.header, row)))
return result_dicts | [
"def",
"ParseTextToDicts",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result_lists",
"=",
"self",
".",
"ParseText",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"result_dicts",
"=",
"[",
"]",
"for",
"row",
"in",
"result_lis... | Calls ParseText and turns the result into list of dicts.
List items are dicts of rows, dict key is column header and value is column
value.
Args:
text: (str), Text to parse with embedded newlines.
eof: (boolean), Set to False if we are parsing only part of the file.
Suppresses triggering EOF state.
Raises:
TextFSMError: An error occurred within the FSM.
Returns:
List of dicts. | [
"Calls",
"ParseText",
"and",
"turns",
"the",
"result",
"into",
"list",
"of",
"dicts",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L894-L918 | train | 224,761 |
google/textfsm | textfsm/parser.py | TextFSM._AssignVar | def _AssignVar(self, matched, value):
"""Assigns variable into current record from a matched rule.
If a record entry is a list then append, otherwise values are replaced.
Args:
matched: (regexp.match) Named group for each matched value.
value: (str) The matched value.
"""
_value = self._GetValue(value)
if _value is not None:
_value.AssignVar(matched.group(value)) | python | def _AssignVar(self, matched, value):
"""Assigns variable into current record from a matched rule.
If a record entry is a list then append, otherwise values are replaced.
Args:
matched: (regexp.match) Named group for each matched value.
value: (str) The matched value.
"""
_value = self._GetValue(value)
if _value is not None:
_value.AssignVar(matched.group(value)) | [
"def",
"_AssignVar",
"(",
"self",
",",
"matched",
",",
"value",
")",
":",
"_value",
"=",
"self",
".",
"_GetValue",
"(",
"value",
")",
"if",
"_value",
"is",
"not",
"None",
":",
"_value",
".",
"AssignVar",
"(",
"matched",
".",
"group",
"(",
"value",
")... | Assigns variable into current record from a matched rule.
If a record entry is a list then append, otherwise values are replaced.
Args:
matched: (regexp.match) Named group for each matched value.
value: (str) The matched value. | [
"Assigns",
"variable",
"into",
"current",
"record",
"from",
"a",
"matched",
"rule",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L955-L966 | train | 224,762 |
google/textfsm | textfsm/parser.py | TextFSM._Operations | def _Operations(self, rule, line):
"""Operators on the data record.
Operators come in two parts and are a '.' separated pair:
Operators that effect the input line or the current state (line_op).
'Next' Get next input line and restart parsing (default).
'Continue' Keep current input line and continue resume parsing.
'Error' Unrecoverable input discard result and raise Error.
Operators that affect the record being built for output (record_op).
'NoRecord' Does nothing (default)
'Record' Adds the current record to the result.
'Clear' Clears non-Filldown data from the record.
'Clearall' Clears all data from the record.
Args:
rule: FSMRule object.
line: A string, the current input line.
Returns:
True if state machine should restart state with new line.
Raises:
TextFSMError: If Error state is encountered.
"""
# First process the Record operators.
if rule.record_op == 'Record':
self._AppendRecord()
elif rule.record_op == 'Clear':
# Clear record.
self._ClearRecord()
elif rule.record_op == 'Clearall':
# Clear all record entries.
self._ClearAllRecord()
# Lastly process line operators.
if rule.line_op == 'Error':
if rule.new_state:
raise TextFSMError('Error: %s. Rule Line: %s. Input Line: %s.'
% (rule.new_state, rule.line_num, line))
raise TextFSMError('State Error raised. Rule Line: %s. Input Line: %s'
% (rule.line_num, line))
elif rule.line_op == 'Continue':
# Continue with current line without returning to the start of the state.
return False
# Back to start of current state with a new line.
return True | python | def _Operations(self, rule, line):
"""Operators on the data record.
Operators come in two parts and are a '.' separated pair:
Operators that effect the input line or the current state (line_op).
'Next' Get next input line and restart parsing (default).
'Continue' Keep current input line and continue resume parsing.
'Error' Unrecoverable input discard result and raise Error.
Operators that affect the record being built for output (record_op).
'NoRecord' Does nothing (default)
'Record' Adds the current record to the result.
'Clear' Clears non-Filldown data from the record.
'Clearall' Clears all data from the record.
Args:
rule: FSMRule object.
line: A string, the current input line.
Returns:
True if state machine should restart state with new line.
Raises:
TextFSMError: If Error state is encountered.
"""
# First process the Record operators.
if rule.record_op == 'Record':
self._AppendRecord()
elif rule.record_op == 'Clear':
# Clear record.
self._ClearRecord()
elif rule.record_op == 'Clearall':
# Clear all record entries.
self._ClearAllRecord()
# Lastly process line operators.
if rule.line_op == 'Error':
if rule.new_state:
raise TextFSMError('Error: %s. Rule Line: %s. Input Line: %s.'
% (rule.new_state, rule.line_num, line))
raise TextFSMError('State Error raised. Rule Line: %s. Input Line: %s'
% (rule.line_num, line))
elif rule.line_op == 'Continue':
# Continue with current line without returning to the start of the state.
return False
# Back to start of current state with a new line.
return True | [
"def",
"_Operations",
"(",
"self",
",",
"rule",
",",
"line",
")",
":",
"# First process the Record operators.",
"if",
"rule",
".",
"record_op",
"==",
"'Record'",
":",
"self",
".",
"_AppendRecord",
"(",
")",
"elif",
"rule",
".",
"record_op",
"==",
"'Clear'",
... | Operators on the data record.
Operators come in two parts and are a '.' separated pair:
Operators that effect the input line or the current state (line_op).
'Next' Get next input line and restart parsing (default).
'Continue' Keep current input line and continue resume parsing.
'Error' Unrecoverable input discard result and raise Error.
Operators that affect the record being built for output (record_op).
'NoRecord' Does nothing (default)
'Record' Adds the current record to the result.
'Clear' Clears non-Filldown data from the record.
'Clearall' Clears all data from the record.
Args:
rule: FSMRule object.
line: A string, the current input line.
Returns:
True if state machine should restart state with new line.
Raises:
TextFSMError: If Error state is encountered. | [
"Operators",
"on",
"the",
"data",
"record",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L968-L1020 | train | 224,763 |
google/textfsm | textfsm/parser.py | TextFSM.GetValuesByAttrib | def GetValuesByAttrib(self, attribute):
"""Returns the list of values that have a particular attribute."""
if attribute not in self._options_cls.ValidOptions():
raise ValueError("'%s': Not a valid attribute." % attribute)
result = []
for value in self.values:
if attribute in value.OptionNames():
result.append(value.name)
return result | python | def GetValuesByAttrib(self, attribute):
"""Returns the list of values that have a particular attribute."""
if attribute not in self._options_cls.ValidOptions():
raise ValueError("'%s': Not a valid attribute." % attribute)
result = []
for value in self.values:
if attribute in value.OptionNames():
result.append(value.name)
return result | [
"def",
"GetValuesByAttrib",
"(",
"self",
",",
"attribute",
")",
":",
"if",
"attribute",
"not",
"in",
"self",
".",
"_options_cls",
".",
"ValidOptions",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"'%s': Not a valid attribute.\"",
"%",
"attribute",
")",
"result",... | Returns the list of values that have a particular attribute. | [
"Returns",
"the",
"list",
"of",
"values",
"that",
"have",
"a",
"particular",
"attribute",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/parser.py#L1030-L1041 | train | 224,764 |
google/textfsm | textfsm/terminal.py | _AnsiCmd | def _AnsiCmd(command_list):
"""Takes a list of SGR values and formats them as an ANSI escape sequence.
Args:
command_list: List of strings, each string represents an SGR value.
e.g. 'fg_blue', 'bg_yellow'
Returns:
The ANSI escape sequence.
Raises:
ValueError: if a member of command_list does not map to a valid SGR value.
"""
if not isinstance(command_list, list):
raise ValueError('Invalid list: %s' % command_list)
# Checks that entries are valid SGR names.
# No checking is done for sequences that are correct but 'nonsensical'.
for sgr in command_list:
if sgr.lower() not in SGR:
raise ValueError('Invalid or unsupported SGR name: %s' % sgr)
# Convert to numerical strings.
command_str = [str(SGR[x.lower()]) for x in command_list]
# Wrap values in Ansi escape sequence (CSI prefix & SGR suffix).
return '\033[%sm' % (';'.join(command_str)) | python | def _AnsiCmd(command_list):
"""Takes a list of SGR values and formats them as an ANSI escape sequence.
Args:
command_list: List of strings, each string represents an SGR value.
e.g. 'fg_blue', 'bg_yellow'
Returns:
The ANSI escape sequence.
Raises:
ValueError: if a member of command_list does not map to a valid SGR value.
"""
if not isinstance(command_list, list):
raise ValueError('Invalid list: %s' % command_list)
# Checks that entries are valid SGR names.
# No checking is done for sequences that are correct but 'nonsensical'.
for sgr in command_list:
if sgr.lower() not in SGR:
raise ValueError('Invalid or unsupported SGR name: %s' % sgr)
# Convert to numerical strings.
command_str = [str(SGR[x.lower()]) for x in command_list]
# Wrap values in Ansi escape sequence (CSI prefix & SGR suffix).
return '\033[%sm' % (';'.join(command_str)) | [
"def",
"_AnsiCmd",
"(",
"command_list",
")",
":",
"if",
"not",
"isinstance",
"(",
"command_list",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid list: %s'",
"%",
"command_list",
")",
"# Checks that entries are valid SGR names.",
"# No checking is done for ... | Takes a list of SGR values and formats them as an ANSI escape sequence.
Args:
command_list: List of strings, each string represents an SGR value.
e.g. 'fg_blue', 'bg_yellow'
Returns:
The ANSI escape sequence.
Raises:
ValueError: if a member of command_list does not map to a valid SGR value. | [
"Takes",
"a",
"list",
"of",
"SGR",
"values",
"and",
"formats",
"them",
"as",
"an",
"ANSI",
"escape",
"sequence",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/terminal.py#L115-L138 | train | 224,765 |
google/textfsm | textfsm/terminal.py | TerminalSize | def TerminalSize():
"""Returns terminal length and width as a tuple."""
try:
with open(os.ctermid(), 'r') as tty_instance:
length_width = struct.unpack(
'hh', fcntl.ioctl(tty_instance.fileno(), termios.TIOCGWINSZ, '1234'))
except (IOError, OSError):
try:
length_width = (int(os.environ['LINES']),
int(os.environ['COLUMNS']))
except (ValueError, KeyError):
length_width = (24, 80)
return length_width | python | def TerminalSize():
"""Returns terminal length and width as a tuple."""
try:
with open(os.ctermid(), 'r') as tty_instance:
length_width = struct.unpack(
'hh', fcntl.ioctl(tty_instance.fileno(), termios.TIOCGWINSZ, '1234'))
except (IOError, OSError):
try:
length_width = (int(os.environ['LINES']),
int(os.environ['COLUMNS']))
except (ValueError, KeyError):
length_width = (24, 80)
return length_width | [
"def",
"TerminalSize",
"(",
")",
":",
"try",
":",
"with",
"open",
"(",
"os",
".",
"ctermid",
"(",
")",
",",
"'r'",
")",
"as",
"tty_instance",
":",
"length_width",
"=",
"struct",
".",
"unpack",
"(",
"'hh'",
",",
"fcntl",
".",
"ioctl",
"(",
"tty_instan... | Returns terminal length and width as a tuple. | [
"Returns",
"terminal",
"length",
"and",
"width",
"as",
"a",
"tuple",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/terminal.py#L170-L182 | train | 224,766 |
google/textfsm | textfsm/terminal.py | main | def main(argv=None):
"""Routine to page text or determine window size via command line."""
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(argv[1:], 'dhs', ['nodelay', 'help', 'size'])
except getopt.error as msg:
raise Usage(msg)
# Print usage and return, regardless of presence of other args.
for opt, _ in opts:
if opt in ('-h', '--help'):
print(__doc__)
print(help_msg)
return 0
isdelay = False
for opt, _ in opts:
# Prints the size of the terminal and returns.
# Mutually exclusive to the paging of text and overrides that behaviour.
if opt in ('-s', '--size'):
print('Length: %d, Width: %d' % TerminalSize())
return 0
elif opt in ('-d', '--delay'):
isdelay = True
else:
raise Usage('Invalid arguments.')
# Page text supplied in either specified file or stdin.
if len(args) == 1:
with open(args[0]) as f:
fd = f.read()
else:
fd = sys.stdin.read()
Pager(fd, delay=isdelay).Page() | python | def main(argv=None):
"""Routine to page text or determine window size via command line."""
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(argv[1:], 'dhs', ['nodelay', 'help', 'size'])
except getopt.error as msg:
raise Usage(msg)
# Print usage and return, regardless of presence of other args.
for opt, _ in opts:
if opt in ('-h', '--help'):
print(__doc__)
print(help_msg)
return 0
isdelay = False
for opt, _ in opts:
# Prints the size of the terminal and returns.
# Mutually exclusive to the paging of text and overrides that behaviour.
if opt in ('-s', '--size'):
print('Length: %d, Width: %d' % TerminalSize())
return 0
elif opt in ('-d', '--delay'):
isdelay = True
else:
raise Usage('Invalid arguments.')
# Page text supplied in either specified file or stdin.
if len(args) == 1:
with open(args[0]) as f:
fd = f.read()
else:
fd = sys.stdin.read()
Pager(fd, delay=isdelay).Page() | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"if",
"argv",
"is",
"None",
":",
"argv",
"=",
"sys",
".",
"argv",
"try",
":",
"opts",
",",
"args",
"=",
"getopt",
".",
"getopt",
"(",
"argv",
"[",
"1",
":",
"]",
",",
"'dhs'",
",",
"[",
"'nod... | Routine to page text or determine window size via command line. | [
"Routine",
"to",
"page",
"text",
"or",
"determine",
"window",
"size",
"via",
"command",
"line",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/terminal.py#L447-L484 | train | 224,767 |
google/textfsm | textfsm/terminal.py | Pager.Reset | def Reset(self):
"""Reset the pager to the top of the text."""
self._displayed = 0
self._currentpagelines = 0
self._lastscroll = 1
self._lines_to_show = self._cli_lines | python | def Reset(self):
"""Reset the pager to the top of the text."""
self._displayed = 0
self._currentpagelines = 0
self._lastscroll = 1
self._lines_to_show = self._cli_lines | [
"def",
"Reset",
"(",
"self",
")",
":",
"self",
".",
"_displayed",
"=",
"0",
"self",
".",
"_currentpagelines",
"=",
"0",
"self",
".",
"_lastscroll",
"=",
"1",
"self",
".",
"_lines_to_show",
"=",
"self",
".",
"_cli_lines"
] | Reset the pager to the top of the text. | [
"Reset",
"the",
"pager",
"to",
"the",
"top",
"of",
"the",
"text",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/terminal.py#L302-L307 | train | 224,768 |
google/textfsm | textfsm/terminal.py | Pager.SetLines | def SetLines(self, lines):
"""Set number of screen lines.
Args:
lines: An int, number of lines. If None, use terminal dimensions.
Raises:
ValueError, TypeError: Not a valid integer representation.
"""
(self._cli_lines, self._cli_cols) = TerminalSize()
if lines:
self._cli_lines = int(lines) | python | def SetLines(self, lines):
"""Set number of screen lines.
Args:
lines: An int, number of lines. If None, use terminal dimensions.
Raises:
ValueError, TypeError: Not a valid integer representation.
"""
(self._cli_lines, self._cli_cols) = TerminalSize()
if lines:
self._cli_lines = int(lines) | [
"def",
"SetLines",
"(",
"self",
",",
"lines",
")",
":",
"(",
"self",
".",
"_cli_lines",
",",
"self",
".",
"_cli_cols",
")",
"=",
"TerminalSize",
"(",
")",
"if",
"lines",
":",
"self",
".",
"_cli_lines",
"=",
"int",
"(",
"lines",
")"
] | Set number of screen lines.
Args:
lines: An int, number of lines. If None, use terminal dimensions.
Raises:
ValueError, TypeError: Not a valid integer representation. | [
"Set",
"number",
"of",
"screen",
"lines",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/terminal.py#L309-L322 | train | 224,769 |
google/textfsm | textfsm/terminal.py | Pager.Page | def Page(self, text=None, show_percent=None):
"""Page text.
Continues to page through any text supplied in the constructor. Also, any
text supplied to this method will be appended to the total text to be
displayed. The method returns when all available text has been displayed to
the user, or the user quits the pager.
Args:
text: A string, extra text to be paged.
show_percent: A boolean, if True, indicate how much is displayed so far.
If None, this behaviour is 'text is None'.
Returns:
A boolean. If True, more data can be displayed to the user. False
implies that the user has quit the pager.
"""
if text is not None:
self._text += text
if show_percent is None:
show_percent = text is None
self._show_percent = show_percent
text = LineWrap(self._text).splitlines()
while True:
# Get a list of new lines to display.
self._newlines = text[self._displayed:self._displayed+self._lines_to_show]
for line in self._newlines:
sys.stdout.write(line + '\n')
if self._delay and self._lastscroll > 0:
time.sleep(0.005)
self._displayed += len(self._newlines)
self._currentpagelines += len(self._newlines)
if self._currentpagelines >= self._lines_to_show:
self._currentpagelines = 0
wish = self._AskUser()
if wish == 'q': # Quit pager.
return False
elif wish == 'g': # Display till the end.
self._Scroll(len(text) - self._displayed + 1)
elif wish == '\r': # Enter, down a line.
self._Scroll(1)
elif wish == '\033[B': # Down arrow, down a line.
self._Scroll(1)
elif wish == '\033[A': # Up arrow, up a line.
self._Scroll(-1)
elif wish == 'b': # Up a page.
self._Scroll(0 - self._cli_lines)
else: # Next page.
self._Scroll()
if self._displayed >= len(text):
break
return True | python | def Page(self, text=None, show_percent=None):
"""Page text.
Continues to page through any text supplied in the constructor. Also, any
text supplied to this method will be appended to the total text to be
displayed. The method returns when all available text has been displayed to
the user, or the user quits the pager.
Args:
text: A string, extra text to be paged.
show_percent: A boolean, if True, indicate how much is displayed so far.
If None, this behaviour is 'text is None'.
Returns:
A boolean. If True, more data can be displayed to the user. False
implies that the user has quit the pager.
"""
if text is not None:
self._text += text
if show_percent is None:
show_percent = text is None
self._show_percent = show_percent
text = LineWrap(self._text).splitlines()
while True:
# Get a list of new lines to display.
self._newlines = text[self._displayed:self._displayed+self._lines_to_show]
for line in self._newlines:
sys.stdout.write(line + '\n')
if self._delay and self._lastscroll > 0:
time.sleep(0.005)
self._displayed += len(self._newlines)
self._currentpagelines += len(self._newlines)
if self._currentpagelines >= self._lines_to_show:
self._currentpagelines = 0
wish = self._AskUser()
if wish == 'q': # Quit pager.
return False
elif wish == 'g': # Display till the end.
self._Scroll(len(text) - self._displayed + 1)
elif wish == '\r': # Enter, down a line.
self._Scroll(1)
elif wish == '\033[B': # Down arrow, down a line.
self._Scroll(1)
elif wish == '\033[A': # Up arrow, up a line.
self._Scroll(-1)
elif wish == 'b': # Up a page.
self._Scroll(0 - self._cli_lines)
else: # Next page.
self._Scroll()
if self._displayed >= len(text):
break
return True | [
"def",
"Page",
"(",
"self",
",",
"text",
"=",
"None",
",",
"show_percent",
"=",
"None",
")",
":",
"if",
"text",
"is",
"not",
"None",
":",
"self",
".",
"_text",
"+=",
"text",
"if",
"show_percent",
"is",
"None",
":",
"show_percent",
"=",
"text",
"is",
... | Page text.
Continues to page through any text supplied in the constructor. Also, any
text supplied to this method will be appended to the total text to be
displayed. The method returns when all available text has been displayed to
the user, or the user quits the pager.
Args:
text: A string, extra text to be paged.
show_percent: A boolean, if True, indicate how much is displayed so far.
If None, this behaviour is 'text is None'.
Returns:
A boolean. If True, more data can be displayed to the user. False
implies that the user has quit the pager. | [
"Page",
"text",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/terminal.py#L329-L383 | train | 224,770 |
google/textfsm | textfsm/terminal.py | Pager._Scroll | def _Scroll(self, lines=None):
"""Set attributes to scroll the buffer correctly.
Args:
lines: An int, number of lines to scroll. If None, scrolls
by the terminal length.
"""
if lines is None:
lines = self._cli_lines
if lines < 0:
self._displayed -= self._cli_lines
self._displayed += lines
if self._displayed < 0:
self._displayed = 0
self._lines_to_show = self._cli_lines
else:
self._lines_to_show = lines
self._lastscroll = lines | python | def _Scroll(self, lines=None):
"""Set attributes to scroll the buffer correctly.
Args:
lines: An int, number of lines to scroll. If None, scrolls
by the terminal length.
"""
if lines is None:
lines = self._cli_lines
if lines < 0:
self._displayed -= self._cli_lines
self._displayed += lines
if self._displayed < 0:
self._displayed = 0
self._lines_to_show = self._cli_lines
else:
self._lines_to_show = lines
self._lastscroll = lines | [
"def",
"_Scroll",
"(",
"self",
",",
"lines",
"=",
"None",
")",
":",
"if",
"lines",
"is",
"None",
":",
"lines",
"=",
"self",
".",
"_cli_lines",
"if",
"lines",
"<",
"0",
":",
"self",
".",
"_displayed",
"-=",
"self",
".",
"_cli_lines",
"self",
".",
"_... | Set attributes to scroll the buffer correctly.
Args:
lines: An int, number of lines to scroll. If None, scrolls
by the terminal length. | [
"Set",
"attributes",
"to",
"scroll",
"the",
"buffer",
"correctly",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/terminal.py#L385-L404 | train | 224,771 |
google/textfsm | textfsm/terminal.py | Pager._AskUser | def _AskUser(self):
"""Prompt the user for the next action.
Returns:
A string, the character entered by the user.
"""
if self._show_percent:
progress = int(self._displayed*100 / (len(self._text.splitlines())))
progress_text = ' (%d%%)' % progress
else:
progress_text = ''
question = AnsiText(
'Enter: next line, Space: next page, '
'b: prev page, q: quit.%s' %
progress_text, ['green'])
sys.stdout.write(question)
sys.stdout.flush()
ch = self._GetCh()
sys.stdout.write('\r%s\r' % (' '*len(question)))
sys.stdout.flush()
return ch | python | def _AskUser(self):
"""Prompt the user for the next action.
Returns:
A string, the character entered by the user.
"""
if self._show_percent:
progress = int(self._displayed*100 / (len(self._text.splitlines())))
progress_text = ' (%d%%)' % progress
else:
progress_text = ''
question = AnsiText(
'Enter: next line, Space: next page, '
'b: prev page, q: quit.%s' %
progress_text, ['green'])
sys.stdout.write(question)
sys.stdout.flush()
ch = self._GetCh()
sys.stdout.write('\r%s\r' % (' '*len(question)))
sys.stdout.flush()
return ch | [
"def",
"_AskUser",
"(",
"self",
")",
":",
"if",
"self",
".",
"_show_percent",
":",
"progress",
"=",
"int",
"(",
"self",
".",
"_displayed",
"*",
"100",
"/",
"(",
"len",
"(",
"self",
".",
"_text",
".",
"splitlines",
"(",
")",
")",
")",
")",
"progress... | Prompt the user for the next action.
Returns:
A string, the character entered by the user. | [
"Prompt",
"the",
"user",
"for",
"the",
"next",
"action",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/terminal.py#L406-L426 | train | 224,772 |
google/textfsm | textfsm/terminal.py | Pager._GetCh | def _GetCh(self):
"""Read a single character from the user.
Returns:
A string, the character read.
"""
fd = self._tty.fileno()
old = termios.tcgetattr(fd)
try:
tty.setraw(fd)
ch = self._tty.read(1)
# Also support arrow key shortcuts (escape + 2 chars)
if ord(ch) == 27:
ch += self._tty.read(2)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old)
return ch | python | def _GetCh(self):
"""Read a single character from the user.
Returns:
A string, the character read.
"""
fd = self._tty.fileno()
old = termios.tcgetattr(fd)
try:
tty.setraw(fd)
ch = self._tty.read(1)
# Also support arrow key shortcuts (escape + 2 chars)
if ord(ch) == 27:
ch += self._tty.read(2)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old)
return ch | [
"def",
"_GetCh",
"(",
"self",
")",
":",
"fd",
"=",
"self",
".",
"_tty",
".",
"fileno",
"(",
")",
"old",
"=",
"termios",
".",
"tcgetattr",
"(",
"fd",
")",
"try",
":",
"tty",
".",
"setraw",
"(",
"fd",
")",
"ch",
"=",
"self",
".",
"_tty",
".",
"... | Read a single character from the user.
Returns:
A string, the character read. | [
"Read",
"a",
"single",
"character",
"from",
"the",
"user",
"."
] | 63a2aaece33e07947aa80963dca99b893964633b | https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/terminal.py#L428-L444 | train | 224,773 |
skyfielders/python-skyfield | skyfield/relativity.py | add_deflection | def add_deflection(position, observer, ephemeris, t,
include_earth_deflection, count=3):
"""Update `position` for how solar system masses will deflect its light.
Given the ICRS `position` [x,y,z] of an object (au) that is being
viewed from the `observer` also expressed as [x,y,z], and given an
ephemeris that can be used to determine solar system body positions,
and given the time `t` and Boolean `apply_earth` indicating whether
to worry about the effect of Earth's mass, and a `count` of how many
major solar system bodies to worry about, this function updates
`position` in-place to show how the masses in the solar system will
deflect its image.
"""
# Compute light-time to observed object.
tlt = length_of(position) / C_AUDAY
# Cycle through gravitating bodies.
jd_tdb = t.tdb
ts = t.ts
for name in deflectors[:count]:
try:
deflector = ephemeris[name]
except KeyError:
deflector = ephemeris[name + ' barycenter']
# Get position of gravitating body wrt ss barycenter at time 't_tdb'.
bposition = deflector.at(ts.tdb(jd=jd_tdb)).position.au # TODO
# Get position of gravitating body wrt observer at time 'jd_tdb'.
gpv = bposition - observer
# Compute light-time from point on incoming light ray that is closest
# to gravitating body.
dlt = light_time_difference(position, gpv)
# Get position of gravitating body wrt ss barycenter at time when
# incoming photons were closest to it.
tclose = jd_tdb
# if dlt > 0.0:
# tclose = jd - dlt
tclose = where(dlt > 0.0, jd_tdb - dlt, tclose)
tclose = where(tlt < dlt, jd_tdb - tlt, tclose)
# if tlt < dlt:
# tclose = jd - tlt
bposition = deflector.at(ts.tdb(jd=tclose)).position.au # TODO
rmass = rmasses[name]
_add_deflection(position, observer, bposition, rmass)
# If observer is not at geocenter, add in deflection due to Earth.
if include_earth_deflection.any():
deflector = ephemeris['earth']
bposition = deflector.at(ts.tdb(jd=tclose)).position.au # TODO
rmass = rmasses['earth']
# TODO: Make the following code less messy, maybe by having
# _add_deflection() return a new vector instead of modifying the
# old one in-place.
deflected_position = position.copy()
_add_deflection(deflected_position, observer, bposition, rmass)
if include_earth_deflection.shape:
position[:,include_earth_deflection] = (
deflected_position[:,include_earth_deflection])
else:
position[:] = deflected_position[:] | python | def add_deflection(position, observer, ephemeris, t,
include_earth_deflection, count=3):
"""Update `position` for how solar system masses will deflect its light.
Given the ICRS `position` [x,y,z] of an object (au) that is being
viewed from the `observer` also expressed as [x,y,z], and given an
ephemeris that can be used to determine solar system body positions,
and given the time `t` and Boolean `apply_earth` indicating whether
to worry about the effect of Earth's mass, and a `count` of how many
major solar system bodies to worry about, this function updates
`position` in-place to show how the masses in the solar system will
deflect its image.
"""
# Compute light-time to observed object.
tlt = length_of(position) / C_AUDAY
# Cycle through gravitating bodies.
jd_tdb = t.tdb
ts = t.ts
for name in deflectors[:count]:
try:
deflector = ephemeris[name]
except KeyError:
deflector = ephemeris[name + ' barycenter']
# Get position of gravitating body wrt ss barycenter at time 't_tdb'.
bposition = deflector.at(ts.tdb(jd=jd_tdb)).position.au # TODO
# Get position of gravitating body wrt observer at time 'jd_tdb'.
gpv = bposition - observer
# Compute light-time from point on incoming light ray that is closest
# to gravitating body.
dlt = light_time_difference(position, gpv)
# Get position of gravitating body wrt ss barycenter at time when
# incoming photons were closest to it.
tclose = jd_tdb
# if dlt > 0.0:
# tclose = jd - dlt
tclose = where(dlt > 0.0, jd_tdb - dlt, tclose)
tclose = where(tlt < dlt, jd_tdb - tlt, tclose)
# if tlt < dlt:
# tclose = jd - tlt
bposition = deflector.at(ts.tdb(jd=tclose)).position.au # TODO
rmass = rmasses[name]
_add_deflection(position, observer, bposition, rmass)
# If observer is not at geocenter, add in deflection due to Earth.
if include_earth_deflection.any():
deflector = ephemeris['earth']
bposition = deflector.at(ts.tdb(jd=tclose)).position.au # TODO
rmass = rmasses['earth']
# TODO: Make the following code less messy, maybe by having
# _add_deflection() return a new vector instead of modifying the
# old one in-place.
deflected_position = position.copy()
_add_deflection(deflected_position, observer, bposition, rmass)
if include_earth_deflection.shape:
position[:,include_earth_deflection] = (
deflected_position[:,include_earth_deflection])
else:
position[:] = deflected_position[:] | [
"def",
"add_deflection",
"(",
"position",
",",
"observer",
",",
"ephemeris",
",",
"t",
",",
"include_earth_deflection",
",",
"count",
"=",
"3",
")",
":",
"# Compute light-time to observed object.",
"tlt",
"=",
"length_of",
"(",
"position",
")",
"/",
"C_AUDAY",
"... | Update `position` for how solar system masses will deflect its light.
Given the ICRS `position` [x,y,z] of an object (au) that is being
viewed from the `observer` also expressed as [x,y,z], and given an
ephemeris that can be used to determine solar system body positions,
and given the time `t` and Boolean `apply_earth` indicating whether
to worry about the effect of Earth's mass, and a `count` of how many
major solar system bodies to worry about, this function updates
`position` in-place to show how the masses in the solar system will
deflect its image. | [
"Update",
"position",
"for",
"how",
"solar",
"system",
"masses",
"will",
"deflect",
"its",
"light",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/relativity.py#L23-L97 | train | 224,774 |
skyfielders/python-skyfield | skyfield/relativity.py | _add_deflection | def _add_deflection(position, observer, deflector, rmass):
"""Correct a position vector for how one particular mass deflects light.
Given the ICRS `position` [x,y,z] of an object (AU) together with
the positions of an `observer` and a `deflector` of reciprocal mass
`rmass`, this function updates `position` in-place to show how much
the presence of the deflector will deflect the image of the object.
"""
# Construct vector 'pq' from gravitating body to observed object and
# construct vector 'pe' from gravitating body to observer.
pq = observer + position - deflector
pe = observer - deflector
# Compute vector magnitudes and unit vectors.
pmag = length_of(position)
qmag = length_of(pq)
emag = length_of(pe)
phat = position / where(pmag, pmag, 1.0) # where() avoids divide-by-zero
qhat = pq / where(qmag, qmag, 1.0)
ehat = pe / where(emag, emag, 1.0)
# Compute dot products of vectors.
pdotq = dots(phat, qhat)
qdote = dots(qhat, ehat)
edotp = dots(ehat, phat)
# If gravitating body is observed object, or is on a straight line
# toward or away from observed object to within 1 arcsec, deflection
# is set to zero set 'pos2' equal to 'pos1'.
make_no_correction = abs(edotp) > 0.99999999999
# Compute scalar factors.
fac1 = 2.0 * GS / (C * C * emag * AU_M * rmass)
fac2 = 1.0 + qdote
# Correct position vector.
position += where(make_no_correction, 0.0,
fac1 * (pdotq * ehat - edotp * qhat) / fac2 * pmag) | python | def _add_deflection(position, observer, deflector, rmass):
"""Correct a position vector for how one particular mass deflects light.
Given the ICRS `position` [x,y,z] of an object (AU) together with
the positions of an `observer` and a `deflector` of reciprocal mass
`rmass`, this function updates `position` in-place to show how much
the presence of the deflector will deflect the image of the object.
"""
# Construct vector 'pq' from gravitating body to observed object and
# construct vector 'pe' from gravitating body to observer.
pq = observer + position - deflector
pe = observer - deflector
# Compute vector magnitudes and unit vectors.
pmag = length_of(position)
qmag = length_of(pq)
emag = length_of(pe)
phat = position / where(pmag, pmag, 1.0) # where() avoids divide-by-zero
qhat = pq / where(qmag, qmag, 1.0)
ehat = pe / where(emag, emag, 1.0)
# Compute dot products of vectors.
pdotq = dots(phat, qhat)
qdote = dots(qhat, ehat)
edotp = dots(ehat, phat)
# If gravitating body is observed object, or is on a straight line
# toward or away from observed object to within 1 arcsec, deflection
# is set to zero set 'pos2' equal to 'pos1'.
make_no_correction = abs(edotp) > 0.99999999999
# Compute scalar factors.
fac1 = 2.0 * GS / (C * C * emag * AU_M * rmass)
fac2 = 1.0 + qdote
# Correct position vector.
position += where(make_no_correction, 0.0,
fac1 * (pdotq * ehat - edotp * qhat) / fac2 * pmag) | [
"def",
"_add_deflection",
"(",
"position",
",",
"observer",
",",
"deflector",
",",
"rmass",
")",
":",
"# Construct vector 'pq' from gravitating body to observed object and",
"# construct vector 'pe' from gravitating body to observer.",
"pq",
"=",
"observer",
"+",
"position",
"-... | Correct a position vector for how one particular mass deflects light.
Given the ICRS `position` [x,y,z] of an object (AU) together with
the positions of an `observer` and a `deflector` of reciprocal mass
`rmass`, this function updates `position` in-place to show how much
the presence of the deflector will deflect the image of the object. | [
"Correct",
"a",
"position",
"vector",
"for",
"how",
"one",
"particular",
"mass",
"deflects",
"light",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/relativity.py#L121-L166 | train | 224,775 |
skyfielders/python-skyfield | skyfield/relativity.py | add_aberration | def add_aberration(position, velocity, light_time):
"""Correct a relative position vector for aberration of light.
Given the relative `position` [x,y,z] of an object (AU) from a
particular observer, the `velocity` [dx,dy,dz] at which the observer
is traveling (AU/day), and the light propagation delay `light_time`
to the object (days), this function updates `position` in-place to
give the object's apparent position due to the aberration of light.
"""
p1mag = light_time * C_AUDAY
vemag = length_of(velocity)
beta = vemag / C_AUDAY
dot = dots(position, velocity)
cosd = dot / (p1mag * vemag)
gammai = sqrt(1.0 - beta * beta)
p = beta * cosd
q = (1.0 + p / (1.0 + gammai)) * light_time
r = 1.0 + p
position *= gammai
position += q * velocity
position /= r | python | def add_aberration(position, velocity, light_time):
"""Correct a relative position vector for aberration of light.
Given the relative `position` [x,y,z] of an object (AU) from a
particular observer, the `velocity` [dx,dy,dz] at which the observer
is traveling (AU/day), and the light propagation delay `light_time`
to the object (days), this function updates `position` in-place to
give the object's apparent position due to the aberration of light.
"""
p1mag = light_time * C_AUDAY
vemag = length_of(velocity)
beta = vemag / C_AUDAY
dot = dots(position, velocity)
cosd = dot / (p1mag * vemag)
gammai = sqrt(1.0 - beta * beta)
p = beta * cosd
q = (1.0 + p / (1.0 + gammai)) * light_time
r = 1.0 + p
position *= gammai
position += q * velocity
position /= r | [
"def",
"add_aberration",
"(",
"position",
",",
"velocity",
",",
"light_time",
")",
":",
"p1mag",
"=",
"light_time",
"*",
"C_AUDAY",
"vemag",
"=",
"length_of",
"(",
"velocity",
")",
"beta",
"=",
"vemag",
"/",
"C_AUDAY",
"dot",
"=",
"dots",
"(",
"position",
... | Correct a relative position vector for aberration of light.
Given the relative `position` [x,y,z] of an object (AU) from a
particular observer, the `velocity` [dx,dy,dz] at which the observer
is traveling (AU/day), and the light propagation delay `light_time`
to the object (days), this function updates `position` in-place to
give the object's apparent position due to the aberration of light. | [
"Correct",
"a",
"relative",
"position",
"vector",
"for",
"aberration",
"of",
"light",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/relativity.py#L170-L193 | train | 224,776 |
skyfielders/python-skyfield | skyfield/jpllib.py | _center | def _center(code, segment_dict):
"""Starting with `code`, follow segments from target to center."""
while code in segment_dict:
segment = segment_dict[code]
yield segment
code = segment.center | python | def _center(code, segment_dict):
"""Starting with `code`, follow segments from target to center."""
while code in segment_dict:
segment = segment_dict[code]
yield segment
code = segment.center | [
"def",
"_center",
"(",
"code",
",",
"segment_dict",
")",
":",
"while",
"code",
"in",
"segment_dict",
":",
"segment",
"=",
"segment_dict",
"[",
"code",
"]",
"yield",
"segment",
"code",
"=",
"segment",
".",
"center"
] | Starting with `code`, follow segments from target to center. | [
"Starting",
"with",
"code",
"follow",
"segments",
"from",
"target",
"to",
"center",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/jpllib.py#L217-L222 | train | 224,777 |
skyfielders/python-skyfield | skyfield/jpllib.py | SpiceKernel.names | def names(self):
"""Return all target names that are valid with this kernel.
>>> pprint(planets.names())
{0: ['SOLAR_SYSTEM_BARYCENTER', 'SSB', 'SOLAR SYSTEM BARYCENTER'],
1: ['MERCURY_BARYCENTER', 'MERCURY BARYCENTER'],
2: ['VENUS_BARYCENTER', 'VENUS BARYCENTER'],
3: ['EARTH_BARYCENTER',
'EMB',
...
The result is a dictionary with target code keys and name lists
as values. The last name in each list is the one that Skyfield
uses when printing information about a body.
"""
d = defaultdict(list)
for code, name in target_name_pairs:
if code in self.codes:
d[code].append(name)
return dict(d) | python | def names(self):
"""Return all target names that are valid with this kernel.
>>> pprint(planets.names())
{0: ['SOLAR_SYSTEM_BARYCENTER', 'SSB', 'SOLAR SYSTEM BARYCENTER'],
1: ['MERCURY_BARYCENTER', 'MERCURY BARYCENTER'],
2: ['VENUS_BARYCENTER', 'VENUS BARYCENTER'],
3: ['EARTH_BARYCENTER',
'EMB',
...
The result is a dictionary with target code keys and name lists
as values. The last name in each list is the one that Skyfield
uses when printing information about a body.
"""
d = defaultdict(list)
for code, name in target_name_pairs:
if code in self.codes:
d[code].append(name)
return dict(d) | [
"def",
"names",
"(",
"self",
")",
":",
"d",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"code",
",",
"name",
"in",
"target_name_pairs",
":",
"if",
"code",
"in",
"self",
".",
"codes",
":",
"d",
"[",
"code",
"]",
".",
"append",
"(",
"name",
")",
"... | Return all target names that are valid with this kernel.
>>> pprint(planets.names())
{0: ['SOLAR_SYSTEM_BARYCENTER', 'SSB', 'SOLAR SYSTEM BARYCENTER'],
1: ['MERCURY_BARYCENTER', 'MERCURY BARYCENTER'],
2: ['VENUS_BARYCENTER', 'VENUS BARYCENTER'],
3: ['EARTH_BARYCENTER',
'EMB',
...
The result is a dictionary with target code keys and name lists
as values. The last name in each list is the one that Skyfield
uses when printing information about a body. | [
"Return",
"all",
"target",
"names",
"that",
"are",
"valid",
"with",
"this",
"kernel",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/jpllib.py#L106-L126 | train | 224,778 |
skyfielders/python-skyfield | skyfield/jpllib.py | SpiceKernel.decode | def decode(self, name):
"""Translate a target name into its integer code.
>>> planets.decode('Venus')
299
Raises ``ValueError`` if you supply an unknown name, or
``KeyError`` if the target is missing from this kernel. You can
supply an integer code if you already have one and just want to
check whether it is present in this kernel.
"""
if isinstance(name, int):
code = name
else:
name = name.upper()
code = _targets.get(name)
if code is None:
raise ValueError('unknown SPICE target {0!r}'.format(name))
if code not in self.codes:
targets = ', '.join(_format_code_and_name(c) for c in self.codes)
raise KeyError('kernel {0!r} is missing {1!r} -'
' the targets it supports are: {2}'
.format(self.filename, name, targets))
return code | python | def decode(self, name):
"""Translate a target name into its integer code.
>>> planets.decode('Venus')
299
Raises ``ValueError`` if you supply an unknown name, or
``KeyError`` if the target is missing from this kernel. You can
supply an integer code if you already have one and just want to
check whether it is present in this kernel.
"""
if isinstance(name, int):
code = name
else:
name = name.upper()
code = _targets.get(name)
if code is None:
raise ValueError('unknown SPICE target {0!r}'.format(name))
if code not in self.codes:
targets = ', '.join(_format_code_and_name(c) for c in self.codes)
raise KeyError('kernel {0!r} is missing {1!r} -'
' the targets it supports are: {2}'
.format(self.filename, name, targets))
return code | [
"def",
"decode",
"(",
"self",
",",
"name",
")",
":",
"if",
"isinstance",
"(",
"name",
",",
"int",
")",
":",
"code",
"=",
"name",
"else",
":",
"name",
"=",
"name",
".",
"upper",
"(",
")",
"code",
"=",
"_targets",
".",
"get",
"(",
"name",
")",
"i... | Translate a target name into its integer code.
>>> planets.decode('Venus')
299
Raises ``ValueError`` if you supply an unknown name, or
``KeyError`` if the target is missing from this kernel. You can
supply an integer code if you already have one and just want to
check whether it is present in this kernel. | [
"Translate",
"a",
"target",
"name",
"into",
"its",
"integer",
"code",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/jpllib.py#L128-L152 | train | 224,779 |
skyfielders/python-skyfield | skyfield/iokit.py | _search | def _search(mapping, filename):
"""Search a Loader data structure for a filename."""
result = mapping.get(filename)
if result is not None:
return result
name, ext = os.path.splitext(filename)
result = mapping.get(ext)
if result is not None:
for pattern, result2 in result:
if fnmatch(filename, pattern):
return result2
return None | python | def _search(mapping, filename):
"""Search a Loader data structure for a filename."""
result = mapping.get(filename)
if result is not None:
return result
name, ext = os.path.splitext(filename)
result = mapping.get(ext)
if result is not None:
for pattern, result2 in result:
if fnmatch(filename, pattern):
return result2
return None | [
"def",
"_search",
"(",
"mapping",
",",
"filename",
")",
":",
"result",
"=",
"mapping",
".",
"get",
"(",
"filename",
")",
"if",
"result",
"is",
"not",
"None",
":",
"return",
"result",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
... | Search a Loader data structure for a filename. | [
"Search",
"a",
"Loader",
"data",
"structure",
"for",
"a",
"filename",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/iokit.py#L288-L299 | train | 224,780 |
skyfielders/python-skyfield | skyfield/iokit.py | load_file | def load_file(path):
"""Open a file on your local drive, using its extension to guess its type.
This routine only works on ``.bsp`` ephemeris files right now, but
will gain support for additional file types in the future. ::
from skyfield.api import load_file
planets = load_file('~/Downloads/de421.bsp')
"""
path = os.path.expanduser(path)
base, ext = os.path.splitext(path)
if ext == '.bsp':
return SpiceKernel(path)
raise ValueError('unrecognized file extension: {}'.format(path)) | python | def load_file(path):
"""Open a file on your local drive, using its extension to guess its type.
This routine only works on ``.bsp`` ephemeris files right now, but
will gain support for additional file types in the future. ::
from skyfield.api import load_file
planets = load_file('~/Downloads/de421.bsp')
"""
path = os.path.expanduser(path)
base, ext = os.path.splitext(path)
if ext == '.bsp':
return SpiceKernel(path)
raise ValueError('unrecognized file extension: {}'.format(path)) | [
"def",
"load_file",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
"base",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"if",
"ext",
"==",
"'.bsp'",
":",
"return",
"SpiceKernel"... | Open a file on your local drive, using its extension to guess its type.
This routine only works on ``.bsp`` ephemeris files right now, but
will gain support for additional file types in the future. ::
from skyfield.api import load_file
planets = load_file('~/Downloads/de421.bsp') | [
"Open",
"a",
"file",
"on",
"your",
"local",
"drive",
"using",
"its",
"extension",
"to",
"guess",
"its",
"type",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/iokit.py#L302-L316 | train | 224,781 |
skyfielders/python-skyfield | skyfield/iokit.py | parse_deltat_data | def parse_deltat_data(fileobj):
"""Parse the United States Naval Observatory ``deltat.data`` file.
Each line file gives the date and the value of Delta T::
2016 2 1 68.1577
This function returns a 2xN array of raw Julian dates and matching
Delta T values.
"""
array = np.loadtxt(fileobj)
year, month, day = array[-1,:3].astype(int)
expiration_date = date(year + 1, month, day)
year, month, day, delta_t = array.T
data = np.array((julian_date(year, month, day), delta_t))
return expiration_date, data | python | def parse_deltat_data(fileobj):
"""Parse the United States Naval Observatory ``deltat.data`` file.
Each line file gives the date and the value of Delta T::
2016 2 1 68.1577
This function returns a 2xN array of raw Julian dates and matching
Delta T values.
"""
array = np.loadtxt(fileobj)
year, month, day = array[-1,:3].astype(int)
expiration_date = date(year + 1, month, day)
year, month, day, delta_t = array.T
data = np.array((julian_date(year, month, day), delta_t))
return expiration_date, data | [
"def",
"parse_deltat_data",
"(",
"fileobj",
")",
":",
"array",
"=",
"np",
".",
"loadtxt",
"(",
"fileobj",
")",
"year",
",",
"month",
",",
"day",
"=",
"array",
"[",
"-",
"1",
",",
":",
"3",
"]",
".",
"astype",
"(",
"int",
")",
"expiration_date",
"="... | Parse the United States Naval Observatory ``deltat.data`` file.
Each line file gives the date and the value of Delta T::
2016 2 1 68.1577
This function returns a 2xN array of raw Julian dates and matching
Delta T values. | [
"Parse",
"the",
"United",
"States",
"Naval",
"Observatory",
"deltat",
".",
"data",
"file",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/iokit.py#L319-L335 | train | 224,782 |
skyfielders/python-skyfield | skyfield/iokit.py | parse_deltat_preds | def parse_deltat_preds(fileobj):
"""Parse the United States Naval Observatory ``deltat.preds`` file.
The old format supplies a floating point year, the value of Delta T,
and one or two other fields::
2015.75 67.97 0.210 0.02
The new format adds a modified Julian day as the first field:
58484.000 2019.00 69.34 -0.152 0.117
This function returns a 2xN array of raw Julian dates and matching
Delta T values.
"""
lines = iter(fileobj)
header = next(lines)
if header.startswith(b'YEAR'):
# Format in use until 2019 February
next(lines) # discard blank line
year_float, delta_t = np.loadtxt(lines, usecols=[0, 1]).T
else:
# Format in use since 2019 February
year_float, delta_t = np.loadtxt(lines, usecols=[1, 2]).T
year = year_float.astype(int)
month = 1 + (year_float * 12.0).astype(int) % 12
expiration_date = date(year[0] + 2, month[0], 1)
data = np.array((julian_date(year, month, 1), delta_t))
return expiration_date, data | python | def parse_deltat_preds(fileobj):
"""Parse the United States Naval Observatory ``deltat.preds`` file.
The old format supplies a floating point year, the value of Delta T,
and one or two other fields::
2015.75 67.97 0.210 0.02
The new format adds a modified Julian day as the first field:
58484.000 2019.00 69.34 -0.152 0.117
This function returns a 2xN array of raw Julian dates and matching
Delta T values.
"""
lines = iter(fileobj)
header = next(lines)
if header.startswith(b'YEAR'):
# Format in use until 2019 February
next(lines) # discard blank line
year_float, delta_t = np.loadtxt(lines, usecols=[0, 1]).T
else:
# Format in use since 2019 February
year_float, delta_t = np.loadtxt(lines, usecols=[1, 2]).T
year = year_float.astype(int)
month = 1 + (year_float * 12.0).astype(int) % 12
expiration_date = date(year[0] + 2, month[0], 1)
data = np.array((julian_date(year, month, 1), delta_t))
return expiration_date, data | [
"def",
"parse_deltat_preds",
"(",
"fileobj",
")",
":",
"lines",
"=",
"iter",
"(",
"fileobj",
")",
"header",
"=",
"next",
"(",
"lines",
")",
"if",
"header",
".",
"startswith",
"(",
"b'YEAR'",
")",
":",
"# Format in use until 2019 February",
"next",
"(",
"line... | Parse the United States Naval Observatory ``deltat.preds`` file.
The old format supplies a floating point year, the value of Delta T,
and one or two other fields::
2015.75 67.97 0.210 0.02
The new format adds a modified Julian day as the first field:
58484.000 2019.00 69.34 -0.152 0.117
This function returns a 2xN array of raw Julian dates and matching
Delta T values. | [
"Parse",
"the",
"United",
"States",
"Naval",
"Observatory",
"deltat",
".",
"preds",
"file",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/iokit.py#L338-L369 | train | 224,783 |
skyfielders/python-skyfield | skyfield/iokit.py | parse_leap_seconds | def parse_leap_seconds(fileobj):
"""Parse the IERS file ``Leap_Second.dat``.
The leap dates array can be searched with::
index = np.searchsorted(leap_dates, jd, 'right')
The resulting index allows (TAI - UTC) to be fetched with::
offset = leap_offsets[index]
"""
lines = iter(fileobj)
for line in lines:
if line.startswith(b'# File expires on'):
break
else:
raise ValueError('Leap_Second.dat is missing its expiration date')
line = line.decode('ascii')
with _lock: # won't help if anyone user threads are doing parsing, alas
original_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'C')
try:
dt = datetime.strptime(line, '# File expires on %d %B %Y\n')
finally:
locale.setlocale(locale.LC_ALL, original_locale)
# The file went out of date at the beginning of July 2016, and kept
# downloading every time a user ran a Skyfield program. So we now
# build in a grace period:
grace_period = timedelta(days=30)
expiration_date = dt.date() + grace_period
mjd, day, month, year, offsets = np.loadtxt(lines).T
leap_dates = np.ndarray(len(mjd) + 2)
leap_dates[0] = '-inf'
leap_dates[1:-1] = mjd + 2400000.5
leap_dates[-1] = 'inf'
leap_offsets = np.ndarray(len(mjd) + 2)
leap_offsets[0] = leap_offsets[1] = offsets[0]
leap_offsets[2:] = offsets
return expiration_date, (leap_dates, leap_offsets) | python | def parse_leap_seconds(fileobj):
"""Parse the IERS file ``Leap_Second.dat``.
The leap dates array can be searched with::
index = np.searchsorted(leap_dates, jd, 'right')
The resulting index allows (TAI - UTC) to be fetched with::
offset = leap_offsets[index]
"""
lines = iter(fileobj)
for line in lines:
if line.startswith(b'# File expires on'):
break
else:
raise ValueError('Leap_Second.dat is missing its expiration date')
line = line.decode('ascii')
with _lock: # won't help if anyone user threads are doing parsing, alas
original_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'C')
try:
dt = datetime.strptime(line, '# File expires on %d %B %Y\n')
finally:
locale.setlocale(locale.LC_ALL, original_locale)
# The file went out of date at the beginning of July 2016, and kept
# downloading every time a user ran a Skyfield program. So we now
# build in a grace period:
grace_period = timedelta(days=30)
expiration_date = dt.date() + grace_period
mjd, day, month, year, offsets = np.loadtxt(lines).T
leap_dates = np.ndarray(len(mjd) + 2)
leap_dates[0] = '-inf'
leap_dates[1:-1] = mjd + 2400000.5
leap_dates[-1] = 'inf'
leap_offsets = np.ndarray(len(mjd) + 2)
leap_offsets[0] = leap_offsets[1] = offsets[0]
leap_offsets[2:] = offsets
return expiration_date, (leap_dates, leap_offsets) | [
"def",
"parse_leap_seconds",
"(",
"fileobj",
")",
":",
"lines",
"=",
"iter",
"(",
"fileobj",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
".",
"startswith",
"(",
"b'# File expires on'",
")",
":",
"break",
"else",
":",
"raise",
"ValueError",
"(",
... | Parse the IERS file ``Leap_Second.dat``.
The leap dates array can be searched with::
index = np.searchsorted(leap_dates, jd, 'right')
The resulting index allows (TAI - UTC) to be fetched with::
offset = leap_offsets[index] | [
"Parse",
"the",
"IERS",
"file",
"Leap_Second",
".",
"dat",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/iokit.py#L371-L412 | train | 224,784 |
skyfielders/python-skyfield | skyfield/iokit.py | parse_tle | def parse_tle(fileobj):
"""Parse a file of TLE satellite element sets.
Builds an Earth satellite from each pair of adjacent lines in the
file that start with "1 " and "2 " and have 69 or more characters
each. If the preceding line is exactly 24 characters long, then it
is parsed as the satellite's name. For each satellite found, yields
a tuple `(names, sat)` giving the name(s) on the preceding line (or
`None` if no name was found) and the satellite object itself.
An exception is raised if the attempt to parse a pair of candidate
lines as TLE elements fails.
"""
b0 = b1 = b''
for b2 in fileobj:
if (b1.startswith(b'1 ') and len(b1) >= 69 and
b2.startswith(b'2 ') and len(b2) >= 69):
b0 = b0.rstrip(b'\n\r')
if len(b0) == 24: # Celestrak
name = b0.decode('ascii').rstrip()
names = [name]
elif b0.startswith(b'0 '): # Spacetrack 3-line format
name = b0[2:].decode('ascii').rstrip()
names = [name]
else:
name = None
names = ()
line1 = b1.decode('ascii')
line2 = b2.decode('ascii')
sat = EarthSatellite(line1, line2, name)
if name and ' (' in name:
# Given a name like `ISS (ZARYA)` or `HTV-6 (KOUNOTORI
# 6)`, also support lookup by the name inside or outside
# the parentheses.
short_name, secondary_name = name.split(' (')
secondary_name = secondary_name.rstrip(')')
names.append(short_name)
names.append(secondary_name)
yield names, sat
b0 = b1
b1 = b2 | python | def parse_tle(fileobj):
"""Parse a file of TLE satellite element sets.
Builds an Earth satellite from each pair of adjacent lines in the
file that start with "1 " and "2 " and have 69 or more characters
each. If the preceding line is exactly 24 characters long, then it
is parsed as the satellite's name. For each satellite found, yields
a tuple `(names, sat)` giving the name(s) on the preceding line (or
`None` if no name was found) and the satellite object itself.
An exception is raised if the attempt to parse a pair of candidate
lines as TLE elements fails.
"""
b0 = b1 = b''
for b2 in fileobj:
if (b1.startswith(b'1 ') and len(b1) >= 69 and
b2.startswith(b'2 ') and len(b2) >= 69):
b0 = b0.rstrip(b'\n\r')
if len(b0) == 24: # Celestrak
name = b0.decode('ascii').rstrip()
names = [name]
elif b0.startswith(b'0 '): # Spacetrack 3-line format
name = b0[2:].decode('ascii').rstrip()
names = [name]
else:
name = None
names = ()
line1 = b1.decode('ascii')
line2 = b2.decode('ascii')
sat = EarthSatellite(line1, line2, name)
if name and ' (' in name:
# Given a name like `ISS (ZARYA)` or `HTV-6 (KOUNOTORI
# 6)`, also support lookup by the name inside or outside
# the parentheses.
short_name, secondary_name = name.split(' (')
secondary_name = secondary_name.rstrip(')')
names.append(short_name)
names.append(secondary_name)
yield names, sat
b0 = b1
b1 = b2 | [
"def",
"parse_tle",
"(",
"fileobj",
")",
":",
"b0",
"=",
"b1",
"=",
"b''",
"for",
"b2",
"in",
"fileobj",
":",
"if",
"(",
"b1",
".",
"startswith",
"(",
"b'1 '",
")",
"and",
"len",
"(",
"b1",
")",
">=",
"69",
"and",
"b2",
".",
"startswith",
"(",
... | Parse a file of TLE satellite element sets.
Builds an Earth satellite from each pair of adjacent lines in the
file that start with "1 " and "2 " and have 69 or more characters
each. If the preceding line is exactly 24 characters long, then it
is parsed as the satellite's name. For each satellite found, yields
a tuple `(names, sat)` giving the name(s) on the preceding line (or
`None` if no name was found) and the satellite object itself.
An exception is raised if the attempt to parse a pair of candidate
lines as TLE elements fails. | [
"Parse",
"a",
"file",
"of",
"TLE",
"satellite",
"element",
"sets",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/iokit.py#L415-L461 | train | 224,785 |
skyfielders/python-skyfield | skyfield/iokit.py | download | def download(url, path, verbose=None, blocksize=128*1024):
"""Download a file from a URL, possibly displaying a progress bar.
Saves the output to the file named by `path`. If the URL cannot be
downloaded or the file cannot be written, an IOError is raised.
Normally, if the standard error output is a terminal, then a
progress bar is displayed to keep the user entertained. Specify
`verbose=True` or `verbose=False` to control this behavior.
"""
tempname = path + '.download'
try:
connection = urlopen(url)
except Exception as e:
raise IOError('cannot get {0} because {1}'.format(url, e))
if verbose is None:
verbose = sys.stderr.isatty()
bar = None
if verbose:
if _running_IDLE:
print('Downloading {0} ...'.format(os.path.basename(path)),
file=sys.stderr)
else:
bar = ProgressBar(path)
content_length = int(connection.headers.get('content-length', -1))
# Python open() provides no way to achieve O_CREAT without also
# truncating the file, which would ruin the work of another process
# that is trying to download the same file at the same time. So:
flags = getattr(os, 'O_BINARY', 0) | os.O_CREAT | os.O_RDWR
fd = os.open(tempname, flags, 0o666)
with os.fdopen(fd, 'wb') as w:
try:
if lockf is not None:
fd = w.fileno()
lockf(fd, LOCK_EX) # only one download at a time
if os.path.exists(path): # did someone else finish first?
if os.path.exists(tempname):
os.unlink(tempname)
return
w.seek(0)
length = 0
while True:
data = connection.read(blocksize)
if not data:
break
w.write(data)
length += len(data)
if bar is not None:
bar.report(length, content_length)
w.flush()
if lockf is not None:
# On Unix, rename while still protected by the lock.
try:
os.rename(tempname, path)
except Exception as e:
raise IOError('error renaming {0} to {1} - {2}'.format(
tempname, path, e))
except Exception as e:
raise IOError('error getting {0} - {1}'.format(url, e))
finally:
if lockf is not None:
lockf(fd, LOCK_UN)
if lockf is None:
# On Windows, rename here because the file needs to be closed first.
try:
_replace(tempname, path)
except Exception as e:
raise IOError('error renaming {0} to {1} - {2}'.format(
tempname, path, e)) | python | def download(url, path, verbose=None, blocksize=128*1024):
"""Download a file from a URL, possibly displaying a progress bar.
Saves the output to the file named by `path`. If the URL cannot be
downloaded or the file cannot be written, an IOError is raised.
Normally, if the standard error output is a terminal, then a
progress bar is displayed to keep the user entertained. Specify
`verbose=True` or `verbose=False` to control this behavior.
"""
tempname = path + '.download'
try:
connection = urlopen(url)
except Exception as e:
raise IOError('cannot get {0} because {1}'.format(url, e))
if verbose is None:
verbose = sys.stderr.isatty()
bar = None
if verbose:
if _running_IDLE:
print('Downloading {0} ...'.format(os.path.basename(path)),
file=sys.stderr)
else:
bar = ProgressBar(path)
content_length = int(connection.headers.get('content-length', -1))
# Python open() provides no way to achieve O_CREAT without also
# truncating the file, which would ruin the work of another process
# that is trying to download the same file at the same time. So:
flags = getattr(os, 'O_BINARY', 0) | os.O_CREAT | os.O_RDWR
fd = os.open(tempname, flags, 0o666)
with os.fdopen(fd, 'wb') as w:
try:
if lockf is not None:
fd = w.fileno()
lockf(fd, LOCK_EX) # only one download at a time
if os.path.exists(path): # did someone else finish first?
if os.path.exists(tempname):
os.unlink(tempname)
return
w.seek(0)
length = 0
while True:
data = connection.read(blocksize)
if not data:
break
w.write(data)
length += len(data)
if bar is not None:
bar.report(length, content_length)
w.flush()
if lockf is not None:
# On Unix, rename while still protected by the lock.
try:
os.rename(tempname, path)
except Exception as e:
raise IOError('error renaming {0} to {1} - {2}'.format(
tempname, path, e))
except Exception as e:
raise IOError('error getting {0} - {1}'.format(url, e))
finally:
if lockf is not None:
lockf(fd, LOCK_UN)
if lockf is None:
# On Windows, rename here because the file needs to be closed first.
try:
_replace(tempname, path)
except Exception as e:
raise IOError('error renaming {0} to {1} - {2}'.format(
tempname, path, e)) | [
"def",
"download",
"(",
"url",
",",
"path",
",",
"verbose",
"=",
"None",
",",
"blocksize",
"=",
"128",
"*",
"1024",
")",
":",
"tempname",
"=",
"path",
"+",
"'.download'",
"try",
":",
"connection",
"=",
"urlopen",
"(",
"url",
")",
"except",
"Exception",... | Download a file from a URL, possibly displaying a progress bar.
Saves the output to the file named by `path`. If the URL cannot be
downloaded or the file cannot be written, an IOError is raised.
Normally, if the standard error output is a terminal, then a
progress bar is displayed to keep the user entertained. Specify
`verbose=True` or `verbose=False` to control this behavior. | [
"Download",
"a",
"file",
"from",
"a",
"URL",
"possibly",
"displaying",
"a",
"progress",
"bar",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/iokit.py#L464-L536 | train | 224,786 |
skyfielders/python-skyfield | skyfield/iokit.py | Loader.tle | def tle(self, url, reload=False, filename=None):
"""Load and parse a satellite TLE file.
Given a URL or a local path, this loads a file of three-line records in
the common Celestrak file format, or two-line records like those from
space-track.org. For a three-line element set, each first line gives
the name of a satellite and the following two lines are the TLE orbital
elements. A two-line element set comprises only these last two lines.
See the :meth:`~skyfield.iokit.Loader.open()` documentation for
the meaning of the ``reload`` and ``filename`` parameters.
Returns a dictionary whose keys are satellite names and numbers,
and whose values are :class:`~skyfield.sgp4lib.EarthSatellite`
objects. If you want to build a list in which each satellite
appears only once, simply run ``sats = set(d.values())`` on the
returned dictionary.
"""
d = {}
with self.open(url, reload=reload, filename=filename) as f:
for names, sat in parse_tle(f):
d[sat.model.satnum] = sat
for name in names:
d[name] = sat
return d | python | def tle(self, url, reload=False, filename=None):
"""Load and parse a satellite TLE file.
Given a URL or a local path, this loads a file of three-line records in
the common Celestrak file format, or two-line records like those from
space-track.org. For a three-line element set, each first line gives
the name of a satellite and the following two lines are the TLE orbital
elements. A two-line element set comprises only these last two lines.
See the :meth:`~skyfield.iokit.Loader.open()` documentation for
the meaning of the ``reload`` and ``filename`` parameters.
Returns a dictionary whose keys are satellite names and numbers,
and whose values are :class:`~skyfield.sgp4lib.EarthSatellite`
objects. If you want to build a list in which each satellite
appears only once, simply run ``sats = set(d.values())`` on the
returned dictionary.
"""
d = {}
with self.open(url, reload=reload, filename=filename) as f:
for names, sat in parse_tle(f):
d[sat.model.satnum] = sat
for name in names:
d[name] = sat
return d | [
"def",
"tle",
"(",
"self",
",",
"url",
",",
"reload",
"=",
"False",
",",
"filename",
"=",
"None",
")",
":",
"d",
"=",
"{",
"}",
"with",
"self",
".",
"open",
"(",
"url",
",",
"reload",
"=",
"reload",
",",
"filename",
"=",
"filename",
")",
"as",
... | Load and parse a satellite TLE file.
Given a URL or a local path, this loads a file of three-line records in
the common Celestrak file format, or two-line records like those from
space-track.org. For a three-line element set, each first line gives
the name of a satellite and the following two lines are the TLE orbital
elements. A two-line element set comprises only these last two lines.
See the :meth:`~skyfield.iokit.Loader.open()` documentation for
the meaning of the ``reload`` and ``filename`` parameters.
Returns a dictionary whose keys are satellite names and numbers,
and whose values are :class:`~skyfield.sgp4lib.EarthSatellite`
objects. If you want to build a list in which each satellite
appears only once, simply run ``sats = set(d.values())`` on the
returned dictionary. | [
"Load",
"and",
"parse",
"a",
"satellite",
"TLE",
"file",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/iokit.py#L199-L224 | train | 224,787 |
skyfielders/python-skyfield | skyfield/iokit.py | Loader.open | def open(self, url, mode='rb', reload=False, filename=None):
"""Open a file, downloading it first if it does not yet exist.
Unlike when you call a loader directly like ``my_loader()``,
this ``my_loader.open()`` method does not attempt to parse or
interpret the file; it simply returns an open file object.
The ``url`` can be either an external URL, or else the path to a
file on the current filesystem. A relative path will be assumed
to be relative to the base directory of this loader object.
If a URL was provided and the ``reload`` parameter is true, then
any existing file will be removed before the download starts.
The ``filename`` parameter lets you specify an alternative local
filename instead of having the filename extracted from the final
component of the URL.
"""
if '://' not in url:
path_that_might_be_relative = url
path = os.path.join(self.directory, path_that_might_be_relative)
return open(path, mode)
if filename is None:
filename = urlparse(url).path.split('/')[-1]
path = self.path_to(filename)
if reload and os.path.exists(path):
os.remove(path)
if not os.path.exists(path):
download(url, path, self.verbose)
return open(path, mode) | python | def open(self, url, mode='rb', reload=False, filename=None):
"""Open a file, downloading it first if it does not yet exist.
Unlike when you call a loader directly like ``my_loader()``,
this ``my_loader.open()`` method does not attempt to parse or
interpret the file; it simply returns an open file object.
The ``url`` can be either an external URL, or else the path to a
file on the current filesystem. A relative path will be assumed
to be relative to the base directory of this loader object.
If a URL was provided and the ``reload`` parameter is true, then
any existing file will be removed before the download starts.
The ``filename`` parameter lets you specify an alternative local
filename instead of having the filename extracted from the final
component of the URL.
"""
if '://' not in url:
path_that_might_be_relative = url
path = os.path.join(self.directory, path_that_might_be_relative)
return open(path, mode)
if filename is None:
filename = urlparse(url).path.split('/')[-1]
path = self.path_to(filename)
if reload and os.path.exists(path):
os.remove(path)
if not os.path.exists(path):
download(url, path, self.verbose)
return open(path, mode) | [
"def",
"open",
"(",
"self",
",",
"url",
",",
"mode",
"=",
"'rb'",
",",
"reload",
"=",
"False",
",",
"filename",
"=",
"None",
")",
":",
"if",
"'://'",
"not",
"in",
"url",
":",
"path_that_might_be_relative",
"=",
"url",
"path",
"=",
"os",
".",
"path",
... | Open a file, downloading it first if it does not yet exist.
Unlike when you call a loader directly like ``my_loader()``,
this ``my_loader.open()`` method does not attempt to parse or
interpret the file; it simply returns an open file object.
The ``url`` can be either an external URL, or else the path to a
file on the current filesystem. A relative path will be assumed
to be relative to the base directory of this loader object.
If a URL was provided and the ``reload`` parameter is true, then
any existing file will be removed before the download starts.
The ``filename`` parameter lets you specify an alternative local
filename instead of having the filename extracted from the final
component of the URL. | [
"Open",
"a",
"file",
"downloading",
"it",
"first",
"if",
"it",
"does",
"not",
"yet",
"exist",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/iokit.py#L226-L256 | train | 224,788 |
skyfielders/python-skyfield | skyfield/iokit.py | Loader.timescale | def timescale(self, delta_t=None):
"""Open or download three time scale files, returning a `Timescale`.
This method is how most Skyfield users build a `Timescale`
object, which is necessary for building specific `Time` objects
that name specific moments.
This will open or download the three files that Skyfield needs
to measure time. UT1 is tabulated by the United States Naval
Observatory files ``deltat.data`` and ``deltat.preds``, while
UTC is defined by ``Leap_Second.dat`` from the International
Earth Rotation Service.
"""
if delta_t is not None:
delta_t_recent = np.array(((-1e99, 1e99), (delta_t, delta_t)))
else:
data = self('deltat.data')
preds = self('deltat.preds')
data_end_time = data[0, -1]
i = np.searchsorted(preds[0], data_end_time, side='right')
delta_t_recent = np.concatenate([data, preds[:,i:]], axis=1)
leap_dates, leap_offsets = self('Leap_Second.dat')
return Timescale(delta_t_recent, leap_dates, leap_offsets) | python | def timescale(self, delta_t=None):
"""Open or download three time scale files, returning a `Timescale`.
This method is how most Skyfield users build a `Timescale`
object, which is necessary for building specific `Time` objects
that name specific moments.
This will open or download the three files that Skyfield needs
to measure time. UT1 is tabulated by the United States Naval
Observatory files ``deltat.data`` and ``deltat.preds``, while
UTC is defined by ``Leap_Second.dat`` from the International
Earth Rotation Service.
"""
if delta_t is not None:
delta_t_recent = np.array(((-1e99, 1e99), (delta_t, delta_t)))
else:
data = self('deltat.data')
preds = self('deltat.preds')
data_end_time = data[0, -1]
i = np.searchsorted(preds[0], data_end_time, side='right')
delta_t_recent = np.concatenate([data, preds[:,i:]], axis=1)
leap_dates, leap_offsets = self('Leap_Second.dat')
return Timescale(delta_t_recent, leap_dates, leap_offsets) | [
"def",
"timescale",
"(",
"self",
",",
"delta_t",
"=",
"None",
")",
":",
"if",
"delta_t",
"is",
"not",
"None",
":",
"delta_t_recent",
"=",
"np",
".",
"array",
"(",
"(",
"(",
"-",
"1e99",
",",
"1e99",
")",
",",
"(",
"delta_t",
",",
"delta_t",
")",
... | Open or download three time scale files, returning a `Timescale`.
This method is how most Skyfield users build a `Timescale`
object, which is necessary for building specific `Time` objects
that name specific moments.
This will open or download the three files that Skyfield needs
to measure time. UT1 is tabulated by the United States Naval
Observatory files ``deltat.data`` and ``deltat.preds``, while
UTC is defined by ``Leap_Second.dat`` from the International
Earth Rotation Service. | [
"Open",
"or",
"download",
"three",
"time",
"scale",
"files",
"returning",
"a",
"Timescale",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/iokit.py#L258-L281 | train | 224,789 |
skyfielders/python-skyfield | skyfield/contrib/iosurvey.py | get_summary | def get_summary(url, spk=True):
''' simple function to retrieve the header of a BSP file and return SPK object'''
# connect to file at URL
bspurl = urllib2.urlopen(url)
# retrieve the "tip" of a file at URL
bsptip = bspurl.read(10**5) # first 100kB
# save data in fake file object (in-memory)
bspstr = StringIO(bsptip)
# load into DAF object
daf = DAF(bspstr)
# return either SPK or DAF object
if spk:
# make a SPK object
spk = SPK(daf)
# return representation
return spk
else:
# return representation
return daf | python | def get_summary(url, spk=True):
''' simple function to retrieve the header of a BSP file and return SPK object'''
# connect to file at URL
bspurl = urllib2.urlopen(url)
# retrieve the "tip" of a file at URL
bsptip = bspurl.read(10**5) # first 100kB
# save data in fake file object (in-memory)
bspstr = StringIO(bsptip)
# load into DAF object
daf = DAF(bspstr)
# return either SPK or DAF object
if spk:
# make a SPK object
spk = SPK(daf)
# return representation
return spk
else:
# return representation
return daf | [
"def",
"get_summary",
"(",
"url",
",",
"spk",
"=",
"True",
")",
":",
"# connect to file at URL",
"bspurl",
"=",
"urllib2",
".",
"urlopen",
"(",
"url",
")",
"# retrieve the \"tip\" of a file at URL",
"bsptip",
"=",
"bspurl",
".",
"read",
"(",
"10",
"**",
"5",
... | simple function to retrieve the header of a BSP file and return SPK object | [
"simple",
"function",
"to",
"retrieve",
"the",
"header",
"of",
"a",
"BSP",
"file",
"and",
"return",
"SPK",
"object"
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/contrib/iosurvey.py#L11-L29 | train | 224,790 |
skyfielders/python-skyfield | skyfield/vectorlib.py | _correct_for_light_travel_time | def _correct_for_light_travel_time(observer, target):
"""Return a light-time corrected astrometric position and velocity.
Given an `observer` that is a `Barycentric` position somewhere in
the solar system, compute where in the sky they will see the body
`target`, by computing the light-time between them and figuring out
where `target` was back when the light was leaving it that is now
reaching the eyes or instruments of the `observer`.
"""
t = observer.t
ts = t.ts
cposition = observer.position.au
cvelocity = observer.velocity.au_per_d
tposition, tvelocity, gcrs_position, message = target._at(t)
distance = length_of(tposition - cposition)
light_time0 = 0.0
t_tdb = t.tdb
for i in range(10):
light_time = distance / C_AUDAY
delta = light_time - light_time0
if -1e-12 < min(delta) and max(delta) < 1e-12:
break
t2 = ts.tdb(jd=t_tdb - light_time)
tposition, tvelocity, gcrs_position, message = target._at(t2)
distance = length_of(tposition - cposition)
light_time0 = light_time
else:
raise ValueError('light-travel time failed to converge')
return tposition - cposition, tvelocity - cvelocity, t, light_time | python | def _correct_for_light_travel_time(observer, target):
"""Return a light-time corrected astrometric position and velocity.
Given an `observer` that is a `Barycentric` position somewhere in
the solar system, compute where in the sky they will see the body
`target`, by computing the light-time between them and figuring out
where `target` was back when the light was leaving it that is now
reaching the eyes or instruments of the `observer`.
"""
t = observer.t
ts = t.ts
cposition = observer.position.au
cvelocity = observer.velocity.au_per_d
tposition, tvelocity, gcrs_position, message = target._at(t)
distance = length_of(tposition - cposition)
light_time0 = 0.0
t_tdb = t.tdb
for i in range(10):
light_time = distance / C_AUDAY
delta = light_time - light_time0
if -1e-12 < min(delta) and max(delta) < 1e-12:
break
t2 = ts.tdb(jd=t_tdb - light_time)
tposition, tvelocity, gcrs_position, message = target._at(t2)
distance = length_of(tposition - cposition)
light_time0 = light_time
else:
raise ValueError('light-travel time failed to converge')
return tposition - cposition, tvelocity - cvelocity, t, light_time | [
"def",
"_correct_for_light_travel_time",
"(",
"observer",
",",
"target",
")",
":",
"t",
"=",
"observer",
".",
"t",
"ts",
"=",
"t",
".",
"ts",
"cposition",
"=",
"observer",
".",
"position",
".",
"au",
"cvelocity",
"=",
"observer",
".",
"velocity",
".",
"a... | Return a light-time corrected astrometric position and velocity.
Given an `observer` that is a `Barycentric` position somewhere in
the solar system, compute where in the sky they will see the body
`target`, by computing the light-time between them and figuring out
where `target` was back when the light was leaving it that is now
reaching the eyes or instruments of the `observer`. | [
"Return",
"a",
"light",
"-",
"time",
"corrected",
"astrometric",
"position",
"and",
"velocity",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/vectorlib.py#L201-L230 | train | 224,791 |
skyfielders/python-skyfield | skyfield/vectorlib.py | VectorFunction.at | def at(self, t):
"""At time ``t``, compute the target's position relative to the center.
If ``t`` is an array of times, then the returned position object
will specify as many positions as there were times. The kind of
position returned depends on the value of the ``center``
attribute:
* Solar System Barycenter: :class:`~skyfield.positionlib.Barycentric`
* Center of the Earth: :class:`~skyfield.positionlib.Geocentric`
* Difference: :class:`~skyfield.positionlib.Geometric`
* Anything else: :class:`~skyfield.positionlib.ICRF`
"""
if not isinstance(t, Time):
raise ValueError('please provide the at() method with a Time'
' instance as its argument, instead of the'
' value {0!r}'.format(t))
observer_data = ObserverData()
observer_data.ephemeris = self.ephemeris
p, v, observer_data.gcrs_position, message = self._at(t)
center = self.center
if center == 0:
observer_data.bcrs_position = p
observer_data.bcrs_velocity = v
self._snag_observer_data(observer_data, t)
position = build_position(p, v, t, center, self.target, observer_data)
position.message = message
return position | python | def at(self, t):
"""At time ``t``, compute the target's position relative to the center.
If ``t`` is an array of times, then the returned position object
will specify as many positions as there were times. The kind of
position returned depends on the value of the ``center``
attribute:
* Solar System Barycenter: :class:`~skyfield.positionlib.Barycentric`
* Center of the Earth: :class:`~skyfield.positionlib.Geocentric`
* Difference: :class:`~skyfield.positionlib.Geometric`
* Anything else: :class:`~skyfield.positionlib.ICRF`
"""
if not isinstance(t, Time):
raise ValueError('please provide the at() method with a Time'
' instance as its argument, instead of the'
' value {0!r}'.format(t))
observer_data = ObserverData()
observer_data.ephemeris = self.ephemeris
p, v, observer_data.gcrs_position, message = self._at(t)
center = self.center
if center == 0:
observer_data.bcrs_position = p
observer_data.bcrs_velocity = v
self._snag_observer_data(observer_data, t)
position = build_position(p, v, t, center, self.target, observer_data)
position.message = message
return position | [
"def",
"at",
"(",
"self",
",",
"t",
")",
":",
"if",
"not",
"isinstance",
"(",
"t",
",",
"Time",
")",
":",
"raise",
"ValueError",
"(",
"'please provide the at() method with a Time'",
"' instance as its argument, instead of the'",
"' value {0!r}'",
".",
"format",
"(",... | At time ``t``, compute the target's position relative to the center.
If ``t`` is an array of times, then the returned position object
will specify as many positions as there were times. The kind of
position returned depends on the value of the ``center``
attribute:
* Solar System Barycenter: :class:`~skyfield.positionlib.Barycentric`
* Center of the Earth: :class:`~skyfield.positionlib.Geocentric`
* Difference: :class:`~skyfield.positionlib.Geometric`
* Anything else: :class:`~skyfield.positionlib.ICRF` | [
"At",
"time",
"t",
"compute",
"the",
"target",
"s",
"position",
"relative",
"to",
"the",
"center",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/vectorlib.py#L54-L82 | train | 224,792 |
skyfielders/python-skyfield | skyfield/timelib.py | _to_array | def _to_array(value):
"""When `value` is a plain Python sequence, return it as a NumPy array."""
if hasattr(value, 'shape'):
return value
elif hasattr(value, '__len__'):
return array(value)
else:
return float_(value) | python | def _to_array(value):
"""When `value` is a plain Python sequence, return it as a NumPy array."""
if hasattr(value, 'shape'):
return value
elif hasattr(value, '__len__'):
return array(value)
else:
return float_(value) | [
"def",
"_to_array",
"(",
"value",
")",
":",
"if",
"hasattr",
"(",
"value",
",",
"'shape'",
")",
":",
"return",
"value",
"elif",
"hasattr",
"(",
"value",
",",
"'__len__'",
")",
":",
"return",
"array",
"(",
"value",
")",
"else",
":",
"return",
"float_",
... | When `value` is a plain Python sequence, return it as a NumPy array. | [
"When",
"value",
"is",
"a",
"plain",
"Python",
"sequence",
"return",
"it",
"as",
"a",
"NumPy",
"array",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/timelib.py#L40-L47 | train | 224,793 |
skyfielders/python-skyfield | skyfield/timelib.py | julian_day | def julian_day(year, month=1, day=1):
"""Given a proleptic Gregorian calendar date, return a Julian day int."""
janfeb = month < 3
return (day
+ 1461 * (year + 4800 - janfeb) // 4
+ 367 * (month - 2 + janfeb * 12) // 12
- 3 * ((year + 4900 - janfeb) // 100) // 4
- 32075) | python | def julian_day(year, month=1, day=1):
"""Given a proleptic Gregorian calendar date, return a Julian day int."""
janfeb = month < 3
return (day
+ 1461 * (year + 4800 - janfeb) // 4
+ 367 * (month - 2 + janfeb * 12) // 12
- 3 * ((year + 4900 - janfeb) // 100) // 4
- 32075) | [
"def",
"julian_day",
"(",
"year",
",",
"month",
"=",
"1",
",",
"day",
"=",
"1",
")",
":",
"janfeb",
"=",
"month",
"<",
"3",
"return",
"(",
"day",
"+",
"1461",
"*",
"(",
"year",
"+",
"4800",
"-",
"janfeb",
")",
"//",
"4",
"+",
"367",
"*",
"(",... | Given a proleptic Gregorian calendar date, return a Julian day int. | [
"Given",
"a",
"proleptic",
"Gregorian",
"calendar",
"date",
"return",
"a",
"Julian",
"day",
"int",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/timelib.py#L700-L707 | train | 224,794 |
skyfielders/python-skyfield | skyfield/timelib.py | julian_date | def julian_date(year, month=1, day=1, hour=0, minute=0, second=0.0):
"""Given a proleptic Gregorian calendar date, return a Julian date float."""
return julian_day(year, month, day) - 0.5 + (
second + minute * 60.0 + hour * 3600.0) / DAY_S | python | def julian_date(year, month=1, day=1, hour=0, minute=0, second=0.0):
"""Given a proleptic Gregorian calendar date, return a Julian date float."""
return julian_day(year, month, day) - 0.5 + (
second + minute * 60.0 + hour * 3600.0) / DAY_S | [
"def",
"julian_date",
"(",
"year",
",",
"month",
"=",
"1",
",",
"day",
"=",
"1",
",",
"hour",
"=",
"0",
",",
"minute",
"=",
"0",
",",
"second",
"=",
"0.0",
")",
":",
"return",
"julian_day",
"(",
"year",
",",
"month",
",",
"day",
")",
"-",
"0.5"... | Given a proleptic Gregorian calendar date, return a Julian date float. | [
"Given",
"a",
"proleptic",
"Gregorian",
"calendar",
"date",
"return",
"a",
"Julian",
"date",
"float",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/timelib.py#L709-L712 | train | 224,795 |
skyfielders/python-skyfield | skyfield/timelib.py | tdb_minus_tt | def tdb_minus_tt(jd_tdb):
"""Computes how far TDB is in advance of TT, given TDB.
Given that the two time scales never diverge by more than 2ms, TT
can also be given as the argument to perform the conversion in the
other direction.
"""
t = (jd_tdb - T0) / 36525.0
# USNO Circular 179, eq. 2.6.
return (0.001657 * sin ( 628.3076 * t + 6.2401)
+ 0.000022 * sin ( 575.3385 * t + 4.2970)
+ 0.000014 * sin (1256.6152 * t + 6.1969)
+ 0.000005 * sin ( 606.9777 * t + 4.0212)
+ 0.000005 * sin ( 52.9691 * t + 0.4444)
+ 0.000002 * sin ( 21.3299 * t + 5.5431)
+ 0.000010 * t * sin ( 628.3076 * t + 4.2490)) | python | def tdb_minus_tt(jd_tdb):
"""Computes how far TDB is in advance of TT, given TDB.
Given that the two time scales never diverge by more than 2ms, TT
can also be given as the argument to perform the conversion in the
other direction.
"""
t = (jd_tdb - T0) / 36525.0
# USNO Circular 179, eq. 2.6.
return (0.001657 * sin ( 628.3076 * t + 6.2401)
+ 0.000022 * sin ( 575.3385 * t + 4.2970)
+ 0.000014 * sin (1256.6152 * t + 6.1969)
+ 0.000005 * sin ( 606.9777 * t + 4.0212)
+ 0.000005 * sin ( 52.9691 * t + 0.4444)
+ 0.000002 * sin ( 21.3299 * t + 5.5431)
+ 0.000010 * t * sin ( 628.3076 * t + 4.2490)) | [
"def",
"tdb_minus_tt",
"(",
"jd_tdb",
")",
":",
"t",
"=",
"(",
"jd_tdb",
"-",
"T0",
")",
"/",
"36525.0",
"# USNO Circular 179, eq. 2.6.",
"return",
"(",
"0.001657",
"*",
"sin",
"(",
"628.3076",
"*",
"t",
"+",
"6.2401",
")",
"+",
"0.000022",
"*",
"sin",
... | Computes how far TDB is in advance of TT, given TDB.
Given that the two time scales never diverge by more than 2ms, TT
can also be given as the argument to perform the conversion in the
other direction. | [
"Computes",
"how",
"far",
"TDB",
"is",
"in",
"advance",
"of",
"TT",
"given",
"TDB",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/timelib.py#L752-L769 | train | 224,796 |
skyfielders/python-skyfield | skyfield/timelib.py | interpolate_delta_t | def interpolate_delta_t(delta_t_table, tt):
"""Return interpolated Delta T values for the times in `tt`.
The 2xN table should provide TT values as element 0 and
corresponding Delta T values for element 1. For times outside the
range of the table, a long-term formula is used instead.
"""
tt_array, delta_t_array = delta_t_table
delta_t = _to_array(interp(tt, tt_array, delta_t_array, nan, nan))
missing = isnan(delta_t)
if missing.any():
# Test if we are dealing with an array and proceed appropriately
if missing.shape:
tt = tt[missing]
delta_t[missing] = delta_t_formula_morrison_and_stephenson_2004(tt)
else:
delta_t = delta_t_formula_morrison_and_stephenson_2004(tt)
return delta_t | python | def interpolate_delta_t(delta_t_table, tt):
"""Return interpolated Delta T values for the times in `tt`.
The 2xN table should provide TT values as element 0 and
corresponding Delta T values for element 1. For times outside the
range of the table, a long-term formula is used instead.
"""
tt_array, delta_t_array = delta_t_table
delta_t = _to_array(interp(tt, tt_array, delta_t_array, nan, nan))
missing = isnan(delta_t)
if missing.any():
# Test if we are dealing with an array and proceed appropriately
if missing.shape:
tt = tt[missing]
delta_t[missing] = delta_t_formula_morrison_and_stephenson_2004(tt)
else:
delta_t = delta_t_formula_morrison_and_stephenson_2004(tt)
return delta_t | [
"def",
"interpolate_delta_t",
"(",
"delta_t_table",
",",
"tt",
")",
":",
"tt_array",
",",
"delta_t_array",
"=",
"delta_t_table",
"delta_t",
"=",
"_to_array",
"(",
"interp",
"(",
"tt",
",",
"tt_array",
",",
"delta_t_array",
",",
"nan",
",",
"nan",
")",
")",
... | Return interpolated Delta T values for the times in `tt`.
The 2xN table should provide TT values as element 0 and
corresponding Delta T values for element 1. For times outside the
range of the table, a long-term formula is used instead. | [
"Return",
"interpolated",
"Delta",
"T",
"values",
"for",
"the",
"times",
"in",
"tt",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/timelib.py#L771-L790 | train | 224,797 |
skyfielders/python-skyfield | skyfield/timelib.py | build_delta_t_table | def build_delta_t_table(delta_t_recent):
"""Build a table for interpolating Delta T.
Given a 2xN array of recent Delta T values, whose element 0 is a
sorted array of TT Julian dates and element 1 is Delta T values,
this routine returns a more complete table by prepending two
built-in data sources that ship with Skyfield as pre-built arrays:
* The historical values from Morrison and Stephenson (2004) which
the http://eclipse.gsfc.nasa.gov/SEcat5/deltat.html NASA web page
presents in an HTML table.
* The United States Naval Observatory ``historic_deltat.data``
values for Delta T over the years 1657 through 1984.
"""
ancient = load_bundled_npy('morrison_stephenson_deltat.npy')
historic = load_bundled_npy('historic_deltat.npy')
# Prefer USNO over Morrison and Stephenson where they overlap.
historic_start_time = historic[0,0]
i = searchsorted(ancient[0], historic_start_time)
bundled = concatenate([ancient[:,:i], historic], axis=1)
# Let recent data replace everything else.
recent_start_time = delta_t_recent[0,0]
i = searchsorted(bundled[0], recent_start_time)
row = ((0,),(0,))
table = concatenate([row, bundled[:,:i], delta_t_recent, row], axis=1)
# Create initial and final point to provide continuity with formula.
century = 36524.0
start = table[0,1] - century
table[:,0] = start, delta_t_formula_morrison_and_stephenson_2004(start)
end = table[0,-2] + century
table[:,-1] = end, delta_t_formula_morrison_and_stephenson_2004(end)
return table | python | def build_delta_t_table(delta_t_recent):
"""Build a table for interpolating Delta T.
Given a 2xN array of recent Delta T values, whose element 0 is a
sorted array of TT Julian dates and element 1 is Delta T values,
this routine returns a more complete table by prepending two
built-in data sources that ship with Skyfield as pre-built arrays:
* The historical values from Morrison and Stephenson (2004) which
the http://eclipse.gsfc.nasa.gov/SEcat5/deltat.html NASA web page
presents in an HTML table.
* The United States Naval Observatory ``historic_deltat.data``
values for Delta T over the years 1657 through 1984.
"""
ancient = load_bundled_npy('morrison_stephenson_deltat.npy')
historic = load_bundled_npy('historic_deltat.npy')
# Prefer USNO over Morrison and Stephenson where they overlap.
historic_start_time = historic[0,0]
i = searchsorted(ancient[0], historic_start_time)
bundled = concatenate([ancient[:,:i], historic], axis=1)
# Let recent data replace everything else.
recent_start_time = delta_t_recent[0,0]
i = searchsorted(bundled[0], recent_start_time)
row = ((0,),(0,))
table = concatenate([row, bundled[:,:i], delta_t_recent, row], axis=1)
# Create initial and final point to provide continuity with formula.
century = 36524.0
start = table[0,1] - century
table[:,0] = start, delta_t_formula_morrison_and_stephenson_2004(start)
end = table[0,-2] + century
table[:,-1] = end, delta_t_formula_morrison_and_stephenson_2004(end)
return table | [
"def",
"build_delta_t_table",
"(",
"delta_t_recent",
")",
":",
"ancient",
"=",
"load_bundled_npy",
"(",
"'morrison_stephenson_deltat.npy'",
")",
"historic",
"=",
"load_bundled_npy",
"(",
"'historic_deltat.npy'",
")",
"# Prefer USNO over Morrison and Stephenson where they overlap.... | Build a table for interpolating Delta T.
Given a 2xN array of recent Delta T values, whose element 0 is a
sorted array of TT Julian dates and element 1 is Delta T values,
this routine returns a more complete table by prepending two
built-in data sources that ship with Skyfield as pre-built arrays:
* The historical values from Morrison and Stephenson (2004) which
the http://eclipse.gsfc.nasa.gov/SEcat5/deltat.html NASA web page
presents in an HTML table.
* The United States Naval Observatory ``historic_deltat.data``
values for Delta T over the years 1657 through 1984. | [
"Build",
"a",
"table",
"for",
"interpolating",
"Delta",
"T",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/timelib.py#L803-L839 | train | 224,798 |
skyfielders/python-skyfield | skyfield/timelib.py | Timescale.utc | def utc(self, year, month=1, day=1, hour=0, minute=0, second=0.0):
"""Build a `Time` from a UTC calendar date.
You can either specify the date as separate components, or
provide a time zone aware Python datetime. The following two
calls are equivalent (the ``utc`` time zone object can be
imported from the ``skyfield.api`` module, or from ``pytz`` if
you have it)::
ts.utc(2014, 1, 18, 1, 35, 37.5)
ts.utc(datetime(2014, 1, 18, 1, 35, 37, 500000, tzinfo=utc))
Note that only by passing the components separately can you
specify a leap second, because a Python datetime will not allow
the value 60 in its seconds field.
"""
if isinstance(year, datetime):
dt = year
tai = _utc_datetime_to_tai(self.leap_dates, self.leap_offsets, dt)
elif isinstance(year, date):
d = year
tai = _utc_date_to_tai(self.leap_dates, self.leap_offsets, d)
elif hasattr(year, '__len__') and isinstance(year[0], datetime):
# TODO: clean this up and better document the possibilities.
list_of_datetimes = year
tai = array([
_utc_datetime_to_tai(self.leap_dates, self.leap_offsets, dt)
for dt in list_of_datetimes])
else:
tai = _utc_to_tai(self.leap_dates, self.leap_offsets,
_to_array(year), _to_array(month),
_to_array(day), _to_array(hour),
_to_array(minute), _to_array(second))
t = Time(self, tai + tt_minus_tai)
t.tai = tai
return t | python | def utc(self, year, month=1, day=1, hour=0, minute=0, second=0.0):
"""Build a `Time` from a UTC calendar date.
You can either specify the date as separate components, or
provide a time zone aware Python datetime. The following two
calls are equivalent (the ``utc`` time zone object can be
imported from the ``skyfield.api`` module, or from ``pytz`` if
you have it)::
ts.utc(2014, 1, 18, 1, 35, 37.5)
ts.utc(datetime(2014, 1, 18, 1, 35, 37, 500000, tzinfo=utc))
Note that only by passing the components separately can you
specify a leap second, because a Python datetime will not allow
the value 60 in its seconds field.
"""
if isinstance(year, datetime):
dt = year
tai = _utc_datetime_to_tai(self.leap_dates, self.leap_offsets, dt)
elif isinstance(year, date):
d = year
tai = _utc_date_to_tai(self.leap_dates, self.leap_offsets, d)
elif hasattr(year, '__len__') and isinstance(year[0], datetime):
# TODO: clean this up and better document the possibilities.
list_of_datetimes = year
tai = array([
_utc_datetime_to_tai(self.leap_dates, self.leap_offsets, dt)
for dt in list_of_datetimes])
else:
tai = _utc_to_tai(self.leap_dates, self.leap_offsets,
_to_array(year), _to_array(month),
_to_array(day), _to_array(hour),
_to_array(minute), _to_array(second))
t = Time(self, tai + tt_minus_tai)
t.tai = tai
return t | [
"def",
"utc",
"(",
"self",
",",
"year",
",",
"month",
"=",
"1",
",",
"day",
"=",
"1",
",",
"hour",
"=",
"0",
",",
"minute",
"=",
"0",
",",
"second",
"=",
"0.0",
")",
":",
"if",
"isinstance",
"(",
"year",
",",
"datetime",
")",
":",
"dt",
"=",
... | Build a `Time` from a UTC calendar date.
You can either specify the date as separate components, or
provide a time zone aware Python datetime. The following two
calls are equivalent (the ``utc`` time zone object can be
imported from the ``skyfield.api`` module, or from ``pytz`` if
you have it)::
ts.utc(2014, 1, 18, 1, 35, 37.5)
ts.utc(datetime(2014, 1, 18, 1, 35, 37, 500000, tzinfo=utc))
Note that only by passing the components separately can you
specify a leap second, because a Python datetime will not allow
the value 60 in its seconds field. | [
"Build",
"a",
"Time",
"from",
"a",
"UTC",
"calendar",
"date",
"."
] | 51d9e042e06457f6b1f2415296d50a38cb3a300f | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/timelib.py#L91-L127 | train | 224,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.